code stringlengths 101 5.91M |
|---|
_module()
class ClevrDataset(MInstrDataset):
    """CLEVR visual question-answering dataset with optional scene-graph grounding.

    ``version`` is a ``'<qtype>-<atype>'`` string where qtype is always ``'q'``
    (plain question) and atype selects the answer style:

    * ``'a'``  -- answer only,
    * ``'s'``  -- chain-of-thought built from the scene graph (no box refs),
    * ``'bs'`` -- chain-of-thought with bounding-box references.

    ``scene_graph_file`` (may be None) holds one JSON scene per line, indexed
    by the question's ``image_index``.
    """

    def __init__(self, *args, scene_graph_file, version, **kwargs):
        super().__init__(*args, **kwargs, placeholders=(IMAGE_PLACEHOLDER, QUESTION_PLACEHOLDER))
        self.scene_graph_file = scene_graph_file
        self.version = version
        qtype, atype = version.split('-')
        assert qtype in ['q']
        assert atype in ['a', 's', 'bs']
        self.qtype = qtype
        self.atype = atype
        if scene_graph_file is None:
            self.scene_graph = None
        else:
            # Read all scene-graph lines up front.  Use a context manager so
            # the file handle is closed (the original left it open).
            with open(scene_graph_file, 'r', encoding='utf8') as f:
                self.scene_graph = f.readlines()

    def get_raw_item(self, index):
        """Return ``(question_dict, scene_dict_or_None)`` for sample *index*."""
        question = json.loads(self.data[index])
        if self.scene_graph is None:
            scene = None
        else:
            # One JSON scene per line, addressed by the question's image index.
            scene = json.loads(self.scene_graph[question['image_index']])
        return question, scene

    def __getitem__(self, index):
        """Build a conversation sample: image, target boxes, human/gpt turns."""
        question, scene = self.get_raw_item(index)
        img_path = question['image_filename']
        image = self.get_image(img_path)
        if self.atype == 'a':
            # Plain answer, no grounding boxes.
            boxes = []
            answer = f"The answer is {question['answer']}."
            answer_boxes_seq = []
        elif self.atype == 's':
            # Scene-graph chain-of-thought without box references.
            answer, boxes, answer_boxes_seq = clevr_ss_cot(obj=question, scene=scene, add_ref=False)
            answer += f" The answer is {question['answer']}."
        elif self.atype == 'bs':
            # Scene-graph chain-of-thought with box references.
            answer, boxes, answer_boxes_seq = clevr_ss_cot(obj=question, scene=scene, add_ref=True)
            answer += f" The answer is {question['answer']}."
        else:
            assert False
        if self.qtype == 'q':
            query_boxes_seq = []
            final_query = self.get_template().replace(QUESTION_PLACEHOLDER, question['question'])
        else:
            assert False
        ret = {
            'image': image,
            'target': {'points': boxes},
            'conversations': [
                {'from': 'human', 'value': final_query, 'points_seq': query_boxes_seq},
                {'from': 'gpt', 'value': answer, 'points_seq': answer_boxes_seq},
            ],
        }
        return ret
def set_device(model, device):
    """Place *model* on the requested device(s) and pick tensor factory types.

    Parameters
    ----------
    model : torch.nn.Module (or compatible)
        Model to move.
    device : int or list of int
        1-based GPU index; ``0`` (or any other value) selects CPU.  A list of
        1-based indices enables ``nn.DataParallel`` over those GPUs.

    Returns
    -------
    (model, floatTensor, longTensor)
        The (possibly wrapped) model plus the matching tensor constructors.

    Bug fixed: the original left ``floatTensor``/``longTensor`` unbound for
    the CPU case (``device == 0``) or any unrecognized *device*, raising
    UnboundLocalError at the ``return``.  We now default to CPU tensor types.
    """
    # Default to CPU tensor types; overridden below when a GPU is selected.
    floatTensor = torch.FloatTensor
    longTensor = torch.LongTensor
    if type(device) is int:
        if device > 0:
            # 1-based GPU index -> 0-based CUDA device.
            torch.cuda.set_device(device - 1)
            model.cuda(device - 1)
            floatTensor = torch.cuda.FloatTensor
            longTensor = torch.cuda.LongTensor
    elif type(device) is list:
        # Multi-GPU: DataParallel over the given (1-based) device indices.
        devices = [i - 1 for i in device]
        torch.cuda.set_device(devices[0])
        model = nn.DataParallel(model, device_ids=devices).cuda()
        floatTensor = torch.cuda.FloatTensor
        longTensor = torch.cuda.LongTensor
    return model, floatTensor, longTensor
class Ticktock(MutableSequence):
    """Mutable sequence of times convertible among multiple time systems.

    The native representation lives in ``self.data`` (a dmarray tagged with a
    ``'dtype'`` attribute naming its time system).  Other representations
    (UTC, TAI, ISO, JD, MJD, UNX, RDT, CDF, GPS, DOY, eDOY, leap seconds,
    and astropy Time when available) are computed lazily on attribute access
    via ``__getattr__`` and cached as instance attributes.

    NOTE(review): several numeric epoch constants in this block appear
    truncated by extraction (bare ``.0`` / ``(- .0)`` literals and empty
    ``GPS0 =`` assignments, which are syntax errors).  The intended values
    must be restored from the upstream source before this class can run.
    """

    # Recognized time-system keys; 'APT' (astropy.time.Time) is appended only
    # when astropy imported successfully.
    _keylist = ['UTC', 'TAI', 'ISO', 'JD', 'MJD', 'UNX', 'RDT', 'CDF', 'GPS', 'DOY', 'eDOY', 'leaps']
    if HAVE_ASTROPY:
        _keylist.append('APT')
    _keylist_upper = [key.upper() for key in _keylist]
    # Supported ISO output precisions (strftime formats).
    _isoformatstr = {'seconds': '%Y-%m-%dT%H:%M:%S', 'microseconds': '%Y-%m-%dT%H:%M:%S.%f'}

    def __init__(self, data, dtype=None, isoformat=None):
        """Create a Ticktock from *data*.

        data : scalar, sequence, or another Ticktock (shares its array).
        dtype : key from ``_keylist``, a callable applied elementwise to
            convert *data*, or None to guess from the first element.
        isoformat : strftime format for ISO output (default seconds precision).
        """
        self._isofmt = (isoformat or self._isoformatstr['seconds'])
        if isinstance(data, Ticktock):
            # Copy-like constructor: reuse the existing array and dtype tag.
            dtype = data.data.attrs['dtype']
            self.data = data.data
        else:
            try:
                # Probe whether *data* is already indexable (a sequence).
                spacepy.datamodel.dmarray(data)[0]
            except IndexError:
                # Scalar input: wrap in a length-1 array.
                self.data = spacepy.datamodel.dmarray([data])
            else:
                self.data = spacepy.datamodel.dmarray(data)
        if (not isinstance(dtype, Callable)):
            # Guess the time system from the first element's type.
            if isinstance(self.data[0], (str, bytes)):
                dtype = 'ISO'
            elif isinstance(self.data[0], datetime.datetime):
                dtype = 'UTC'
            elif (HAVE_ASTROPY and isinstance(self.data[0], astropy.time.Time)):
                dtype = 'APT'
                # Keep astropy scalars as a length-1 Time rather than dmarray.
                self.data = (astropy.time.Time([data]) if (data.shape == ()) else data)
            elif (self.data[0] > .0):
                # Bare positive numbers default to CDF epoch values.
                dtype = 'CDF'
            elif (dtype is None):
                raise ValueError('Unable to guess dtype from data; please specify dtype.')
            if (dtype.upper() not in Ticktock._keylist_upper):
                raise ValueError(((('data type ' + dtype) + ' not provided, only ') + str(Ticktock._keylist)))
        else:
            # dtype is a callable converter: vectorize and apply it.
            dtype_func = np.vectorize(dtype)
            self.data = dtype_func(self.data)
            self.UTC = no_tzinfo(self.data)
        if (not hasattr(self.data, 'attrs')):
            self.data.attrs = {}
        try:
            self.data.attrs['dtype'] = dtype.upper()
        except AttributeError:
            # dtype was a callable (no .upper); record its repr instead.
            self.data.attrs['dtype'] = str(dtype_func)
        else:
            # Named dtype: populate derived representations and cache the
            # native one under its own attribute name.
            self.update_items('data')
            if (dtype.upper() == 'TAI'):
                self.TAI = self.data
            elif (dtype.upper() == 'JD'):
                self.JD = self.data
            elif (dtype.upper() == 'MJD'):
                self.MJD = self.data
            elif (dtype.upper() == 'UNX'):
                self.UNX = self.data
            elif (dtype.upper() == 'RDT'):
                self.RDT = self.data
            elif (dtype.upper() == 'CDF'):
                self.CDF = self.data
            elif (dtype.upper() == 'UTC'):
                self.UTC = no_tzinfo(self.data)
            elif (dtype.upper() == 'APT'):
                self.APT = self.data

    def __str__(self):
        """Human-readable representation including the native dtype tag."""
        return ((('Ticktock( ' + str(self.data)) + ', dtype=') + str((self.data.attrs['dtype'] + ')')))
    __repr__ = __str__

    def __getstate__(self):
        # Pickle the plain instance dict.
        odict = self.__dict__.copy()
        return odict

    def __setstate__(self, dict):
        self.__dict__.update(dict)
        return

    def __getitem__(self, idx):
        """Slice/index via the UTC representation, returning a new Ticktock."""
        return Ticktock(self.UTC[idx])

    def __setitem__(self, idx, vals):
        """Assign *vals* (anything Ticktock accepts) in the native dtype."""
        tmp = Ticktock(vals)
        if (len(tmp) > 1):
            self.data[idx] = tmp.__getattribute__(self.data.attrs['dtype'])[:]
        else:
            self.data[idx] = tmp.__getattribute__(self.data.attrs['dtype'])[0]
        # Re-derive all cached representations from the modified data.
        self.update_items('data')

    def __delitem__(self, idx):
        self.data = np.delete(self.data, idx)
        self.update_items('data')

    def __len__(self):
        return len(self.data)

    # Rich comparisons are performed on the UNX (Unix seconds) representation;
    # bare datetimes are promoted to Ticktock first.
    def __gt__(self, other):
        if isinstance(other, datetime.datetime):
            other = Ticktock(other, 'UTC')
        return (self.UNX > other.UNX)

    def __lt__(self, other):
        if isinstance(other, datetime.datetime):
            other = Ticktock(other, 'UTC')
        return (self.UNX < other.UNX)

    def __ge__(self, other):
        if isinstance(other, datetime.datetime):
            other = Ticktock(other, 'UTC')
        return (self.UNX >= other.UNX)

    def __le__(self, other):
        if isinstance(other, datetime.datetime):
            other = Ticktock(other, 'UTC')
        return (self.UNX <= other.UNX)

    def __eq__(self, other):
        if isinstance(other, datetime.datetime):
            other = Ticktock(other, 'UTC')
        return (self.UNX == other.UNX)

    def __ne__(self, other):
        if isinstance(other, datetime.datetime):
            other = Ticktock(other, 'UTC')
        return (self.UNX != other.UNX)

    def __sub__(self, other):
        """Subtract a timedelta (shift), a Ticktock (per-element timedeltas),
        or an iterable of timedeltas (elementwise shift)."""
        if isinstance(other, datetime.timedelta):
            newobj = Ticktock((self.UTC - other), 'UTC')
        elif isinstance(other, Ticktock):
            if ((not (len(other) == len(self.data))) and (not (len(other) == 1))):
                raise ValueError('Ticktock lengths are mismatched, subtraction is not possible')
            # 'same' distinguishes elementwise vs broadcast-scalar subtraction.
            same = True
            if (len(other) == 1):
                same = False
            if same:
                # Differences are computed on the TAI (continuous seconds) axis.
                return [datetime.timedelta(seconds=(t - other.TAI[i])) for (i, t) in enumerate(self.TAI)]
            else:
                return [datetime.timedelta(seconds=(t - other.TAI[0])) for t in self.TAI]
        elif hasattr(other, '__iter__'):
            if (not isinstance(other[0], datetime.timedelta)):
                raise TypeError('Data supplied for addition is of the wrong type')
            if ((not (len(other) == len(self.data))) or (len(other) == 1)):
                raise TypeError('Data supplied for addition is of the wrong shape')
            same = True
            if (len(other) == 1):
                same = False
            if same:
                newUTC = [(utc - o) for (utc, o) in zip(self.UTC, other)]
            else:
                newUTC = [(utc - other) for utc in self.UTC]
            newobj = Ticktock(newUTC, 'UTC')
        else:
            raise TypeError('unsupported operand type(s) for -: {0} and {1}'.format(type(other), type(self)))
        return newobj

    def __add__(self, other):
        """Add a timedelta or an iterable of timedeltas (elementwise)."""
        if isinstance(other, datetime.timedelta):
            newobj = Ticktock((self.UTC + other), 'UTC')
        elif hasattr(other, '__iter__'):
            if (not isinstance(other[0], datetime.timedelta)):
                raise TypeError('Data supplied for addition is of the wrong type')
            if ((not (len(other) == len(self.data))) or (len(other) == 1)):
                raise TypeError('Data supplied for addition is of the wrong shape')
            same = True
            if (len(other) == 1):
                same = False
            if same:
                newUTC = [(utc + o) for (utc, o) in zip(self.UTC, other)]
            else:
                newUTC = [(utc + other) for utc in self.UTC]
            newobj = Ticktock(newUTC, 'UTC')
        else:
            raise TypeError('unsupported operand type(s) for +: {0} and {1}'.format(type(other), type(self)))
        return newobj

    def __radd__(self, other):
        return self.__add__(other)

    def __getattr__(self, name):
        """Lazily compute, cache, and return a time-system representation.

        Only keys in ``_keylist`` are valid; anything else raises
        AttributeError.  After the first access the value is a normal
        instance attribute, so __getattr__ is not consulted again.
        """
        if (name not in Ticktock._keylist):
            raise AttributeError('data type {} not provided, only {}'.format(str(name), str(Ticktock._keylist)))
        if (name.upper() == 'TAI'):
            self.TAI = self.getTAI()
        if (name.upper() == 'UTC'):
            self.UTC = self.getUTC()
        if (name.upper() == 'ISO'):
            self.ISO = self.getISO()
        if (name.upper() == 'JD'):
            self.JD = self.getJD()
        if (name.upper() == 'MJD'):
            self.MJD = self.getMJD()
        if (name.upper() == 'UNX'):
            self.UNX = self.getUNX()
        if (name.upper() == 'RDT'):
            self.RDT = self.getRDT()
        if (name.upper() == 'CDF'):
            self.CDF = self.getCDF()
        if (name.upper() == 'DOY'):
            self.DOY = self.getDOY()
        if (name.upper() == 'EDOY'):
            self.eDOY = self.geteDOY()
        if (name.upper() == 'GPS'):
            self.GPS = self.getGPS()
        if (name.upper() == 'APT'):
            self.APT = self.getAPT()
        if (name == 'leaps'):
            self.leaps = self.getleapsecs()
        return getattr(self, name)

    def insert(self, idx, val, dtype=None):
        """Insert *val* at position *idx*, converting it to the native dtype."""
        fmt = self.data.attrs['dtype']
        if (not dtype):
            dum = Ticktock(val)
        else:
            dum = Ticktock(val, dtype=dtype)
        ival = getattr(dum, fmt)
        self.data = np.insert(self.data, idx, ival)
        self.update_items('data')

    def remove(self, idx):
        """Remove the element at *idx* (MutableSequence API)."""
        del self[idx]

    def sort(self, kind='quicksort'):
        """Sort in place chronologically (by TAI)."""
        idx = self.argsort(kind=kind)
        self.data = self.data[idx]
        self.update_items('data')

    def argsort(self, kind='quicksort'):
        """Return the indices that would sort the times (by TAI)."""
        return np.argsort(self.TAI, kind=kind)

    def isoformat(self, fmt=None):
        """Show (fmt=None) or set the ISO output precision ('seconds' or
        'microseconds') and refresh cached representations."""
        if (fmt is None):
            print(('Current ISO output format is %s' % self._isofmt))
            print('Options are: {0}'.format([(k, Ticktock._isoformatstr[k]) for k in list(Ticktock._isoformatstr.keys())]))
        else:
            try:
                self._isofmt = Ticktock._isoformatstr[fmt]
                self.update_items('data')
            except KeyError:
                raise ValueError('Not a valid option: Use {0}'.format(list(Ticktock._isoformatstr.keys())))

    def update_items(self, attrib):
        """Recompute every cached representation after *attrib* changed.

        When *attrib* is not 'data', the native array is first rebuilt from
        the changed representation; then all previously-cached attributes are
        refreshed from the new native data.
        """
        # Snapshot which representations are currently cached on the instance.
        keylist = dir(self)
        keylist.remove('data')
        if (attrib != 'data'):
            keylist.remove(attrib)
            attrib = attrib.upper()
            if (attrib != self.data.attrs['dtype']):
                # Rebuild native data from the modified representation.
                cls = type(self)
                dt = self.data.attrs['dtype']
                self.data = getattr(cls(getattr(self, attrib), dtype=attrib), dt)
        if (self.data.attrs['dtype'] in ('TAI', 'GPS', 'JD', 'MJD', 'RDT', 'CDF', 'UNX', 'ISO', 'APT')):
            if (self.data.attrs['dtype'] == 'ISO'):
                # Drop stale UTC/ISO caches so getTAI re-parses the strings.
                if ('UTC' in keylist):
                    del self.UTC
                if ('ISO' in keylist):
                    del self.ISO
                    del keylist[keylist.index('ISO')]
            self.TAI = self.getTAI()
            if (('UTC' in keylist) and (self.data.attrs['dtype'] != 'ISO')):
                self.UTC = self.getUTC()
        else:
            self.UTC = self.getUTC()
            if ('TAI' in keylist):
                self.TAI = self.getTAI()
        # Refresh whatever else was cached before the change.
        for key in keylist:
            if (key.upper() == 'ISO'):
                self.ISO = self.getISO()
            if (key.upper() == 'JD'):
                self.JD = self.getJD()
            if (key.upper() == 'MJD'):
                self.MJD = self.getMJD()
            if (key.upper() == 'UNX'):
                self.UNX = self.getUNX()
            if (key.upper() == 'RDT'):
                self.RDT = self.getRDT()
            if (key.upper() == 'CDF'):
                self.CDF = self.getCDF()
            if (key.upper() == 'DOY'):
                self.DOY = self.getDOY()
            if (key.upper() == 'EDOY'):
                self.eDOY = self.geteDOY()
            if (key.upper() == 'GPS'):
                self.GPS = self.getGPS()
            if (key.upper() == 'APT'):
                self.APT = self.getAPT()
            if (key == 'leaps'):
                self.leaps = self.getleapsecs()
        return

    def convert(self, dtype):
        """Return a new Ticktock whose native representation is *dtype*."""
        newdat = getattr(self, dtype)
        return Ticktock(newdat, dtype)

    def append(self, other):
        """Return a new Ticktock with *other*'s times appended (converted to
        this instance's native dtype)."""
        otherdata = getattr(other, self.data.attrs['dtype'])
        return Ticktock(np.append(self.data, otherdata), dtype=self.data.attrs['dtype'])

    def getCDF(self):
        """Return times as CDF epoch values (cached in self.CDF)."""
        if (self.data.attrs['dtype'] == 'CDF'):
            self.CDF = self.data
            return self.CDF
        # NOTE(review): this literal looks truncated by extraction ('.0');
        # it should be the CDF-epoch offset of 1958-01-01 in milliseconds.
        CDFofTAI0 = .0
        naive_tai = _tai_real_to_naive(self.TAI)
        cdf = ((naive_tai * 1000.0) + CDFofTAI0)
        self.CDF = spacepy.datamodel.dmarray(cdf, attrs={'dtype': 'CDF'})
        return self.CDF

    def getDOY(self):
        """Return integer day-of-year for each time (cached in self.DOY)."""
        DOY = [((utc.toordinal() - datetime.date(utc.year, 1, 1).toordinal()) + 1) for utc in self.UTC]
        self.DOY = spacepy.datamodel.dmarray(DOY, attrs={'dtype': 'DOY'}).astype(int)
        return self.DOY

    def geteDOY(self):
        """Return fractional (elapsed) day-of-year, zero-based."""
        eDOY = [(utc.toordinal() - datetime.date(utc.year, 1, 1).toordinal()) for utc in self.UTC]
        # NOTE(review): '(utc.microsecond / .0)' divides by zero — the divisor
        # (microseconds per day, 86400000000.0) was lost in extraction.
        eDOY = [((((edoy + (utc.hour / 24.0)) + (utc.minute / 1440.0)) + (utc.second / 86400.0)) + (utc.microsecond / .0)) for (edoy, utc) in zip(eDOY, self.UTC)]
        self.eDOY = spacepy.datamodel.dmarray(eDOY, attrs={'dtype': 'eDOY'})
        return self.eDOY

    def getJD(self):
        """Return Julian Date (cached in self.JD)."""
        if (self.data.attrs['dtype'] == 'JD'):
            self.JD = self.data
            return self.JD
        # JD of the TAI epoch 1958-01-01.
        JDofTAI0 = 2436205.0
        self.JD = (_days1958(self.TAI, leaps='rubber') + JDofTAI0)
        return self.JD

    def getMJD(self):
        """Return Modified Julian Date (cached in self.MJD)."""
        if (self.data.attrs['dtype'] == 'MJD'):
            self.MJD = self.data
            return self.MJD
        # 36204.5 is the MJD of the TAI epoch 1958-01-01.
        self.MJD = (_days1958(self.TAI, leaps='rubber') + 36204.5)
        return self.MJD

    def getUNX(self):
        """Return Unix seconds (cached in self.UNX)."""
        if (self.data.attrs['dtype'] == 'UNX'):
            self.UNX = self.data
            return self.UNX
        naive_tai = _tai_real_to_naive(self.TAI)
        # NOTE(review): literal truncated ('(- .0)'); expected the negative
        # second count from 1958-01-01 to 1970-01-01 (-378691200.0).
        UNXofTAI0 = (- .0)
        unx = (naive_tai + UNXofTAI0)
        self.UNX = spacepy.datamodel.dmarray(unx, attrs={'dtype': 'UNX'})
        return self.UNX

    def getRDT(self):
        """Return Rata Die (day-count) time (cached in self.RDT)."""
        if (self.data.attrs['dtype'] == 'RDT'):
            self.RDT = self.data
            return self.RDT
        # 714780.0 is the proleptic-Gregorian ordinal of 1958-01-01.
        RDTTAI0 = 714780.0
        RDT = (_days1958(self.TAI, leaps='drop', midnight=True) + RDTTAI0)
        # Dates before the Gregorian reform get a 10-day correction.
        RDT[(RDT < 577736.0)] -= 10
        self.RDT = spacepy.datamodel.dmarray(RDT, attrs={'dtype': 'RDT'})
        return self.RDT

    def getUTC(self):
        """Return naive UTC datetimes (cached in self.UTC).

        For numeric dtypes the conversion goes through TAI, applying leap
        seconds; a time falling exactly on a leap second is rendered as
        23:59:59.999999 of the previous second.
        """
        nTAI = len(self.data)
        if (self.data.attrs['dtype'].upper() == 'UTC'):
            UTC = self.data
        elif (self.data.attrs['dtype'].upper() == 'ISO'):
            self.ISO = self.data
            (_, UTC, _) = dtstr2iso(self.data, fmt=self._isofmt)
        elif (self.data.attrs['dtype'].upper() in ('TAI', 'GPS', 'JD', 'MJD', 'RDT', 'CDF', 'UNX', 'APT')):
            TAI0 = datetime.datetime(1958, 1, 1, 0, 0, 0, 0)
            # NOTE(review): the '(- .0)' threshold literal is truncated; the
            # 864000 (= 10 days) correction presumably handles pre-Gregorian
            # dates — confirm against upstream.
            UTC = [(datetime.timedelta(seconds=float((tait - (864000 if (tait < (- .0)) else 0)))) + TAI0) for tait in self.TAI]
            for i in np.arange(nTAI):
                # Subtract the accumulated leap seconds in effect at each time.
                idx = (np.searchsorted(TAIleaps, self.TAI[i], side='right') - 1)
                UTC[i] = (UTC[i] - datetime.timedelta(seconds=(secs[idx] if (idx > 0) else 0)))
                if (int(self.TAI[i]) == TAIleaps[idx]):
                    # Exactly on a leap second: clamp into :59.999999.
                    UTC[i] = UTC[i].replace(second=59, microsecond=999999)
        else:
            warnstr1 = 'Input data type {0} does not support calculation of UTC times'.format(self.data.attrs['dtype'])
            warnstr2 = 'Valid input dtypes are: {0}'.format(', '.join([kk for kk in Ticktock._keylist if (kk not in ['DOY', 'eDOY', 'leaps'])]))
            raise TypeError('{0}\n{1}'.format(warnstr1, warnstr2))
        self.UTC = spacepy.datamodel.dmarray(UTC, attrs={'dtype': 'UTC'})
        return self.UTC

    def getGPS(self):
        """Return GPS seconds (cached in self.GPS)."""
        if (self.data.attrs['dtype'] == 'GPS'):
            self.GPS = self.data
            return self.GPS
        # NOTE(review): right-hand side lost in extraction (syntax error);
        # presumably the TAI seconds of the GPS epoch 1980-01-06
        # (694656019.0) — restore from upstream.
        GPS0 =
        self.GPS = spacepy.datamodel.dmarray((self.TAI - GPS0), attrs={'dtype': 'GPS'})
        return self.GPS

    def getAPT(self):
        """Return an astropy.time.Time (cached in self.APT); requires astropy."""
        if (self.data.attrs['dtype'] == 'APT'):
            self.APT = self.data
            return self.APT
        if (not HAVE_ASTROPY):
            raise RuntimeError('Import of astropy.time failed.')
        # NOTE(review): right-hand side lost in extraction (syntax error);
        # same GPS-epoch offset as in getGPS — restore from upstream.
        GPS0 =
        self.APT = astropy.time.Time((self.TAI - GPS0), scale='tai', format='gps')
        self.APT.attrs = {'dtype': 'APT'}
        return self.APT

    def getTAI(self):
        """Return TAI seconds since 1958-01-01 (cached in self.TAI).

        Each supported native dtype has its own inverse conversion; the
        UTC/ISO path applies leap seconds explicitly.
        """
        if (self.data.attrs['dtype'] == 'TAI'):
            self.TAI = self.data
            return self.TAI
        if (self.data.attrs['dtype'] == 'GPS'):
            # NOTE(review): right-hand side lost in extraction (syntax error);
            # same GPS-epoch offset as in getGPS — restore from upstream.
            GPS0 =
            self.TAI = spacepy.datamodel.dmarray((self.data + GPS0), attrs={'dtype': 'TAI'})
            return self.TAI
        if (self.data.attrs['dtype'] == 'MJD'):
            MJDofTAI0 = 36204.5
            self.TAI = spacepy.datamodel.dmarray(_days1958totai((np.require(self.data, dtype=np.float64) - MJDofTAI0), leaps='rubber', midnight=False), attrs={'dtype': 'TAI'})
            return self.TAI
        if (self.data.attrs['dtype'] == 'JD'):
            JDofTAI0 = 2436205.0
            self.TAI = spacepy.datamodel.dmarray(_days1958totai((np.require(self.data, dtype=np.float64) - JDofTAI0), leaps='rubber', midnight=False), attrs={'dtype': 'TAI'})
            return self.TAI
        if (self.data.attrs['dtype'] == 'RDT'):
            RDTofTAI0 = 714780.0
            tai = _days1958totai((np.require(self.data, dtype=np.float64) - RDTofTAI0), leaps='drop', midnight=True)
            # NOTE(review): '(- .0)' threshold literal truncated; 864000
            # (10 days) presumably undoes the Gregorian-reform correction.
            tai[(tai < (- .0))] += 864000
            self.TAI = spacepy.datamodel.dmarray(tai, attrs={'dtype': 'TAI'})
            return self.TAI
        if (self.data.attrs['dtype'] == 'CDF'):
            # NOTE(review): literal truncated ('.0'); see getCDF.
            CDFofTAI0 = .0
            tai = ((self.data - CDFofTAI0) / 1000.0)
            tai = _tai_naive_to_real(tai)
            self.TAI = spacepy.datamodel.dmarray(tai, attrs={'dtype': 'TAI'})
            return self.TAI
        if (self.data.attrs['dtype'] == 'UNX'):
            # NOTE(review): literal truncated ('(- .0)'); see getUNX.
            UNXofTAI0 = (- .0)
            tai = (self.data - UNXofTAI0)
            tai = _tai_naive_to_real(tai)
            self.TAI = spacepy.datamodel.dmarray(tai, attrs={'dtype': 'TAI'})
            return self.TAI
        if (self.data.attrs['dtype'] == 'APT'):
            # NOTE(review): right-hand side lost in extraction (syntax error);
            # same GPS-epoch offset as in getGPS — restore from upstream.
            GPS0 =
            self.TAI = spacepy.datamodel.dmarray((self.data.gps + GPS0), attrs={'dtype': 'TAI'})
            return self.TAI
        if (self.data.attrs['dtype'] == 'ISO'):
            # Parse strings, caching UTC/ISO if not already present; *offset*
            # carries sub-second corrections for leap-second strings.
            (isoout, UTC, offset) = dtstr2iso(self.data, self._isofmt)
            if ('UTC' not in dir(self)):
                self.UTC = spacepy.datamodel.dmarray(UTC, attrs={'dtype': 'UTC'})
            if ('ISO' not in dir(self)):
                self.ISO = spacepy.datamodel.dmarray(isoout, attrs={'dtype': 'ISO'})
        else:
            UTC = self.UTC
            offset = None
        TAI0 = datetime.datetime(1958, 1, 1, 0, 0, 0, 0)
        leapsec = self.getleapsecs()
        # Elapsed time since the TAI epoch plus accumulated leap seconds.
        TAItup = [((utc - TAI0) + datetime.timedelta(seconds=int(ls))) for (utc, ls) in zip(UTC, leapsec)]
        if (offset is not None):
            TAI = [(((tai.days * 86400) + tai.seconds) + ((tai.microseconds + offset[i]) / 1000000.0)) for (i, tai) in enumerate(TAItup)]
        else:
            TAI = [(((tai.days * 86400) + tai.seconds) + (tai.microseconds / 1000000.0)) for tai in TAItup]
        TAI = spacepy.datamodel.dmarray(TAI, attrs={'dtype': 'TAI'})
        # NOTE(review): '(- .0)' threshold literal truncated; the 10-day
        # (86400 * 10) shift presumably handles pre-Gregorian dates.
        TAI[(TAI < (- .0))] += (86400 * 10)
        self.TAI = TAI
        return self.TAI

    def getISO(self):
        """Return ISO 8601 strings (cached in self.ISO); times falling on a
        leap second are rendered with seconds field '60'."""
        if (self.data.attrs['dtype'] == 'ISO'):
            self.ISO = spacepy.datamodel.dmarray(dtstr2iso(self.data, fmt=self._isofmt)[0], attrs={'dtype': 'ISO'})
            return self.ISO
        nTAI = len(self.data)
        self.TAI = self.getTAI()
        try:
            iso = [utc.strftime(self._isofmt) for utc in self.UTC]
        except ValueError:
            # Some platforms' strftime rejects very early years: substitute
            # the year text manually using 1900 as a stand-in date.
            iso = [utc.replace(year=1900).strftime(self._isofmt.replace('%Y', str(utc.year))) for utc in self.UTC]
        self.ISO = spacepy.datamodel.dmarray(iso, attrs={'dtype': 'ISO'})
        for i in range(nTAI):
            if (int(self.TAI[i]) in TAIleaps):
                # This instant is a leap second: show ':60' in place of ':59'.
                tmpdt = self.UTC[i].replace(microsecond=int(((self.TAI[i] % 1) * 1000000.0)))
                (a, b, c) = tmpdt.strftime(self._isofmt).split(':')
                cnew = c.replace('59', '60')
                self.ISO[i] = ((((a + ':') + b) + ':') + cnew)
        return self.ISO

    def getleapsecs(self):
        """Return the accumulated leap seconds in effect at each time.

        For TAI data the lookup is a direct searchsorted into TAIleaps;
        otherwise it bisects the UTC leap-second effective dates.  Returns a
        scalar int for a single datetime, else an int array (cached in
        ``self.leaps``).
        """
        if (self.data.attrs['dtype'] == 'TAI'):
            idx = (np.searchsorted((TAIleaps + 1), self.data, side='right') - 1)
            return secs[idx]
        tup = self.UTC
        if isinstance(tup, datetime.datetime):
            # Scalar input: normalize to a list, remember to unwrap on return.
            tup = [tup]
            nTAI = 1
            aflag = False
        else:
            nTAI = len(tup)
            aflag = True
        self.TAIleaps = TAIleaps
        leaps = ([secs[0]] * nTAI)
        # Dates on which each leap-second count became effective.
        leap_dates = [datetime.datetime(int(y), int(m), int(d)) for (y, m, d, s) in zip(year, mon, day, secs)]
        for (i, itup) in enumerate(tup):
            ind = bisect.bisect_right(leap_dates, tup[i])
            leaps[i] = (secs[(ind - 1)] if (ind > 0) else 0)
        if (aflag == False):
            self.leaps = int(leaps[0])
            return int(leaps[0])
        else:
            self.leaps = np.array(leaps, dtype=int)
            return self.leaps

    def now(cls):
        """Return the current UTC time as a Ticktock.

        NOTE(review): takes *cls* — presumably decorated @classmethod in the
        original source; the decorator appears lost in extraction.
        """
        try:
            # datetime.UTC exists on Python >= 3.11; fall back to utcnow().
            dt = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
        except AttributeError:
            dt = datetime.datetime.utcnow()
        return Ticktock(dt, 'UTC')

    def today(cls):
        """Return the current UTC date (midnight) as a Ticktock.

        NOTE(review): takes *cls* — presumably a @classmethod originally.
        """
        warnings.warn('today() returns UTC day as of 0.2.2.', DeprecationWarning)
        try:
            dt = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
        except AttributeError:
            dt = datetime.datetime.utcnow()
        dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
        return Ticktock(dt, 'UTC')
class ShowInterfaceStatistics(InformationWindow):
    """Dialog window showing per-interface TX/RX statistics for one ns-3 node.

    Registers itself with the visualizer so its ``update()`` is called each
    refresh; closing the dialog unregisters it.
    """

    # Column indices into the table model.
    (COLUMN_INTERFACE, COLUMN_TX_PACKETS, COLUMN_TX_BYTES, COLUMN_TX_PACKET_RATE, COLUMN_TX_BIT_RATE, COLUMN_RX_PACKETS, COLUMN_RX_BYTES, COLUMN_RX_PACKET_RATE, COLUMN_RX_BIT_RATE) = range(9)

    def __init__(self, visualizer, node_index, statistics_collector):
        """Build the dialog, create the treeview columns, and register with
        the visualizer.

        visualizer: the main visualizer (provides the parent window and the
            information-window registry).
        node_index: index of the node whose interfaces are displayed.
        statistics_collector: source of per-interface statistics.
        """
        InformationWindow.__init__(self)
        self.win = Gtk.Dialog(parent=visualizer.window, flags=Gtk.DialogFlags.DESTROY_WITH_PARENT, buttons=(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
        self.win.connect('response', self._response_cb)
        self.win.set_title(('Statistics for node %i' % node_index))
        self.visualizer = visualizer
        self.statistics_collector = statistics_collector
        self.node_index = node_index
        self.viz_node = visualizer.get_node(node_index)
        # 13 string columns are allocated although only 9 are populated here —
        # presumably reserved for additional fields; confirm against upstream.
        self.table_model = Gtk.ListStore(*([str] * 13))
        treeview = Gtk.TreeView(self.table_model)
        treeview.show()
        self.win.vbox.add(treeview)

        def add_column(descr, colid):
            # Helper: one text column bound to model column *colid*.
            column = Gtk.TreeViewColumn(descr, Gtk.CellRendererText(), text=colid)
            treeview.append_column(column)
        add_column('Interface', self.COLUMN_INTERFACE)
        add_column('Tx Packets', self.COLUMN_TX_PACKETS)
        add_column('Tx Bytes', self.COLUMN_TX_BYTES)
        add_column('Tx pkt/1s', self.COLUMN_TX_PACKET_RATE)
        add_column('Tx bit/1s', self.COLUMN_TX_BIT_RATE)
        add_column('Rx Packets', self.COLUMN_RX_PACKETS)
        add_column('Rx Bytes', self.COLUMN_RX_BYTES)
        add_column('Rx pkt/1s', self.COLUMN_RX_PACKET_RATE)
        add_column('Rx bit/1s', self.COLUMN_RX_BIT_RATE)
        self.visualizer.add_information_window(self)
        self.win.show()

    def _response_cb(self, win, response):
        """Dialog response handler: destroy the window and unregister."""
        self.win.destroy()
        self.visualizer.remove_information_window(self)

    def update(self):
        """Refresh the table from the current interface statistics."""
        node = ns.network.NodeList.GetNode(self.node_index)
        stats_list = self.statistics_collector.get_interface_statistics(self.node_index)
        self.table_model.clear()
        for (iface, stats) in enumerate(stats_list):
            tree_iter = self.table_model.append()
            netdevice = node.GetDevice(iface)
            # Prefer the user-assigned device name; fall back to the index.
            interface_name = ns.core.Names.FindName(netdevice)
            if (not interface_name):
                interface_name = ('(interface %i)' % iface)
            self.table_model.set(tree_iter, self.COLUMN_INTERFACE, interface_name, self.COLUMN_TX_PACKETS, str(stats.txPackets), self.COLUMN_TX_BYTES, str(stats.txBytes), self.COLUMN_TX_PACKET_RATE, str(stats.txPacketRate), self.COLUMN_TX_BIT_RATE, str(stats.txBitRate), self.COLUMN_RX_PACKETS, str(stats.rxPackets), self.COLUMN_RX_BYTES, str(stats.rxBytes), self.COLUMN_RX_PACKET_RATE, str(stats.rxPacketRate), self.COLUMN_RX_BIT_RATE, str(stats.rxBitRate))
class DQN(RLAlgorithm):
    """Deep Q-Network (TF1 graph mode) with optional double-Q learning.

    Builds a Huber-loss TD-error graph over a Q-function and a cloned target
    Q-function, trains from a replay buffer, and periodically syncs the
    target network.
    """

    def __init__(self, env_spec, policy, qf, replay_buffer, exploration_policy=None, steps_per_epoch=20, min_buffer_size=int(10000.0), buffer_batch_size=64, rollout_batch_size=1, n_train_steps=50, max_path_length=None, max_eval_path_length=None, qf_lr=_Default(0.001), qf_optimizer=tf.compat.v1.train.AdamOptimizer, discount=1.0, target_network_update_freq=5, grad_norm_clipping=None, double_q=False, reward_scale=1.0, smooth_return=True, name='DQN'):
        """Store hyperparameters, clone the target network, and build ops.

        Notable arguments: *target_network_update_freq* is in training
        iterations; *grad_norm_clipping* (if set) clips each gradient by
        norm; *double_q* enables Double DQN action selection.
        """
        # Optimizer / loss configuration.
        self._qf_optimizer = qf_optimizer
        self._qf_lr = qf_lr
        self._name = name
        self._target_network_update_freq = target_network_update_freq
        self._grad_norm_clipping = grad_norm_clipping
        self._double_q = double_q
        # Frozen copy of the Q-network used for bootstrap targets.
        self._target_qf = qf.clone('target_qf')
        self._min_buffer_size = min_buffer_size
        self._qf = qf
        self._steps_per_epoch = steps_per_epoch
        self._n_train_steps = n_train_steps
        self._buffer_batch_size = buffer_batch_size
        self._discount = discount
        self._reward_scale = reward_scale
        self._smooth_return = smooth_return
        self.max_path_length = max_path_length
        self._max_eval_path_length = max_eval_path_length
        # Sampling / environment plumbing.
        self.env_spec = env_spec
        self.rollout_batch_size = rollout_batch_size
        self.replay_buffer = replay_buffer
        self.policy = policy
        self.exploration_policy = exploration_policy
        self.sampler_cls = OffPolicyVectorizedSampler
        self.init_opt()

    def init_opt(self):
        """Construct the TF computation graph (TD error, loss, train op).

        Also called from __setstate__ to rebuild the non-picklable compiled
        functions after unpickling.
        """
        action_dim = self.env_spec.action_space.n
        self.episode_rewards = []
        self.episode_qf_losses = []
        with tf.name_scope(self._name):
            # Batch placeholders for (action, reward, done) from the buffer.
            action_t_ph = tf.compat.v1.placeholder(tf.int32, None, name='action')
            reward_t_ph = tf.compat.v1.placeholder(tf.float32, None, name='reward')
            done_t_ph = tf.compat.v1.placeholder(tf.float32, None, name='done')
            with tf.name_scope('update_ops'):
                # Op copying online-network weights into the target network.
                target_update_op = tensor_utils.get_target_ops(self._qf.get_global_vars(), self._target_qf.get_global_vars())
            self._qf_update_ops = tensor_utils.compile_function(inputs=[], outputs=target_update_op)
            with tf.name_scope('td_error'):
                # Q(s, a) for the actions actually taken.
                action = tf.one_hot(action_t_ph, action_dim, on_value=1.0, off_value=0.0)
                q_selected = tf.reduce_sum((self._qf.q_vals * action), axis=1)
                if self._double_q:
                    # Double DQN: online net picks argmax action, target net
                    # evaluates it.
                    target_qval_with_online_q = self._qf.get_qval_sym(self._target_qf.input, self._qf.name)
                    future_best_q_val_action = tf.argmax(target_qval_with_online_q, 1)
                    future_best_q_val = tf.reduce_sum((self._target_qf.q_vals * tf.one_hot(future_best_q_val_action, action_dim, on_value=1.0, off_value=0.0)), axis=1)
                else:
                    future_best_q_val = tf.reduce_max(self._target_qf.q_vals, axis=1)
                # Zero out bootstrap value at terminal transitions.
                q_best_masked = ((1.0 - done_t_ph) * future_best_q_val)
                target_q_values = (reward_t_ph + (self._discount * q_best_masked))
                # Huber loss against a stop-gradient TD target.
                loss = tf.compat.v1.losses.huber_loss(q_selected, tf.stop_gradient(target_q_values))
                loss = tf.reduce_mean(loss)
            with tf.name_scope('optimize_ops'):
                qf_optimizer = make_optimizer(self._qf_optimizer, learning_rate=self._qf_lr)
                if (self._grad_norm_clipping is not None):
                    # Per-variable gradient norm clipping before applying.
                    gradients = qf_optimizer.compute_gradients(loss, var_list=self._qf.get_trainable_vars())
                    for (i, (grad, var)) in enumerate(gradients):
                        if (grad is not None):
                            gradients[i] = (tf.clip_by_norm(grad, self._grad_norm_clipping), var)
                    optimize_loss = qf_optimizer.apply_gradients(gradients)
                else:
                    optimize_loss = qf_optimizer.minimize(loss, var_list=self._qf.get_trainable_vars())
            self._train_qf = tensor_utils.compile_function(inputs=[self._qf.input, action_t_ph, reward_t_ph, done_t_ph, self._target_qf.input], outputs=[loss, optimize_loss])

    def train(self, runner):
        """Run the epoch loop via *runner*; return the last average return.

        Logging/evaluation is only enabled once the replay buffer has reached
        its minimum size.
        """
        last_return = None
        runner.enable_logging = False
        for _ in runner.step_epochs():
            for cycle in range(self._steps_per_epoch):
                runner.step_path = runner.obtain_samples(runner.step_itr)
                for path in runner.step_path:
                    path['rewards'] *= self._reward_scale
                last_return = self.train_once(runner.step_itr, runner.step_path)
                if ((cycle == 0) and (self.replay_buffer.n_transitions_stored >= self._min_buffer_size)):
                    runner.enable_logging = True
                    log_performance(runner.step_itr, obtain_evaluation_samples(self.policy, runner.get_env_copy()), discount=self._discount)
                runner.step_itr += 1
        return last_return

    def train_once(self, itr, paths):
        """Perform one optimization round on *paths*; return mean episode reward."""
        paths = samples_to_tensors(paths)
        epoch = (itr / self._steps_per_epoch)
        self.episode_rewards.extend(paths['undiscounted_returns'])
        last_average_return = np.mean(self.episode_rewards)
        for _ in range(self._n_train_steps):
            # Only start optimizing once the buffer holds enough transitions.
            if (self.replay_buffer.n_transitions_stored >= self._min_buffer_size):
                qf_loss = self.optimize_policy(None)
                self.episode_qf_losses.append(qf_loss)
        if (self.replay_buffer.n_transitions_stored >= self._min_buffer_size):
            if ((itr % self._target_network_update_freq) == 0):
                # Sync target network with the online network.
                self._qf_update_ops()
        if ((itr % self._steps_per_epoch) == 0):
            if (self.replay_buffer.n_transitions_stored >= self._min_buffer_size):
                # Epoch-boundary logging over the last 100 episodes.
                mean100ep_rewards = round(np.mean(self.episode_rewards[(- 100):]), 1)
                mean100ep_qf_loss = np.mean(self.episode_qf_losses[(- 100):])
                tabular.record('Epoch', epoch)
                tabular.record('Episode100RewardMean', mean100ep_rewards)
                tabular.record('{}/Episode100LossMean'.format(self._qf.name), mean100ep_qf_loss)
        return last_average_return

    def optimize_policy(self, samples_data):
        """Sample a minibatch from the replay buffer and take one QF step.

        *samples_data* is unused (kept for interface compatibility).
        Returns the scalar Q-function loss.
        """
        del samples_data
        transitions = self.replay_buffer.sample_transitions(self._buffer_batch_size)
        observations = transitions['observations']
        rewards = transitions['rewards']
        actions = self.env_spec.action_space.unflatten_n(transitions['actions'])
        next_observations = transitions['next_observations']
        dones = transitions['terminals']
        if isinstance(self.env_spec.observation_space, akro.Image):
            # Buffers may store flattened images; restore the image shape.
            if (len(observations.shape[1:]) < len(self.env_spec.observation_space.shape)):
                observations = self.env_spec.observation_space.unflatten_n(observations)
                next_observations = self.env_spec.observation_space.unflatten_n(next_observations)
        (loss, _) = self._train_qf(observations, actions, rewards, dones, next_observations)
        return loss

    def __getstate__(self):
        # Drop compiled TF functions — they are not picklable.
        data = self.__dict__.copy()
        del data['_qf_update_ops']
        del data['_train_qf']
        return data

    def __setstate__(self, state):
        # Rebuild the graph and compiled functions after unpickling.
        self.__dict__ = state
        self.init_opt()
def read_in_data(f):
    """Load a CSV into a DataFrame and parse its 'timestamp' column.

    Parameters
    ----------
    f : str, path, or file-like object
        Anything accepted by ``pd.read_csv``.

    Returns
    -------
    pandas.DataFrame
        The parsed frame with ``timestamp`` converted to datetime64.
    """
    df = pd.read_csv(f)
    # infer_datetime_format is deprecated (and a no-op) since pandas 2.0;
    # format inference is now the default behavior of to_datetime.
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    return df
def network_score(model, sess, encoder_output, target_tokens):
    """Average per-transition decoder score of *target_tokens*.

    Feeds each token to ``model.decode`` (threading decoder state through the
    calls) and accumulates the score the model assigns to the next token.

    Parameters
    ----------
    model : object with a ``decode(sess, encoder_output, tokens, ?, states)``
        method returning ``(scores, _, states)`` where ``scores[0, 0, tok]``
        is the score of *tok*.
    sess, encoder_output : passed through to ``model.decode``.
    target_tokens : iterable of token ids.

    Returns
    -------
    float
        Mean score over the ``len(tokens) - 1`` transitions, or 0.0 when the
        sequence has fewer than two tokens.  (The original divided by zero
        in that case.)
    """
    tokens = list(target_tokens)
    score = 0.0
    states = None
    cnt = 0
    for feed, pick in zip(tokens[:-1], tokens[1:]):
        scores, _, states = model.decode(sess, encoder_output, np.array([feed]), None, states)
        score += float(scores[0, 0, pick])
        cnt += 1
    if cnt == 0:
        # No transitions to score (empty or single-token sequence).
        return 0.0
    return score / cnt
class TestUtilFactorize(unittest.TestCase):
    """Unit tests for ``util.factorize`` and ``util.prod``."""

    def test_prod(self):
        # Every factorization must multiply back to the original value.
        for value in (24, 1024):
            for factors in util.factorize(value, 3):
                self.assertEqual(util.prod(factors), value)

    def test_limits(self):
        # Per-position upper bounds must be respected while the product holds.
        for factors in util.factorize(1024, 3, limits=(10, 20)):
            self.assertLessEqual(factors[0], 10)
            self.assertLessEqual(factors[1], 20)
            self.assertEqual(util.prod(factors), 1024)

    def test_len(self):
        # A square-free product of 4 primes has k^4 ordered k-way splits.
        squarefree = 2 * 3 * 5 * 7
        self.assertEqual(len(list(util.factorize(squarefree, 2))), 2 ** 4)
        self.assertEqual(len(list(util.factorize(squarefree, 3))), 3 ** 4)
        # Factorizations are generated without duplicates.
        for value in (24, 1024, 2 ** 4 * 3 ** 5 * 5 ** 2):
            splits = list(util.factorize(value, 2))
            self.assertEqual(len(splits), len(set(splits)))

    def test_factors(self):
        # 2-way splits of 24 touch exactly the divisors of 24.
        seen2 = set()
        for factors in util.factorize(24, 2):
            seen2.update(factors)
        self.assertSetEqual(seen2, {1, 2, 3, 4, 6, 8, 12, 24})
        # 3-way splits touch the same divisor set.
        seen3 = set()
        for factors in util.factorize(24, 3):
            seen3.update(factors)
        self.assertSetEqual(seen2, seen3)

    def test_perm(self):
        # Count ordered splits by grouping them into unordered multisets and
        # summing the number of distinct orderings of each.
        ordered = set()
        unordered = set()
        for factors in util.factorize(512, 3):
            ordered.add(factors)
            unordered.add(frozenset(factors))
        expected = 0
        for group in unordered:
            if len(group) == 3:
                expected += math.factorial(3)  # all distinct: 3! orderings
            elif len(group) == 2:
                expected += 3  # one repeated factor: 3 orderings
            else:
                expected += 1  # all equal: a single ordering
        self.assertEqual(len(ordered), expected)
def _get_thread_context():
    """Return a hash identifying the current execution context.

    The context is the current thread, extended with the current greenlet
    when the optional ``greenlet`` module is available.
    """
    thread = threading.current_thread()
    if greenlet:
        return hash((thread, greenlet.getcurrent()))
    return hash((thread,))
def _solve_gbuf_reside(nested_loop_desc, resource, reside_dce):
    """Heuristically choose loop blocking factors and orders so the
    *reside_dce* data category stays resident in the gbuf.

    Returns ``(bl_ts, bl_ords)``: per-level blocking factors (one tuple per
    blocking level, one entry per loop dimension) and the loop orders of the
    two blocking levels.

    NOTE(review): if no factorization satisfies the opt-1 constraints below,
    ``tx0``/``ty0`` are never bound and the later code raises NameError —
    confirm callers always provide a feasible configuration.
    """
    # ldce: data categories ordered [resident, other, other];
    # llpe: the matching loop dimensions; lfacc: relative access-cost factors.
    ldce = [reside_dce]
    llpe = []
    lfacc = []
    if (ldce[0] == de.FIL):
        llpe += [le.IFM, le.OFM, le.BAT]
        ldce += [de.OFM, de.IFM]
        lfacc += [1.0, 2.0, 1.0]
    elif (ldce[0] == de.IFM):
        llpe += [le.IFM, le.BAT, le.OFM]
        ldce += [de.OFM, de.FIL]
        lfacc += [1.0, 2.0, 1.0]
    else:
        assert (ldce[0] == de.OFM)
        llpe += [le.OFM, le.BAT, le.IFM]
        ldce += [de.IFM, de.FIL]
        lfacc += [2.0, 1.0, 1.0]
    # Loop trip counts and per-unit buffer sizes in the chosen order.
    lnum = [nested_loop_desc.loopcnt[lpe] for lpe in llpe]
    lsgbuf = [nested_loop_desc.usize_gbuf_of(dce) for dce in ldce]
    lsregf = [nested_loop_desc.usize_regf_of(dce) for dce in ldce]
    (size_gbuf, size_regf) = (resource.size_gbuf, resource.size_regf)

    def goal_opt1(tx0, ty0):
        # Weighted total access volume for the outer-level split (tx0, ty0).
        lnumloops = [(lnum[0] * lnum[1]), (lnum[1] * lnum[2]), (lnum[0] * lnum[2])]
        ltloops = [1, tx0, ty0]
        return sum((util.prod(tpl) for tpl in zip(lnumloops, lsgbuf, lfacc, ltloops)))

    def constraints_opt1(tx0, ty0):
        # Resident data of the inner blocks must fit in gbuf ...
        if ((((lnum[0] // tx0) * (lnum[1] // ty0)) * lsgbuf[0]) > size_gbuf):
            return False
        # ... and at least one regf arrangement must fit in regf.
        if (min((((lnum[0] // tx0) * (lsregf[0] + lsregf[2])) + lsregf[1]), (((lnum[1] // ty0) * (lsregf[0] + lsregf[1])) + lsregf[2])) > size_regf):
            return False
        return True

    # Exhaustively search outer-level factor pairs minimizing goal_opt1.
    min_goal = float('inf')
    for (tx0_, _) in util.factorize(lnum[0], 2):
        for (ty0_, _) in util.factorize(lnum[1], 2):
            if (not constraints_opt1(tx0_, ty0_)):
                continue
            goal = goal_opt1(tx0_, ty0_)
            if (goal < min_goal):
                min_goal = goal
                (tx0, ty0) = (tx0_, ty0_)

    def goal_opt2(tx2, ty2):
        # Largest innermost z-block fitting in regf for the given (tx2, ty2);
        # -inf signals infeasibility.
        tz2 = (((size_regf - ((tx2 * ty2) * lsregf[0])) * 1.0) / ((ty2 * lsregf[1]) + (tx2 * lsregf[2])))
        if (tz2 < 0):
            return (- float('inf'))
        # Round down to a factor of the z trip count.
        tz2_adj = util.closest_factor(lnum[2], tz2)
        if (tz2_adj[0] <= tz2):
            return tz2_adj[0]
        return (- float('inf'))

    # Try the full remaining x/y extents at the innermost level first.
    (tx2, ty2) = ((lnum[0] // tx0), (lnum[1] // ty0))
    tz2 = goal_opt2(tx2, ty2)
    if math.isinf(tz2):
        # Fall back to collapsing one of the two dimensions.
        txy2_cands = [(1, (lnum[1] // ty0)), ((lnum[0] // tx0), 1)]
        (tx2, ty2) = max(txy2_cands, key=(lambda txy2: goal_opt2(*txy2)))
        tz2 = goal_opt2(tx2, ty2)
        assert (not math.isinf(tz2))
    # Remaining (middle-level) factors.
    tz0 = (lnum[2] // tz2)
    tx1 = ((lnum[0] // tx0) // tx2)
    ty1 = ((lnum[1] // ty0) // ty2)
    # Loop order at blocking level 0: resident dim outermost.
    bl_ord_0 = ([0] * le.NUM)
    bl_ord_0[llpe[0]] = 2
    bl_ord_0[llpe[1]] = 1
    bl_ord_0[llpe[2]] = 0
    # Loop order at blocking level 1: z innermost-most priority; x/y order
    # depends on whether x still has a nontrivial middle factor.
    bl_ord_1 = ([0] * le.NUM)
    bl_ord_1[llpe[0]] = (0 if (tx1 > 1) else 1)
    bl_ord_1[llpe[1]] = (1 if (tx1 > 1) else 0)
    bl_ord_1[llpe[2]] = 2
    if (tz0 == 1):
        # No outer z loop: merge the middle x/y factors into the outer level
        # and reuse the level-1 order.
        tx0 *= tx1
        tx1 = 1
        ty0 *= ty1
        ty1 = 1
        bl_ord_0 = bl_ord_1
    # Assemble per-dimension (outer, middle, inner) factor triples, then
    # transpose into per-level tuples.
    lp_ts = ([None] * le.NUM)
    lp_ts[llpe[0]] = (tx0, tx1, tx2)
    lp_ts[llpe[1]] = (ty0, ty1, ty2)
    lp_ts[llpe[2]] = (tz0, 1, tz2)
    bl_ts = tuple(zip(*lp_ts))
    bl_ords = (tuple(bl_ord_0), tuple(bl_ord_1))
    return (bl_ts, bl_ords)
class SentenceRE(nn.Module):
def __init__(self, model, train_loader, val_loader, test_loader, ckpt, max_epoch=100, lr=0.1, weight_decay=1e-05, opt='sgd', add_subject_loss=False, loss_func=PARALoss(), metric=F1Metric()):
    """Bundle a sentence-level relation-extraction model with its training
    plumbing: data loaders, losses, metric, optimizer, and checkpoint path.

    opt selects the optimizer: 'sgd', 'adam', or 'adamw' (the latter with
    weight-decay grouping and no decay on biases/LayerNorm parameters).
    """
    super().__init__()
    # Evaluation metric and loss configuration.
    self.metric = metric
    self.add_subject_loss = add_subject_loss
    self.loss_func = loss_func
    self.subject_loss = torch.nn.CrossEntropyLoss()
    # Data pipelines and training schedule.
    self.max_epoch = max_epoch
    self.train_loader = train_loader
    self.val_loader = val_loader
    self.test_loader = test_loader
    # Model, wrapped for multi-GPU execution.
    self.model = model
    self.parallel_model = nn.DataParallel(self.model)
    self.lr = lr
    # Note: all sub-modules must be registered before parameters() is called.
    params = self.parameters()
    if opt == 'sgd':
        self.optimizer = optim.SGD(params, lr, weight_decay=weight_decay)
    elif opt == 'adam':
        self.optimizer = optim.Adam(params, lr, weight_decay=weight_decay)
    elif opt == 'adamw':
        from transformers import AdamW
        # Exclude biases and LayerNorm parameters from weight decay.
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        decayed = []
        undecayed = []
        for pname, param in self.named_parameters():
            if any(nd in pname for nd in no_decay):
                undecayed.append(param)
            else:
                decayed.append(param)
        grouped_params = [
            {'params': decayed, 'weight_decay': 0.01, 'lr': lr, 'ori_lr': lr},
            {'params': undecayed, 'weight_decay': 0.0, 'lr': lr, 'ori_lr': lr},
        ]
        self.optimizer = AdamW(grouped_params, correct_bias=False)
    else:
        raise Exception("Invalid optimizer. Must be 'sgd' or 'adam' or 'adamw'.")
    if torch.cuda.is_available():
        self.cuda()
    # Checkpoint file path used by train_model when saving the best model.
    self.ckpt = ckpt
def train_model(self, warmup=True, metric='acc'):
best_metric = 0
global_step = 0
for epoch in range(self.max_epoch):
self.train()
print(('=== Epoch %d train ===' % epoch))
avg_loss = AverageMeter()
avg_acc = AverageMeter()
t = tqdm(self.train_loader)
data_idx = 0
for (iter, data) in enumerate(t):
if torch.cuda.is_available():
for i in range(len(data)):
try:
data[i] = data[i].cuda()
except:
pass
subject_label = data[(- 1)]
label = data[(- 2)]
args = data[0:2]
(logits, subject_label_logits, attx) = self.parallel_model(*args)
loss = self.loss_func(logits, label)
if self.add_subject_loss:
subject_loss = self.subject_loss(subject_label_logits.transpose(1, 2), subject_label)
loss += subject_loss
l = list(logits.detach().cpu().numpy())
data_idx += len(l)
avg_loss.update(loss.item(), 1)
t.set_postfix(loss=avg_loss.avg)
if (warmup == True):
warmup_step = 300
if (global_step < warmup_step):
warmup_rate = (float(global_step) / warmup_step)
else:
warmup_rate = 1.0
for param_group in self.optimizer.param_groups:
param_group['lr'] = (self.lr * warmup_rate)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
global_step += 1
print(('=== Epoch %d val ===' % epoch))
if (self.val_loader is not None):
result = self.eval_model(self.val_loader)
if (result[metric] > best_metric):
print('Best ckpt and saved.')
folder_path = '/'.join(self.ckpt.split('/')[:(- 1)])
if (not os.path.exists(folder_path)):
os.mkdir(folder_path)
torch.save(self.parallel_model, self.ckpt)
best_metric = result[metric]
else:
torch.save(self.parallel_model, self.ckpt)
print(('Best %s on val set: %f' % (metric, best_metric)))
def eval_model(self, eval_loader):
self.eval()
self.metric.reset()
data_idx = 0
with torch.no_grad():
t = tqdm(eval_loader)
for (iter, data) in enumerate(t):
if torch.cuda.is_available():
for i in range(len(data)):
try:
data[i] = data[i].cuda()
except:
pass
label = data[(- 1)]
args = data[0:2]
(logits, _, attx) = self.parallel_model(*args)
l = list(logits.cpu().numpy())
self.metric.eval(l, eval_loader.dataset.data[data_idx:(data_idx + len(l))])
data_idx += len(l)
t.set_postfix(f1=self.metric.get_result()['without_na_micro_f1'])
print(self.metric.get_result())
return self.metric.get_result()
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict) |
def get_thread_siblings_list():
    """Return (cpu, sibling) id pairs parsed from sysfs.

    Reads every /sys/devices/system/cpu/cpu*/topology/thread_siblings_list
    file and extracts the first pair of CPU numbers found in each; files
    without two numbers are skipped.
    """
    sysfs_pattern = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
    pair_re = re.compile('(\\d+)\\D(\\d+)')
    sibling_pairs = []
    # Path('/').glob(rest) expands the absolute glob relative to the root.
    for sibling_file in pathlib.Path(sysfs_pattern[0]).glob(sysfs_pattern[1:]):
        with open(sibling_file) as fh:
            matches = pair_re.findall(fh.read().strip())
        if matches:
            sibling_pairs.append(tuple(int(tok) for tok in matches[0]))
    return sibling_pairs
class UnramifiedExtensionFieldFloatingPoint(UnramifiedExtensionGeneric, pAdicFloatingPointFieldGeneric):
    """Unramified extension of a p-adic field with floating-point precision,
    backed by the FLINT implementation.
    """

    def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='FLINT'):
        # shift_seed is accepted for interface compatibility but unused here.
        self._shift_seed = None
        self._exact_modulus = exact_modulus
        self._implementation = implementation
        if (implementation == 'NTL'):
            # Only the FLINT backend is supported for floating-point qadics.
            raise NotImplementedError
        Zpoly = _make_integral_poly(exact_modulus, poly.base_ring().prime(), prec)
        # Cap the power cache to keep memory bounded for large precisions.
        cache_limit = min(prec, 30)
        self.prime_pow = PowComputer_flint_maker(poly.base_ring().prime(), cache_limit, prec, prec, True, Zpoly, prec_type='floating-point')
        UnramifiedExtensionGeneric.__init__(self, poly, prec, print_mode, names, qAdicFloatingPointElement)
        from .qadic_flint_FP import pAdicCoercion_ZZ_FP, pAdicCoercion_QQ_FP
        # Register coercions from ZZ and QQ into this field.
        self.register_coercion(pAdicCoercion_ZZ_FP(self))
        self.register_coercion(pAdicCoercion_QQ_FP(self))

    def _coerce_map_from_(self, R):
        """Coerce from the corresponding floating-point ring when this field
        is its fraction field; otherwise defer to the generic machinery."""
        if (isinstance(R, UnramifiedExtensionRingFloatingPoint) and (R.fraction_field() is self)):
            from sage.rings.padics.qadic_flint_FP import pAdicCoercion_FP_frac_field
            return pAdicCoercion_FP_frac_field(R, self)
        return super()._coerce_map_from_(R)
# NOTE(review): the two lines below look like truncated
# `@pytest.mark.parametrize` decorators mangled by preprocessing — confirm
# against the original test file.
.parametrize('seed', [311])
.parametrize('clear_buffer', [True, False])
def test_graph_forward_clear_buffer(seed, clear_buffer):
    """forward_all over two heads sharing one hidden layer must reproduce the
    outputs obtained by forwarding each head individually, with or without
    intermediate-buffer clearing."""
    nn.clear_parameters()
    # Two affine heads branching off a shared hidden affine layer.
    x = nn.Variable((2, 10))
    h = PF.affine(x, 10, name='hidden')
    y1 = PF.affine(h, 10, name='out1')
    y2 = PF.affine(h, 10, name='out2')
    rng = np.random.RandomState(seed)
    data = rng.randn(*x.shape)
    x.d = data
    # Reference: forward each head separately.
    y1.forward()
    y2.forward()
    ref_y1 = y1.d.copy()
    ref_y2 = y2.d.copy()
    # Forward both heads at once; results must match the references.
    nn.forward_all([y1, y2], clear_buffer=clear_buffer)
    assert_allclose(y1.d, ref_y1)
    assert_allclose(y2.d, ref_y2)
class RootCauseDetector():
    """Root-cause discovery for an incident via PC causal structure learning.

    The time metric is (by default) forbidden from having incoming edges, so
    the variables the PC algorithm links to it are reported as root causes.
    """

    def __init__(self, data_obj: TabularData, var_names: List[str], time_metric_name: str='time', prior_knowledge: Optional[PriorKnowledge]=None):
        assert (time_metric_name in var_names), 'Time metric not found in the data!'
        self.data_obj = data_obj
        self.var_names = var_names
        self.time_metric_name = time_metric_name
        self.prior_knowledge = prior_knowledge

    def run(self, pvalue_thres: float=0.05, max_condition_set_size: int=4, return_graph: bool=False):
        """Run PC and return the root causes (and the inverted graph when
        *return_graph* is True)."""
        if self.prior_knowledge is not None:
            knowledge = self.prior_knowledge
        else:
            # Default prior: no variable may point into the time metric.
            blocked = {self.time_metric_name: [name for name in self.var_names if name != self.time_metric_name]}
            knowledge = PriorKnowledge(forbidden_links=blocked)
        pc_runner = PC(data=self.data_obj, prior_knowledge=knowledge, CI_test=PartialCorrelation(), use_multiprocessing=False)
        result = pc_runner.run(pvalue_thres=pvalue_thres, max_condition_set_size=max_condition_set_size)
        # node -> discovered parents.
        graph_est = {node: list(info['parents']) for (node, info) in result.items()}
        inv_map = invert_graph_and_remove_duplicates(graph_est)
        root_causes = inv_map[self.time_metric_name]
        print(f'The root cause(s) of the incident are: {root_causes}')
        if return_graph:
            return (root_causes, inv_map)
        return root_causes
def check_module_initialized(mod):
    """Verify *mod* is an nn.Module whose base constructor actually ran.

    A subclass that forgot to call ``super().__init__()`` has no
    ``_parameters`` attribute; raise a descriptive RuntimeError for that case.
    """
    assert isinstance(mod, torch.nn.Module)
    if hasattr(mod, '_parameters'):
        return
    raise RuntimeError("'{}' has not been initialized, did you forget to call 'super()'?".format(torch.typename(type(mod))))
def Dsk(k, y, tol=1.49e-08, rtol=1.49e-08, maxiter=50, miniter=1):
    """Evaluate the Dsk integral by fixed-order quadrature with refinement.

    The quadrature order is raised from ``miniter`` until consecutive
    estimates differ by less than ``tol`` (absolute) or ``rtol`` (relative),
    or until ``maxiter`` is reached, in which case a warning is emitted.

    Raises:
        ValueError: if ``y > 1`` (the integral is only defined for y < 1).
    """
    if (y > 1):
        # Fix: the original raised the message with a literal, unformatted
        # '{:f}' placeholder; actually interpolate y.
        raise ValueError('sk(k,y) called with y={:f}.Value of y must be less than 1.'.format(y))
    # Guarantee at least one refinement step beyond miniter.
    maxiter = max((miniter + 1), maxiter)
    val = np.inf
    err = np.inf
    # Fix: xrange is Python 2 only; use range.
    for n in range(miniter, (maxiter + 1)):
        newval = _Dsk_integral_fixed_quad(k, y, n)
        err = abs((newval - val))
        val = newval
        if ((err < tol) or (err < (rtol * abs(val)))):
            break
    else:
        warnings.warn(('maxiter (%d) exceeded. Latest difference = %e' % (maxiter, err)))
    return val
class Berkovich_Cp_Projective(Berkovich_Cp):
    """The Berkovich projective line over Cp.

    The base may be a prime integer (converted to P^1 over Qp), a p-adic
    field, or a number field; number-field bases additionally require a
    prime ideal selecting the place.
    """

    Element = Berkovich_Element_Cp_Projective

    def __init__(self, base, ideal=None):
        # A bare prime p is promoted to P^1(Qp).
        if (base in ZZ):
            if base.is_prime():
                from sage.rings.padics.factory import Qp
                base = ProjectiveSpace(Qp(base), 1)
            else:
                raise ValueError('non-prime passed into Berkovich space')
        # A field base is wrapped in P^1 over that field.
        if ((base in NumberFields()) or isinstance(base, sage.rings.abc.pAdicField)):
            base = ProjectiveSpace(base, 1)
        if (not is_ProjectiveSpace(base)):
            try:
                base = ProjectiveSpace(base)
            except (TypeError, ValueError):
                raise ValueError('base of projective Berkovich space must be projective space')
        if (not isinstance(base.base_ring(), sage.rings.abc.pAdicField)):
            if (base.base_ring() not in NumberFields()):
                raise ValueError('base of projective Berkovich space must be projective space over Qp or a number field')
            else:
                # Number-field base: validate the ideal and derive the prime.
                if (ideal is None):
                    raise ValueError('passed a number field but not an ideal')
                if (base.base_ring() is not QQ):
                    if (not isinstance(ideal, NumberFieldFractionalIdeal)):
                        raise ValueError('ideal was not a number field ideal')
                    if (ideal.number_field() != base.base_ring()):
                        raise ValueError(('passed number field ' + ('%s but ideal was an ideal of %s' % (base.base_ring(), ideal.number_field()))))
                    prime = ideal.smallest_integer()
                else:
                    # Over QQ the "ideal" is just a rational prime.
                    if (ideal not in QQ):
                        raise ValueError('ideal was not an element of QQ')
                    prime = ideal
                if (not ideal.is_prime()):
                    raise ValueError('passed non prime ideal')
                self._base_type = 'number field'
        else:
            # p-adic base: the prime is intrinsic and no ideal is used.
            prime = base.base_ring().prime()
            ideal = None
            self._base_type = 'padic field'
        if (base.dimension_relative() != 1):
            raise ValueError('base of projective Berkovich space must be projective space of dimension 1 over Qp or a number field')
        self._p = prime
        self._ideal = ideal
        Parent.__init__(self, base=base, category=TopologicalSpaces())

    def base_ring(self):
        """Return the base ring of the underlying projective space."""
        return self.base().base_ring()

    def _repr_(self):
        if (self._base_type == 'padic field'):
            return ('Projective Berkovich line over Cp(%s) of precision %s' % (self.prime(), self.base().base_ring().precision_cap()))
        else:
            return ('Projective Berkovich line over Cp(%s), with base %s' % (self.prime(), self.base().base_ring()))

    def _latex_(self):
        return ('\\text{Projective Berkovich line over } \\Bold{C}_{%s}' % self.prime())
# NOTE(review): the bare tuples below look like truncated click decorators
# (`@click.command()`, `@click.argument`, `@click.option`) mangled by
# preprocessing — confirm against the original CLI module.
()
('workspace', default='-')
('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)
('-c', '--channel', default=[], multiple=True, type=click.Tuple([str, str]), metavar='<PATTERN> <REPLACE>...')
('-s', '--sample', default=[], multiple=True, type=click.Tuple([str, str]), metavar='<PATTERN> <REPLACE>...')
('-m', '--modifier', default=[], multiple=True, type=click.Tuple([str, str]), metavar='<PATTERN> <REPLACE>...')
('--measurement', default=[], multiple=True, type=click.Tuple([str, str]), metavar='<PATTERN> <REPLACE>...')
def rename(workspace, output_file, channel, sample, modifier, measurement):
    """Rename channels/samples/modifiers/measurements of a pyhf workspace.

    Reads workspace JSON from *workspace* ('-' reads stdin), applies the
    (pattern, replacement) pairs, and writes the renamed workspace to
    *output_file* or echoes it to the screen.
    """
    with click.open_file(workspace, 'r', encoding='utf-8') as specstream:
        spec = json.load(specstream)
    ws = Workspace(spec)
    renamed_ws = ws.rename(channels=dict(channel), samples=dict(sample), modifiers=dict(modifier), measurements=dict(measurement))
    if (output_file is None):
        click.echo(json.dumps(renamed_ws, indent=4, sort_keys=True))
    else:
        with open(output_file, 'w+', encoding='utf-8') as out_file:
            json.dump(renamed_ws, out_file, indent=4, sort_keys=True)
        log.debug(f'Written to {output_file:s}')
def collect_segments(beat_df, beat_dict):
    """Tag beats in *beat_dict* with segment start/end labels from *beat_df*.

    Walks the rows in order; whenever the 'form' label changes (except on the
    final row) the beat at that (bar, beat) position receives the boundary
    tags, and the final row always closes the segment that is running there.
    Returns the (mutated) *beat_dict*.
    """
    active_form = 'NA'
    last_row = len(beat_df) - 1
    for row_idx, row in beat_df.iterrows():
        if row['form'] != active_form and row_idx != last_row:
            position = (int(row['bar']), int(row['beat']))
            if active_form == 'NA':
                # Very first segment: only a start tag.
                beat_dict[position].patch_segment_tag(start_seg=row['form'])
            else:
                beat_dict[position].patch_segment_tag(end_seg=active_form, start_seg=row['form'])
            active_form = row['form']
        if row_idx == last_row:
            position = (int(row['bar']), int(row['beat']))
            beat_dict[position].patch_segment_tag(end_seg=active_form)
    return beat_dict
def test_str():
    """show() must honor a custom string formatter, whether keyed by the
    concrete 'str' type or by the broader 'str_kind' selector."""
    buf = io.StringIO()
    fmt = '<STRING {!r}>'.format
    ak.Array(['hello', 'world']).show(stream=buf, formatter={'str': fmt})
    assert (buf.getvalue() == "[<STRING 'hello'>,\n <STRING 'world'>]\n")
    # Rewind and overwrite; the second rendering has the same length, so the
    # buffer contains exactly the new output.
    buf.seek(0)
    ak.Array(['hello', 'world']).show(stream=buf, formatter={'str_kind': fmt})
    assert (buf.getvalue() == "[<STRING 'hello'>,\n <STRING 'world'>]\n")
def unique_pitch(pianoroll):
    """Average number of distinct active pitches per bar, over all samples.

    Each sample is split into 8 bars of 16 time steps; a pitch counts once
    per bar if it is active (value == 1) at any step within that bar.
    """
    n_samples = pianoroll.shape[0]
    bars_per_sample = 8
    steps_per_bar = 16
    total = 0
    for sample_idx in range(n_samples):
        per_bar_total = 0
        for bar_idx in range(bars_per_sample):
            start = bar_idx * steps_per_bar
            bar = pianoroll[sample_idx][start:start + steps_per_bar]
            # Column indices of active cells -> distinct pitches in this bar.
            per_bar_total += np.unique(np.where(bar == 1)[1]).shape[0]
        total += per_bar_total / bars_per_sample
    return total / n_samples
class Music(SequenceDataset):
    """Autoregressive music (audio sample) dataset wrapper.

    NOTE(review): d_input/d_output/l_output/n_tokens/init_defaults read like
    they were `@property` methods whose decorators were stripped by
    preprocessing — confirm against the original dataloader module.
    """

    _name_ = 'music'

    def d_input(self):
        # One scalar amplitude per time step.
        return 1

    def d_output(self):
        # 256-way (8-bit quantized) output distribution.
        return 256

    def l_output(self):
        # Sequence length in samples = rate * duration (seconds).
        return (self.sample_rate * self.sample_len)

    def n_tokens(self):
        # Token vocabulary only exists when the input is discretized.
        return (256 if self.discrete_input else None)

    def init_defaults(self):
        return {'sample_len': 1, 'sample_rate': 16000, 'train_percentage': 0.88, 'discrete_input': False}

    def init(self):
        return

    def setup(self):
        """Instantiate the underlying _Music loader and its three splits."""
        from src.dataloaders.music import _Music
        self.music_class = _Music(path=default_data_path, sample_len=self.sample_len, sample_rate=self.sample_rate, train_percentage=self.train_percentage, discrete_input=self.discrete_input)
        self.dataset_train = self.music_class.get_data('train')
        self.dataset_test = self.music_class.get_data('test')
        self.dataset_val = self.music_class.get_data('val')

    def _return_callback(cls, return_value, *args, **kwargs):
        # Cast the targets to long for cross-entropy; pass extras through.
        (x, y, *z) = return_value
        return (x, y.long(), *z)
def refillPointsOnOneBoundary(boundary, index):
    """Respawn dead particles just outside one edge of the unit screen.

    boundary selects the edge (0/2 use screen[1] points, 1/3 use screen[0]);
    NOTE(review): the exact left/right/top/bottom mapping is inferred from the
    coordinates written below — confirm against the caller. A slot whose
    x-coordinate equals -100 is considered free; slots are scanned circularly
    starting at *index*, and the advanced index is returned for the next
    call. Relies on module globals: screen, points, maxPoints, dt, vec2 and
    refillVelThresh.
    """
    pointsNumber = screen[0]
    if ((boundary == 0) or (boundary == 2)):
        pointsNumber = screen[1]
    for i in range(pointsNumber):
        found = False
        # Scan at most maxPoints slots for a free one.
        for _ in range(maxPoints):
            if (points[index][0] == (- 100)):
                found = True
                # Place the new point slightly outside the chosen edge,
                # evenly distributed along it.
                if (boundary == 0):
                    points[index] = vec2(((- dt) * refillVelThresh), ((i + 0.5) / screen[1]))
                elif (boundary == 1):
                    points[index] = vec2(((i + 0.5) / screen[0]), ((- dt) * refillVelThresh))
                elif (boundary == 2):
                    points[index] = vec2((1 + (dt * refillVelThresh)), ((i + 0.5) / screen[1]))
                elif (boundary == 3):
                    points[index] = vec2(((i + 0.5) / screen[0]), (1 + (dt * refillVelThresh)))
                break
            index += 1
            # Wrap around the circular buffer of point slots.
            if (index >= maxPoints):
                index = 0
        if (not found):
            # No free slot anywhere: stop early.
            break
    return index
def resize_and_convert(img, size, format, resample):
    """Resize *img* to *size*, center-crop it square, and return the image
    encoded as bytes in *format* at quality 100."""
    resized = trans_fn.resize(img, size, resample)
    cropped = trans_fn.center_crop(resized, size)
    encoded = BytesIO()
    cropped.save(encoded, format=format, quality=100)
    return encoded.getvalue()
def test_named_record_fields_int32_float64_parameters():
    """A parameterized, named RecordType must round-trip losslessly through
    its datashape string representation."""
    record = RecordType(
        [NumpyType('int32'), NumpyType('float64')],
        ['one', 't w o'],
        parameters={'__record__': 'Name', 'p': [123]},
    )
    rendered = str(record)
    assert str(ak.types.from_datashape(rendered, highlevel=False)) == rendered
def gen_graph(branches, g=None, init_root=0, pre=''):
    """Build (or extend) an igraph graph from branch node sequences.

    Each branch is converted to node numbers via branch2num; every node gets
    a vertex named ``pre + str(node)``, and consecutive nodes within a branch
    become edges (each unique pair added once). Returns the graph and the
    largest node number encountered.
    """
    numbered_branches = [branch2num(b, init_root) for b in branches]
    node_ids = np.unique([n for branch in numbered_branches for n in branch]).tolist()
    if (g is None):
        g = ig.Graph()
    for node in node_ids:
        g.add_vertex((pre + str(node)))
    seen_pairs = []
    for branch in branches:
        nums = branch2num(branch, init_root)
        # Consecutive node pairs along the branch define its edges.
        for a, b in zip(nums, nums[1:]):
            pair = [a, b]
            if pair in seen_pairs:
                continue
            seen_pairs.append(pair)
            g.add_edge((pre + str(a)), (pre + str(b)))
    return (g, max(node_ids))
class FullTensorProductOfSuperCrystals(FullTensorProductOfCrystals):
    """Full tensor product of super crystals.

    Inherits all behavior from FullTensorProductOfCrystals; only the element
    class is specialized to the super-crystal element type.
    """

    class Element(TensorProductOfSuperCrystalsElement):
        pass
def reconstruct_tree(tree, sequence, transition_scheme=TransitionScheme.IN_ORDER, unary_limit=UNARY_LIMIT, reverse=False):
    """Replay transition *sequence* from the initial parser state of *tree*
    and return the tree it produces (re-reversed when *reverse* is set).

    Each transition is checked for legality before being applied, so an
    invalid sequence fails with a descriptive assertion message.
    """
    model = SimpleModel(transition_scheme=transition_scheme, unary_limit=unary_limit, reverse_sentence=reverse)
    states = model.initial_state_from_gold_trees([tree])
    assert (len(states) == 1)
    assert (states[0].num_transitions() == 0)
    for step, transition in enumerate(sequence):
        assert transition.is_legal(states[0], model), 'Transition {} not legal at step {} in sequence {}'.format(transition, step, sequence)
        states = parse_transitions.bulk_apply(model, states, [transition])
    rebuilt = states[0].constituents.value
    return rebuilt.reverse() if reverse else rebuilt
def read_pfm(filename):
    """Read a PFM (Portable Float Map) image file.

    Returns ``(data, scale)``: *data* is a numpy array of shape
    (height, width, 3) for color ('PF') headers or (height, width) for
    grayscale ('Pf'), flipped to top-down row order; *scale* is the absolute
    scale factor from the header. The sign of the header scale encodes the
    endianness of the sample data (< 0 means little-endian).

    Raises:
        Exception: if the file is not a PFM file or the header is malformed.
    """
    # Fix: open via a context manager so the handle is closed even when
    # header validation raises (the original leaked it on every error path).
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if (header == 'PF'):
            color = True
        elif (header == 'Pf'):
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match('^(\\d+)\\s(\\d+)\\s$', file.readline().decode('utf-8'))
        if (not dim_match):
            raise Exception('Malformed PFM header.')
        (width, height) = map(int, dim_match.groups())
        scale = float(file.readline().rstrip())
        if (scale < 0):
            # Negative scale marks little-endian sample data.
            endian = '<'
            scale = (- scale)
        else:
            endian = '>'
        data = np.fromfile(file, (endian + 'f'))
    shape = ((height, width, 3) if color else (height, width))
    # PFM stores rows bottom-up; flip to conventional top-down order.
    data = np.flipud(np.reshape(data, shape))
    return (data, scale)
class MaskMapper():
    """Remaps arbitrary object-label ids in segmentation masks to a dense
    1..N range, remembering the mapping so it can be inverted later.

    While the incoming labels happen to already be dense and in order, the
    mapper stays "coherent" and remapping is the identity; otherwise
    remap_index_mask() translates dense indices back to original labels.
    """

    def __init__(self):
        # Original labels seen so far, in arrival order.
        self.labels = []
        # original label -> dense (1-based) index.
        self.remappings = {}
        # True while original labels equal their dense indices.
        self.coherent = True

    def clear_labels(self):
        """Forget all labels and mappings seen so far."""
        self.labels = []
        self.remappings = {}
        self.coherent = True

    def convert_mask(self, mask, exhaustive=False):
        """Register the labels present in *mask* and return it as a float tensor.

        With exhaustive=False, every non-zero label in *mask* must be new.
        Returns (mask_tensor, new_mapped_labels) where new_mapped_labels are
        the dense indices assigned to the newly seen labels.
        """
        # Non-zero unique labels present in this mask.
        labels = np.unique(mask).astype(np.uint8)
        labels = labels[(labels != 0)].tolist()
        new_labels = list((set(labels) - set(self.labels)))
        if (not exhaustive):
            assert (len(new_labels) == len(labels)), 'Old labels found in non-exhaustive mode'
        for (i, l) in enumerate(new_labels):
            # Assign the next dense index; lose coherence if it differs
            # from the original label value.
            self.remappings[l] = ((i + len(self.labels)) + 1)
            if (self.coherent and (((i + len(self.labels)) + 1) != l)):
                self.coherent = False
        if exhaustive:
            new_mapped_labels = range(1, ((len(self.labels) + len(new_labels)) + 1))
        elif self.coherent:
            new_mapped_labels = new_labels
        else:
            new_mapped_labels = range((len(self.labels) + 1), ((len(self.labels) + len(new_labels)) + 1))
        self.labels.extend(new_labels)
        mask = torch.from_numpy(mask).float()
        return (mask, new_mapped_labels)

    def remap_index_mask(self, mask):
        """Translate a dense-index mask back to the original label values."""
        if self.coherent:
            # Identity mapping: nothing to translate.
            return mask
        new_mask = np.zeros_like(mask)
        for (l, i) in self.remappings.items():
            new_mask[(mask == i)] = l
        return new_mask
class FlaxAutoModelForVision2Seq(metaclass=DummyObject):
    """Placeholder class that raises a helpful ImportError when the Flax
    backend is not installed."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
# NOTE(review): the call below looks like a truncated `@add_start_docstrings`
# decorator mangled by preprocessing — confirm against the original module.
_start_docstrings('CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD\n    (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits` ', CAMEMBERT_START_DOCSTRING)
class CamembertForQuestionAnswering(RobertaForQuestionAnswering):
    """CamemBERT QA model: identical to the RoBERTa version except for the
    CamemBERT config class."""

    config_class = CamembertConfig
class ResNeXt(nn.Module):
    """ResNeXt backbone with optional IBN layers and Non-local blocks.

    Produces the final stage-4 feature map (no pooling/classifier head).
    """

    def __init__(self, last_stride, bn_norm, with_ibn, with_nl, block, layers, non_layers, baseWidth=4, cardinality=32):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.baseWidth = baseWidth
        self.inplanes = 64
        self.output_size = 64
        # Standard 7x7 stem, stride 2, followed by 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
        self.bn1 = get_norm(bn_norm, 64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], 1, bn_norm, with_ibn=with_ibn)
        self.layer2 = self._make_layer(block, 128, layers[1], 2, bn_norm, with_ibn=with_ibn)
        self.layer3 = self._make_layer(block, 256, layers[2], 2, bn_norm, with_ibn=with_ibn)
        # last_stride lets re-id models keep spatial resolution in stage 4.
        self.layer4 = self._make_layer(block, 512, layers[3], last_stride, bn_norm, with_ibn=with_ibn)
        self.random_init()
        if with_nl:
            self._build_nonlocal(layers, non_layers, bn_norm)
        else:
            self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []

    def _make_layer(self, block, planes, blocks, stride=1, bn_norm='BN', with_ibn=False):
        """Stack *blocks* residual blocks, downsampling on the first when needed."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut to match shape/stride.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), get_norm(bn_norm, (planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, bn_norm, with_ibn, self.baseWidth, self.cardinality, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, bn_norm, with_ibn, self.baseWidth, self.cardinality, 1, None))
        return nn.Sequential(*layers)

    def _build_nonlocal(self, layers, non_layers, bn_norm):
        """Create Non-local modules and the block indices after which they run
        (the last non_layers[k] blocks of each stage)."""
        self.NL_1 = nn.ModuleList([Non_local(256, bn_norm) for _ in range(non_layers[0])])
        self.NL_1_idx = sorted([(layers[0] - (i + 1)) for i in range(non_layers[0])])
        self.NL_2 = nn.ModuleList([Non_local(512, bn_norm) for _ in range(non_layers[1])])
        self.NL_2_idx = sorted([(layers[1] - (i + 1)) for i in range(non_layers[1])])
        self.NL_3 = nn.ModuleList([Non_local(1024, bn_norm) for _ in range(non_layers[2])])
        self.NL_3_idx = sorted([(layers[2] - (i + 1)) for i in range(non_layers[2])])
        self.NL_4 = nn.ModuleList([Non_local(2048, bn_norm) for _ in range(non_layers[3])])
        self.NL_4_idx = sorted([(layers[3] - (i + 1)) for i in range(non_layers[3])])

    def forward(self, x):
        """Run the backbone, interleaving Non-local blocks where configured.

        NOTE(review): empty NL_*_idx lists are replaced by [-1] here, i.e.
        the first forward() mutates instance state — confirm this is
        intentional (it is inherited from the fast-reid reference code).
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool1(x)
        NL1_counter = 0
        if (len(self.NL_1_idx) == 0):
            self.NL_1_idx = [(- 1)]
        for i in range(len(self.layer1)):
            x = self.layer1[i](x)
            if (i == self.NL_1_idx[NL1_counter]):
                (_, C, H, W) = x.shape
                x = self.NL_1[NL1_counter](x)
                NL1_counter += 1
        NL2_counter = 0
        if (len(self.NL_2_idx) == 0):
            self.NL_2_idx = [(- 1)]
        for i in range(len(self.layer2)):
            x = self.layer2[i](x)
            if (i == self.NL_2_idx[NL2_counter]):
                (_, C, H, W) = x.shape
                x = self.NL_2[NL2_counter](x)
                NL2_counter += 1
        NL3_counter = 0
        if (len(self.NL_3_idx) == 0):
            self.NL_3_idx = [(- 1)]
        for i in range(len(self.layer3)):
            x = self.layer3[i](x)
            if (i == self.NL_3_idx[NL3_counter]):
                (_, C, H, W) = x.shape
                x = self.NL_3[NL3_counter](x)
                NL3_counter += 1
        NL4_counter = 0
        if (len(self.NL_4_idx) == 0):
            self.NL_4_idx = [(- 1)]
        for i in range(len(self.layer4)):
            x = self.layer4[i](x)
            if (i == self.NL_4_idx[NL4_counter]):
                (_, C, H, W) = x.shape
                x = self.NL_4[NL4_counter](x)
                NL4_counter += 1
        return x

    def random_init(self):
        """He-style normal initialization for convs; unit-scale for norms."""
        self.conv1.weight.data.normal_(0, math.sqrt((2.0 / ((7 * 7) * 64))))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.InstanceNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class DistroConfigNode(TreeConfigNode):
    """Tree-config node representing an Ubuntu distro; its children are the
    compiler-config node type appropriate for that distro."""

    def init2(self, node_name):
        # The node's own name is the distro name.
        self.props['distro_name'] = node_name

    def child_constructor(self):
        """Map the stored distro name to its compiler config node class."""
        constructors = {'xenial': XenialCompilerConfigNode, 'bionic': BionicCompilerConfigNode}
        return constructors[self.find_prop('distro_name')]
def add_wsl_losses(model, prefix=''):
    """Attach weakly-supervised-learning losses to the Caffe2 *model* graph.

    Depending on cfg.WSL flags this adds CPG (class peak/gradient) ops, the
    CSC loss or a plain cross-entropy loss, plus optional center and
    min-entropy losses. Returns the dict of loss gradients.
    """
    add_cls_pred((prefix + 'rois_pred'), (prefix + 'cls_prob'), model, prefix='')
    classes_weight = None
    cpg = None
    if (cfg.WSL.CPG or cfg.WSL.CSC):
        # Build the CPG sub-net that backpropagates class scores to the input.
        cpg_args = {}
        cpg_args['tau'] = cfg.WSL.CPG_TAU
        cpg_args['max_iter'] = max(cfg.WSL.CPG_MAX_ITER, cfg.WSL.CSC_MAX_ITER)
        cpg_args['cpg_net_name'] = (model.net.Proto().name + '_cpg')
        cpg_args['pred_blob_name'] = cfg.WSL.CPG_PRE_BLOB
        cpg_args['data_blob_name'] = cfg.WSL.CPG_DATA_BLOB
        model.net.CPG(['labels_oh', (prefix + 'cls_prob')], ['cpg_raw'], **cpg_args)
        model.net.CPGScale(['cpg_raw', 'labels_oh', (prefix + 'cls_prob')], 'cpg', tau=cfg.WSL.CPG_TAU)
        cpg = 'cpg'
    if cfg.WSL.CSC:
        # NOTE(review): '(not cfg.MODEL.MASK_ON) or True' is always True, so
        # the else branch is dead — confirm whether the mask-on special case
        # was intentionally disabled.
        if ((not cfg.MODEL.MASK_ON) or True):
            loss_gradients = add_csc_loss(model, 'cpg', (prefix + 'cls_prob'), (prefix + 'rois_pred'), (prefix + 'rois'), loss_weight=1.0, prefix='')
        else:
            loss_gradients = {}
    else:
        # Standard image-level cross-entropy classification loss.
        add_cross_entropy_loss(model, (prefix + 'cls_prob'), 'labels_oh', (prefix + 'cross_entropy'), weight=classes_weight, cpg=cpg)
        loss_cls = model.net.AveragedLoss([(prefix + 'cross_entropy')], [(prefix + 'loss_cls')])
        loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls])
        model.Accuracy([(prefix + 'cls_prob'), 'labels_int32'], (prefix + 'accuracy_cls'))
        model.AddLosses([(prefix + 'loss_cls')])
        model.AddMetrics((prefix + 'accuracy_cls'))
    if cfg.WSL.CENTER_LOSS:
        # Center loss on fc7 (drop7) features, 4096-d.
        center_dim = 4096
        rois_pred = (prefix + 'rois_pred')
        loss_gradients_center = add_center_loss('labels_oh', rois_pred, (prefix + 'drop7'), center_dim, model)
        loss_gradients.update(loss_gradients_center)
    if cfg.WSL.MIN_ENTROPY_LOSS:
        loss_gradients_ME = add_min_entropy_loss(model, (prefix + 'rois_pred'), 'labels_oh', (prefix + 'loss_entropy'), cpg=cpg)
        loss_gradients.update(loss_gradients_ME)
    return loss_gradients
def keyword_none(A: dace.float32[N], B: dace.float32[N], C: dace.pointer(dace.int32)):
    """Copy A into B only when the optional pointer argument C is absent."""
    if (C is None):
        B[:] = A[:]
# NOTE(review): the line below looks like a truncated
# `@pytest.mark.parametrize` decorator mangled by preprocessing — confirm
# against the original test file.
.parametrize('name,dataset_class', [('sinusoid', Sinusoid), ('harmonic', Harmonic)])
def test_toy_helpers(name, dataset_class):
    """The toy-dataset helper named *name* must build the matching dataset
    class and yield tasks with 5-shot train / 15-shot test splits."""
    dataset_fn = getattr(helpers, name)
    dataset = dataset_fn(shots=5, test_shots=15)
    assert isinstance(dataset, dataset_class)
    task = dataset[0]
    assert isinstance(task, OrderedDict)
    assert ('train' in task)
    assert ('test' in task)
    (train, test) = (task['train'], task['test'])
    assert isinstance(train, Task)
    assert isinstance(test, Task)
    assert (len(train) == 5)
    assert (len(test) == 15)
# NOTE(review): the two lines below look like truncated click decorators
# (`@click.command(...)`, `@click.pass_context`) mangled by preprocessing —
# confirm against the original CLI module.
('version', add_help_option=False)
_context
def version_command(ctx):
    """Print the PySceneDetect version and exit without processing input."""
    ctx.obj.process_input_flag = False
    click.echo(click.style(('PySceneDetect %s' % scenedetect.__version__), fg='yellow'))
    ctx.exit()
# NOTE(review): the line below looks like a truncated `@hookspec`-style
# decorator mangled by preprocessing — confirm against the original module.
_spec([HookScope.GLOBAL])
def process_call_kwargs(context: HookContext, case: Case, kwargs: dict[(str, Any)]) -> None:
    """Hook specification with no default behavior — NOTE(review): inferred
    from the name, plugins presumably mutate *kwargs* before the test case
    call is made; confirm against the hook dispatcher."""
def split_data(data, max_len):
    """Turn each sequence into an (input, target) pair shifted by one step,
    zero-padded (or truncated) to exactly *max_len* tokens.

    Sequences whose shifted input would be empty (length < 2) are dropped.
    Returns (inputs, targets) as parallel lists of lists.
    """
    inputs = []
    targets = []
    for seq in data:
        src = seq[:-1]
        tgt = seq[1:]
        if not src:
            # Too short to form a training pair.
            continue
        pad = max_len - len(src)
        if pad > 0:
            src = src + [0] * pad
            tgt = tgt + [0] * pad
        inputs.append(src[:max_len])
        targets.append(tgt[:max_len])
    return (inputs, targets)
def test_validate_series(df_phone: pd.DataFrame) -> None:
    """validate_phone over messy_phone: the first 12 entries are valid, the
    last 5 are not."""
    expected = pd.Series(([True] * 12) + ([False] * 5), name='messy_phone')
    assert expected.equals(validate_phone(df_phone['messy_phone']))
def program_generator(size: int, factor: float) -> DaceProgram:
    """Build a DaCe program that scales a *size*-element vector by *factor*.

    NOTE(review): the bare parenthesized lines below look like truncated DaCe
    decorators and tasklet connector declarations (`@dace.program`,
    `@dace.map`, memlet `<<`/`>>` annotations) mangled by preprocessing —
    confirm against the original file.
    """
    (dace.float64[size], dace.float64[size], size=size, factor=factor)
    def lib_reuse(input, output):
        (_[0:size])
        def tasklet(i):
            (a << input[i])
            (b >> output[i])
            b = (a * factor)
    return lib_reuse
def stylize():
    """Interactive neural-style-transfer loop.

    Loads the TransformerNetwork weights from STYLE_TRANSFORM_PATH, then
    repeatedly prompts for an image path, stylizes it, shows the result and
    saves it as 'helloworld.jpg'. Runs until interrupted (Ctrl+C).
    """
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    net = transformer.TransformerNetwork()
    net.load_state_dict(torch.load(STYLE_TRANSFORM_PATH))
    net = net.to(device)
    # Inference only: no gradients needed.
    with torch.no_grad():
        while 1:
            torch.cuda.empty_cache()
            print('Stylize Image~ Press Ctrl+C and Enter to close the program')
            content_image_path = input('Enter the image path: ')
            content_image = utils.load_image(content_image_path)
            starttime = time.time()
            # image -> tensor -> stylized tensor -> image.
            content_tensor = utils.itot(content_image).to(device)
            generated_tensor = net(content_tensor)
            generated_image = utils.ttoi(generated_tensor.detach())
            if PRESERVE_COLOR:
                # Keep the original image's colors, transfer only texture.
                generated_image = utils.transfer_color(content_image, generated_image)
            print('Transfer Time: {}'.format((time.time() - starttime)))
            utils.show(generated_image)
            utils.saveimg(generated_image, 'helloworld.jpg')
class SymlinkLockFile(LockBase):
    """File lock implemented with a symlink whose target identifies the owner.

    Acquisition relies on os.symlink being atomic: the process whose unique
    name the link points at holds the lock.
    """

    def __init__(self, path, threaded=True, timeout=None):
        LockBase.__init__(self, path, threaded, timeout)
        # Keep only the basename: the symlink target identifies the owner.
        self.unique_name = os.path.split(self.unique_name)[1]

    def acquire(self, timeout=None):
        """Try to create the lock symlink, polling until success or timeout.

        timeout > 0: raise LockTimeout after the deadline; timeout <= 0:
        raise AlreadyLocked immediately if someone else holds the lock.
        """
        timeout = (timeout if (timeout is not None) else self.timeout)
        end_time = time.time()
        if ((timeout is not None) and (timeout > 0)):
            end_time += timeout
        while True:
            try:
                # Atomic on POSIX: fails with OSError if the link exists.
                os.symlink(self.unique_name, self.lock_file)
            except OSError:
                if self.i_am_locking():
                    # Re-entrant acquisition by the same owner.
                    return
                else:
                    if ((timeout is not None) and (time.time() > end_time)):
                        if (timeout > 0):
                            raise LockTimeout(('Timeout waiting to acquire lock for %s' % self.path))
                        else:
                            raise AlreadyLocked(('%s is already locked' % self.path))
                    # Poll at 1/10 of the timeout (or 100 ms when blocking
                    # indefinitely).
                    time.sleep(((timeout / 10) if (timeout is not None) else 0.1))
            else:
                return

    def release(self):
        """Remove the lock symlink; only the owner may release."""
        if (not self.is_locked()):
            raise NotLocked(('%s is not locked' % self.path))
        elif (not self.i_am_locking()):
            raise NotMyLock(('%s is locked, but not by me' % self.path))
        os.unlink(self.lock_file)

    def is_locked(self):
        return os.path.islink(self.lock_file)

    def i_am_locking(self):
        # Owner check: the link must point at our unique name.
        return (os.path.islink(self.lock_file) and (os.readlink(self.lock_file) == self.unique_name))

    def break_lock(self):
        """Forcibly remove the lock regardless of owner."""
        if os.path.islink(self.lock_file):
            os.unlink(self.lock_file)
def openpose():
    """Launch the OpenPose demo (Windows paths) and prepare the keypoint dir.

    Starts OpenPoseDemo.exe in the background writing hand-keypoint JSON to
    ..\\..\\Keypoints, then creates the local 'Keypoints' directory and seeds
    it with the PSL template keypoints file (skipped if it already exists).
    """
    print('Starting OpenPose')
    # Run from the OpenPose bin directory so its relative paths resolve.
    os.chdir('bin\\openpose')
    subprocess.Popen('bin\\OpenPoseDemo.exe --hand --write_json ..\\..\\Keypoints --net_resolution 128x128 --number_people_max 1', shell=True)
    os.chdir('..\\..')
    dirName = 'Keypoints'
    fileName = 'PSL\\_keypoints.json'
    try:
        os.mkdir(dirName)
        shutil.copy(fileName, dirName)
        print('Directory ', dirName, ' Created ')
    except FileExistsError:
        print('Directory ', dirName, ' already exists')
class GPT3(LM):
    """DSPy language-model wrapper around the OpenAI (or Azure OpenAI) API.

    Supports both chat-style and legacy text-completion models; the model
    type is inferred from the model name unless given explicitly. Every
    request/response pair is appended to ``self.history``.
    """

    def __init__(self, model: str='gpt-3.5-turbo-instruct', api_key: Optional[str]=None, api_provider: Literal[('openai', 'azure')]='openai', api_base: Optional[str]=None, model_type: Literal[('chat', 'text')]=None, **kwargs):
        super().__init__(model)
        self.provider = 'openai'
        # gpt-3.5/gpt-4/turbo models are chat models unless they are the
        # '*-instruct' text-completion variants.
        default_model_type = ('chat' if ((('gpt-3.5' in model) or ('turbo' in model) or ('gpt-4' in model)) and ('instruct' not in model)) else 'text')
        self.model_type = (model_type if model_type else default_model_type)
        if (api_provider == 'azure'):
            # Azure addresses deployments, not model names.
            assert (('engine' in kwargs) or ('deployment_id' in kwargs)), 'Must specify engine or deployment_id for Azure API instead of model.'
            assert ('api_version' in kwargs), 'Must specify api_version for Azure API'
            assert ('api_base' in kwargs), 'Must specify api_base for Azure API'
            openai.api_type = 'azure'
        if kwargs.get('api_version'):
            openai.api_version = kwargs['api_version']
        if api_key:
            openai.api_key = api_key
        if api_base:
            # The attribute name changed between openai SDK 0.x and 1.x.
            if OPENAI_LEGACY:
                openai.api_base = api_base
            else:
                openai.base_url = api_base
        # Deterministic defaults; caller kwargs override.
        self.kwargs = {'temperature': 0.0, 'max_tokens': 150, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, **kwargs}
        if (api_provider != 'azure'):
            self.kwargs['model'] = model
        self.history: list[dict[(str, Any)]] = []

    def _openai_client(self):
        return openai

    def basic_request(self, prompt: str, **kwargs):
        """Issue one completion request and record it in self.history."""
        raw_kwargs = kwargs
        kwargs = {**self.kwargs, **kwargs}
        if (self.model_type == 'chat'):
            # Chat endpoint: wrap the prompt as a single user message. The
            # request is serialized so it can be cached by chat_request.
            kwargs['messages'] = [{'role': 'user', 'content': prompt}]
            kwargs = {'stringify_request': json.dumps(kwargs)}
            response = chat_request(**kwargs)
        else:
            kwargs['prompt'] = prompt
            response = completions_request(**kwargs)
        history = {'prompt': prompt, 'response': response, 'kwargs': kwargs, 'raw_kwargs': raw_kwargs}
        self.history.append(history)
        return response

    # NOTE(review): the line below looks like a truncated
    # `@backoff.on_exception` decorator mangled by preprocessing — confirm.
    _exception(backoff.expo, ERRORS, max_time=1000, on_backoff=backoff_hdlr)
    def request(self, prompt: str, **kwargs):
        """basic_request with retry/backoff; strips the model_type kwarg."""
        if ('model_type' in kwargs):
            del kwargs['model_type']
        return self.basic_request(prompt, **kwargs)

    def _get_choice_text(self, choice: dict[(str, Any)]) -> str:
        # Chat and text endpoints shape their choices differently.
        if (self.model_type == 'chat'):
            return choice['message']['content']
        return choice['text']

    def __call__(self, prompt: str, only_completed: bool=True, return_sorted: bool=False, **kwargs) -> list[dict[(str, Any)]]:
        """Query the model and return the list of completion strings.

        only_completed keeps only choices that did not hit the token limit
        (when any exist); return_sorted orders n>1 samples by mean token
        log-probability.
        """
        assert only_completed, 'for now'
        assert (return_sorted is False), 'for now'
        response = self.request(prompt, **kwargs)
        choices = response['choices']
        # Drop truncated generations unless that would drop everything.
        completed_choices = [c for c in choices if (c['finish_reason'] != 'length')]
        if (only_completed and len(completed_choices)):
            choices = completed_choices
        completions = [self._get_choice_text(c) for c in choices]
        if (return_sorted and (kwargs.get('n', 1) > 1)):
            scored_completions = []
            for c in choices:
                (tokens, logprobs) = (c['logprobs']['tokens'], c['logprobs']['token_logprobs'])
                if ('<|endoftext|>' in tokens):
                    # Score only up to (and including) the end-of-text token.
                    index = (tokens.index('<|endoftext|>') + 1)
                    (tokens, logprobs) = (tokens[:index], logprobs[:index])
                avglog = (sum(logprobs) / len(logprobs))
                scored_completions.append((avglog, self._get_choice_text(c)))
            scored_completions = sorted(scored_completions, reverse=True)
            completions = [c for (_, c) in scored_completions]
        return completions
class FeatureFunction():
    """Base class for feature extractors.

    Subclasses implement ``inform`` (fit on the data splits) and usually
    override ``process``; ``lookup`` is the public entry point and delegates
    to ``process``. Vocabulary (de)serialization hooks are no-ops by default.
    """

    def __init__(self):
        pass

    def inform(self, train, dev, test):
        # Concrete feature functions must provide this.
        raise NotImplementedError('Not Implemented Here')

    def lookup(self, data):
        # Public alias for process().
        return self.process(data)

    def process(self, data):
        # Default implementation produces nothing.
        pass

    def load_vocab(self, mname):
        pass

    def save_vocab(self, mname):
        pass
def move_to(obj, old_pt, pt):
    """Translate *obj* by the vector from *old_pt* to *pt*."""
    obj.move(pt.getX() - old_pt.getX(), pt.getY() - old_pt.getY())
def CheckComment(comment, filename, linenum, error):
    """Lint a comment for TODO style via the *error* callback.

    Checks: at most one space before 'TODO', a '(username)' after it, and at
    most a single space between the closing paren and the text.
    """
    todo = _RE_PATTERN_TODO.match(comment)
    if not todo:
        return
    if len(todo.group(1)) > 1:
        error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO')
    if not todo.group(2):
        error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like "// TODO(my_username): Stuff."')
    trailing = todo.group(3)
    if trailing not in (' ', ''):
        error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space')
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None):
    """Load a checkpoint into *model* and return the raw checkpoint object.

    Accepts either a bare OrderedDict of weights or a dict containing a
    'state_dict' entry. Strips the 'module.' prefix added by DataParallel
    wrappers, and dispatches to model.module when *model* is itself wrapped.

    Raises:
        RuntimeError: if no state dict can be found in the checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location)
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and ('state_dict' in checkpoint):
        state_dict = checkpoint['state_dict']
    else:
        raise RuntimeError('No state_dict found in checkpoint file {}'.format(filename))
    # BUG FIX: previously this read checkpoint['state_dict'], which raised
    # KeyError when the checkpoint was a plain OrderedDict (first branch).
    # Reading from state_dict covers both branches.
    if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for (k, v) in state_dict.items()}
    if hasattr(model, 'module'):
        load_state_dict(model.module, state_dict, strict, logger)
    else:
        load_state_dict(model, state_dict, strict, logger)
    return checkpoint
def run_cython_lint(files):
    """Run cython-lint (pycodestyle checks disabled) over *files*.

    Returns a (returncode, stdout) tuple; (0, '') when *files* is empty.
    """
    if not files:
        return 0, ''
    command = ['cython-lint', '--no-pycodestyle', *files]
    proc = subprocess.run(command, stdout=subprocess.PIPE, encoding='utf-8')
    return proc.returncode, proc.stdout
def fix_random_seeds(seed: int=1337) -> None:
    """Seed every RNG in use (stdlib, NumPy, torch) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Trade cudnn autotuning for deterministic kernels.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
class MPNetForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder for MPNetForTokenClassification.

    Instantiating it delegates to requires_backends — presumably raising a
    helpful error when the 'torch' backend is missing (confirm in the
    requires_backends implementation).
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def assert_install_itrex():
    """Fail fast when intel-extension-for-transformers is not installed.

    Raises AssertionError (matching the original contract) with an
    installation hint. FIX: the original message ran two sentences together
    ('package.You can install').
    """
    assert is_itrex_available(), (
        'To run int8 or k-bits model on cpu, please install the '
        '`intel-extension-for-transformers` package. '
        'You can install it with `pip install intel-extension-for-transformers`.'
    )
class LinearMasked(torch.nn.Module):
    """Linear reconstruction model whose weights are masked by a fixed
    adjacency matrix.

    Trainable weights start at zero and are elementwise-gated by *adj*;
    forward() returns a masked half-squared reconstruction error, averaged
    per feature over the observed entries.
    """

    def __init__(self, adj):
        super(LinearMasked, self).__init__()
        # Zero-initialized trainable weights with the same shape as adj.
        self.weights = torch.nn.Parameter(torch.Tensor(np.zeros_like(adj)))
        self.adj = torch.Tensor(adj)

    def forward(self, data, mask):
        reconstruction = torch.matmul(data, self.adj * self.weights)
        squared_error = 0.5 * ((data - reconstruction) * mask) ** 2
        per_feature = torch.sum(squared_error, dim=0) / torch.sum(mask, dim=0)
        return torch.mean(per_feature)
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 model (Bottleneck blocks, [3, 4, 23, 3]).

    When *pretrained* is True, weights are loaded from
    model_urls['resnet101'].
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        # NOTE(review): torch.load does not fetch remote URLs — this assumes
        # model_urls['resnet101'] is a local file path; confirm.
        weights = torch.load(model_urls['resnet101'], map_location='cpu')
        net.load_state_dict(weights)
    return net
def _vi_tables(im_true, im_test, table=None, ignore_labels=()):
    """Compute the conditional entropies used by variation of information.

    Returns [H(X|Y), H(Y|X)] as ndarrays, computed from the (normalized)
    contingency table of the two label images.

    FIX: the two entropy lines had lost their matrix-multiplication
    operators and were a SyntaxError; restored `@` per the scikit-image
    upstream implementation.
    """
    check_shape_equality(im_true, im_test)
    if table is None:
        # Normalized joint distribution of (true, test) labels.
        pxy = contingency_table(im_true, im_test, ignore_labels=ignore_labels, normalize=True)
    else:
        pxy = table
    # Marginal distributions, flattened to 1D.
    px = np.ravel(pxy.sum(axis=1))
    py = np.ravel(pxy.sum(axis=0))
    # Inverse-marginal diagonal matrices (zeros left untouched).
    px_inv = sparse.diags(_invert_nonzero(px))
    py_inv = sparse.diags(_invert_nonzero(py))
    # Conditional entropies via sparse linear algebra.
    hygx = -px @ _xlogx(px_inv @ pxy).sum(axis=1)
    hxgy = -_xlogx(pxy @ py_inv).sum(axis=0) @ py
    return list(map(np.asarray, [hxgy, hygx]))
class ApplyResultObj(ctypes.c_void_p):
    """Opaque handle passed through ctypes calls by identity.

    Stores the wrapped object as `_as_parameter_` so ctypes forwards it
    verbatim when an instance is used as a call argument.
    """

    def __init__(self, obj):
        # Deliberately skips c_void_p.__init__; only the _as_parameter_
        # protocol is used by callers.
        self._as_parameter_ = obj

    # FIX: ctypes invokes from_param on the *type* (cls.from_param(value)).
    # As a plain function that worked in Python 3 only by accident;
    # @staticmethod makes the contract explicit and prevents mis-binding
    # when called on an instance.
    @staticmethod
    def from_param(obj):
        return obj
class DAVIS_Test(data.Dataset):
    """DAVIS-style test dataset for video object segmentation.

    For each video it yields all RGB frames (normalized tensors), the
    one-hot first-frame annotation, the object count, and bookkeeping info
    needed to restore the original resolution.
    """

    def __init__(self, root, output_size=None, img_set='2017/val.txt', max_obj_n=11, single_obj=False):
        # root: DAVIS root directory.
        # output_size: optional (h, w) cap on the working resolution.
        # img_set: split list file under ImageSets/.
        # max_obj_n: number of one-hot channels produced by ToOnehot.
        # single_obj: collapse all object ids into one foreground label.
        self.root = root
        self.single_obj = single_obj
        dataset_path = os.path.join(root, 'ImageSets', img_set)
        self.dataset_list = list()
        self.output_size = output_size
        # One video name per non-empty line of the split file.
        with open(os.path.join(dataset_path), 'r') as lines:
            for line in lines:
                dataset_name = line.strip()
                if (len(dataset_name) > 0):
                    self.dataset_list.append(dataset_name)
        self.to_tensor = TF.ToTensor()
        # ImageNet normalization statistics.
        self.normalize = TF.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.to_onehot = mytrans.ToOnehot(max_obj_n, shuffle=False)

    def __len__(self):
        return len(self.dataset_list)

    def __getitem__(self, idx):
        """Return (frames, masks, obj_n, info) for video *idx*."""
        video_name = self.dataset_list[idx]
        img_dir = os.path.join(self.root, 'JPEGImages', '480p', video_name)
        mask_dir = os.path.join(self.root, 'Annotations', '480p', video_name)
        img_list = sorted(glob(os.path.join(img_dir, '*.jpg')))
        mask_list = sorted(glob(os.path.join(mask_dir, '*.png')))
        # Only the first annotated frame is used at test time.
        first_mask = load_image_in_PIL(mask_list[0], 'P')
        (original_w, original_h) = first_mask.size
        if self.output_size:
            (out_h, out_w) = self.output_size
            # Never upscale: keep the native size when already smaller.
            if (original_h < out_h):
                (h, w) = (original_h, original_w)
            else:
                h = out_h
                # Preserve aspect ratio; out_w from output_size is unused here.
                w = int(((original_w / original_h) * out_h))
        else:
            (h, w) = (original_h, original_w)
        # NEAREST resampling keeps palette label ids intact.
        first_mask = first_mask.resize((w, h), Image.NEAREST)
        first_mask_np = np.array(first_mask, np.uint8)
        if self.single_obj:
            first_mask_np[(first_mask_np > 1)] = 1
        # Object count includes the background label 0.
        obj_n = (first_mask_np.max() + 1)
        video_len = len(img_list)
        frames = torch.zeros((video_len, 3, h, w), dtype=torch.float)
        masks = torch.zeros((1, obj_n, h, w), dtype=torch.float)
        (mask, _) = self.to_onehot(first_mask_np)
        masks[0] = mask[:obj_n]
        for i in range(video_len):
            img = load_image_in_PIL(img_list[i], 'RGB')
            img = img.resize((w, h), Image.BILINEAR)
            frames[i] = self.normalize(self.to_tensor(img))
        info = {'name': video_name, 'num_frames': video_len, 'original_size': (original_h, original_w)}
        return (frames, masks, obj_n, info)
def get_all_csv(base_dir, verbose=False):
    """Load every .csv file found inside the directories under *base_dir*.

    Each file is parsed with np.genfromtxt (comma-delimited, first row as
    field names); returns the list of resulting structured arrays.
    """
    delimiter = ','
    tables = []
    for directory in get_dirs(base_dir):
        for file_name in os.listdir(directory):
            if not file_name.endswith('.csv'):
                continue
            full_path = os.path.join(directory, file_name)
            if verbose:
                print('Reading {}'.format(full_path))
            tables.append(np.genfromtxt(full_path, delimiter=delimiter, dtype=None, names=True))
    return tables
def replace_math_functions(input_string):
    """Rewrite every known math-function call site in *input_string*.

    Replaces each 'old(' occurrence with its MATH_TRANSPILATIONS
    counterpart; only call sites (name followed by '(') are touched.
    """
    result = input_string
    for old_name, new_name in MATH_TRANSPILATIONS.items():
        result = result.replace(f'{old_name}(', f'{new_name}(')
    return result
class AnnotatedObjectsOpenImages(AnnotatedObjectsDataset):
    """Open Images flavor of AnnotatedObjectsDataset.

    Loads category descriptions and bounding-box annotations, filters them
    by object area and per-image object counts, and exposes image paths and
    descriptions keyed by image id.
    """

    def __init__(self, use_additional_parameters: bool, **kwargs):
        """
        @param use_additional_parameters: whether additional annotation
            parameters are used downstream (stored, not interpreted here).
        """
        super().__init__(**kwargs)
        self.use_additional_parameters = use_additional_parameters
        # Category metadata must be ready before annotations are loaded.
        self.categories = load_categories(self.paths['class_descriptions'])
        self.filter_categories()
        self.setup_category_id_and_number()
        self.image_descriptions = {}
        annotations = load_annotations(self.paths['annotations'], self.min_object_area, self.category_mapping, self.category_number)
        self.annotations = self.filter_object_number(annotations, self.min_object_area, self.min_objects_per_image, self.max_objects_per_image)
        self.image_ids = list(self.annotations.keys())
        self.clean_up_annotations_and_image_descriptions()

    def get_path_structure(self) -> Dict[(str, str)]:
        if (self.split not in OPEN_IMAGES_STRUCTURE):
            # FIX: closing bracket was misplaced in the original message
            # ('Split [{split} does not exist for Open Images data.]').
            raise ValueError(f'Split [{self.split}] does not exist for Open Images data.')
        return OPEN_IMAGES_STRUCTURE[self.split]

    def get_image_path(self, image_id: str) -> Path:
        # Open Images file names are the image id zero-padded to 16 chars.
        return self.paths['files'].joinpath(f'{image_id:0>16}.jpg')

    def get_image_description(self, image_id: str) -> Dict[(str, Any)]:
        image_path = self.get_image_path(image_id)
        return {'file_path': str(image_path), 'file_name': image_path.name}
def pod_requests_sgx(pod: V1Pod) -> bool:
    """Return True when any container in *pod* requests or limits the
    'intel.com/sgx' resource."""
    return any(
        isinstance(demands, dict) and 'intel.com/sgx' in demands
        for container in pod.spec.containers
        for demands in (container.resources.limits, container.resources.requests)
    )
class LogFloatParam(RandomHyperparameter):
    """Hyperparameter sampled log-uniformly in [min_value, max_value],
    shifted by *offset*.

    Internally samples uniformly in log space via a LinearFloatParam and
    exponentiates on generation.
    """

    def __init__(self, name, min_value, max_value, *, offset=0):
        super(LogFloatParam, self).__init__(name)
        self._linear_float_param = LinearFloatParam(('log_' + name), math.log(min_value), math.log(max_value))
        self.offset = offset

    def generate_next_value(self):
        sampled_log = self._linear_float_param.generate()
        return (math.e ** sampled_log) + self.offset
class Log():
    """Accumulate log messages in memory, optionally echoing to stdout."""

    def __init__(self, verbose: bool=False):
        self.verbose = verbose  # echo to stdout when True
        self.log = ''           # full transcript, one newline per call

    def print_log(self, *args):
        message = ' '.join(str(arg) for arg in args)
        if self.verbose:
            print(*args)
        self.log += message + '\n'
def get_boxes_idx(boxes_list, refs):
    """Map each box in *refs* to its index in *boxes_list*.

    Unseen boxes are appended to *boxes_list* (mutated in place) and receive
    the freshly-assigned index.
    """
    indices = []
    for box in refs:
        try:
            indices.append(boxes_list.index(box))
        except ValueError:
            boxes_list.append(box)
            indices.append(len(boxes_list) - 1)
    return indices
class Response():
    """Template-driven natural-language response generator for a task bot.

    Every method selects a random variant from the loaded response templates
    (via choice) and substitutes placeholders such as <Task>, <Info>,
    <Value>, <bot name> and <User>/<user name>. Per-task overrides come from
    task_config; entity suggestions come from entity_manager.
    """

    def __init__(self, template: str, task_config: TaskConfig, bot_config: BotConfig, entity_manager: EntityManager, user_name=''):
        # template: source understood by load_response;
        # user_name may be empty, in which case "unknown user" variants are used.
        self._responses = load_response(template)
        self.bot_name = bot_config.bot_name
        self.user_name = user_name
        self.personality = None
        self.follow_up = False
        self.task_config = task_config
        self.entity_manager = entity_manager

    def confirm_user(self):
        """Greet a known user by name, or ask an unknown user to identify."""
        if (not self.user_name):
            res = choice(self._responses['greetings']['unknown']).replace('<bot name>', self.bot_name)
        else:
            res = choice(self._responses['greetings']['known']).replace('<User>', self.user_name)
        return res

    def greeting(self):
        return choice(self._responses['greetings']['start']).replace('<bot name>', self.bot_name).replace('<user name>', ('there' if (self.user_name == '') else self.user_name))

    def goodbye(self):
        return choice(self._responses['goodbye'])

    def okay(self):
        return choice(self._responses['okay'])

    def take_time(self):
        return choice(self._responses['take_time'])

    def let_me_know(self):
        return choice(self._responses['let_me_know'])

    def recommend(self, task):
        return choice(self._responses['recommend']).replace('<Task>', task)

    def forward_to_human(self):
        return choice(self._responses['forward_to_human'])

    def got_intent(self, task, first=True):
        # Different phrasing for the first detected intent vs. a later one.
        if first:
            return choice(self._responses['got_intent_first']).replace('<Task>', task)
        else:
            return choice(self._responses['got_intent_middle']).replace('<Task>', task)

    def task_repeat_response(self, task):
        # Prefer a task-specific repeat response when configured.
        if self.task_config[task].repeat_response:
            return choice(self.task_config[task].repeat_response)
        else:
            return choice(self._responses['task_repeat_response']).replace('<Task>', self.task_config[task].description)

    def got_intent_2(self, task):
        return choice(self._responses['got_intent_2']).replace('<Task>', task)

    def got_sub_task(self, task):
        return choice(self._responses['got_sub_task']).replace('<Task>', task)

    def confirm_intent(self, task):
        return choice(self._responses['confirm_intent']).replace('<Task>', task)

    def got_invalid_request(self):
        return choice(self._responses['got_invalid_request'])

    def ask_info(self, task, entity):
        """Prompt the user for an entity value.

        Uses the task-specific prompt when one is configured; otherwise a
        generic template. Fuzzy-matching candidate values from the entity
        manager are appended as suggestions when available.
        """
        if (self.task_config and (task in self.task_config) and (entity in self.task_config[task].entities) and self.task_config[task].entities[entity].prompt and self.task_config[task].entities[entity].prompt[0]):
            res_tmp = choice(self.task_config[task].entities[entity].prompt)
        else:
            res_tmp = choice(self._responses['ask_info']).replace('<Info>', entity).replace('_', ' ')
        if self.entity_manager.suggest_entity_value(entity):
            extraction_methods = self.entity_manager.get_extraction_methods(entity)
            entity_values = []
            for (method, value) in extraction_methods.items():
                # Only fuzzy-matching methods contribute suggested values.
                if (method == 'fuzzy_matching'):
                    entity_values.extend(value)
            if entity_values:
                res_tmp += ('\n' + choice(self._responses['suggest_entity_value']))
                for i in range(len(entity_values)):
                    res_tmp += (('- ' + str(entity_values[i])) + '\n')
        return res_tmp.strip()

    def entity_response(self, task, entity, info):
        """Task/entity-specific acknowledgement, or '' when none configured."""
        if (task and entity and (task in self.task_config) and (entity in self.task_config[task].entities) and self.task_config[task].entities[entity]['response'] and self.task_config[task].entities[entity]['response'][0]):
            return choice(self.task_config[task].entities[entity]['response']).replace('<info>', info)
        else:
            return ''

    def task_finish_response(self, tasks, tasks_success, info=''):
        """Completion message for the *first* task in *tasks*.

        Prefers the per-task success/failure responses from task_config,
        falling back to the generic templates; returns '' for unknown tasks
        or an empty task list.
        """
        if (not tasks):
            return ''
        assert (len(tasks) == len(tasks_success)), ('tasks and tasks_success should have the same length.' + 'tasks has a length of {} and tasks_success has a length of {}'.format(len(tasks), len(tasks_success)))
        task = tasks[0]
        task_success = tasks_success[0]
        if (task in self.task_config):
            task_description = self.task_config[task].description
            if task_success:
                if self.task_config[task].finish_response.success:
                    return choice(self.task_config[task].finish_response['success']).replace('<info>', info).strip()
                else:
                    return choice(self._responses['task_finish_response']['success']).replace('<Task>', task_description)
            elif self.task_config[task].finish_response.failure:
                return choice(self.task_config[task].finish_response['failure']).replace('<info>', info).strip()
            else:
                return choice(self._responses['task_finish_response']['failure']).replace('<Task>', task_description)
        else:
            return ''

    def query_res(self, info: str):
        return choice(self._responses['query_res']).replace('<Info>', info)

    def cannot_recognize_entity(self, entity):
        return choice(self._responses['cannot_recognize_entity']).replace('<Info>', entity)

    def confirm_info(self, entity, value, more=False, propose=False):
        # 'propose' offers a candidate value; 'more' asks for additional input.
        if propose:
            return choice(self._responses['confirm_info_with_proposal']).replace('<Info>', entity).replace('<Value>', value)
        elif more:
            return choice(self._responses['confirm_info']).replace('<Info>', entity).replace('<Value>', (value + ' more'))
        else:
            return choice(self._responses['confirm_info']).replace('<Info>', entity).replace('<Value>', value)

    def confirm_retrieved_info(self, entity, value):
        return choice(self._responses['confirm_retrieved_info']).replace('<Info>', entity.replace('_', ' ')).replace('<Value>', value)

    def ask_spelling(self, spell_type):
        return choice(self._responses['ask_spelling'][spell_type])

    def inform_user(self, entity='', info='None'):
        return choice(self._responses['inform_user']).replace('<Entity>', entity).replace('<Info>', info)

    def verify(self, entity, val, success):
        if success:
            return choice(self._responses['verify_success']).replace('<Info>', entity).replace('<Val>', val)
        else:
            return choice(self._responses['verify_failed']).replace('<Info>', entity).replace('<Val>', val)

    def updated(self, entity='', val=''):
        return choice(self._responses['update_success']).replace('<Info>', entity).replace('<Val>', val)

    def insert(self, info):
        return choice(self._responses['insert']).replace('<Info>', info)

    def delete(self, info):
        return choice(self._responses['delete']).replace('<Info>', info)

    def continue_current(self, task):
        return choice(self._responses['continue_current']).replace('<Task>', task)

    def notify(self):
        return choice(self._responses['notification'])

    def task_finished(self, task):
        res = choice(self._responses['task_finished']).replace('<Task>', task)
        return res

    def confirm_finish(self):
        return choice(self._responses['confirm_finish'])

    def confirm_satisfied(self):
        return choice(self._responses['confirm_satisfied'])

    def followup(self, task):
        return choice(self._responses['follow_up']).replace('<Task>', task)

    def welcome_back(self):
        return choice(self._responses['welcome_back'])

    def help_with_prev_task(self, task: str):
        return choice(self._responses['help_with_prev_task']).replace('<Task>', task)

    def qa(self):
        return choice(self._responses['qa'])

    def api(self, answer: str):
        return choice(self._responses['api']).replace('<info>', answer)

    def multi_entity(self, cur_entity: str, multiple_entities_pool: list):
        """Ask the user to disambiguate among candidate entity values,
        rendered as 'a,b and c'."""
        entities = [str(entity) for entity in multiple_entities_pool]
        last_e = entities[(- 1)]
        pool_str = ((','.join(entities[:(- 1)]) + ' and ') + last_e)
        return choice(self._responses['multi_entity']).replace('<Entity>', cur_entity).replace('<Pool>', pool_str)

    def suggest_tasks(self, tasks: list):
        """Offer a numbered list of available tasks."""
        tasks_des = ''
        for i in range(len(tasks)):
            tasks_des += (((str((i + 1)) + '. ') + tasks[i]) + '\n')
        return choice(self._responses['suggest_tasks']).replace('<tasks>', tasks_des)
class _WordRegex(Word):
    """Word variant that matches with a single precompiled regex call."""

    def parseImpl(self, instring, loc, doActions=True):
        match = self.re_match(instring, loc)
        if not match:
            raise ParseException(instring, loc, self.errmsg, self)
        # Advance past the matched span and return the matched text.
        return match.end(), match.group()
class ThePileScenario(Scenario):
    """Language-modeling scenario over a single subset of The Pile corpus.

    Downloads the Pile test jsonl once, splits it by pile_set_name into
    per-subset CSV caches, and serves the requested subset as TEST_SPLIT
    instances.
    """
    name = 'the_pile'
    description = 'The Pile'
    tags = ['language_modeling']

    def __init__(self, subset: str):
        super().__init__()
        self.pile_subsets = {'ArXiv', 'BookCorpus2', 'Books3', 'DM Mathematics', 'Enron Emails', 'EuroParl', 'FreeLaw', 'Github', 'Gutenberg (PG-19)', 'HackerNews', 'NIH ExPorter', 'OpenSubtitles', 'OpenWebText2', 'PhilPapers', 'Pile-CC', 'PubMed Abstracts', 'PubMed Central', 'StackExchange', 'USPTO Backgrounds', 'Ubuntu IRC', 'Wikipedia (en)', 'YoutubeSubtitles'}
        assert (subset in self.pile_subsets)
        self.subset = subset
        # NOTE: a stray no-op expression `(None)` was removed here.

    def load_and_cache_all_subsets(self, data_jsonl, output_path):
        """Split the downloaded jsonl corpus by pile_set_name and cache each
        subset as a one-column CSV under *output_path*."""
        subsets: Dict[(str, List)] = {subset: [] for subset in self.pile_subsets}
        with htrack_block('Loading'):
            hlog(f'Loading all data from {data_jsonl}')
            with open(data_jsonl) as f:
                data = [json.loads(line) for line in f]
            hlog('Classifying the documents into subsets')
            for doc in data:
                subsets[doc['meta']['pile_set_name']].append([doc['text']])
        with htrack_block('Caching'):
            hlog(f'Caching subsets to {output_path}')
            for subset in subsets:
                subset_path = os.path.join(output_path, (subset + '.csv'))
                with open(subset_path, 'w') as f:
                    writer = csv.writer(f)
                    writer.writerows(subsets[subset])

    def get_instances(self, output_path: str) -> List[Instance]:
        """Download (if needed), cache, and read the instances for self.subset."""
        data_jsonl = os.path.join(output_path, 'data')
        # FIX(review): the two download URLs in this method were scrubbed
        # from the source, leaving unterminated string literals (a
        # SyntaxError). The placeholders below restore valid syntax — the
        # real URLs must be filled in before this scenario is run.
        ensure_file_downloaded(source_url='https://TODO-restore-scrubbed-pile-data-url', target_path=data_jsonl, unpack=True)
        subset_path = os.path.join(output_path, (self.subset + '.csv'))
        if (not os.path.exists(subset_path)):
            self.load_and_cache_all_subsets(data_jsonl, output_path)
        instances = []
        hlog(f'Reading {subset_path}')
        with open(subset_path, 'r') as f:
            # Pile documents can exceed csv's default field size limit.
            csv.field_size_limit(sys.maxsize)
            reader = csv.reader(f)
            for row in reader:
                instance = Instance(input=Input(text=row[0]), references=[], split=TEST_SPLIT)
                instances.append(instance)
        # Map Pile subset names to the short names used by the index files.
        DATASET_NAMES_DICT = {'Github': 'github', 'ArXiv': 'arxiv', 'Wikipedia (en)': 'wikipedia', 'OpenSubtitles': 'opensubtitles', 'OpenWebText2': 'openwebtext2', 'Gutenberg (PG-19)': 'gutenberg', 'DM Mathematics': 'dm-mathematics', 'Enron Emails': 'enron', 'Books3': 'bibliotik', 'PubMed Abstracts': 'pubmed-abstracts', 'YoutubeSubtitles': 'youtubesubtitles', 'HackerNews': 'hackernews', 'Pile-CC': 'commoncrawl', 'EuroParl': 'europarl', 'USPTO Backgrounds': 'uspto', 'FreeLaw': 'freelaw', 'NIH ExPorter': 'nih-exporter', 'StackExchange': 'stackexchange', 'PubMed Central': 'pubmed-central', 'Ubuntu IRC': 'ubuntu-irc', 'BookCorpus2': 'bookcorpus', 'PhilPapers': 'philpapers'}
        DATASETS_WITHOUT_SPLIT = ['ubuntu-irc', 'bookcorpus', 'philpapers']
        short_name = DATASET_NAMES_DICT[self.subset]
        if (short_name not in DATASETS_WITHOUT_SPLIT):
            # Subsample to the published evaluation indices for this subset.
            url = f'https://TODO-restore-scrubbed-indices-url/{short_name}.json'
            indices = sorted(set(requests.get(url).json()))
            instances = [instances[i] for i in indices]
        return instances
class SchemaInteractionATISModel(ATISModel):
    def __init__(self, params, input_vocabulary, output_vocabulary, output_vocabulary_schema, anonymizer):
        """Build the encoders, attention modules, token predictor and decoder
        for schema-aware, interaction-level text-to-SQL prediction."""
        ATISModel.__init__(self, params, input_vocabulary, output_vocabulary, output_vocabulary_schema, anonymizer)
        if self.params.use_schema_encoder:
            # Single-layer encoder over the tokens of each schema column name;
            # input width switches to BERT's hidden size when BERT is used.
            schema_encoder_num_layer = 1
            schema_encoder_input_size = params.input_embedding_size
            schema_encoder_state_size = params.encoder_state_size
            if params.use_bert:
                schema_encoder_input_size = self.bert_config.hidden_size
            self.schema_encoder = Encoder(schema_encoder_num_layer, schema_encoder_input_size, schema_encoder_state_size)
        if self.params.use_schema_self_attention:
            # Schema-to-schema self attention.
            self.schema2schema_attention_module = Attention(self.schema_attention_key_size, self.schema_attention_key_size, self.schema_attention_key_size)
        if self.params.use_utterance_attention:
            # Attention over the history of utterance final states.
            self.utterance_attention_module = Attention(self.params.encoder_state_size, self.params.encoder_state_size, self.params.encoder_state_size)
        if params.use_encoder_attention:
            # Cross attention between schema columns and utterance tokens.
            self.utterance2schema_attention_module = Attention(self.schema_attention_key_size, self.utterance_attention_key_size, self.utterance_attention_key_size)
            self.schema2utterance_attention_module = Attention(self.utterance_attention_key_size, self.schema_attention_key_size, self.schema_attention_key_size)
            # Attention vectors are concatenated onto the states, so both key
            # sizes grow to the combined width.
            new_attention_key_size = (self.schema_attention_key_size + self.utterance_attention_key_size)
            self.schema_attention_key_size = new_attention_key_size
            self.utterance_attention_key_size = new_attention_key_size
            if self.params.use_schema_encoder_2:
                # Optional second-stage encoders over the attended states.
                self.schema_encoder_2 = Encoder(schema_encoder_num_layer, self.schema_attention_key_size, self.schema_attention_key_size)
                self.utterance_encoder_2 = Encoder(params.encoder_num_layers, self.utterance_attention_key_size, self.utterance_attention_key_size)
        self.token_predictor = construct_token_predictor(params, output_vocabulary, self.utterance_attention_key_size, self.schema_attention_key_size, self.final_snippet_size, anonymizer)
        # Decoder input width depends on which attention sources are enabled.
        if (params.use_schema_attention and params.use_query_attention):
            decoder_input_size = (((params.output_embedding_size + self.utterance_attention_key_size) + self.schema_attention_key_size) + params.encoder_state_size)
        elif params.use_schema_attention:
            decoder_input_size = ((params.output_embedding_size + self.utterance_attention_key_size) + self.schema_attention_key_size)
        else:
            decoder_input_size = (params.output_embedding_size + self.utterance_attention_key_size)
        self.decoder = SequencePredictorWithSchema(params, decoder_input_size, self.output_embedder, self.column_name_token_embedder, self.token_predictor)
    def predict_turn(self, utterance_final_state, input_hidden_states, schema_states, max_generation_length, gold_query=None, snippets=None, input_sequence=None, previous_queries=None, previous_query_states=None, input_schema=None, feed_gold_tokens=False, training=False, gold_query_weights=None):
        """Predict a single turn's SQL query.

        Returns (predicted_sequence, loss, token_accuracy, decoder_states,
        decoder_results). When feed_gold_tokens is True the decoder is
        teacher-forced on gold_query and a loss is computed; otherwise the
        decoder free-runs and loss stays None.
        """
        predicted_sequence = []
        fed_sequence = []
        loss = None
        token_accuracy = 0.0
        if self.params.use_encoder_attention:
            # Cross-attend schema<->utterance and concatenate each state with
            # its attention vector, then unstack back into per-item lists.
            schema_attention = self.utterance2schema_attention_module(torch.stack(schema_states, dim=0), input_hidden_states).vector
            utterance_attention = self.schema2utterance_attention_module(torch.stack(input_hidden_states, dim=0), schema_states).vector
            if (schema_attention.dim() == 1):
                schema_attention = schema_attention.unsqueeze(1)
            if (utterance_attention.dim() == 1):
                utterance_attention = utterance_attention.unsqueeze(1)
            new_schema_states = torch.cat([torch.stack(schema_states, dim=1), schema_attention], dim=0)
            schema_states = list(torch.split(new_schema_states, split_size_or_sections=1, dim=1))
            schema_states = [schema_state.squeeze() for schema_state in schema_states]
            new_input_hidden_states = torch.cat([torch.stack(input_hidden_states, dim=1), utterance_attention], dim=0)
            input_hidden_states = list(torch.split(new_input_hidden_states, split_size_or_sections=1, dim=1))
            input_hidden_states = [input_hidden_state.squeeze() for input_hidden_state in input_hidden_states]
            if self.params.use_schema_encoder_2:
                # Re-encode the attended states with the second-stage encoders.
                (final_schema_state, schema_states) = self.schema_encoder_2(schema_states, (lambda x: x), dropout_amount=self.dropout)
                (final_utterance_state, input_hidden_states) = self.utterance_encoder_2(input_hidden_states, (lambda x: x), dropout_amount=self.dropout)
        if feed_gold_tokens:
            decoder_results = self.decoder(utterance_final_state, input_hidden_states, schema_states, max_generation_length, gold_sequence=gold_query, input_sequence=input_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, snippets=snippets, dropout_amount=self.dropout)
            all_scores = []
            all_alignments = []
            for prediction in decoder_results.predictions:
                scores = F.softmax(prediction.scores, dim=0)
                alignments = prediction.aligned_tokens
                if (self.params.use_previous_query and self.params.use_copy_switch and (len(previous_queries) > 0)):
                    # Mix generation scores with copy-from-previous-query
                    # scores via the learned copy switch.
                    query_scores = F.softmax(prediction.query_scores, dim=0)
                    copy_switch = prediction.copy_switch
                    scores = torch.cat([(scores * (1 - copy_switch)), (query_scores * copy_switch)], dim=0)
                    alignments = (alignments + prediction.query_tokens)
                all_scores.append(scores)
                all_alignments.append(alignments)
            gold_sequence = gold_query
            loss = torch_utils.compute_loss(gold_sequence, all_scores, all_alignments, get_token_indices, weights=gold_query_weights)
            if (not training):
                predicted_sequence = torch_utils.get_seq_from_scores(all_scores, all_alignments)
                token_accuracy = torch_utils.per_token_accuracy(gold_sequence, predicted_sequence)
            fed_sequence = gold_sequence
        else:
            decoder_results = self.decoder(utterance_final_state, input_hidden_states, schema_states, max_generation_length, input_sequence=input_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, snippets=snippets, dropout_amount=self.dropout)
            predicted_sequence = decoder_results.sequence
            fed_sequence = predicted_sequence
        decoder_states = [pred.decoder_state for pred in decoder_results.predictions]
        # Expand snippet tokens so there is one decoder state per underlying
        # SQL token of the fed sequence.
        for (token, state) in zip(fed_sequence[:(- 1)], decoder_states[1:]):
            if snippet_handler.is_snippet(token):
                snippet_length = 0
                for snippet in snippets:
                    if (snippet.name == token):
                        snippet_length = len(snippet.sequence)
                        break
                assert (snippet_length > 0)
                decoder_states.extend([state for _ in range(snippet_length)])
            else:
                decoder_states.append(state)
        return (predicted_sequence, loss, token_accuracy, decoder_states, decoder_results)
def encode_schema_bow_simple(self, input_schema):
schema_states = []
for column_name in input_schema.column_names_embedder_input:
schema_states.append(input_schema.column_name_embedder_bow(column_name, surface_form=False, column_name_token_embedder=self.column_name_token_embedder))
input_schema.set_column_name_embeddings(schema_states)
return schema_states
def encode_schema_self_attention(self, schema_states):
schema_self_attention = self.schema2schema_attention_module(torch.stack(schema_states, dim=0), schema_states).vector
if (schema_self_attention.dim() == 1):
schema_self_attention = schema_self_attention.unsqueeze(1)
residual_schema_states = list(torch.split(schema_self_attention, split_size_or_sections=1, dim=1))
residual_schema_states = [schema_state.squeeze() for schema_state in residual_schema_states]
new_schema_states = [(schema_state + residual_schema_state) for (schema_state, residual_schema_state) in zip(schema_states, residual_schema_states)]
return new_schema_states
def encode_schema(self, input_schema, dropout=False):
schema_states = []
for column_name_embedder_input in input_schema.column_names_embedder_input:
tokens = column_name_embedder_input.split()
if dropout:
(final_schema_state_one, schema_states_one) = self.schema_encoder(tokens, self.column_name_token_embedder, dropout_amount=self.dropout)
else:
(final_schema_state_one, schema_states_one) = self.schema_encoder(tokens, self.column_name_token_embedder)
schema_states.append(final_schema_state_one[1][(- 1)])
input_schema.set_column_name_embeddings(schema_states)
if self.params.use_schema_self_attention:
schema_states = self.encode_schema_self_attention(schema_states)
return schema_states
    def get_bert_encoding(self, input_sequence, input_schema, discourse_state, dropout):
        """Encode the utterance and schema with BERT, then run both through
        their LSTM encoders.

        Returns (final_utterance_state, utterance_states, schema_states);
        also caches the column embeddings on input_schema.
        """
        (utterance_states, schema_token_states) = utils_bert.get_bert_encoding(self.bert_config, self.model_bert, self.tokenizer, input_sequence, input_schema, bert_input_version=self.params.bert_input_version, num_out_layers_n=1, num_out_layers_h=1)
        if self.params.discourse_level_lstm:
            # Append the running discourse state to every token embedding.
            utterance_token_embedder = (lambda x: torch.cat([x, discourse_state], dim=0))
        else:
            utterance_token_embedder = (lambda x: x)
        if dropout:
            (final_utterance_state, utterance_states) = self.utterance_encoder(utterance_states, utterance_token_embedder, dropout_amount=self.dropout)
        else:
            (final_utterance_state, utterance_states) = self.utterance_encoder(utterance_states, utterance_token_embedder)
        schema_states = []
        for schema_token_states1 in schema_token_states:
            if dropout:
                (final_schema_state_one, schema_states_one) = self.schema_encoder(schema_token_states1, (lambda x: x), dropout_amount=self.dropout)
            else:
                (final_schema_state_one, schema_states_one) = self.schema_encoder(schema_token_states1, (lambda x: x))
            # Top-layer hidden state of the final step represents the column.
            schema_states.append(final_schema_state_one[1][(- 1)])
        input_schema.set_column_name_embeddings(schema_states)
        if self.params.use_schema_self_attention:
            schema_states = self.encode_schema_self_attention(schema_states)
        return (final_utterance_state, utterance_states, schema_states)
def get_query_token_embedding(self, output_token, input_schema):
if input_schema:
if (not (self.output_embedder.in_vocabulary(output_token) or input_schema.in_vocabulary(output_token, surface_form=True))):
output_token = 'value'
if self.output_embedder.in_vocabulary(output_token):
output_token_embedding = self.output_embedder(output_token)
else:
output_token_embedding = input_schema.column_name_embedder(output_token, surface_form=True)
else:
output_token_embedding = self.output_embedder(output_token)
return output_token_embedding
    def get_utterance_attention(self, final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep):
        """Attend over the recent history of utterance final states.

        Appends the new final state to the (c, h) histories, trims both to
        num_utterances_to_keep, and returns the updated histories plus a
        residual-attended final state shaped like ([c], [h]).
        """
        final_utterance_states_c.append(final_utterance_state[0][0])
        final_utterance_states_h.append(final_utterance_state[1][0])
        final_utterance_states_c = final_utterance_states_c[(- num_utterances_to_keep):]
        final_utterance_states_h = final_utterance_states_h[(- num_utterances_to_keep):]
        # Residual attention: newest state attends over the kept history.
        attention_result = self.utterance_attention_module(final_utterance_states_c[(- 1)], final_utterance_states_c)
        final_utterance_state_attention_c = (final_utterance_states_c[(- 1)] + attention_result.vector.squeeze())
        attention_result = self.utterance_attention_module(final_utterance_states_h[(- 1)], final_utterance_states_h)
        final_utterance_state_attention_h = (final_utterance_states_h[(- 1)] + attention_result.vector.squeeze())
        final_utterance_state = ([final_utterance_state_attention_c], [final_utterance_state_attention_h])
        return (final_utterance_states_c, final_utterance_states_h, final_utterance_state)
def get_previous_queries(self, previous_queries, previous_query_states, previous_query, input_schema):
previous_queries.append(previous_query)
num_queries_to_keep = min(self.params.maximum_queries, len(previous_queries))
previous_queries = previous_queries[(- num_queries_to_keep):]
query_token_embedder = (lambda query_token: self.get_query_token_embedding(query_token, input_schema))
(_, previous_outputs) = self.query_encoder(previous_query, query_token_embedder, dropout_amount=self.dropout)
assert (len(previous_outputs) == len(previous_query))
previous_query_states.append(previous_outputs)
previous_query_states = previous_query_states[(- num_queries_to_keep):]
return (previous_queries, previous_query_states)
    def train_step(self, interaction, max_generation_length, snippet_alignment_probability=1.0):
        """Run one training step over all gold utterances of *interaction*.

        Teacher-forces each turn, accumulates per-turn losses, backpropagates
        the (optionally batch-reweighted) average loss, and steps the
        optimizer(s). Returns the scalar loss (0.0 when no turn was trained).
        """
        losses = []
        total_gold_tokens = 0
        input_hidden_states = []
        input_sequences = []
        final_utterance_states_c = []
        final_utterance_states_h = []
        previous_query_states = []
        previous_queries = []
        decoder_states = []
        discourse_state = None
        if self.params.discourse_level_lstm:
            (discourse_state, discourse_lstm_states) = self._initialize_discourse_states()
        discourse_states = []
        input_schema = interaction.get_schema()
        schema_states = []
        # Without BERT, schema embeddings are interaction-constant; with BERT
        # they are recomputed per utterance inside the loop.
        if (input_schema and (not self.params.use_bert)):
            schema_states = self.encode_schema_bow_simple(input_schema)
        for (utterance_index, utterance) in enumerate(interaction.gold_utterances()):
            # Some interactions are truncated to a fixed number of turns.
            if ((interaction.identifier in LIMITED_INTERACTIONS) and (utterance_index > LIMITED_INTERACTIONS[interaction.identifier])):
                break
            input_sequence = utterance.input_sequence()
            available_snippets = utterance.snippets()
            previous_query = utterance.previous_query()
            gold_query_weights = utterance.gold_query_weights()
            # Optionally substitute snippets into the gold query with some
            # probability; otherwise use the gold query as-is.
            if (snippet_alignment_probability < 1.0):
                gold_query = (sql_util.add_snippets_to_query(available_snippets, utterance.contained_entities(), utterance.anonymized_gold_query(), prob_align=snippet_alignment_probability) + [vocab.EOS_TOK])
            else:
                gold_query = utterance.gold_query()
            if (not self.params.use_bert):
                if self.params.discourse_level_lstm:
                    utterance_token_embedder = (lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0))
                else:
                    utterance_token_embedder = self.input_embedder
                (final_utterance_state, utterance_states) = self.utterance_encoder(input_sequence, utterance_token_embedder, dropout_amount=self.dropout)
            else:
                (final_utterance_state, utterance_states, schema_states) = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=True)
            input_hidden_states.extend(utterance_states)
            input_sequences.append(input_sequence)
            num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
            if self.params.discourse_level_lstm:
                # Advance the discourse LSTM with this turn's final state.
                (_, discourse_state, discourse_lstm_states) = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states, self.dropout)
            if self.params.use_utterance_attention:
                (final_utterance_states_c, final_utterance_states_h, final_utterance_state) = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
            if self.params.state_positional_embeddings:
                (utterance_states, flat_sequence) = self._add_positional_embeddings(input_hidden_states, input_sequences)
            else:
                # Flatten the kept utterances into one token sequence.
                flat_sequence = []
                for utt in input_sequences[(- num_utterances_to_keep):]:
                    flat_sequence.extend(utt)
            snippets = None
            if self.params.use_snippets:
                if self.params.previous_decoder_snippet_encoding:
                    snippets = encode_snippets_with_states(available_snippets, decoder_states)
                else:
                    snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
            if (self.params.use_previous_query and (len(previous_query) > 0)):
                (previous_queries, previous_query_states) = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
            # Skip turns whose gold or previous query exceeds the generation cap.
            if ((len(gold_query) <= max_generation_length) and (len(previous_query) <= max_generation_length)):
                prediction = self.predict_turn(final_utterance_state, utterance_states, schema_states, max_generation_length, gold_query=gold_query, snippets=snippets, input_sequence=flat_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, feed_gold_tokens=True, training=True, gold_query_weights=gold_query_weights)
                loss = prediction[1]
                decoder_states = prediction[3]
                # NOTE(review): assumes gold_query_weights is a list of floats
                # where weight 1.0 marks counted gold tokens — confirm upstream.
                total_gold_tokens += gold_query_weights.count(1.0)
                losses.append(loss)
            else:
                # A skipped turn invalidates decoder_states needed by
                # previous_decoder_snippet_encoding, so stop the interaction.
                if self.params.previous_decoder_snippet_encoding:
                    break
                continue
            torch.cuda.empty_cache()
        if losses:
            average_loss = (torch.sum(torch.stack(losses)) / total_gold_tokens)
            normalized_loss = average_loss
            if self.params.reweight_batch:
                # Scale so each interaction contributes proportionally to a
                # fixed batch size regardless of its turn count.
                normalized_loss = ((len(losses) * average_loss) / float(self.params.batch_size))
            normalized_loss.backward()
            self.trainer.step()
            if self.params.fine_tune_bert:
                self.bert_trainer.step()
            self.zero_grad()
            loss_scalar = normalized_loss.item()
        else:
            loss_scalar = 0.0
        return loss_scalar
def predict_with_predicted_queries(self, interaction, max_generation_length, syntax_restrict=True):
    """Run inference over an interaction, feeding the model's own predicted
    queries back as context for subsequent turns.

    Args:
        interaction: interaction object providing utterances, the schema,
            and snippet bookkeeping.
        max_generation_length: maximum number of tokens to decode per turn.
        syntax_restrict: nominally restricts predictions to the previous
            query on failure, but it is unconditionally overridden below.

    Returns:
        List of per-turn prediction tuples as produced by predict_turn.
    """
    # NOTE(review): the parameter is immediately clobbered, so syntax
    # restriction is effectively disabled regardless of the caller's choice.
    syntax_restrict = False
    predictions = []
    # Hidden states / token sequences accumulated across all turns so far.
    input_hidden_states = []
    input_sequences = []
    # Per-turn final encoder states kept for utterance-level attention.
    final_utterance_states_c = []
    final_utterance_states_h = []
    # Encodings of previously predicted queries (used when use_previous_query).
    previous_query_states = []
    previous_queries = []
    discourse_state = None
    if self.params.discourse_level_lstm:
        (discourse_state, discourse_lstm_states) = self._initialize_discourse_states()
    discourse_states = []
    input_schema = interaction.get_schema()
    schema_states = []
    # Without BERT, the schema is encoded once up front (bag-of-words).
    if (input_schema and (not self.params.use_bert)):
        schema_states = self.encode_schema_bow_simple(input_schema)
    interaction.start_interaction()
    while (not interaction.done()):
        utterance = interaction.next_utterance()
        available_snippets = utterance.snippets()
        previous_query = utterance.previous_query()
        input_sequence = utterance.input_sequence()
        if (not self.params.use_bert):
            if self.params.discourse_level_lstm:
                # Each token embedding is concatenated with the running discourse state.
                utterance_token_embedder = (lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0))
            else:
                utterance_token_embedder = self.input_embedder
            (final_utterance_state, utterance_states) = self.utterance_encoder(input_sequence, utterance_token_embedder)
        else:
            # BERT jointly (re-)encodes the utterance and the schema each turn.
            (final_utterance_state, utterance_states, schema_states) = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=False)
        input_hidden_states.extend(utterance_states)
        input_sequences.append(input_sequence)
        num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
        if self.params.discourse_level_lstm:
            (_, discourse_state, discourse_lstm_states) = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states)
        if self.params.use_utterance_attention:
            (final_utterance_states_c, final_utterance_states_h, final_utterance_state) = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
        if self.params.state_positional_embeddings:
            (utterance_states, flat_sequence) = self._add_positional_embeddings(input_hidden_states, input_sequences)
        else:
            # Flatten only the most recent utterances into one token sequence.
            flat_sequence = []
            for utt in input_sequences[(- num_utterances_to_keep):]:
                flat_sequence.extend(utt)
        snippets = None
        if self.params.use_snippets:
            snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
        if (self.params.use_previous_query and (len(previous_query) > 0)):
            (previous_queries, previous_query_states) = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
        results = self.predict_turn(final_utterance_state, utterance_states, schema_states, max_generation_length, input_sequence=flat_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, snippets=snippets)
        predicted_sequence = results[0]
        predictions.append(results)
        # De-anonymize snippet placeholders; strip the trailing EOS token.
        anonymized_sequence = utterance.remove_snippets(predicted_sequence)
        if (EOS_TOK in anonymized_sequence):
            anonymized_sequence = anonymized_sequence[:(- 1)]
        else:
            # Decoder never emitted EOS; fall back to a trivial valid query.
            anonymized_sequence = ['select', '*', 'from', 't1']
        if (not syntax_restrict):
            # Feed the raw prediction forward as the next turn's previous query.
            utterance.set_predicted_query(interaction.remove_snippets(predicted_sequence))
            if input_schema:
                interaction.add_utterance(utterance, anonymized_sequence, previous_snippets=utterance.snippets(), simple=True)
            else:
                interaction.add_utterance(utterance, anonymized_sequence, previous_snippets=utterance.snippets(), simple=False)
        else:
            # Unreachable given the override above: reuse the prior query instead.
            utterance.set_predicted_query(utterance.previous_query())
            interaction.add_utterance(utterance, utterance.previous_query(), previous_snippets=utterance.snippets())
    return predictions
def predict_with_gold_queries(self, interaction, max_generation_length, feed_gold_query=False):
    """Run the model over an interaction using gold utterances as context.

    Unlike predict_with_predicted_queries, each turn's previous query comes
    from the gold annotations, and dropout is active in the encoders.

    Args:
        interaction: interaction providing gold utterances and the schema.
        max_generation_length: maximum number of tokens to decode per turn.
        feed_gold_query: if True, teacher-force the decoder with the gold
            query tokens.

    Returns:
        List of per-turn prediction tuples from predict_turn.
    """
    predictions = []
    # Hidden states / token sequences accumulated across turns.
    input_hidden_states = []
    input_sequences = []
    final_utterance_states_c = []
    final_utterance_states_h = []
    previous_query_states = []
    previous_queries = []
    # Decoder states of the previous turn (consumed when
    # previous_decoder_snippet_encoding is enabled).
    decoder_states = []
    discourse_state = None
    if self.params.discourse_level_lstm:
        (discourse_state, discourse_lstm_states) = self._initialize_discourse_states()
    discourse_states = []
    input_schema = interaction.get_schema()
    schema_states = []
    if (input_schema and (not self.params.use_bert)):
        schema_states = self.encode_schema_bow_simple(input_schema)
    for utterance in interaction.gold_utterances():
        input_sequence = utterance.input_sequence()
        available_snippets = utterance.snippets()
        previous_query = utterance.previous_query()
        if (not self.params.use_bert):
            if self.params.discourse_level_lstm:
                # Token embedding is concatenated with the running discourse state.
                utterance_token_embedder = (lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0))
            else:
                utterance_token_embedder = self.input_embedder
            # Dropout on: this path is used during training-style evaluation.
            (final_utterance_state, utterance_states) = self.utterance_encoder(input_sequence, utterance_token_embedder, dropout_amount=self.dropout)
        else:
            (final_utterance_state, utterance_states, schema_states) = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=True)
        input_hidden_states.extend(utterance_states)
        input_sequences.append(input_sequence)
        num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
        if self.params.discourse_level_lstm:
            (_, discourse_state, discourse_lstm_states) = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states, self.dropout)
        if self.params.use_utterance_attention:
            (final_utterance_states_c, final_utterance_states_h, final_utterance_state) = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
        if self.params.state_positional_embeddings:
            (utterance_states, flat_sequence) = self._add_positional_embeddings(input_hidden_states, input_sequences)
        else:
            # Flatten only the most recent utterances into one token sequence.
            flat_sequence = []
            for utt in input_sequences[(- num_utterances_to_keep):]:
                flat_sequence.extend(utt)
        snippets = None
        if self.params.use_snippets:
            if self.params.previous_decoder_snippet_encoding:
                # Encode snippets with the previous turn's decoder states.
                snippets = encode_snippets_with_states(available_snippets, decoder_states)
            else:
                snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
        if (self.params.use_previous_query and (len(previous_query) > 0)):
            (previous_queries, previous_query_states) = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
        prediction = self.predict_turn(final_utterance_state, utterance_states, schema_states, max_generation_length, gold_query=utterance.gold_query(), snippets=snippets, input_sequence=flat_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, feed_gold_tokens=feed_gold_query)
        decoder_states = prediction[3]
        predictions.append(prediction)
    return predictions
def spider_single_turn_encoding(self, interaction, max_generation_length):
    """Encode the first (single) turn of a Spider interaction without decoding.

    Produces everything predict_turn would need, packaged as a tuple, so a
    caller can run the decoder separately.

    Args:
        interaction: single-turn interaction with a schema.
        max_generation_length: passed through unchanged in the return tuple.

    Returns:
        Tuple of (final_utterance_state, input_hidden_states, schema_states,
        max_generation_length, snippets, flat_sequence, previous_queries,
        previous_query_states, input_schema).
    """
    input_hidden_states = []
    input_sequences = []
    final_utterance_states_c = []
    final_utterance_states_h = []
    previous_query_states = []
    previous_queries = []
    discourse_state = None
    if self.params.discourse_level_lstm:
        (discourse_state, discourse_lstm_states) = self._initialize_discourse_states()
    input_schema = interaction.get_schema()
    schema_states = []
    if (input_schema and (not self.params.use_bert)):
        schema_states = self.encode_schema_bow_simple(input_schema)
    # Single-turn: exactly one utterance is consumed from the interaction.
    interaction.start_interaction()
    utterance = interaction.next_utterance()
    available_snippets = utterance.snippets()
    previous_query = utterance.previous_query()
    input_sequence = utterance.input_sequence()
    if (not self.params.use_bert):
        if self.params.discourse_level_lstm:
            # Token embedding is concatenated with the running discourse state.
            utterance_token_embedder = (lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0))
        else:
            utterance_token_embedder = self.input_embedder
        (final_utterance_state, utterance_states) = self.utterance_encoder(input_sequence, utterance_token_embedder)
    else:
        (final_utterance_state, utterance_states, schema_states) = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=False)
    input_hidden_states.extend(utterance_states)
    input_sequences.append(input_sequence)
    num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
    if self.params.discourse_level_lstm:
        (_, discourse_state, discourse_lstm_states) = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states)
    if self.params.use_utterance_attention:
        (final_utterance_states_c, final_utterance_states_h, final_utterance_state) = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
    if self.params.state_positional_embeddings:
        (utterance_states, flat_sequence) = self._add_positional_embeddings(input_hidden_states, input_sequences)
    else:
        flat_sequence = []
        for utt in input_sequences[(- num_utterances_to_keep):]:
            flat_sequence.extend(utt)
    snippets = None
    if self.params.use_snippets:
        snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
    if (self.params.use_previous_query and (len(previous_query) > 0)):
        (previous_queries, previous_query_states) = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
    if self.params.use_encoder_attention:
        # Cross-attention between schema tokens and utterance tokens; the
        # attended vectors are concatenated onto the original states.
        schema_attention = self.utterance2schema_attention_module(torch.stack(schema_states, dim=0), utterance_states).vector
        utterance_attention = self.schema2utterance_attention_module(torch.stack(utterance_states, dim=0), schema_states).vector
        if (schema_attention.dim() == 1):
            schema_attention = schema_attention.unsqueeze(1)
        if (utterance_attention.dim() == 1):
            utterance_attention = utterance_attention.unsqueeze(1)
        # NOTE(review): states are stacked along dim=1 but concatenated along
        # dim=0, then split back along dim=1 — confirm the intended layout
        # (hidden_dim x num_items) against the attention module's output shape.
        new_schema_states = torch.cat([torch.stack(schema_states, dim=1), schema_attention], dim=0)
        schema_states = list(torch.split(new_schema_states, split_size_or_sections=1, dim=1))
        schema_states = [schema_state.squeeze() for schema_state in schema_states]
        new_input_hidden_states = torch.cat([torch.stack(utterance_states, dim=1), utterance_attention], dim=0)
        input_hidden_states = list(torch.split(new_input_hidden_states, split_size_or_sections=1, dim=1))
        input_hidden_states = [input_hidden_state.squeeze() for input_hidden_state in input_hidden_states]
    if self.params.use_schema_encoder_2:
        # Second-pass encoders run over the (possibly attention-augmented) states.
        (final_schema_state, schema_states) = self.schema_encoder_2(schema_states, (lambda x: x), dropout_amount=self.dropout)
        (_, input_hidden_states) = self.utterance_encoder_2(input_hidden_states, (lambda x: x), dropout_amount=self.dropout)
    return (final_utterance_state, input_hidden_states, schema_states, max_generation_length, snippets, flat_sequence, previous_queries, previous_query_states, input_schema)
class StringToken(ProgramToken):
    """Program token that wraps a constant string value.

    Executing the token simply yields the wrapped string; its return type
    is the (Python 2) unicode string type.
    """

    def __init__(self, s):
        # Python 2 code path: only unicode strings are accepted.
        assert isinstance(s, unicode)
        self._string = s

    def execute(self, env):
        """Return the wrapped constant; the environment is ignored."""
        return self._string

    def return_type(self):
        """Type produced by execute()."""
        return unicode

    def __str__(self):
        return f'String({self._string!r})'

    # Tokens print identically via str() and repr().
    __repr__ = __str__
class GPTNeoXForSequenceClassification(metaclass=DummyObject):
    """Placeholder class used when torch is not installed.

    Instantiating it calls requires_backends, which raises an informative
    ImportError instead of letting usage fail later with an obscure error.
    """
    # Backends that must be importable for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class NewtonMethod(optimization_algorithm.OptimizationAlgorithm):
    """Newton's method with a line search and a gradient-descent fallback
    used when the curvature-informed step breaks the line search."""

    def __init__(self, db: database.Database, optimization_problem: _typing.OptimizationProblem, line_search: ls.LineSearch) -> None:
        """Store the Hessian sub-problem and initialize step-size bookkeeping.

        Args:
            db: database used by the base optimization algorithm.
            optimization_problem: problem whose hessian_problem supplies
                Newton steps via newton_solve().
            line_search: line search used to scale each search direction.
        """
        super().__init__(db, optimization_problem, line_search)
        self.hessian_problem = optimization_problem.hessian_problem
        self.stepsize = 1.0
        self.armijo_stepsize_initial = self.stepsize
        self.armijo_broken = False

    def run(self) -> None:
        """Main optimization loop: iterate until convergence or failure."""
        while True:
            self.compute_gradient()
            self.gradient_norm = self.compute_gradient_norm()
            if self.convergence_test():
                break
            self.compute_search_direction()
            self.check_for_ascent()
            self.evaluate_cost_functional()
            self.line_search.perform(self, self.search_direction, self.has_curvature_info)
            # If the Newton step broke the line search, retry once with the
            # plain negative gradient as the search direction.
            if (self.line_search_broken and self.has_curvature_info):
                for i in range(len(self.gradient)):
                    # search_direction[i] <- -gradient[i]
                    # (PETSc aypx: y = alpha*y + x with alpha = 0).
                    self.search_direction[i].vector().vec().aypx(0.0, (- self.gradient[i].vector().vec()))
                    self.search_direction[i].vector().apply('')
                self.has_curvature_info = False
                self.line_search_broken = False
                self.line_search.perform(self, self.search_direction, self.has_curvature_info)
            self.iteration += 1
            if self.nonconvergence():
                break

    def compute_search_direction(self) -> None:
        """Solve the Newton system for the search direction and mark it as
        carrying curvature information."""
        self.search_direction = self.hessian_problem.newton_solve()
        self.has_curvature_info = True
class KnnSampler(_BasicSampler):
    """Sampler that draws fixed-size point neighborhoods via a KNN query.

    During training the neighborhood center is a uniformly random dataset
    point; during evaluation centers come from a precomputed, voxel-
    downsampled center list so coverage is deterministic per seed.
    """

    def __init__(self, dataset, params, is_training=True, seed=0, return_index=False):
        """Set up the KNN index, RNG, center list and point modifier.

        Args:
            dataset: dataset exposing .points, .labels and tuple indexing.
            params: configuration consumed by _BasicSampler (expected to
                override num_points_per_sample, knn_module, overlap_ratio,
                modify_type below).
            is_training: toggles random vs. fixed neighborhood centers.
            seed: seed for this sampler's private RandomState.
            return_index: if True, sample() returns neighbor indices with
                placeholder arrays instead of real point data.
        """
        # Defaults; _BasicSampler.__init__ is expected to overwrite these
        # from `params` — TODO confirm against the base class.
        self.num_points_per_sample = 0
        self.knn_module = ''
        self.max_workers = 64
        self.overlap_ratio = 1.0
        self.modify_type = None
        super(KnnSampler, self).__init__(*[dataset, params, is_training])
        self.return_index = return_index
        self.random_machine = np.random.RandomState(seed)
        # KNN index built once over all dataset points.
        self.q = KnnQuery(self.dataset.points, self.knn_module, set_k=self.num_points_per_sample)
        # Evaluation uses a fixed list of neighborhood centers.
        self.center_list = (None if self.is_training else self._gen_center_list())
        self.modify_func = PointModifier(self.modify_type)

    def cal_length(self):
        """Samples per epoch: training covers the dataset once (+1 sample);
        evaluation scales that count by 1/overlap_ratio."""
        if self.is_training:
            return (int((len(self.dataset) / self.num_points_per_sample)) + 1)
        else:
            return int(((len(self.dataset) / self.num_points_per_sample) / self.overlap_ratio))

    def modify_points(self, points, *args, **kwargs):
        """Re-center/normalize `points` with the configured modifier.

        Bug fix: sample() passes the center as ``center=...`` but this method
        previously read only ``kwargs['center_point']``, so every call from
        sample() raised KeyError. Accept both spellings (preferring
        ``center``) to stay backward compatible with external callers.
        """
        center = kwargs['center'] if ('center' in kwargs) else kwargs['center_point']
        return self.modify_func(points, center=center)

    def sample(self, ind, set_random_machine=None, *args, **kwargs):
        """Draw one neighborhood sample.

        Returns (points_centered, points, labels, colors) normally, or
        (empty, empty, neighbor_indices, empty) when return_index is set.
        """
        random_machine = (self.random_machine if (set_random_machine is None) else set_random_machine)
        (ind, center_point) = self._sample_index(ind, random_machine)
        if (not self.return_index):
            (points, colors, labels) = self.dataset[ind]
            points_centered = self.modify_points(points, center=center_point)
            return (points_centered, points, labels, colors)
        else:
            # Caller only wants the indices; ship placeholder arrays so the
            # returned tuple has the same shape as the normal path.
            (empty_pts_, empty_pts, _, empty_clrs) = _gen_empty_sample(self.num_points_per_sample, self.modify_func.shape, self.dataset.labels.shape[1])
            return (empty_pts_, empty_pts, ind, empty_clrs)

    def _sample_center_index(self, ind, random_machine):
        """Uniformly random center index (training mode only)."""
        return random_machine.randint(0, len(self.dataset))

    def _sample_index(self, ind, random_machine):
        """Resolve a center point and return (shuffled neighbor indices, center)."""
        if self.is_training:
            center_index = self._sample_center_index(ind, random_machine)
            center_point = self.dataset.points[center_index]
        else:
            center_point = self.center_list[ind]
        (_, neighbour_ind) = self.q.search(np.expand_dims(center_point, axis=0), self.num_points_per_sample)
        neighbour_ind = neighbour_ind[0]
        # Shuffle so consumers don't see distance-sorted points.
        random_machine.shuffle(neighbour_ind)
        return (neighbour_ind, center_point)

    def _gen_center_list(self):
        """Fixed evaluation centers: voxel-downsampled points, padded with
        random dataset points if fewer than len(self) centers exist."""
        centers = o3d.voxel_sampling(self.dataset.points, voxel_size=7)
        self.random_machine.shuffle(centers)
        if (len(centers) >= len(self)):
            return centers[:len(self)]
        else:
            res_len = (len(self) - len(centers))
            random_centers_index = self.random_machine.randint(0, len(self.dataset), res_len)
            return np.concatenate([centers, self.dataset.points[random_centers_index]])
class Session():
    """Per-user chat session state for a 3D scene grounding demo.

    NOTE(review): `field(factory=dict)` and the `cls`-first factory methods
    strongly suggest this class originally carried an attrs `@define`
    decorator plus `@classmethod` decorators that are missing from this
    copy — confirm against the original source.
    """
    session_id: str
    start_time: str
    scene: str
    # Two parallel histories: one fed back to the LLM, one shown in the UI.
    chat_history_for_llm: list[tuple]
    chat_history_for_display: list[tuple]
    chat_counter: int
    # Maps displayed image ids to their file paths.
    image_id_to_path: dict[(int, str)] = field(factory=dict)
    # Everything below is populated lazily as the grounding pipeline runs.
    grounding_result_mesh_path: (str | None) = None
    ground_result: (list[tuple[float]] | None) = None
    candidate: (list | None) = None
    chosen_candidate_id: (int | None) = None
    working_scene_name: (str | None) = None
    grounding_query: (str | None) = None
    ground_truth: (list | None) = None
    top_5_objects2scores: (dict | None) = None
    center_list: (list | None) = None
    box_size_list: (list | None) = None
    values_list: (list | None) = None
    base_mesh_path: (str | None) = None
    candidate_visualization: (list | None) = None
    landmark_visualization: (list | None) = None
    camera_poses: (list | None) = None

    def create(cls):
        """Factory: new session for the default scene (classmethod-style)."""
        return Session.create_for_scene(Settings.default_scene)

    def create_for_scene(cls, scene: str):
        """Factory: new session with a fresh UUID, a start timestamp, empty
        LLM history, and the initial greeting in the display history."""
        session = cls(session_id=str(uuid.uuid4()), start_time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), scene=scene, chat_history_for_llm=[], chat_history_for_display=[(None, Settings.INITIAL_MSG_FOR_DISPLAY)], chat_counter=0)
        logger.info(f'Creating a new session {session.session_id} with scene {session.scene}.')
        session.working_scene_name = scene
        return session

    def convert_float32(self, obj):
        """Recursively replace np.float32 values with builtin floats so the
        structure is JSON-serializable; lists/tuples/dicts are rebuilt,
        everything else passes through unchanged."""
        if isinstance(obj, np.float32):
            return float(obj)
        if isinstance(obj, list):
            return [self.convert_float32(item) for item in obj]
        if isinstance(obj, tuple):
            return tuple((self.convert_float32(item) for item in obj))
        if isinstance(obj, dict):
            return {key: self.convert_float32(value) for (key, value) in obj.items()}
        return obj

    def save(self, output_path: str) -> None:
        """Serialize the session to output_path/<scene>/<session_id>.json,
        excluding the UI-only display history."""
        logger.info(f'Saving session {self.session_id} to disk.')
        os.makedirs(os.path.join(output_path, self.working_scene_name), exist_ok=True)
        structured_data = cattrs.unstructure(self)
        # Display history may contain non-serializable UI objects; drop it.
        structured_data.pop('chat_history_for_display', None)
        converted_data = self.convert_float32(structured_data)
        with open(os.path.join(output_path, self.working_scene_name, f'{self.session_id}.json'), 'w', encoding='utf-8') as file:
            json.dump(converted_data, file, indent=4)
        logger.info(f'Session {self.session_id} saved to disk.')
def random_split(dataset, lengths):
    """Randomly partition *dataset* into non-overlapping Subsets of the
    given lengths.

    Raises:
        ValueError: if the lengths do not sum to len(dataset).
    """
    total = sum(lengths)
    if total != len(dataset):
        raise ValueError('Sum of input lengths does not equal the length of the input dataset!')
    # One shared permutation; each subset takes a consecutive slice of it.
    shuffled = randperm(total)
    subsets = []
    cursor = 0
    for part_len in lengths:
        subsets.append(Subset(dataset, shuffled[cursor:cursor + part_len]))
        cursor += part_len
    return subsets
class SpatialAveragePooling(Module):
    """2D average pooling (legacy nn module wrapping the backend kernels).

    kW/kH are the kernel size, dW/dH the stride, padW/padH the zero padding.
    By default the output is divided by the kernel area; disabling `divide`
    makes the module return the plain sum instead.
    """

    def __init__(self, kW, kH, dW=1, dH=1, padW=0, padH=0):
        super(SpatialAveragePooling, self).__init__()
        self.kW, self.kH = kW, kH
        self.dW, self.dH = dW, dH
        self.padW, self.padH = padW, padH
        self.ceil_mode = False
        self.count_include_pad = True
        self.divide = True

    def ceil(self):
        """Use ceil when computing the output size; returns self for chaining."""
        self.ceil_mode = True
        return self

    def floor(self):
        """Use floor when computing the output size; returns self for chaining."""
        self.ceil_mode = False
        return self

    def setCountIncludePad(self):
        """Include padded zeros in the averaging denominator."""
        self.count_include_pad = True
        return self

    def setCountExcludePad(self):
        """Exclude padded zeros from the averaging denominator."""
        self.count_include_pad = False
        return self

    def updateOutput(self, input):
        """Forward pass via the backend kernel; undoes the division when
        `divide` is False (yields the window sum)."""
        backend = self._backend
        backend.SpatialAveragePooling_updateOutput(backend.library_state, input, self.output, self.kW, self.kH, self.dW, self.dH, self.padW, self.padH, self.ceil_mode, self.count_include_pad)
        if not self.divide:
            self.output.mul_(self.kW * self.kH)
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward pass via the backend kernel, mirroring updateOutput."""
        if self.gradInput is not None:
            backend = self._backend
            backend.SpatialAveragePooling_updateGradInput(backend.library_state, input, gradOutput, self.gradInput, self.kW, self.kH, self.dW, self.dH, self.padW, self.padH, self.ceil_mode, self.count_include_pad)
            if not self.divide:
                self.gradInput.mul_(self.kW * self.kH)
        return self.gradInput

    def __repr__(self):
        text = super(SpatialAveragePooling, self).__repr__()
        text += '({}x{}, {}, {}'.format(self.kW, self.kH, self.dW, self.dH)
        # Only mention padding when at least one pad is non-zero.
        if self.padW != 0 or self.padH != 0:
            text += ', {}, {}'.format(self.padW, self.padH)
        return text + ')'
def generate_type_hints(fname, decls, namedtuples, is_tensor=False):
    """Render .pyi type-hint stub lines for one (possibly overloaded) ATen
    function.

    Args:
        fname: function name the stubs are emitted for.
        decls: list of declaration dicts (parsed from Declarations.yaml).
        namedtuples: dict mutated in place, mapping generated namedtuple
            names to their NamedTuple(...) definition strings.
        is_tensor: True when emitting Tensor *methods* (keeps a bare `self`
            first argument instead of renaming it to `input`).

    Returns:
        List of 'def f(...) -> ...: ...' stub strings (empty if blocklisted).
    """
    if (fname in blocklist):
        return []
    type_hints = []
    dnames = [d['name'] for d in decls]
    # A companion `<fname>_out` declaration means we fold an `out=` keyword
    # into the regular overloads instead of emitting the _out variant.
    has_out = ((fname + '_out') in dnames)
    if has_out:
        decls = [d for d in decls if (d['name'] != (fname + '_out'))]
    for decl in decls:
        # Emit the '*' keyword-only separator at most once per overload.
        render_kw_only_separator = True
        python_args = []
        has_tensor_options = ('TensorOptions' in (a['dynamic_type'] for a in decl['arguments']))
        for a in decl['arguments']:
            # TensorOptions is expanded into dtype/layout/device/requires_grad below.
            if (a['dynamic_type'] != 'TensorOptions'):
                if (a.get('kwarg_only', False) and render_kw_only_separator):
                    python_args.append('*')
                    render_kw_only_separator = False
                try:
                    python_args.append(arg_to_type_hint(a))
                except Exception:
                    print('Error while processing function {}'.format(fname))
                    raise
        if ('self: Tensor' in python_args):
            self_index = python_args.index('self: Tensor')
            python_args.remove('self: Tensor')
            if is_tensor:
                # Method form: a plain `self` goes first.
                python_args = (['self'] + python_args)
            else:
                # Function form: `self` becomes an explicit `input` Tensor.
                python_args.insert(self_index, 'input: Tensor')
        elif is_tensor:
            raise Exception('method without self is unexpected')
        if has_out:
            if render_kw_only_separator:
                python_args.append('*')
                render_kw_only_separator = False
            python_args.append('out: Optional[Tensor]=None')
        if has_tensor_options:
            if render_kw_only_separator:
                python_args.append('*')
                render_kw_only_separator = False
            python_args += ['dtype: _dtype=None', 'layout: _layout=strided', 'device: Union[_device, str, None]=None', 'requires_grad:_bool=False']
        python_args_s = ', '.join(python_args)
        python_returns = [type_to_python(r['dynamic_type']) for r in decl['returns']]
        field_names = namedtuple_fieldnames(decl)
        if field_names:
            # Named multi-value returns become a shared NamedTuple definition.
            namedtuple_name = '_'.join((['namedtuple'] + field_names))
            tuple_args = ['("{}", {})'.format(name, typ) for (name, typ) in zip(field_names, python_returns)]
            namedtuple_def = 'NamedTuple("{}", [{}])'.format(namedtuple_name, ', '.join(tuple_args))
            if (namedtuple_name in namedtuples):
                # The same generated name must always mean the same definition.
                assert (namedtuples[namedtuple_name] == namedtuple_def)
            else:
                namedtuples[namedtuple_name] = namedtuple_def
            python_returns_s = namedtuple_name
        elif (len(python_returns) > 1):
            python_returns_s = (('Tuple[' + ', '.join(python_returns)) + ']')
        elif (len(python_returns) == 1):
            python_returns_s = python_returns[0]
        else:
            python_returns_s = 'None'
        type_hint = 'def {}({}) -> {}: ...'.format(fname, python_args_s, python_returns_s)
        numargs = len(decl['arguments'])
        vararg_pos = int(is_tensor)
        # A single leading IntArrayRef argument also gets a vararg overload,
        # e.g. x.view(1, 2, 3) in addition to x.view((1, 2, 3)).
        have_vararg_version = ((numargs > vararg_pos) and (decl['arguments'][vararg_pos]['dynamic_type'] in {'IntArrayRef'}) and ((numargs == (vararg_pos + 1)) or (python_args[(vararg_pos + 1)] == '*')) and ((not is_tensor) or (decl['arguments'][0]['name'] == 'self')))
        type_hints.append(type_hint)
        if have_vararg_version:
            typelist = decl['arguments'][vararg_pos]['dynamic_type']
            vararg_type = '_int'
            # Replace the list argument with '*name: _int', keep trailing args.
            python_args = (((['self'] if is_tensor else []) + [((('*' + decl['arguments'][vararg_pos]['name']) + ': ') + vararg_type)]) + python_args[(vararg_pos + 2):])
            python_args_s = ', '.join(python_args)
            type_hint = 'def {}({}) -> {}: ...'.format(fname, python_args_s, python_returns_s)
            type_hints.append(type_hint)
    return type_hints
class ParallelTextAndMaskCopyingPipeline(ParallelTextAndMaskInputPipeline):
    """Input pipeline for parallel text plus decoder masks with word copying.

    Extends the parent pipeline by wiring in the word-copying data provider
    and a copying-aware decoder, and by exposing the copy-index label.
    """

    def make_data_provider(self, **kwargs):
        """Build the copying data provider; an empty target_files setting is
        normalized to None (source-only mode)."""
        target_files = self.params['target_files']
        if not target_files:
            target_files = None
        return self._get_copying_data_provider(target_files, **kwargs)

    def _get_copying_data_provider(self, target_files, **kwargs):
        """Construct the word-copying data provider from pipeline params."""
        return copying_data_provider.make_word_copying_data_provider(self.params['source_files'], target_files, self.params['decoder_mask_files'], num_epochs=self.params['num_epochs'], shuffle=self.params['shuffle'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)

    def _get_copying_decoder(self, tokens_feature_name, length_feature_name, prepend_token, append_token, delimiter):
        """Create the decoder that emits copy indices alongside tokens."""
        return copying_decoder.WordCopyingDecoder(tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token, delimiter=delimiter)

    def feature_keys(self):
        """Feature tensor names produced for the encoder side."""
        # Set literals instead of set([...]) (flake8-comprehensions C405);
        # contents unchanged.
        return {'source_tokens', 'source_len', 'decoder_mask'}

    def label_keys(self):
        """Label tensor names, including the per-token copy indices."""
        return {'target_tokens', 'target_len', 'source_copy_indices'}
def display_report_metadata(meta: service.Metadata) -> None:
    """Pretty-print report metadata: detected CI environment variables
    (when present) followed by the compressed report size."""
    ci = meta.ci_environment
    if ci is not None:
        click.secho(f'{ci.verbose_name} detected:', bold=True)
        for key, value in ci.as_env().items():
            # Unset variables are skipped entirely.
            if value is not None:
                click.secho(f' -> {key}: {value}')
        click.echo()
    click.secho(f'Compressed report size: {(meta.size / 1024.0):,.0f} KB', bold=True)
class SigmoidFocalLoss(nn.Module):
    """Sigmoid focal loss that dispatches to the CUDA or CPU kernel based on
    the device of the incoming logits."""

    def __init__(self, gamma, alpha):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, logits, targets, weight=None):
        """Return the summed (optionally per-element weighted) focal loss."""
        loss_func = sigmoid_focal_loss_cuda if logits.is_cuda else sigmoid_focal_loss_cpu
        loss = loss_func(logits, targets, self.gamma, self.alpha)
        if weight is not None:
            loss = loss * weight
        return loss.sum()

    def __repr__(self):
        return f'{self.__class__.__name__}(gamma={self.gamma}, alpha={self.alpha})'
class UnusedPrimitiveOrCollectionStatementVisitor(StatementVisitor):
    """Statement visitor that removes primitive/collection statements whose
    return value is never consumed.

    A statement is deletable only if its ret_val does not appear in the
    variable references recorded from previously visited statements, so the
    visitor relies on the traversal order to expose consumers before (or
    after, depending on direction) their producers — confirm the intended
    walk order against the caller.
    """

    def __init__(self):
        # Variable references consumed by statements visited so far.
        self._used_references = set()
        # Positions of statements this visitor removed from the test case.
        self._deleted_statement_indexes: set[int] = set()

    def deleted_statement_indexes(self) -> set[int]:
        """Positions of the statements deleted during visiting."""
        return self._deleted_statement_indexes

    def _handle_collection_or_primitive(self, stmt) -> None:
        # Keep the statement only if something uses its return value;
        # otherwise remove it and remember its position.
        if (stmt.ret_val in self._used_references):
            self._handle_remaining(stmt)
        else:
            self._deleted_statement_indexes.add(stmt.get_position())
            stmt.test_case.remove_statement(stmt)

    def _handle_remaining(self, stmt) -> None:
        # Record the statement's inputs as used, excluding its own return value.
        used = stmt.get_variable_references()
        used.discard(stmt.ret_val)
        self._used_references.update(used)

    # Primitive, enum, and collection statements are removable when unused.
    def visit_int_primitive_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_float_primitive_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_complex_primitive_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_string_primitive_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_bytes_primitive_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_boolean_primitive_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_enum_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_class_primitive_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_none_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    # Constructor/method/function calls may have side effects, so they are
    # never deleted — only their dependencies are recorded.
    def visit_constructor_statement(self, stmt) -> None:
        self._handle_remaining(stmt)

    def visit_method_statement(self, stmt) -> None:
        self._handle_remaining(stmt)

    def visit_function_statement(self, stmt) -> None:
        self._handle_remaining(stmt)

    def visit_field_statement(self, stmt) -> None:
        raise NotImplementedError('No field support yet.')

    def visit_assignment_statement(self, stmt) -> None:
        raise NotImplementedError('No field support yet.')

    def visit_list_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_set_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_tuple_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)

    def visit_dict_statement(self, stmt) -> None:
        self._handle_collection_or_primitive(stmt)
def main():
    """Convert word transcripts on stdin to phone sequences via a lexicon.

    Silence tokens are optionally inserted between words (probability
    --sil_prob, only when there is more than one word) and around each
    utterance (--surround). Lines containing any out-of-lexicon word are
    skipped entirely.
    """
    args = get_parser().parse_args()
    sil_prob = args.sil_prob
    surround = args.surround
    sil = '<SIL>'

    # Load the lexicon: word -> phone sequence. Duplicates and entries
    # without phones are treated as errors.
    wrd_to_phn = {}
    with open(args.lexicon, 'r') as lf:
        for raw in lf:
            fields = raw.rstrip().split()
            assert len(fields) > 1, raw
            assert fields[0] not in wrd_to_phn, fields
            wrd_to_phn[fields[0]] = fields[1:]

    for line in sys.stdin:
        words = line.strip().split()
        # Skip utterances containing any out-of-vocabulary word.
        if not all(w in wrd_to_phn for w in words):
            continue
        phones = []
        if surround:
            phones.append(sil)
        sample_sil_probs = None
        if sil_prob > 0 and len(words) > 1:
            # One random draw per inter-word boundary.
            sample_sil_probs = np.random.random(len(words) - 1)
        for i, w in enumerate(words):
            phones.extend(wrd_to_phn[w])
            if sample_sil_probs is not None and i < len(sample_sil_probs) and sample_sil_probs[i] < sil_prob:
                phones.append(sil)
        if surround:
            phones.append(sil)
        print(' '.join(phones))
def debug_training(dataset_path, config_path=None):
    """Fit a SnipsNLUEngine on a JSON dataset and interactively parse
    queries typed on stdin until the user enters 'q'."""
    dataset = json.loads(Path(dataset_path).read_text(encoding='utf8'))
    config = None
    if config_path is not None:
        with Path(config_path).open('r', encoding='utf8') as f:
            config = NLUEngineConfig.from_dict(json.load(f))
    engine = SnipsNLUEngine(config).fit(dataset)
    while True:
        query = input("Enter a query (type 'q' to quit): ").strip()
        if isinstance(query, bytes):
            # Python 2 compatibility: raw_input may return bytes.
            query = query.decode('utf8')
        if query == 'q':
            break
        print(json.dumps(engine.parse(query), indent=2))
def get_system_metadata(repo_root):
    """Collect porting provenance: git SHAs of the source (Helsinki) repo and
    the transformers checkout, plus the host name and a timestamp."""
    import git

    helsinki_sha = git.Repo(path=repo_root, search_parent_directories=True).head.object.hexsha
    # Empty path: resolve the repo containing the current working directory.
    transformers_sha = git.Repo(path='', search_parent_directories=True).head.object.hexsha
    return {
        'helsinki_git_sha': helsinki_sha,
        'transformers_git_sha': transformers_sha,
        'port_machine': socket.gethostname(),
        'port_time': time.strftime('%Y-%m-%d-%H:%M'),
    }
class LALR_TraditionalLexer(LALR_WithLexer):
    """LALR parser variant wired to the traditional (non-contextual) lexer."""

    def init_lexer(self):
        # Hook invoked by LALR_WithLexer; selects the traditional lexer.
        self.init_traditional_lexer()
def gen_random_dataframe(nrows: int=30, ncols: int=30, na_ratio: float=0.0, str_col_name_max_len: int=100, random_state: Union[(int, np.random.RandomState)]=0) -> pd.DataFrame:
    """Generate a DataFrame with randomly typed columns and random
    object-typed column names and index (both with 10% NA)."""
    rng = _resolve_random_state(random_state)
    dtype_pool = ['int', 'float', 'boolean', 'datetime', 'string', 'object']
    # Each column independently gets one of the candidate dtypes.
    chosen_dtypes = rng.choice(dtype_pool, size=ncols)
    frame = pd.DataFrame({
        col: gen_random_series(nrows, dtype=chosen_dtypes[col], na_ratio=na_ratio, random_state=rng)
        for col in range(ncols)
    })
    frame.columns = gen_random_series(size=ncols, dtype='object', na_ratio=0.1, str_max_len=str_col_name_max_len, random_state=rng)
    frame.index = gen_random_series(frame.index.shape[0], na_ratio=0.1, str_max_len=str_col_name_max_len, random_state=rng)
    return frame
def dataio_prep(hparams):
    """Build train/valid/test DynamicItemDatasets and the phone label encoder.

    NOTE(review): the bare `.data_pipeline.takes(...)` / `.provides(...)`
    lines below look like decorators whose `@sb.utils` prefix was stripped
    by whatever produced this file — as written they are not valid Python;
    confirm against the original SpeechBrain recipe.

    Args:
        hparams: hyperparameter dict with annotation paths, data_folder,
            sorting mode, dataloader options and save_folder.

    Returns:
        Tuple (train_data, valid_data, test_data, label_encoder).
    """
    data_folder = hparams['data_folder']
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(json_path=hparams['train_annotation'], replacements={'data_root': data_folder})
    # Sorting by duration lets batches pack similar lengths; shuffling is
    # disabled in that case because order carries meaning.
    if (hparams['sorting'] == 'ascending'):
        train_data = train_data.filtered_sorted(sort_key='duration')
        hparams['dataloader_options']['shuffle'] = False
    elif (hparams['sorting'] == 'descending'):
        train_data = train_data.filtered_sorted(sort_key='duration', reverse=True)
        hparams['dataloader_options']['shuffle'] = False
    elif (hparams['sorting'] == 'random'):
        pass
    else:
        raise NotImplementedError('sorting must be random, ascending or descending')
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(json_path=hparams['valid_annotation'], replacements={'data_root': data_folder})
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(json_path=hparams['test_annotation'], replacements={'data_root': data_folder})
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.TextEncoder()
    # Audio pipeline: 'wav' path -> 'sig' waveform.
    .data_pipeline.takes('wav')
    .data_pipeline.provides('sig')
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # Text pipeline: 'phn' string -> phone list and encoded tensor.
    .data_pipeline.takes('phn')
    .data_pipeline.provides('phn_list', 'phn_encoded')
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        (yield phn_list)
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        (yield phn_encoded)
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # Boundary pipeline: space-separated ints -> tensor of phone end times.
    .data_pipeline.takes('ground_truth_phn_ends')
    .data_pipeline.provides('phn_ends')
    def phn_ends_pipeline(ground_truth_phn_ends):
        phn_ends = ground_truth_phn_ends.strip().split()
        phn_ends = [int(i) for i in phn_ends]
        phn_ends = torch.Tensor(phn_ends)
        return phn_ends
    sb.dataio.dataset.add_dynamic_item(datasets, phn_ends_pipeline)
    # Reuse a saved label encoder if present; otherwise build it from the
    # training phones and persist it.
    label_encoder_file = os.path.join(hparams['save_folder'], 'label_encoder.txt')
    if os.path.exists(label_encoder_file):
        label_encoder.load(label_encoder_file)
    else:
        label_encoder.update_from_didataset(train_data, output_key='phn_list')
        label_encoder.save(os.path.join(hparams['save_folder'], 'label_encoder.txt'))
    sb.dataio.dataset.set_output_keys(datasets, ['id', 'sig', 'phn_encoded', 'phn_ends'])
    return (train_data, valid_data, test_data, label_encoder)
def test_ListOffsetArray_RecordArray_NumpyArray():
    """Indexing/slicing behavior of a ListOffsetArray wrapping a one-field
    RecordArray of floats.

    Offsets [1, 4, 4, 6] over 7 values give three sublists of lengths
    3, 0, and 2 (the leading value at index 0 is unused).
    """
    a = ak.contents.listoffsetarray.ListOffsetArray(ak.index.Index(np.array([1, 4, 4, 6])), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]))], ['nest']))
    # Typetracer (shape-only) arrays must mirror the concrete forms.
    assert (a.to_typetracer().form == a.form)
    assert (a.to_typetracer().form.type == a.form.type)
    assert (len(a['nest']) == 3)
    assert (a.to_typetracer()['nest'].form == a['nest'].form)
    # Out-of-range positive and negative indices raise IndexError.
    with pytest.raises(IndexError):
        a['nest'][3]
    with pytest.raises(IndexError):
        a['nest'][(- 4)]
    assert isinstance(a['nest'][2], ak.contents.numpyarray.NumpyArray)
    assert (a.to_typetracer()['nest'][2].form == a['nest'][2].form)
    # Sublist lengths, addressed both positively and negatively.
    assert (len(a['nest'][0]) == 3)
    assert (len(a['nest'][1]) == 0)
    assert (len(a['nest'][2]) == 2)
    assert (len(a['nest'][(- 3)]) == 3)
    assert (len(a['nest'][(- 2)]) == 0)
    assert (len(a['nest'][(- 1)]) == 2)
    assert (a['nest'][0][(- 1)] == 3.3)
    assert (a['nest'][2][(- 1)] == 5.5)
    # Slices produce ListOffsetArrays and clamp out-of-range stops.
    assert isinstance(a['nest'][1:], ak.contents.listoffsetarray.ListOffsetArray)
    assert (a.to_typetracer()['nest'][1:].form == a['nest'][1:].form)
    assert (len(a['nest'][1:]) == 2)
    assert (len(a['nest'][(- 2):]) == 2)
    assert (len(a['nest'][1:100]) == 2)
    assert (len(a['nest'][(- 2):100]) == 2)
    # Unknown field names raise IndexError.
    with pytest.raises(IndexError):
        a['nest']['bad']
class AdamP(optimizer_v2.OptimizerV2):
    """AdamP optimizer as a Keras/TensorFlow ``OptimizerV2``.

    Behaves like Adam, but for variables with more than one dimension the
    update may be projected onto the plane orthogonal to the variable and any
    weight decay is then damped by ``wd_ratio`` (see ``_projection``).
    Presumably an implementation of the AdamP paper (Heo et al.) — confirm
    against the upstream reference implementation.
    """
    _HAS_AGGREGATE_GRAD = True
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, weight_decay=0.0, delta=0.1, wd_ratio=0.1, nesterov=False, name='AdamP', **kwargs):
        """Create the optimizer.

        Args:
            learning_rate: step size (legacy alias ``lr`` accepted via kwargs).
            beta_1: first-moment EMA decay.
            beta_2: second-moment EMA decay.
            epsilon: numerical-stability constant added to the denominator.
            weight_decay: decoupled weight-decay factor (applied only when > 0).
            delta: cosine-similarity threshold that triggers the projection.
            wd_ratio: weight-decay multiplier used when the update is projected.
            nesterov: if True, use the Nesterov-style perturbation.
            name: optimizer name.
            **kwargs: passed through to ``OptimizerV2`` (may include ``lr``).
        """
        super(AdamP, self).__init__(name, **kwargs)
        # 'lr' is accepted as a legacy alias for learning_rate.
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self._set_hyper('delta', delta)
        self._set_hyper('wd_ratio', wd_ratio)
        self.epsilon = (epsilon or backend_config.epsilon())
        self.weight_decay = weight_decay
        self.nesterov = nesterov
    def _create_slots(self, var_list):
        # One pass per slot name so checkpoints group slots together.
        # NOTE(review): the 'p' slot is created but never read or written by
        # the apply methods below — confirm it is intentional.
        for var in var_list:
            self.add_slot(var, 'm')
        for var in var_list:
            self.add_slot(var, 'v')
        for var in var_list:
            self.add_slot(var, 'p')
    def _prepare_local(self, var_device, var_dtype, apply_state):
        """Precompute per-(device, dtype) coefficients used by the apply ops."""
        super(AdamP, self)._prepare_local(var_device, var_dtype, apply_state)
        # Step counter is 1-based for the bias corrections.
        local_step = math_ops.cast((self.iterations + 1), var_dtype)
        beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
        beta_1_power = math_ops.pow(beta_1_t, local_step)
        beta_2_power = math_ops.pow(beta_2_t, local_step)
        lr = apply_state[(var_device, var_dtype)]['lr_t']
        # Standard Adam bias corrections: 1 - beta^t.
        bias_correction1 = (1 - beta_1_power)
        bias_correction2 = (1 - beta_2_power)
        delta = array_ops.identity(self._get_hyper('delta', var_dtype))
        wd_ratio = array_ops.identity(self._get_hyper('wd_ratio', var_dtype))
        apply_state[(var_device, var_dtype)].update(dict(lr=lr, epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype), weight_decay=ops.convert_to_tensor_v2(self.weight_decay, var_dtype), beta_1_t=beta_1_t, beta_1_power=beta_1_power, one_minus_beta_1_t=(1 - beta_1_t), beta_2_t=beta_2_t, beta_2_power=beta_2_power, one_minus_beta_2_t=(1 - beta_2_t), bias_correction1=bias_correction1, bias_correction2=bias_correction2, delta=delta, wd_ratio=wd_ratio))
    def set_weights(self, weights):
        """Restore optimizer weights, tolerating checkpoints without all slots.

        NOTE(review): ``num_vars = (len(params) - 1) / 2`` assumes two slots
        per variable (as in stock Adam), but ``_create_slots`` above creates
        three ('m', 'v', 'p') — verify this truncation against the intended
        checkpoint layout.
        """
        params = self.weights
        num_vars = int(((len(params) - 1) / 2))
        if (len(weights) == ((3 * num_vars) + 1)):
            weights = weights[:len(params)]
        super(AdamP, self).set_weights(weights)
    def _resource_apply_dense(self, grad, var, apply_state=None):
        """Dense update: Adam moments + optional projection + decoupled decay."""
        (var_device, var_dtype) = (var.device, var.dtype.base_dtype)
        coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype))
        # m_t = beta1 * m + (1 - beta1) * grad
        m = self.get_slot(var, 'm')
        m_scaled_g_values = (grad * coefficients['one_minus_beta_1_t'])
        m_t = state_ops.assign(m, ((m * coefficients['beta_1_t']) + m_scaled_g_values), use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * grad^2
        v = self.get_slot(var, 'v')
        v_scaled_g_values = ((grad * grad) * coefficients['one_minus_beta_2_t'])
        v_t = state_ops.assign(v, ((v * coefficients['beta_2_t']) + v_scaled_g_values), use_locking=self._use_locking)
        # Bias-corrected denominator and step size.
        denorm = ((math_ops.sqrt(v_t) / math_ops.sqrt(coefficients['bias_correction2'])) + coefficients['epsilon'])
        step_size = (coefficients['lr'] / coefficients['bias_correction1'])
        if self.nesterov:
            # Nesterov-style look-ahead perturbation.
            perturb = (((coefficients['beta_1_t'] * m_t) + (coefficients['one_minus_beta_1_t'] * grad)) / denorm)
        else:
            perturb = (m_t / denorm)
        wd_ratio = 1
        # Projection only applies to variables with >1 dimension.
        if (len(var.shape) > 1):
            (perturb, wd_ratio) = self._projection(var, grad, perturb, coefficients['delta'], coefficients['wd_ratio'], coefficients['epsilon'])
        # Decoupled weight decay, scaled by wd_ratio when projected.
        if (self.weight_decay > 0):
            var = state_ops.assign(var, (var * (1 - ((coefficients['lr'] * coefficients['weight_decay']) * wd_ratio))), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, (step_size * perturb), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        """Sparse update: like the dense path, using scatter-adds on the moments."""
        (var_device, var_dtype) = (var.device, var.dtype.base_dtype)
        coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype))
        # Decay first, then scatter-add the gradient contribution at `indices`.
        m = self.get_slot(var, 'm')
        m_scaled_g_values = (grad * coefficients['one_minus_beta_1_t'])
        m_t = state_ops.assign(m, (m * coefficients['beta_1_t']), use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = ((grad * grad) * coefficients['one_minus_beta_2_t'])
        v_t = state_ops.assign(v, (v * coefficients['beta_2_t']), use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
        denorm = ((math_ops.sqrt(v_t) / math_ops.sqrt(coefficients['bias_correction2'])) + coefficients['epsilon'])
        step_size = (coefficients['lr'] / coefficients['bias_correction1'])
        if self.nesterov:
            p_scaled_g_values = (grad * coefficients['one_minus_beta_1_t'])
            perturb = (m_t * coefficients['beta_1_t'])
            perturb = (self._resource_scatter_add(perturb, indices, p_scaled_g_values) / denorm)
        else:
            perturb = (m_t / denorm)
        wd_ratio = 1
        if (len(var.shape) > 1):
            (perturb, wd_ratio) = self._projection(var, grad, perturb, coefficients['delta'], coefficients['wd_ratio'], coefficients['epsilon'])
        if (self.weight_decay > 0):
            var = state_ops.assign(var, (var * (1 - ((coefficients['lr'] * coefficients['weight_decay']) * wd_ratio))), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, (step_size * perturb), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _channel_view(self, x):
        # Flatten to (channels, everything-else) for per-channel similarity.
        return array_ops.reshape(x, shape=[x.shape[0], (- 1)])
    def _layer_view(self, x):
        # Flatten the whole tensor to a single row for per-layer similarity.
        return array_ops.reshape(x, shape=[1, (- 1)])
    def _cosine_similarity(self, x, y, eps, view_func):
        """Abs cosine similarity between x and y rows under `view_func` flattening."""
        x = view_func(x)
        y = view_func(y)
        x_norm = (math_ops.euclidean_norm(x, axis=(- 1)) + eps)
        y_norm = (math_ops.euclidean_norm(y, axis=(- 1)) + eps)
        dot = math_ops.reduce_sum((x * y), axis=(- 1))
        return ((math_ops.abs(dot) / x_norm) / y_norm)
    def _projection(self, var, grad, perturb, delta, wd_ratio, eps):
        """Project `perturb` orthogonally to `var` when grad ~ parallel to var.

        Tries the channel-wise view first; if its max cosine similarity is
        below the threshold, projects channel-wise, otherwise falls back to
        the layer-wise test (see `channel_false_fn`). Returns the (possibly
        projected) perturbation and the weight-decay ratio to apply.
        """
        cosine_sim = self._cosine_similarity(grad, var, eps, self._channel_view)
        cosine_max = math_ops.reduce_max(cosine_sim)
        # Threshold shrinks with the channel dimensionality.
        compare_val = (delta / math_ops.sqrt(math_ops.cast(self._channel_view(var).shape[(- 1)], dtype=delta.dtype)))
        (perturb, wd) = control_flow_ops.cond(pred=(cosine_max < compare_val), true_fn=(lambda : self.channel_true_fn(var, perturb, wd_ratio, eps)), false_fn=(lambda : self.channel_false_fn(var, grad, perturb, delta, wd_ratio, eps)))
        return (perturb, wd)
    def channel_true_fn(self, var, perturb, wd_ratio, eps):
        """Channel-wise projection: remove the component of perturb along var."""
        expand_size = ([(- 1)] + ([1] * (len(var.shape) - 1)))
        var_n = (var / (array_ops.reshape(math_ops.euclidean_norm(self._channel_view(var), axis=(- 1)), shape=expand_size) + eps))
        perturb -= (var_n * array_ops.reshape(math_ops.reduce_sum(self._channel_view((var_n * perturb)), axis=(- 1)), shape=expand_size))
        wd = wd_ratio
        return (perturb, wd)
    def channel_false_fn(self, var, grad, perturb, delta, wd_ratio, eps):
        """Fallback: repeat the similarity test layer-wise; project or identity."""
        cosine_sim = self._cosine_similarity(grad, var, eps, self._layer_view)
        cosine_max = math_ops.reduce_max(cosine_sim)
        compare_val = (delta / math_ops.sqrt(math_ops.cast(self._layer_view(var).shape[(- 1)], dtype=delta.dtype)))
        (perturb, wd) = control_flow_ops.cond((cosine_max < compare_val), true_fn=(lambda : self.layer_true_fn(var, perturb, wd_ratio, eps)), false_fn=(lambda : self.identity_fn(perturb)))
        return (perturb, wd)
    def layer_true_fn(self, var, perturb, wd_ratio, eps):
        """Layer-wise projection: remove the component of perturb along var."""
        expand_size = ([(- 1)] + ([1] * (len(var.shape) - 1)))
        var_n = (var / (array_ops.reshape(math_ops.euclidean_norm(self._layer_view(var), axis=(- 1)), shape=expand_size) + eps))
        perturb -= (var_n * array_ops.reshape(math_ops.reduce_sum(self._layer_view((var_n * perturb)), axis=(- 1)), shape=expand_size))
        wd = wd_ratio
        return (perturb, wd)
    def identity_fn(self, perturb):
        """No projection: pass perturb through with full weight decay (wd = 1)."""
        wd = 1.0
        return (perturb, wd)
    def get_config(self):
        """Serialize hyperparameters for optimizer reconstruction."""
        config = super(AdamP, self).get_config()
        config.update({'learning_rate': self._serialize_hyperparameter('learning_rate'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'delta': self._serialize_hyperparameter('delta'), 'wd_ratio': self._serialize_hyperparameter('wd_ratio'), 'epsilon': self.epsilon, 'weight_decay': self.weight_decay, 'nesterov': self.nesterov})
        return config
# Reconstructed decorator: the source line was a bare `_numpy_output(...)`
# expression (NameError) — the `@compare` prefix of dace's test helper
# `@compare_numpy_output` was evidently lost.
@compare_numpy_output(check_dtype=True)
def test_ufunc_fmax_ff(A: dace.float32[10], B: dace.float32[10]):
    """Element-wise np.fmax on two float32 vectors; the DaCe-compiled result
    must match plain NumPy, including dtype (check_dtype=True)."""
    return np.fmax(A, B)
def test_montage_simple_padding_gray():
    """montage of two 2x2 gray images with 1-pixel padding.

    The fill value defaults to the mean of the input (3.0 for arange(8)),
    which also pads the unused second montage row.
    """
    images = np.arange(8).reshape(2, 2, 2)
    result = montage(images, padding_width=1)
    expected = np.array(
        [
            [3, 3, 3, 3, 3, 3, 3],
            [3, 0, 1, 3, 4, 5, 3],
            [3, 2, 3, 3, 6, 7, 3],
            [3, 3, 3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3, 3, 3],
        ]
    )
    assert_array_equal(result, expected)
class SampledFeatures(Features):
    """Features whose exposed values are derived from a pool of samples.

    Every sample added (via ``add_sample`` or the ``values`` setter path)
    is appended to an internal list, and the feature values are recomputed
    from the whole pool through the configured ``SampleAveragingStrategy``.
    """
    sampling_strategy: SampleAveragingStrategy
    _samples: List[FeaturesValuesLike]

    def __init__(self, sampling_strategy: SampleAveragingStrategy = None, *args, **kwargs) -> None:
        """Initialize with an optional averaging strategy.

        Args:
            sampling_strategy: strategy used to collapse samples into values.
                Defaults to a fresh ``ExplicitSampleAveragingStrategy`` per
                instance. (The previous signature built one instance at
                function-definition time, silently shared by every
                ``SampledFeatures`` object — the mutable-default pitfall.)
        """
        if sampling_strategy is None:
            sampling_strategy = ExplicitSampleAveragingStrategy()
        self.sampling_strategy = sampling_strategy
        self._samples = []
        super().__init__(*args, **kwargs)
        self._compute_values()

    def reset(self) -> None:
        """Reset inherited state and drop all accumulated samples."""
        super().reset()
        self._samples = []

    def _compute_values(self) -> None:
        # Collapse the sample pool into the tuple of current values.
        self._values = tuple(self.sampling_strategy.sample(self._samples))

    # NOTE(review): the two `values` definitions below look like a stripped
    # @property getter and @values.setter pair; as written, the second
    # definition shadows the first. Confirm against the original source
    # before relying on the getter.
    def values(self) -> FeaturesValuesLike:
        return self._values

    def values(self, values: FeaturesValuesLike) -> None:
        self.add_sample(values)

    def samples(self) -> List[FeaturesValuesLike]:
        """Return a shallow copy of the accumulated samples."""
        return list(self._samples)

    def nb_samples(self) -> int:
        """Number of samples accumulated so far."""
        return len(self._samples)

    def add_sample(self, sample) -> None:
        """Append one sample and refresh the derived values."""
        self._samples.append(sample)
        self._compute_values()

    def merge(self, other) -> None:
        """Absorb another SampledFeatures' samples and recompute values."""
        self._samples += other._samples
        self._compute_values()
class TestReadValuesPlainSingle(ReadValuesPlain):
    # Concrete plain-read test case parameterized by module-level fixtures.
    # `Pdescr` and `PbufferT` are defined elsewhere in this file — presumably
    # a structured dtype description and matching record buffers; confirm.
    _descr = Pdescr
    # 0 selects the single-row code path in the ReadValuesPlain base class
    # (multi-row variants set this non-zero) — TODO confirm against the base.
    multiple_rows = 0
    _buffer = PbufferT[0]
# Restored decorators: the source lines began with a bare `.parametrize(...)`
# (a syntax error) — the `@pytest.mark` prefix was evidently lost.
@pytest.mark.parametrize('lr', [0.0001])
@pytest.mark.parametrize('module', [torch.nn.Linear(2, 3)])
def test_adam_factory(lr: float, module: torch.nn.Module) -> None:
    """AdamFactory.create builds a torch Adam with the requested lr, and the
    factory round-trips through serialize/deserialize."""
    factory = AdamFactory()
    optim = factory.create(module.named_modules(), lr)
    assert isinstance(optim, Adam)
    assert (optim.defaults['lr'] == lr)
    # Round-trip check: deserializing a serialized factory must not raise.
    AdamFactory.deserialize(factory.serialize())
class DynamicGlobalWindowTransformer(nn.Module):
    """Post-norm transformer block: global multi-head self-attention followed
    by a feed-forward network, each wrapped in residual + LayerNorm.

    Input/output are batch-first; internally the tensor is transposed to
    sequence-first for the attention module.
    """

    def __init__(self, dim, head, FFNdim) -> None:
        super(DynamicGlobalWindowTransformer, self).__init__()
        # Submodule names kept stable for state-dict compatibility.
        self.MHSA = GlobalMHA(dim, head)
        self.FFN = FeedForwardNetwork(dim, FFNdim)
        self.ln1 = nn.LayerNorm(dim, eps=1e-05)
        self.ln2 = nn.LayerNorm(dim, eps=1e-05)

    def forward(self, x, mask):
        """Apply attention + FFN; returns the transformed tensor and the
        attention weights from the MHSA submodule."""
        # (batch, seq, dim) -> (seq, batch, dim) for the attention module.
        seq_first = x.permute([1, 0, 2])
        attn_out, attn = self.MHSA(seq_first, mask)
        # Residual connections with post-normalization.
        seq_first = self.ln1(seq_first + attn_out)
        seq_first = self.ln2(seq_first + self.FFN(seq_first))
        # Back to (batch, seq, dim).
        return (seq_first.permute([1, 0, 2]), attn)
def render_bboxes_to_img(image, bboxes, color=(255, 0, 0), thickness=5):
    """Return a copy of `image` with a rectangle drawn for each bounding box.

    Args:
        image: source image (array accepted by cv2); left unmodified.
        bboxes: iterable of boxes whose first four entries are x1, y1, x2, y2.
        color: rectangle color tuple.
        thickness: rectangle line thickness in pixels.
    """
    canvas = image.copy()
    for bbox in bboxes:
        # Coordinates may be floats; cv2 expects integer pixel positions.
        x1, y1, x2, y2 = (int(v) for v in bbox[:4])
        cv2.rectangle(canvas, (x1, y1), (x2, y2), color, thickness)
    return canvas
class ChannelGate(nn.Module):
    """CBAM-style channel attention gate.

    Pools the spatial dimensions of a (N, C, H, W) input with one or more
    pooling operators, runs each pooled descriptor through a shared MLP,
    sums the results, and rescales the input channel-wise by the sigmoid of
    that sum.
    """

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
        """
        Args:
            gate_channels: number of input channels C.
            reduction_ratio: bottleneck reduction factor of the shared MLP.
            pool_types: sequence drawn from {'avg', 'max', 'lp', 'lse'};
                defaults to ['avg', 'max']. (Was a mutable default list
                shared across all instances — replaced with a None sentinel.)
        """
        super(ChannelGate, self).__init__()
        if pool_types is None:
            pool_types = ['avg', 'max']
        self.gate_channels = gate_channels
        # Shared two-layer bottleneck MLP; Flatten is a helper defined
        # elsewhere in this file.
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, (gate_channels // reduction_ratio)),
            nn.ReLU(),
            nn.Linear((gate_channels // reduction_ratio), gate_channels),
        )
        self.pool_types = pool_types

    def forward(self, x):
        """Return `x` rescaled channel-wise by the learned attention weights."""
        channel_att_sum = None
        spatial = (x.size(2), x.size(3))
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                pooled = F.avg_pool2d(x, spatial, stride=spatial)
            elif pool_type == 'max':
                pooled = F.max_pool2d(x, spatial, stride=spatial)
            elif pool_type == 'lp':
                pooled = F.lp_pool2d(x, 2, spatial, stride=spatial)
            elif pool_type == 'lse':
                # Log-sum-exp pooling; helper defined elsewhere in this file.
                pooled = logsumexp_2d(x)
            else:
                # Previously an unknown pool type silently reused the previous
                # iteration's result (or raised NameError on the first pass).
                raise ValueError(f'unsupported pool_type: {pool_type!r}')
            channel_att_raw = self.mlp(pooled)
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        # Tensor.sigmoid() replaces the deprecated F.sigmoid.
        scale = channel_att_sum.sigmoid().unsqueeze(2).unsqueeze(3).expand_as(x)
        return (x * scale)
class XLNetConfig(PretrainedConfig):
    """Configuration class for XLNet models.

    Stores model hyperparameters (vocabulary size, hidden size, layer/head
    counts, memory length, summary head settings, ...) and validates that
    ``d_model`` is divisible by ``n_head``.
    """
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation='gelu', untie_r=True, attn_type='bi', initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=(- 1), same_length=False, summary_type='last', summary_use_proj=True, summary_activation='tanh', summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        """Build the config; raises ValueError if d_model is not a multiple of
        n_head, or if an explicit `d_head` kwarg disagrees with d_model // n_head."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if ((d_model % n_head) != 0):
            raise ValueError(f"'d_model % n_head' ({(d_model % n_head)}) should be equal to 0")
        if ('d_head' in kwargs):
            if (kwargs['d_head'] != (d_model // n_head)):
                raise ValueError(f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({(d_model // n_head)})")
        # d_head is always derived, never taken from kwargs.
        self.d_head = (d_model // n_head)
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        # Legacy alias: `use_cache` maps onto `use_mems_eval`.
        if ('use_cache' in kwargs):
            warnings.warn('The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval` instead.', FutureWarning)
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    # Restored decorators: the source had a bare `_position_embeddings.setter`
    # expression (NameError at class creation) — the `@property` /
    # `@max_position_embeddings.setter` pair was evidently stripped.
    @property
    def max_position_embeddings(self):
        """XLNet has no fixed sequence-length limit; -1 signals 'unbounded'."""
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return (- 1)

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.