code stringlengths 281 23.7M |
|---|
class FileOutput(Output):
    """Output that writes encoded frames to a file, buffered stream or socket.

    FIX: the ``fileoutput`` and ``connectiondead`` accessors were plain
    ``def``s (the ``@property``/``@<name>.setter`` decorators had been lost),
    so each second definition silently overwrote the first and
    ``self.fileoutput = file`` in ``__init__`` never invoked the setter.
    The decorators are restored below.
    """

    def __init__(self, file=None, pts=None, split=None):
        """Create the output.

        :param file: filename, Path, or io.BufferedIOBase to write to
        :param pts: forwarded to the base Output
        :param split: max datagram payload size when writing to a UDP socket
        """
        super().__init__(pts=pts)
        self.dead = False
        self.fileoutput = file  # invokes the property setter below
        self._firstframe = True
        self._before = None
        self._connectiondead = None
        self._splitsize = split

    @property
    def fileoutput(self):
        """Return the underlying file object (or None)."""
        return self._fileoutput

    @fileoutput.setter
    def fileoutput(self, file):
        """Attach a new output target; accepts None, a path, or a stream."""
        self._split = False
        self._firstframe = True
        self._needs_close = False
        if (file is None):
            self._fileoutput = None
        else:
            if (isinstance(file, str) or isinstance(file, Path)):
                self._fileoutput = open(file, 'wb')
                self._needs_close = True
            elif isinstance(file, io.BufferedIOBase):
                self._fileoutput = file
            else:
                raise RuntimeError('Must pass io.BufferedIOBase')
            # UDP sockets need frames split into datagram-sized chunks.
            if (hasattr(self._fileoutput, 'raw') and isinstance(self._fileoutput.raw, socket.SocketIO) and (self._fileoutput.raw._sock.type == socket.SocketKind.SOCK_DGRAM)):
                self._split = True

    @property
    def connectiondead(self):
        """Callback invoked (with the exception) when the connection breaks."""
        return self._connectiondead

    @connectiondead.setter
    def connectiondead(self, _callback):
        if (isinstance(_callback, types.FunctionType) or (_callback is None)):
            self._connectiondead = _callback
        else:
            raise RuntimeError('Must pass callback function or None')

    def outputframe(self, frame, keyframe=True, timestamp=None):
        """Write a frame, discarding everything before the first keyframe."""
        if ((self._fileoutput is not None) and self.recording):
            if self._firstframe:
                if (not keyframe):
                    return
                else:
                    self._firstframe = False
            self._write(frame, timestamp)

    def stop(self):
        """Stop recording and close the output file."""
        super().stop()
        self.close()

    def close(self):
        """Close the file if we opened it; report broken connections."""
        try:
            if self._needs_close:
                self._fileoutput.close()
        except (ConnectionResetError, ConnectionRefusedError, BrokenPipeError) as e:
            self.dead = True
            if (self._connectiondead is not None):
                self._connectiondead(e)

    def _write(self, frame, timestamp=None):
        """Write one frame (chunked for datagram sockets) and record its pts."""
        try:
            if self._split:
                # 65507 is the maximum UDP payload size.
                maxsize = (65507 if (self._splitsize is None) else self._splitsize)
                tosend = len(frame)
                off = 0
                while (tosend > 0):
                    lenv = min(tosend, maxsize)
                    self._fileoutput.write(frame[off:(off + lenv)])
                    self._fileoutput.flush()
                    off += lenv
                    tosend -= lenv
            else:
                self._fileoutput.write(frame)
                self._fileoutput.flush()
            self.outputtimestamp(timestamp)
        except (ConnectionResetError, ConnectionRefusedError, BrokenPipeError, ValueError) as e:
            self.dead = True
            if (self._connectiondead is not None):
                self._connectiondead(e)
def test_tag_name(converter: BaseConverter) -> None:
    """Round-trip a tagged union through a custom tag key."""
    union = Union[A, B]
    key = 't'
    configure_tagged_union(union, converter, tag_name=key)

    # Unstructuring injects the custom tag key alongside the payload.
    assert converter.unstructure(A(1), union) == {key: 'A', 'a': 1}
    assert converter.unstructure(B('1'), union) == {key: 'B', 'a': '1'}

    # Structuring dispatches on the tag and coerces the payload type.
    assert converter.structure({key: 'A', 'a': 1}, union) == A(1)
    assert converter.structure({key: 'B', 'a': 1}, union) == B('1')
class LineMaterial(Material):
    """Material for rendering lines.

    FIX: the accessor pairs below had lost their ``@property`` decorators,
    so each setter ``def`` overwrote its getter, and the orphaned
    ``_mode.setter`` / ``_colors.setter`` / ``_interpolation.setter``
    expressions referenced undefined names (NameError at class creation).
    Restored as proper properties.
    """

    uniform_type = dict(Material.uniform_type, color='4xf4', thickness='f4')

    def __init__(self, color=(1, 1, 1, 1), thickness=2.0, color_mode='auto', map=None, map_interpolation='linear', aa=True, **kwargs):
        super().__init__(**kwargs)
        self.color = color
        self.aa = aa
        self.map = map
        self.map_interpolation = map_interpolation
        self.thickness = thickness
        self.color_mode = color_mode

    def _wgpu_get_pick_info(self, pick_value):
        """Decode a packed pick value into vertex index and segment coord."""
        values = unpack_bitfield(pick_value, wobject_id=20, index=26, coord=18)
        # coord is stored biased by 100000 and scaled; map back to [-1, 1]-ish.
        return {'vertex_index': values['index'], 'segment_coord': ((values['coord'] - 100000) / 100000.0)}

    @property
    def color(self):
        """The uniform line color (stored in the uniform buffer)."""
        return Color(self.uniform_buffer.data['color'])

    @color.setter
    def color(self, color):
        color = Color(color)
        self.uniform_buffer.data['color'] = color
        self.uniform_buffer.update_range(0, 1)
        self._store.color_is_transparent = (color.a < 1)

    @property
    def color_is_transparent(self):
        """True when the uniform color has alpha < 1."""
        return self._store.color_is_transparent

    @property
    def aa(self):
        """Whether anti-aliasing is enabled for this material."""
        return self._store.aa

    @aa.setter
    def aa(self, aa):
        self._store.aa = bool(aa)

    @property
    def color_mode(self):
        """How the line color is determined (a ColorMode value)."""
        return self._store.color_mode

    @color_mode.setter
    def color_mode(self, value):
        # Accept a ColorMode member, or its (optionally 'ColorMode.'-prefixed)
        # case-insensitive string name.
        if isinstance(value, ColorMode):
            pass
        elif isinstance(value, str):
            if value.startswith('ColorMode.'):
                value = value.split('.')[(- 1)]
            try:
                value = getattr(ColorMode, value.lower())
            except AttributeError:
                raise ValueError(f"Invalid color_mode: '{value}'")
        else:
            raise TypeError(f'Invalid color_mode class: {value.__class__.__name__}')
        self._store.color_mode = value

    @property
    def vertex_colors(self):
        """Deprecated alias: True when color_mode is 'vertex'."""
        return (self.color_mode == ColorMode.vertex)

    @vertex_colors.setter
    def vertex_colors(self, value):
        raise DeprecationWarning("vertex_colors is deprecated, use ``color_mode='vertex'``")

    @property
    def thickness(self):
        """Line thickness as a float (stored in the uniform buffer)."""
        return float(self.uniform_buffer.data['thickness'])

    @thickness.setter
    def thickness(self, thickness):
        self.uniform_buffer.data['thickness'] = thickness
        self.uniform_buffer.update_range(0, 1)

    @property
    def map(self):
        """Texture used to color the line, or None."""
        return self._map

    @map.setter
    def map(self, map):
        assert ((map is None) or isinstance(map, Texture))
        self._map = map

    @property
    def map_interpolation(self):
        """Sampling interpolation for the map: 'nearest' or 'linear'."""
        return self._store.map_interpolation

    @map_interpolation.setter
    def map_interpolation(self, value):
        assert (value in ('nearest', 'linear'))
        self._store.map_interpolation = value
def get_config():
    """Return the default config specialized for a DDPM model on LSUN
    church_outdoor, using a discrete-time VP-SDE with ancestral sampling."""
    cfg = get_default_configs()

    # Training: discrete-time VP-SDE, loss averaged rather than summed.
    cfg.training.sde = 'vpsde'
    cfg.training.continuous = False
    cfg.training.reduce_mean = True

    # Sampling: predictor-corrector scheme with no corrector step.
    cfg.sampling.method = 'pc'
    cfg.sampling.predictor = 'ancestral_sampling'
    cfg.sampling.corrector = 'none'

    # Data selection.
    cfg.data.category = 'church_outdoor'
    cfg.data.centered = True

    # DDPM backbone hyper-parameters.
    net = cfg.model
    net.name = 'ddpm'
    net.scale_by_sigma = False
    net.num_scales = 1000
    net.ema_rate = 0.9999
    net.normalization = 'GroupNorm'
    net.nonlinearity = 'swish'
    net.nf = 128
    net.ch_mult = (1, 1, 2, 2, 4, 4)
    net.num_res_blocks = 2
    net.attn_resolutions = (16,)
    net.resamp_with_conv = True
    net.conditional = True

    # Optimizer.
    cfg.optim.lr = 2e-05

    return cfg
class WashExecutor(ActionExecutor):
    """Executor for a "wash" script action: marks the target object CLEAN.

    ``execute`` is a generator that yields the successor environment
    state(s), following the ActionExecutor protocol used elsewhere in the
    project — presumably each yielded state is one possible outcome
    (TODO confirm against the base class).
    """

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        """Yield the state after washing the object named on the first script line.

        Yields nothing if the object is not found (an error is recorded on
        ``info``) or the washability precondition fails.  With
        ``modify=False`` the unchanged state is yielded instead.
        """
        current_line = script[0]
        info.set_current_line(current_line)
        node = state.get_state_node(current_line.object())
        if (node is None):
            info.object_found_error()
        elif self.check_washable(state, node, info, char_index):
            # Washing flips the node's state flags: DIRTY -> CLEAN, on a copy
            # so the original graph node is never mutated.
            new_node = node.copy()
            new_node.states.discard(State.DIRTY)
            new_node.states.add(State.CLEAN)
            if modify:
                (yield state.change_state([ChangeNode(new_node)], in_place=in_place))
            else:
                (yield state)

    def check_washable(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo, char_index):
        """Return True iff the acting character is close enough to ``node``.

        On failure, records a formatted error on ``info`` and returns False.
        """
        if (not _is_character_close_to(state, node, char_index)):
            info.error('{} is not close to {}', _get_character_node(state, char_index), node)
            return False
        return True
def load_ref(path):
    """Parse a generate-style output file into sources, targets and references.

    The file contains tab-separated lines prefixed 'S-' (source), 'T-'
    (target), and runs of 'R...' lines (one or more references per example).

    :param path: path of the file to parse
    :returns: tuple ``(src, tgt, refs)`` — lists of source strings, target
        strings, and per-example lists of reference strings.
    """
    with open(path) as f:
        lines = f.readlines()
    (src, tgt, refs) = ([], [], [])
    i = 0
    while (i < len(lines)):
        if lines[i].startswith('S-'):
            src.append(lines[i].split('\t')[1].rstrip())
            i += 1
        elif lines[i].startswith('T-'):
            tgt.append(lines[i].split('\t')[1].rstrip())
            i += 1
        else:
            # Collect a run of consecutive reference lines.
            a = []
            while ((i < len(lines)) and lines[i].startswith('R')):
                a.append(lines[i].split('\t')[1].rstrip())
                i += 1
            if a:
                refs.append(a)
            else:
                # BUG FIX: a line matching none of the prefixes previously
                # left `i` unchanged, looping forever. Skip it instead.
                i += 1
    return (src, tgt, refs)
def _etree_to_vdom(node: etree._Element, transforms: Iterable[_ModelTransform]) -> VdomDict:
    """Recursively convert an lxml element into a VDOM dict.

    Children are converted first, the element itself is assembled and
    mutated in place, and then each transform is applied in order.
    """
    if not isinstance(node, etree._Element):
        raise TypeError(
            f'Expected node to be a etree._Element, not {type(node).__name__}'
        )

    vdom_children = _generate_vdom_children(node, transforms)
    element = vdom(node.tag, dict(node.items()), *vdom_children)
    _mutate_vdom(element)

    for apply_transform in transforms:
        element = apply_transform(element)
    return element
def load_tinynas_net(backbone_cfg):
    """Construct a TinyNAS backbone from the structure string in the config.

    The structure string is a Python literal (list of layer dicts) stored as
    lines; quantization bit-width fields are stripped since TinyNAS does not
    consume them.
    """
    import ast

    structure_text = ''.join(part.strip() for part in backbone_cfg.net_structure_str)
    structure_info = ast.literal_eval(structure_text)

    # Drop quantization-related keys, if present.
    for layer_cfg in structure_info:
        layer_cfg.pop('nbitsA', None)
        layer_cfg.pop('nbitsW', None)

    return TinyNAS(
        structure_info=structure_info,
        out_indices=backbone_cfg.out_indices,
        with_spp=backbone_cfg.with_spp,
        use_focus=backbone_cfg.use_focus,
        act=backbone_cfg.act,
        reparam=backbone_cfg.reparam,
    )
@pytest.mark.parametrize('protocol', ['ucx', 'ucxx'])
def test_initialize_ucx_all(protocol):
    """Run the UCX initialization check for each protocol in a subprocess.

    FIX: the parametrize decorator had been truncated to a bare
    ``.parametrize(...)`` expression; restored as
    ``@pytest.mark.parametrize``.
    """
    # Skip when the corresponding UCX binding is not installed.
    if (protocol == 'ucx'):
        pytest.importorskip('ucp')
    elif (protocol == 'ucxx'):
        pytest.importorskip('ucxx')
    # Run in a child process so UCX global state doesn't leak into this one.
    p = mp.Process(target=_test_initialize_ucx_all, args=(protocol,))
    p.start()
    p.join()
    assert (not p.exitcode)
class CalendarWrapper(hwndwrapper.HwndWrapper):
    """Wrap a Windows SysMonthCal32 (month calendar) common control.

    Every operation is performed by sending an MCM_* window message to the
    control.  Structures the control must read or fill (SYSTEMTIME,
    MCHITTESTINFO, RECT, day-state arrays) are marshalled through a
    RemoteMemoryBlock allocated in the control's own process.
    """

    friendlyclassname = 'Calendar'
    windowclasses = ['SysMonthCal32']
    has_title = False

    # Friendly color-place names -> MCSC_* constants used by MCM_SET/GETCOLOR.
    place_in_calendar = {'background': win32defines.MCSC_BACKGROUND, 'month_background': win32defines.MCSC_MONTHBK, 'text': win32defines.MCSC_TEXT, 'title_background': win32defines.MCSC_TITLEBK, 'title_text': win32defines.MCSC_TITLETEXT, 'trailing_text': win32defines.MCSC_TRAILINGTEXT}

    def __init__(self, hwnd):
        """Initialize the wrapper around an existing calendar window handle."""
        super(CalendarWrapper, self).__init__(hwnd)

    def get_current_date(self):
        """Return the currently selected date as a SYSTEMTIME (MCM_GETCURSEL).

        Raises RuntimeError if the control reports failure (returns 0).
        """
        remote_mem = RemoteMemoryBlock(self)
        system_date = win32structures.SYSTEMTIME()
        remote_mem.Write(system_date)
        res = self.send_message(win32defines.MCM_GETCURSEL, 0, remote_mem)
        # The control fills the SYSTEMTIME in remote memory; read it back.
        remote_mem.Read(system_date)
        del remote_mem
        if (res == 0):
            raise RuntimeError('Failed to get the currently selected date in Calendar')
        return system_date

    def set_current_date(self, year, month, day_of_week, day):
        """Select the given date via MCM_SETCURSEL.

        Raises RuntimeError if the control reports failure.
        """
        remote_mem = RemoteMemoryBlock(self)
        system_time = win32structures.SYSTEMTIME()
        system_time.wYear = year
        system_time.wMonth = month
        system_time.wDayOfWeek = day_of_week
        system_time.wDay = day
        # Time-of-day fields are irrelevant for date selection; zero them.
        system_time.wHour = 0
        system_time.wMinute = 0
        system_time.wSecond = 0
        system_time.wMilliseconds = 0
        remote_mem.Write(system_time)
        res = self.send_message(win32defines.MCM_SETCURSEL, win32defines.GDT_VALID, remote_mem)
        del remote_mem
        if (res == 0):
            raise RuntimeError('Failed to set the currently selected date in Calendar')

    def get_border(self):
        """Return the calendar border width (MCM_GETCALENDARBORDER)."""
        return self.send_message(win32defines.MCM_GETCALENDARBORDER, 0, 0)

    def set_border(self, border):
        """Set the calendar border width (MCM_SETCALENDARBORDER)."""
        self.send_message(win32defines.MCM_SETCALENDARBORDER, True, border)

    def count(self):
        """Return the number of calendars displayed (MCM_GETCALENDARCOUNT)."""
        return self.send_message(win32defines.MCM_GETCALENDARCOUNT, 0, 0)

    def get_view(self):
        """Return the current view (month/year/decade/century) as an int."""
        return self.send_message(win32defines.MCM_GETCURRENTVIEW, 0, 0)

    def set_view(self, viewType):
        """Switch to the given view (MCM_SETCURRENTVIEW); raise on failure."""
        res = self.send_message(win32defines.MCM_SETCURRENTVIEW, 0, viewType)
        if (res == 0):
            raise RuntimeError('Failed to set view in Calendar')

    def set_day_states(self, month_states):
        """Set bold-day bitmasks, one DWORD per visible month (MCM_SETDAYSTATE).

        :param month_states: iterable of per-month day-state bitmasks
        """
        remote_mem = RemoteMemoryBlock(self)
        day_states = (wintypes.DWORD * len(month_states))(*month_states)
        remote_mem.Write(day_states)
        res = self.send_message(win32defines.MCM_SETDAYSTATE, len(day_states), remote_mem)
        del remote_mem
        if (res == 0):
            raise RuntimeError('Failed to set the day states in Calendar')
        return res

    def calc_min_rectangle(self, left, top, right, bottom):
        """Return the smallest rectangle that can hold one calendar
        (MCM_SIZERECTTOMIN), starting from the supplied rectangle."""
        remote_mem = RemoteMemoryBlock(self)
        minimized_rect = win32structures.RECT()
        minimized_rect.left = left
        minimized_rect.top = top
        minimized_rect.right = right
        minimized_rect.bottom = bottom
        remote_mem.Write(minimized_rect)
        self.send_message(win32defines.MCM_SIZERECTTOMIN, 0, remote_mem)
        # The control rewrites the RECT in place with the minimal size.
        remote_mem.Read(minimized_rect)
        del remote_mem
        return minimized_rect

    def hit_test(self, x, y):
        """Return the hit-test code for client coordinates (x, y) (MCM_HITTEST)."""
        remote_mem = RemoteMemoryBlock(self)
        hit_test_info = win32structures.MCHITTESTINFO()
        point = win32structures.POINT()
        point.x = x
        point.y = y
        hit_test_info.pt = point
        # cbSize must be filled for the control to accept the structure.
        hit_test_info.cbSize = ctypes.sizeof(hit_test_info)
        remote_mem.Write(hit_test_info)
        res = self.send_message(win32defines.MCM_HITTEST, 0, remote_mem)
        del remote_mem
        return res

    def set_id(self, ID):
        """Set the calendar type by friendly name (MCM_SETCALID).

        Raises ValueError for an unknown name.
        """
        dict_types = {'gregorian': win32defines.CAL_GREGORIAN, 'gregorian_us': win32defines.CAL_GREGORIAN_US, 'japan': win32defines.CAL_JAPAN, 'taiwan': win32defines.CAL_TAIWAN, 'korea': win32defines.CAL_KOREA, 'hijri': win32defines.CAL_HIJRI, 'thai': win32defines.CAL_THAI, 'hebrew': win32defines.CAL_HEBREW, 'gregorian_me_french': win32defines.CAL_GREGORIAN_ME_FRENCH, 'gregorian_arabic': win32defines.CAL_GREGORIAN_ARABIC, 'gregorian_english_xlit': win32defines.CAL_GREGORIAN_XLIT_ENGLISH, 'gregorian_french_xlit': win32defines.CAL_GREGORIAN_XLIT_FRENCH, 'umalqura': win32defines.CAL_UMALQURA}
        if (ID in dict_types):
            self.send_message(win32defines.MCM_SETCALID, dict_types[ID], 0)
        else:
            raise ValueError('Incorrect calendar ID (use one of {0})'.format(dict_types.keys()))

    def get_id(self):
        """Return the current calendar type id (MCM_GETCALID)."""
        return self.send_message(win32defines.MCM_GETCALID, 0, 0)

    def set_color(self, place_of_color, red, green, blue):
        """Set the color of a calendar area (MCM_SETCOLOR).

        :param place_of_color: one of the keys of ``place_in_calendar``
        :param red, green, blue: channel values, each 0..255
        Raises RuntimeError on bad channel values or a failed message,
        ValueError for an unknown place name.
        """
        if (not (0 <= red <= 255)):
            raise RuntimeError('Incorrect range of red color, must be from 0 to 255')
        if (not (0 <= green <= 255)):
            raise RuntimeError('Incorrect range of green color, must be from 0 to 255')
        if (not (0 <= blue <= 255)):
            raise RuntimeError('Incorrect range of blue color, must be from 0 to 255')
        # Pack channels as 0x00RRGGBB.
        color = (((red << 16) | (green << 8)) | blue)
        if (place_of_color in self.place_in_calendar):
            result = self.send_message(win32defines.MCM_SETCOLOR, self.place_in_calendar[place_of_color], color)
        else:
            raise ValueError('Incorrect calendar place ID (use one of {0})'.format(self.place_in_calendar.keys()))
        if (result == (- 1)):
            raise RuntimeError('Incorrect color')
        return result

    # NOTE(review): disabled get_color implementation kept as a bare string
    # literal (a no-op). Left untouched; revive or delete deliberately.
    '\n def get_color(self, place_of_color):\n """\n Return color of place in calendar, which you specify.\n\n Receive only one parameter, which takes variants below:\n \'background\', \'month_background\', \'text\', \'title_background\', \'title_text\', \'trailing_text\'\n """\n\n if place_of_color in self.place_in_calendar:\n return self.send_message(win32defines.MCM_GETCOLOR, self.place_in_calendar[place_of_color], 0)\n else:\n raise ValueError(\'Incorrect calendar place ID (use one of {0})\'.format(self.place_in_calendar.keys()))\n '

    def set_today(self, year, month, day):
        """Set the "today" date shown by the control (MCM_SETTODAY)."""
        remote_mem = RemoteMemoryBlock(self)
        system_time = win32structures.SYSTEMTIME()
        system_time.wYear = year
        system_time.wMonth = month
        system_time.wDay = day
        system_time.wHour = 0
        system_time.wMinute = 0
        system_time.wSecond = 0
        system_time.wMilliseconds = 0
        remote_mem.Write(system_time)
        res = self.send_message(win32defines.MCM_SETTODAY, 0, remote_mem)
        del remote_mem
        if (res == 0):
            raise RuntimeError('Failed to set today date in Calendar')

    def get_today(self):
        """Return the control's "today" date as a SYSTEMTIME (MCM_GETTODAY)."""
        remote_mem = RemoteMemoryBlock(self)
        system_date = win32structures.SYSTEMTIME()
        remote_mem.Write(system_date)
        res = self.send_message(win32defines.MCM_GETTODAY, 0, remote_mem)
        remote_mem.Read(system_date)
        del remote_mem
        if (res == 0):
            raise RuntimeError('Failed to get today date in Calendar')
        return system_date

    def set_first_weekday(self, dayNum):
        """Set the first day of the week (MCM_SETFIRSTDAYOFWEEK)."""
        self.send_message(win32defines.MCM_SETFIRSTDAYOFWEEK, 0, dayNum)

    def get_first_weekday(self):
        """Return (hi_word, lo_word) of MCM_GETFIRSTDAYOFWEEK — the low word
        is the day number; the high word presumably flags a non-locale
        setting (TODO confirm against the Win32 docs)."""
        res = self.send_message(win32defines.MCM_GETFIRSTDAYOFWEEK, 0, 0)
        return (win32functions.HiWord(res), win32functions.LoWord(res))

    def get_month_delta(self):
        """Return the scroll rate in months (MCM_GETMONTHDELTA)."""
        return self.send_message(win32defines.MCM_GETMONTHDELTA, 0, 0)

    def set_month_delta(self, delta):
        """Set the scroll rate in months (MCM_SETMONTHDELTA); delta must be >= 0."""
        if (delta < 0):
            raise ValueError('Month delta must be greater than 0')
        self.send_message(win32defines.MCM_SETMONTHDELTA, delta, 0)

    def get_month_range(self, scope_of_range):
        """Return (count, [first, last]) month range (MCM_GETMONTHRANGE).

        :param scope_of_range: GMR_DAYSTATE or GMR_VISIBLE
        :returns: the message result and a 2-element SYSTEMTIME array with
            the first and last month in the range.
        """
        if (scope_of_range not in [win32defines.GMR_DAYSTATE, win32defines.GMR_VISIBLE]):
            raise ValueError('scope_of_range value must be one of the following: GMR_DAYSTATE or GMR_VISIBLE')
        remote_mem = RemoteMemoryBlock(self)
        system_date_arr = (win32structures.SYSTEMTIME * 2)()
        system_date_arr[0] = win32structures.SYSTEMTIME()
        system_date_arr[1] = win32structures.SYSTEMTIME()
        remote_mem.Write(system_date_arr)
        res = self.send_message(win32defines.MCM_GETMONTHRANGE, scope_of_range, remote_mem)
        remote_mem.Read(system_date_arr)
        del remote_mem
        return (res, system_date_arr)
def test_line_dict_parser():
    """LineJsonParser validates its keys argument and decodes JSON lines."""
    lines = [
        json.dumps({'filename': 'sample1.jpg', 'text': 'hello'}),
        json.dumps({'filename': 'sample2.jpg', 'text': 'world'}),
    ]

    # The keys argument must be a non-empty list.
    with pytest.raises(AssertionError):
        parser = LineJsonParser('filename')
    with pytest.raises(AssertionError):
        parser = LineJsonParser([])

    # Valid keys: the requested fields are decoded from the JSON line.
    parser = LineJsonParser(['filename', 'text'])
    assert parser.get_item(lines, 0) == {'filename': 'sample1.jpg', 'text': 'hello'}

    # Keys missing from the data raise when an item is fetched.
    with pytest.raises(Exception):
        parser = LineJsonParser(['img_name', 'text'])
        parser.get_item(lines, 0)
def plot_results(model, p, facs, clis=None):
    """Plot a solved facility-location model: selected facilities, the demand
    points each one covers, and a matching legend.

    :param model: solved model exposing ``fac_vars`` (LP variables),
        ``fac2cli`` (facility -> covered client indices) and ``name``
    :param p: number of facilities; used to shrink marker size per facility
    :param facs: GeoDataFrame of facility sites (indexed with a 'dv' column)
    :param clis: GeoDataFrame of client/demand sites
    """
    (fig, ax) = plt.subplots(figsize=(6, 6))
    (markersize, markersize_factor) = (4, 4)
    ax.set_title(model.name, fontsize=15)

    # Map each *selected* facility's name to the client geometries it covers
    # and to its row index in `facs`.
    cli_points = {}
    fac_sites = {}
    for (i, dv) in enumerate(model.fac_vars):
        if dv.varValue:
            # NOTE(review): `dv` is rebound from the LP variable to the
            # facility's name from the 'dv' column here.
            dv = facs.loc[(i, 'dv')]
            fac_sites[dv] = i
            geom = clis.iloc[model.fac2cli[i]]['geometry']
            cli_points[dv] = geom

    # Base layer: all candidate facility sites plus a legend entry.
    legend_elements = []
    facs.plot(ax=ax, fc='brown', marker='*', markersize=80, zorder=8)
    _label = f'Facility sites ($n$={len(model.fac_vars)})'
    _mkws = dict(marker='*', markerfacecolor='brown', markeredgecolor='brown')
    legend_elements.append(mlines.Line2D([], [], ms=7, lw=0, label=_label, **_mkws))

    # One layer per selected facility: its covered demand points, then the
    # facility itself highlighted on top. zorder increases and marker size
    # decreases per facility so overlapping coverage stays distinguishable.
    zorder = 4
    for (fname, fac) in fac_sites.items():
        cset = dv_colors[fname]
        geoms = cli_points[fname]
        gdf = geopandas.GeoDataFrame(geoms)
        gdf.plot(ax=ax, zorder=zorder, ec='k', fc=cset, markersize=(100 * markersize))
        _label = f'Demand sites covered by {fname}'
        _mkws = dict(markerfacecolor=cset, markeredgecolor='k', ms=(markersize + 7))
        legend_elements.append(mlines.Line2D([], [], marker='o', lw=0, label=_label, **_mkws))
        ec = 'k'
        lw = 2
        facs.iloc[[fac]].plot(ax=ax, marker='*', markersize=1000, zorder=9, fc=cset, ec=ec, lw=lw)
        _mkws = dict(markerfacecolor=cset, markeredgecolor=ec, markeredgewidth=lw)
        legend_elements.append(mlines.Line2D([], [], marker='*', ms=20, lw=0, label=fname, **_mkws))
        zorder += 1
        markersize -= (markersize_factor / p)

    # Legend outside the axes on the right.
    kws = dict(loc='upper left', bbox_to_anchor=(1.05, 0.7))
    plt.legend(handles=legend_elements, **kws)
class ProjectIssueNoteAwardEmojiManager(NoUpdateMixin, RESTManager):
    """Manager for award emoji attached to a note on a project issue.

    Award emoji can be created, listed, retrieved and deleted but not
    updated (hence NoUpdateMixin).
    """

    # API endpoint; the placeholders are filled from parent objects'
    # attributes via _from_parent_attrs.
    _path = '/projects/{project_id}/issues/{issue_iid}/notes/{note_id}/award_emoji'
    _obj_cls = ProjectIssueNoteAwardEmoji
    _from_parent_attrs = {'project_id': 'project_id', 'issue_iid': 'issue_iid', 'note_id': 'id'}
    _create_attrs = RequiredOptional(required=('name',))

    def get(self, id: Union[str, int], lazy: bool = False, **kwargs: Any) -> ProjectIssueNoteAwardEmoji:
        """Retrieve a single award emoji.

        The cast only narrows the static return type of the generic base
        ``get``; behavior is unchanged.
        """
        return cast(ProjectIssueNoteAwardEmoji, super().get(id=id, lazy=lazy, **kwargs))
def bind(key, *, info):
    """Build a completion model for binding `key` to a command.

    The model shows the key's current/default binding (when one exists)
    followed by all available commands.
    """
    completion = completionmodel.CompletionModel(column_widths=(20, 60, 20))

    current = _bind_current_default(key, info)
    if current:
        completion.add_category(
            listcategory.ListCategory('Current/Default', current))

    commands = util.get_cmd_completions(
        info, include_hidden=True, include_aliases=True)
    completion.add_category(listcategory.ListCategory('Commands', commands))
    return completion
def admin_required(func):
    """Decorator restricting a view to authenticated administrator users.

    Aborts with 401 for anonymous users and 403 for authenticated
    non-administrators; otherwise calls the wrapped view.

    FIX: the decorator line had been mangled to a bare ``(func)`` —
    restored as ``@functools.wraps(func)`` so the wrapped view keeps its
    name/docstring (important for Flask route registration). A leftover
    debug ``print(current_user)`` was removed.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not current_user.is_authenticated:
            return abort(401)
        if not current_user.is_administrator:
            return abort(403)
        return func(*args, **kwargs)

    return wrapper
def match_pattern(string: str, i: int) -> MatchResult[Pattern]:
    """Parse a pattern: one or more concatenations separated by '|'.

    Consumes alternatives until no further '|<conc>' can be matched, then
    returns the assembled Pattern and the index reached.
    """
    first, i = match_conc(string, i)
    alternatives = [first]
    while True:
        try:
            i = static(string, i, '|')
            nxt, i = match_conc(string, i)
        except NoMatch:
            # No further alternative: the pattern ends here.
            return Pattern(*alternatives), i
        alternatives.append(nxt)
class RBDBContentHandler(ContentHandler):
    """SAX handler that imports song statistics (rating, play count,
    last-played time) from a Rhythmbox rhythmdb XML file into a library.

    Only ``<entry type="song">`` elements are considered; within them only
    the location/rating/play-count/last-played child elements are captured.
    Songs whose stats changed are collected and reported via ``finish``.
    """

    def __init__(self, library):
        ContentHandler.__init__(self)
        self._library = library
        self._current = None  # dict of captured tags for the open <entry>
        self._tag = None  # name of the stat element currently open
        self._changed_songs = []

    def characters(self, content):
        # Record text only while inside a recognized tag of a song entry.
        if ((self._current is not None) and (self._tag is not None)):
            self._current[self._tag] = content

    def startElement(self, name, attrs):
        self._tag = None
        if ((name == 'entry') and (attrs.get('type') == 'song')):
            self._current = {}
        elif (name in ('location', 'rating', 'play-count', 'last-played')):
            self._tag = name

    def endElement(self, name):
        self._tag = None
        if ((name == 'entry') and (self._current is not None)):
            current = self._current
            self._current = None
            # Only process entries that captured something besides location.
            if (len(current) > 1):
                uri = current.pop('location', '')
                try:
                    filename = uri2fsn(uri)
                except ValueError:
                    # Skip entries whose location URI can't be converted.
                    return
                self._process_song(normalize_path(filename), current)

    def _process_song(self, path, stats):
        """Apply the captured stats to the matching library song, if any.

        Malformed numeric values are silently ignored; last-played is only
        raised, never lowered.
        """
        song = self._library.get(path, None)
        if (not song):
            return
        has_changed = False
        if ('rating' in stats):
            try:
                # Rhythmbox stores a 0-5 rating; normalize to 0.0-1.0.
                value = (int(stats['rating']) / 5.0)
            except ValueError:
                pass
            else:
                song['~#rating'] = value
                has_changed = True
        if ('play-count' in stats):
            try:
                value = int(stats['play-count'])
            except ValueError:
                pass
            else:
                song['~#playcount'] = value
                has_changed = True
        if ('last-played' in stats):
            try:
                value = int(stats['last-played'])
            except ValueError:
                pass
            else:
                # song(...) is a call: it fetches '~#lastplayed' with default 0.
                if (value > song('~#lastplayed', 0)):
                    song['~#lastplayed'] = value
                    has_changed = True
        if has_changed:
            self._changed_songs.append(song)

    def finish(self):
        """Notify the library of all changed songs; return how many changed."""
        count = len(self._changed_songs)
        self._library.changed(self._changed_songs)
        self._changed_songs = []
        return count
def parse_gltf_file(file, filename, batch):
    """Parse a glTF 2.0 file and add its mesh primitives to a pyglet Batch.

    :param file: optional already-open file object; reopened in text mode
        if needed (json.load requires it)
    :param filename: resource name used to (re)open the file
    :param batch: pyglet graphics Batch receiving the vertex lists
    :returns: list of vertex lists, one per mesh primitive
    :raises ModelDecodeException: on invalid JSON, a missing 'asset'
        property, or a glTF version below 2.0
    """
    # Ensure we hold a readable text-mode file for json.load.
    if (file is None):
        file = pyglet.resource.file(filename, 'r')
    elif (file.mode != 'r'):
        file.close()
        file = pyglet.resource.file(filename, 'r')
    try:
        data = json.load(file)
    except json.JSONDecodeError:
        raise ModelDecodeException('Json error. Does not appear to be a valid glTF file.')
    finally:
        file.close()

    # Basic glTF sanity checks.
    if ('asset' not in data):
        raise ModelDecodeException('Not a valid glTF file. Asset property not found.')
    elif (float(data['asset']['version']) < 2.0):
        raise ModelDecodeException('Only glTF 2.0+ models are supported')

    # Build index -> object maps for the glTF binary-layout hierarchy:
    # buffers hold raw bytes, buffer views slice them, accessors type them.
    buffers = dict()
    buffer_views = dict()
    accessors = dict()
    materials = dict()

    for (i, item) in enumerate(data.get('buffers', [])):
        buffers[i] = Buffer(item['byteLength'], item['uri'])

    for (i, item) in enumerate(data.get('bufferViews', [])):
        buffer_index = item['buffer']
        buffer = buffers[buffer_index]
        offset = item.get('byteOffset', 0)
        length = item.get('byteLength')
        target = item.get('target')
        stride = item.get('byteStride', 1)
        buffer_views[i] = BufferView(buffer, offset, length, target, stride)

    for (i, item) in enumerate(data.get('accessors', [])):
        buf_view_index = item.get('bufferView')
        buf_view = buffer_views[buf_view_index]
        offset = item.get('byteOffset', 0)
        comp_type = item.get('componentType')
        count = item.get('count')
        maxi = item.get('max')
        mini = item.get('min')
        acc_type = item.get('type')
        sparse = item.get('sparse', None)
        accessors[i] = Accessor(buf_view, offset, comp_type, count, maxi, mini, acc_type, sparse)

    # Convert each mesh primitive into a (possibly indexed) vertex list.
    # NOTE(review): data.get('meshes') has no default — a glTF file without
    # a 'meshes' key would raise TypeError here; confirm intended.
    vertex_lists = []
    for mesh_data in data.get('meshes'):
        for primitive in mesh_data.get('primitives', []):
            indices = None
            attribute_list = []
            count = 0
            for (attribute_type, i) in primitive['attributes'].items():
                accessor = accessors[i]
                attrib = _attributes[attribute_type]
                if (not attrib):
                    # Attribute types pyglet has no mapping for are skipped.
                    continue
                attrib_size = _accessor_type_sizes[accessor.type]
                pyglet_type = _pyglet_types[accessor.component_type]
                pyglet_fmt = '{0}{1}{2}'.format(attrib, attrib_size, pyglet_type)
                count = accessor.count
                # Unpack little-endian binary data into a Python tuple.
                struct_fmt = (str((count * attrib_size)) + _struct_types[accessor.component_type])
                array = struct.unpack(('<' + struct_fmt), accessor.read())
                attribute_list.append((pyglet_fmt, array))
            if ('indices' in primitive):
                indices_index = primitive.get('indices')
                accessor = accessors[indices_index]
                attrib_size = _accessor_type_sizes[accessor.type]
                fmt = (str((accessor.count * attrib_size)) + _struct_types[accessor.component_type])
                indices = struct.unpack(('<' + fmt), accessor.read())
            # glTF materials are not parsed yet; use a default material.
            diffuse = [1.0, 1.0, 1.0]
            ambient = [1.0, 1.0, 1.0]
            specular = [1.0, 1.0, 1.0]
            emission = [0.0, 0.0, 0.0]
            shininess = 100.0
            opacity = 1.0
            material = Material('Default', diffuse, ambient, specular, emission, shininess, opacity)
            group = MaterialGroup(material=material)
            if indices:
                vlist = batch.add_indexed(count, GL_TRIANGLES, group, indices, *attribute_list)
            else:
                vlist = batch.add(count, GL_TRIANGLES, group, *attribute_list)
            vertex_lists.append(vlist)

    return vertex_lists
def pipeline(task: str=None, model: Optional=None, config: Optional[Union[(str, PretrainedConfig)]]=None, tokenizer: Optional[Union[(str, PreTrainedTokenizer, PreTrainedTokenizerFast)]]=None, feature_extractor: Optional[Union[(str, PreTrainedFeatureExtractor)]]=None, image_processor: Optional[Union[(str, BaseImageProcessor)]]=None, framework: Optional[str]=None, revision: Optional[str]=None, use_fast: bool=True, use_auth_token: Optional[Union[(str, bool)]]=None, device: Optional[Union[(int, str, 'torch.device')]]=None, device_map=None, torch_dtype=None, trust_remote_code: Optional[bool]=None, model_kwargs: Dict[(str, Any)]=None, pipeline_class: Optional[Any]=None, **kwargs) -> Pipeline:
    """Build a :class:`Pipeline` for ``task``.

    Resolves, in order: the config, the task (possibly a hub-defined custom
    task), the pipeline class, a default model when none is given, the
    framework/model pair, and finally whichever preprocessors (tokenizer,
    image processor, feature extractor) that model+task combination needs.
    Either ``task`` or ``model`` must be supplied.
    """
    if (model_kwargs is None):
        model_kwargs = {}
    # Kwargs forwarded to every hub download below.
    use_auth_token = model_kwargs.pop('use_auth_token', use_auth_token)
    hub_kwargs = {'revision': revision, 'use_auth_token': use_auth_token, 'trust_remote_code': trust_remote_code, '_commit_hash': None}
    # Argument sanity checks: a bare tokenizer/feature_extractor without a
    # model is rejected because it may not match the default model.
    if ((task is None) and (model is None)):
        raise RuntimeError('Impossible to instantiate a pipeline without either a task or a model being specified. Please provide a task class or a model')
    if ((model is None) and (tokenizer is not None)):
        raise RuntimeError('Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer may not be compatible with the default model. Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing tokenizer.')
    if ((model is None) and (feature_extractor is not None)):
        raise RuntimeError('Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing feature_extractor.')
    if isinstance(model, Path):
        model = str(model)
    # Resolve config from a string or from the model id; pin the commit hash
    # so subsequent downloads use the same snapshot.
    if isinstance(config, str):
        config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs)
        hub_kwargs['_commit_hash'] = config._commit_hash
    elif ((config is None) and isinstance(model, str)):
        config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
        hub_kwargs['_commit_hash'] = config._commit_hash
    # Custom (hub-defined) pipeline tasks declared in the model's config.
    custom_tasks = {}
    if ((config is not None) and (len(getattr(config, 'custom_pipelines', {})) > 0)):
        custom_tasks = config.custom_pipelines
        if ((task is None) and (trust_remote_code is not False)):
            if (len(custom_tasks) == 1):
                task = list(custom_tasks.keys())[0]
            else:
                raise RuntimeError(f"We can't infer the task automatically for this model as there are multiple tasks available. Pick one in {', '.join(custom_tasks.keys())}")
    # Infer the task from the hub when only a model id was given.
    if ((task is None) and (model is not None)):
        if (not isinstance(model, str)):
            raise RuntimeError(f'Inferring the task automatically requires to check the hub with a model_id defined as a `str`.{model} is not a valid model_id.')
        task = get_task(model, use_auth_token)
    # Resolve the pipeline class: custom tasks load remote code (guarded by
    # trust_remote_code); standard tasks come from the task registry.
    if (task in custom_tasks):
        normalized_task = task
        (targeted_task, task_options) = clean_custom_task(custom_tasks[task])
        if (pipeline_class is None):
            if (not trust_remote_code):
                raise ValueError('Loading this pipeline requires you to execute the code in the pipeline file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
            class_ref = targeted_task['impl']
            (module_file, class_name) = class_ref.split('.')
            pipeline_class = get_class_from_dynamic_module(model, (module_file + '.py'), class_name, revision=revision, use_auth_token=use_auth_token)
    else:
        (normalized_task, targeted_task, task_options) = check_task(task)
        if (pipeline_class is None):
            pipeline_class = targeted_task['impl']
    # No model given: fall back to the task's documented default model.
    if (model is None):
        (model, default_revision) = get_default_model_and_revision(targeted_task, framework, task_options)
        revision = (revision if (revision is not None) else default_revision)
        logger.warning(f'''No model was supplied, defaulted to {model} and revision {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).
Using a pipeline without specifying a model name and revision in production is not recommended.''')
        if ((config is None) and isinstance(model, str)):
            config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            hub_kwargs['_commit_hash'] = config._commit_hash
    # device_map / torch_dtype must not be passed both directly and through
    # model_kwargs.
    if (device_map is not None):
        if ('device_map' in model_kwargs):
            raise ValueError('You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those arguments might conflict, use only one.)')
        if (device is not None):
            logger.warning('Both `device` and `device_map` are specified. `device` will override `device_map`. You will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`.')
        model_kwargs['device_map'] = device_map
    if (torch_dtype is not None):
        if ('torch_dtype' in model_kwargs):
            raise ValueError('You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those arguments might conflict, use only one.)')
        model_kwargs['torch_dtype'] = torch_dtype
    # Load the actual model (choosing TF vs PT) and re-pin the commit hash.
    model_name = (model if isinstance(model, str) else None)
    model_classes = {'tf': targeted_task['tf'], 'pt': targeted_task['pt']}
    (framework, model) = infer_framework_load_model(model, model_classes=model_classes, config=config, framework=framework, task=task, **hub_kwargs, **model_kwargs)
    model_config = model.config
    hub_kwargs['_commit_hash'] = model.config._commit_hash
    # Decide which preprocessors this model/task combination needs, based on
    # the config class's registered mappings plus explicit user arguments.
    load_tokenizer = ((type(model_config) in TOKENIZER_MAPPING) or (model_config.tokenizer_class is not None))
    load_feature_extractor = ((type(model_config) in FEATURE_EXTRACTOR_MAPPING) or (feature_extractor is not None))
    load_image_processor = ((type(model_config) in IMAGE_PROCESSOR_MAPPING) or (image_processor is not None))
    # An image processor supersedes a feature extractor for vision models.
    if (load_image_processor and load_feature_extractor):
        load_feature_extractor = False
    # Multi-modal configs may need preprocessors their mappings don't list.
    if ((tokenizer is None) and (not load_tokenizer) and (normalized_task not in NO_TOKENIZER_TASKS) and (model_config.__class__.__name__ in MULTI_MODEL_CONFIGS)):
        load_tokenizer = True
    if ((image_processor is None) and (not load_image_processor) and (normalized_task not in NO_IMAGE_PROCESSOR_TASKS) and (model_config.__class__.__name__ in MULTI_MODEL_CONFIGS) and (normalized_task != 'automatic-speech-recognition')):
        load_image_processor = True
    if ((feature_extractor is None) and (not load_feature_extractor) and (normalized_task not in NO_FEATURE_EXTRACTOR_TASKS) and (model_config.__class__.__name__ in MULTI_MODEL_CONFIGS)):
        load_feature_extractor = True
    # Task-level denylists override everything above.
    if (task in NO_TOKENIZER_TASKS):
        load_tokenizer = False
    if (task in NO_FEATURE_EXTRACTOR_TASKS):
        load_feature_extractor = False
    if (task in NO_IMAGE_PROCESSOR_TASKS):
        load_image_processor = False
    # Tokenizer: resolve an identifier (model name, config name, or explicit
    # (name, kwargs) tuple) and instantiate it.
    if load_tokenizer:
        if (tokenizer is None):
            if isinstance(model_name, str):
                tokenizer = model_name
            elif isinstance(config, str):
                tokenizer = config
            else:
                raise Exception('Impossible to guess which tokenizer to use. Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer.')
        if isinstance(tokenizer, (str, tuple)):
            if isinstance(tokenizer, tuple):
                # A tuple carries (identifier, tokenizer-specific kwargs).
                use_fast = tokenizer[1].pop('use_fast', use_fast)
                tokenizer_identifier = tokenizer[0]
                tokenizer_kwargs = tokenizer[1]
            else:
                tokenizer_identifier = tokenizer
                tokenizer_kwargs = model_kwargs
            tokenizer = AutoTokenizer.from_pretrained(tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs)
    # Image processor: same resolution strategy; a BaseImageProcessor passed
    # as feature_extractor is accepted for backward compatibility.
    if load_image_processor:
        if (image_processor is None):
            if isinstance(model_name, str):
                image_processor = model_name
            elif isinstance(config, str):
                image_processor = config
            elif ((feature_extractor is not None) and isinstance(feature_extractor, BaseImageProcessor)):
                image_processor = feature_extractor
            else:
                raise Exception('Impossible to guess which image processor to use. Please provide a PreTrainedImageProcessor class or a path/identifier to a pretrained image processor.')
        if isinstance(image_processor, (str, tuple)):
            image_processor = AutoImageProcessor.from_pretrained(image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs)
    # Feature extractor, plus optional CTC beam-search decoder for *WithLM
    # speech models (best-effort: falls back to raw CTC if deps missing).
    if load_feature_extractor:
        if (feature_extractor is None):
            if isinstance(model_name, str):
                feature_extractor = model_name
            elif isinstance(config, str):
                feature_extractor = config
            else:
                raise Exception('Impossible to guess which feature extractor to use. Please provide a PreTrainedFeatureExtractor class or a path/identifier to a pretrained feature extractor.')
        if isinstance(feature_extractor, (str, tuple)):
            feature_extractor = AutoFeatureExtractor.from_pretrained(feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            if (feature_extractor._processor_class and feature_extractor._processor_class.endswith('WithLM') and isinstance(model_name, str)):
                try:
                    import kenlm
                    from pyctcdecode import BeamSearchDecoderCTC
                    if (os.path.isdir(model_name) or os.path.isfile(model_name)):
                        decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
                    else:
                        language_model_glob = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, '*')
                        alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
                        allow_patterns = [language_model_glob, alphabet_filename]
                        decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns)
                    kwargs['decoder'] = decoder
                except ImportError as e:
                    logger.warning(f'Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}')
                    if (not is_kenlm_available()):
                        logger.warning('Try to install `kenlm`: `pip install kenlm')
                    if (not is_pyctcdecode_available()):
                        logger.warning('Try to install `pyctcdecode`: `pip install pyctcdecode')
    # Generic 'translation' task: specialize to the model's declared
    # translation_XX_to_YY variant, if any.
    if ((task == 'translation') and model.config.task_specific_params):
        for key in model.config.task_specific_params:
            if key.startswith('translation'):
                task = key
                warnings.warn(f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', UserWarning)
                break
    # Assemble the pipeline constructor kwargs from whatever was loaded.
    if (tokenizer is not None):
        kwargs['tokenizer'] = tokenizer
    if (feature_extractor is not None):
        kwargs['feature_extractor'] = feature_extractor
    if (torch_dtype is not None):
        kwargs['torch_dtype'] = torch_dtype
    if (image_processor is not None):
        kwargs['image_processor'] = image_processor
    if (device is not None):
        kwargs['device'] = device
    return pipeline_class(model=model, framework=framework, task=task, **kwargs)
class Config(object):
    """Base configuration for Mask R-CNN style training/inference.

    Subclass and override attributes as needed; derived values
    (BATCH_SIZE, IMAGE_SHAPE, IMAGE_META_SIZE) are computed in __init__.
    """

    # Name of the configuration (override in subclasses).
    NAME = None
    # Effective batch size is IMAGES_PER_GPU * GPU_COUNT (see __init__).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 2
    # Iterations per training epoch / validation pass.
    STEPS_PER_EPOCH = 1000
    VALIDATION_STEPS = 50
    # Backbone network; COMPUTE_BACKBONE_SHAPE can override shape inference.
    BACKBONE = 'resnet101'
    COMPUTE_BACKBONE_SHAPE = None
    # Downscaling factor of each FPN pyramid level.
    BACKBONE_STRIDES = [4, 8, 16, 32, 64]
    FPN_CLASSIF_FC_LAYERS_SIZE = 1024
    TOP_DOWN_PYRAMID_SIZE = 256
    # Number of classes (presumably including background -- TODO confirm).
    NUM_CLASSES = 1
    # RPN anchor geometry and sampling.
    RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
    RPN_ANCHOR_RATIOS = [0.5, 1, 2]
    RPN_ANCHOR_STRIDE = 1
    RPN_NMS_THRESHOLD = 0.7
    RPN_TRAIN_ANCHORS_PER_IMAGE = 256
    # ROIs kept after NMS of RPN proposals.
    POST_NMS_ROIS_TRAINING = 2000
    POST_NMS_ROIS_INFERENCE = 1000
    # Shrink instance masks to MINI_MASK_SHAPE to save memory.
    USE_MINI_MASK = True
    MINI_MASK_SHAPE = (56, 56)
    # Input image resizing policy; 'crop' trains on IMAGE_MIN_DIM squares.
    IMAGE_RESIZE_MODE = 'square'
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    IMAGE_MIN_SCALE = 0
    # Per-channel mean subtracted from input images (RGB order presumed -- confirm).
    MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
    # ROI sampling for the classifier/mask heads.
    TRAIN_ROIS_PER_IMAGE = 200
    ROI_POSITIVE_RATIO = 0.33
    POOL_SIZE = 7
    MASK_POOL_SIZE = 14
    MASK_SHAPE = [28, 28]
    MAX_GT_INSTANCES = 100
    # Box-refinement standard deviations (ordering presumed (dy, dx, dh, dw) -- confirm).
    RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    # Detection-time limits and thresholds.
    DETECTION_MAX_INSTANCES = 100
    DETECTION_MIN_CONFIDENCE = 0.7
    DETECTION_NMS_THRESHOLD = 0.3
    # Optimizer settings.
    LEARNING_RATE = 0.001
    LEARNING_MOMENTUM = 0.9
    WEIGHT_DECAY = 0.0001
    # Relative weighting of the individual loss terms.
    LOSS_WEIGHTS = {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}
    USE_RPN_ROIS = True
    TRAIN_BN = False
    GRADIENT_CLIP_NORM = 5.0
    RUN_NAME = None
    # Options for an edge-aware mask-loss extension.
    EDGE_LOSS_SMOOTHING = False
    EDGE_LOSS_FILTERS = []
    EDGE_LOSS_NORM = 'l2'
    EDGE_LOSS_WEIGHT_FACTOR = 1.0
    EDGE_LOSS_WEIGHT_ENTROPY = False

    def __init__(self):
        """Compute values derived from the (possibly overridden) attributes."""
        # Effective batch size across all GPUs.
        self.BATCH_SIZE = (self.IMAGES_PER_GPU * self.GPU_COUNT)
        # 'crop' mode uses IMAGE_MIN_DIM squares; other modes pad to IMAGE_MAX_DIM.
        if (self.IMAGE_RESIZE_MODE == 'crop'):
            self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 3])
        else:
            self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3])
        # Length of the per-image metadata vector: presumably
        # id(1) + original shape(3) + resized shape(3) + window(4) + scale(1)
        # + one active-class flag per class -- confirm against compose_image_meta.
        self.IMAGE_META_SIZE = (((((1 + 3) + 3) + 4) + 1) + self.NUM_CLASSES)

    def display(self, file=None):
        """Print every non-dunder, non-callable attribute, one per line."""
        print('\nConfigurations:', file=file)
        for a in dir(self):
            if ((not a.startswith('__')) and (not callable(getattr(self, a)))):
                print('{:30} {}'.format(a, getattr(self, a)), file=file)
        print('\n', file=file)
def getdirinfo(pathtocheck):
    """Return the (accessed, created, modified) timestamps of *pathtocheck*.

    Runs the dsz 'dir' command on the parent directory with the file name as
    the mask and pulls the three times out of the first reply item.  Returns
    (None, None, None) when the command fails or the reply does not contain
    the expected structure.
    """
    cmd = ops.cmd.getDszCommand('dir', path=('"%s"' % os.path.dirname(pathtocheck)), mask=('"%s"' % os.path.basename(pathtocheck)))
    obj = cmd.execute()
    if cmd.success:
        # Only a missing/short reply structure is expected here, so catch
        # those specifically instead of the original bare `except:`, which
        # also swallowed KeyboardInterrupt/SystemExit and hid real bugs.
        try:
            fileitem = obj.diritem[0].fileitem[0]
            return (fileitem.filetimes.accessed.time, fileitem.filetimes.created.time, fileitem.filetimes.modified.time)
        except (AttributeError, IndexError):
            pass
    return (None, None, None)
class AbstractNlg(object):
    """Base class for natural-language generators.

    Stores the dialogue domain and a complexity level; concrete subclasses
    must implement generate_sent().
    """

    def __init__(self, domain, complexity):
        # Keep the constructor arguments for subclasses to use.
        self.domain, self.complexity = domain, complexity

    def generate_sent(self, actions, **kwargs):
        """Render *actions* into a sentence; subclasses must override."""
        raise NotImplementedError('Generate sent is required for NLG')

    def sample(self, examples):
        """Return one entry of *examples*, chosen uniformly at random."""
        return np.random.choice(examples)
class Bounds():
    """Geographic bounding box defined by its south-west and north-east corners.

    NOTE(review): bare annotated attributes with no __init__ suggest this was
    a @dataclass whose decorator was lost in extraction -- confirm upstream.
    """

    # Corner points; lon grows eastward, lat northward (presumed -- confirm).
    south_west: Point
    north_east: Point

    def contains_point(self, point: Point) -> bool:
        """Return True when *point* lies inside or on the edge of this box."""
        in_lon = (self.south_west.lon <= point.lon <= self.north_east.lon)
        in_lat = (self.south_west.lat <= point.lat <= self.north_east.lat)
        return (in_lon and in_lat)

    def from_dict(self, data: Dict) -> 'Bounds':
        """Build a Bounds from a dict with '_southWest'/'_northEast' keys.

        NOTE(review): the parameter is named ``cls`` in the original and there
        is no @classmethod decorator visible -- presumably stripped; confirm.
        """
        return cls(Point.from_dict(data['_southWest']), Point.from_dict(data['_northEast']))
class Effect8243(BaseEffect):
    """Passive effect: applies the ship's 'exhumersBonusOreMiningDuration'
    attribute as a 'duration' boost to modules requiring the Mining skill
    (passed with skill='Exhumers' -- presumably scaled per skill level)."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        def needs_mining_skill(mod):
            # Only mining modules receive the duration bonus.
            return mod.item.requiresSkill('Mining')

        bonus = ship.getModifiedItemAttr('exhumersBonusOreMiningDuration')
        fit.modules.filteredItemBoost(needs_mining_skill, 'duration', bonus, skill='Exhumers', **kwargs)
class PreActBottleneck(nn.Module):
    """Pre-activation bottleneck residual block (GroupNorm + ReLU before each conv).

    Structure: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand, with a
    1x1 projection shortcut when the stride or channel count changes.
    """

    def __init__(self, cin, cout=None, cmid=None, stride=1):
        super().__init__()
        # Defaults: keep the channel count, and use the usual 4x bottleneck reduction.
        cout = (cout or cin)
        cmid = (cmid or (cout // 4))
        self.gn1 = nn.GroupNorm(32, cin)
        self.conv1 = conv1x1(cin, cmid)
        self.gn2 = nn.GroupNorm(32, cmid)
        self.conv2 = conv3x3(cmid, cmid, stride)
        self.gn3 = nn.GroupNorm(32, cmid)
        self.conv3 = conv1x1(cmid, cout)
        self.relu = nn.ReLU(inplace=True)
        # Projection shortcut only when identity cannot be used directly.
        if ((stride != 1) or (cin != cout)):
            self.downsample = conv1x1(cin, cout, stride)

    def forward(self, x):
        # Pre-activation: normalize + activate BEFORE the convolutions.
        out = self.relu(self.gn1(x))
        residual = x
        # The shortcut projects the *activated* tensor, not the raw input.
        if hasattr(self, 'downsample'):
            residual = self.downsample(out)
        out = self.conv1(out)
        out = self.conv2(self.relu(self.gn2(out)))
        out = self.conv3(self.relu(self.gn3(out)))
        return (out + residual)

    def load_from(self, weights, prefix=''):
        """Copy TF-exported weights (dict keyed by '{prefix}a|b|c/...') into this block."""
        convname = 'standardized_conv2d'
        with torch.no_grad():
            # tf2th presumably transposes TF kernel layout to torch layout -- confirm.
            self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
            self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
            self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
            self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
            self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
            self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
            self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
            self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
            self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
            if hasattr(self, 'downsample'):
                w = weights[f'{prefix}a/proj/{convname}/kernel']
                self.downsample.weight.copy_(tf2th(w))
def args_parser():
    """Build the federated-learning CLI parser and return the parsed args."""
    ap = argparse.ArgumentParser()
    # -- federated training schedule --
    ap.add_argument('--epochs', type=int, default=50, help='rounds of training')
    ap.add_argument('--frac', type=float, default=1.0, help='the fraction of clients: C')
    ap.add_argument('--local_ep', type=int, default=5, help='the number of local epochs: E')
    ap.add_argument('--local_bs', type=int, default=10, help='local batch size: B')
    ap.add_argument('--bs', type=int, default=128, help='test batch size')
    # -- optimizer --
    ap.add_argument('--lr', type=float, default=0.01, help='learning rate')
    ap.add_argument('--momentum', type=float, default=0.5, help='SGD momentum (default: 0.5)')
    ap.add_argument('--split', type=str, default='user', help='train-test split type, user or sample')
    # -- model architecture --
    ap.add_argument('--model', type=str, default='cnn', help='model name')
    ap.add_argument('--kernel_num', type=int, default=9, help='number of each kind of kernel')
    ap.add_argument('--kernel_sizes', type=str, default='3,4,5', help='comma-separated kernel size to use for convolution')
    ap.add_argument('--norm', type=str, default='batch_norm', help='batch_norm, layer_norm, or None')
    ap.add_argument('--num_filters', type=int, default=32, help='number of filters for conv nets')
    ap.add_argument('--max_pool', type=str, default='True', help='Whether use max pooling rather than strided convolutions')
    ap.add_argument('--hyper', type=float, default=0.3, help='hypermeter alpha')
    # -- dataset --
    ap.add_argument('--dataset', type=str, default='fashion_mnist', help='name of dataset')
    ap.add_argument('--iid', action='store_true', help='whether i.i.d or not')
    ap.add_argument('--num_classes', type=int, default=10, help='number of classes')
    ap.add_argument('--num_channels', type=int, default=1, help='number of channels of imges')
    # -- runtime / environment --
    ap.add_argument('--gpu', type=int, default=(- 1), help='GPU ID, -1 for CPU')
    ap.add_argument('--stopping_rounds', type=int, default=10, help='rounds of early stopping')
    ap.add_argument('--verbose', action='store_true', help='verbose print')
    ap.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
    ap.add_argument('--log_level', type=str, default='DEBUG', help='level of logs: DEBUG, INFO, WARNING, ERROR, or CRITICAL')
    ap.add_argument('--fade', type=float, default=(- 1), help='static fade coefficient, -1 means dynamic')
    ap.add_argument('--dataset_train_size', type=int, default=1500, help='total dataset training size')
    ap.add_argument('--test_ip_addr', type=str, default='10.150.187.13', help='ip address used to test local IP')
    ap.add_argument('--start_sleep', type=int, default=300, help='sleep for seconds before start train')
    ap.add_argument('--exit_sleep', type=int, default=300, help='sleep for seconds before exit python')
    # -- attack simulation --
    ap.add_argument('--poisoning_attackers', nargs='+', default=[])
    ap.add_argument('--poisoning_detect_threshold', type=float, default=0.8)
    ap.add_argument('--ddos_duration', type=int, default=(- 1))
    ap.add_argument('--ddos_no_response_percent', type=float, default=0.9)
    return ap.parse_args()
# NOTE(review): the next line looks like a stripped decorator (presumably
# @with_fixtures(...) or similar) -- as written it is a bare call; confirm upstream.
_fixtures(WebFixture, SqlAlchemyFixture, ValidationScenarios.with_javascript)
def test_input_validation_cues_javascript_interaction(web_fixture, sql_alchemy_fixture, javascript_validation_scenario):
    """Validation cues behave consistently with and without JavaScript enabled."""
    fixture = javascript_validation_scenario
    # Phase 1: server-side validation only (JS disabled).
    web_fixture.reahl_server.set_app(web_fixture.new_wsgi_app(child_factory=fixture.Form.factory(), enable_js=False))
    browser = fixture.browser
    with sql_alchemy_fixture.persistent_test_classes(fixture.ModelObject):
        fixture.domain_object = fixture.ModelObject()
        Session.add(fixture.domain_object)
        browser.open('/')
        # Submitting an empty required input marks the form group invalid.
        browser.type(XPath.input_labelled('Some input'), '')
        browser.click(XPath.button_labelled('Submit'))
        assert (['is-invalid'] == fixture.get_form_group_highlight_marks(browser, index=0))
        [error] = fixture.get_form_group_errors(browser, index=0)
        assert (error.text == 'Some input is required')
        # Phase 2: same flow with JS enabled; cues should match.
        web_fixture.reahl_server.set_app(web_fixture.new_wsgi_app(child_factory=fixture.Form.factory(), enable_js=True))
        browser.open('/')
        browser.click(XPath.button_labelled('Submit'))
        assert (['is-invalid'] == fixture.get_form_group_highlight_marks(browser, index=0))
        [error] = fixture.get_form_group_errors(browser, index=0)
        assert (error.text == 'Some input is required')
        # Typing a valid value and tabbing away triggers client-side re-validation.
        browser.type(XPath.input_labelled('Some input'), 'valid value', trigger_blur=False, wait_for_ajax=False)
        browser.press_tab()
        def form_group_is_marked_success(index):
            # Success is signalled by the 'is-valid' class on the form group.
            return (['is-valid'] == fixture.get_form_group_highlight_marks(browser, index=index))
        assert web_fixture.driver_browser.wait_for(form_group_is_marked_success, 0)
        assert (not fixture.get_form_group_errors(browser, index=0))
class Config():
    """Experiment configuration read from a colon-separated text file.

    The data mode (TRAIN/DEV/TEST) is derived from the dataset file name.
    """

    # Data-mode constants derived from the dataset file name.
    (TRAIN, DEV, TEST) = range(3)

    def __init__(self, dataset_size, shuffle_before_select, dataset_file, simplified, horizon, reward_function_type, use_localhost, stop_action_reward, screen_size):
        self.dataset_size = dataset_size
        self.shuffle_before_select = shuffle_before_select
        self.dataset_file = dataset_file
        self.simplified = simplified
        self.horizon = horizon
        self.reward_function_type = reward_function_type
        self.use_localhost = use_localhost
        self.stop_action_reward = stop_action_reward
        self.screen_size = screen_size
        # Map the dataset file name onto a data mode; anything else is a
        # configuration error.
        if (dataset_file == 'trainset.json'):
            self.data_mode = Config.TRAIN
        elif (dataset_file == 'devset.json'):
            self.data_mode = Config.DEV
        elif ((dataset_file == 'testset.json') or (dataset_file == 'testset1.json')):
            self.data_mode = Config.TEST
        else:
            raise AssertionError(('Unknown dataset file ' + str(dataset_file)))

    def parse(file_name):
        """Parse a 9-line 'key: value' config file and return a Config.

        NOTE(review): takes no self/cls -- presumably a stripped @staticmethod;
        calling it as Config.parse(path) works either way.

        BUG FIX: the original compared the raw line tail (which still carried
        the trailing newline) against 'true', so every boolean option silently
        parsed as False.  Values are now stripped first.  The file handle is
        also closed via a context manager instead of being leaked.
        """
        with open(file_name) as f:
            lines = f.readlines()

        def value(i):
            # Text after the first ':' with surrounding whitespace removed.
            return lines[i][(lines[i].index(':') + 1):].strip()

        dataset_size = int(value(0))
        shuffle_before_select = (value(1) == 'true')
        dataset_file = value(2)
        simplified = (value(3) == 'true')
        horizon = int(value(4))
        reward_function_type = int(value(5))
        use_localhost = (value(6) == 'true')
        stop_action_reward = (value(7) == 'true')
        screen_size = int(value(8))
        return Config(dataset_size, shuffle_before_select, dataset_file, simplified, horizon, reward_function_type, use_localhost, stop_action_reward, screen_size)

    def log_flag(self):
        """Write every configuration value to the run log."""
        logger.Log.info(('Dataset size: ' + str(self.dataset_size)))
        logger.Log.info(('Shuffle before select: ' + str(self.shuffle_before_select)))
        logger.Log.info(('Dataset file: ' + str(self.dataset_file)))
        logger.Log.info(('Simplified: ' + str(self.simplified)))
        logger.Log.info(('Horizon: ' + str(self.horizon)))
        logger.Log.info(('Reward function type: ' + str(self.reward_function_type)))
        logger.Log.info(('Use localhost: ' + str(self.use_localhost)))
        logger.Log.info(('Stop action reward: ' + str(self.stop_action_reward)))
        logger.Log.info(('Screen size: ' + str(self.screen_size)))
class MetricResult(object):
    """Wraps a scalar metric value plus optional free-form metadata.

    NOTE(review): result()/meta() read like stripped @property accessors --
    kept as plain methods to preserve the visible interface.
    """

    def __init__(self, result: float, meta: Dict[(str, Any)]=None):
        # BUG FIX: the default used to be a mutable `{}` shared by every
        # instance constructed without *meta*; mutating one instance's meta
        # leaked into all the others.  A fresh dict is now created per call.
        # (Optional[...] would be the precise annotation but would need a new
        # import at file level.)
        self._result = result
        self._meta = ({} if (meta is None) else meta)

    def result(self):
        """Return the raw metric value."""
        return self._result

    def meta(self):
        """Return the metadata dict (possibly empty)."""
        return self._meta

    def __repr__(self):
        # '1.234567 (k:v,k2:v2)' with metadata, plain '1.234567' without.
        if self._meta:
            meta_str = ','.join([('%s:%s' % (k, v)) for (k, v) in self._meta.items()])
            return ('%f (%s)' % (self._result, meta_str))
        else:
            return ('%f' % self._result)
def handle_format(self, data, rectype=XL_FORMAT):
    """Parse a FORMAT/FORMAT2 record and register the resulting Format object.

    Decodes the format key and format string according to the BIFF version,
    classifies the string as date-like or general, warns on conflicts with
    the standard format table, and stores the result in self.format_map and
    self.format_list.
    """
    DEBUG = 0
    bv = self.biff_version
    # FORMAT2 records use the older (<= BIFF3) layout regardless of file version.
    if (rectype == XL_FORMAT2):
        bv = min(bv, 30)
    if (not self.encoding):
        self.derive_encoding()
    strpos = 2
    if (bv >= 50):
        # BIFF5+ carries an explicit 2-byte format key.
        fmtkey = unpack('<H', data[0:2])[0]
    else:
        # Older versions number formats implicitly in order of appearance.
        fmtkey = self.actualfmtcount
        if (bv <= 30):
            # ... and the string starts at the very beginning of the record.
            strpos = 0
    self.actualfmtcount += 1
    if (bv >= 80):
        # BIFF8 stores the format string as a unicode string.
        unistrg = unpack_unicode(data, 2)
    else:
        unistrg = unpack_string(data, strpos, self.encoding, lenlen=1)
    blah = (DEBUG or (self.verbosity >= 3))
    if blah:
        fprintf(self.logfile, 'FORMAT: count=%d fmtkey=0x%04x (%d) s=%r\n', self.actualfmtcount, fmtkey, fmtkey, unistrg)
    # Classify by inspecting the format string itself (date vs general).
    is_date_s = self.is_date_format_string(unistrg)
    ty = [FGE, FDT][is_date_s]
    # For built-in keys in BIFF5+ files, cross-check against the standard
    # format table and warn when the two classifications disagree.
    if (not ((fmtkey > 163) or (bv < 50))):
        std_ty = std_format_code_types.get(fmtkey, FUN)
        is_date_c = (std_ty == FDT)
        if (self.verbosity and (0 < fmtkey < 50) and (is_date_c ^ is_date_s)):
            DEBUG = 2
            fprintf(self.logfile, 'WARNING *** Conflict between std format key %d and its format string %r\n', fmtkey, unistrg)
    if (DEBUG == 2):
        fprintf(self.logfile, 'ty: %d; is_date_c: %r; is_date_s: %r; fmt_strg: %r', ty, is_date_c, is_date_s, unistrg)
    fmtobj = Format(fmtkey, ty, unistrg)
    if blah:
        fmtobj.dump(self.logfile, header=('--- handle_format [%d] ---' % ((self.actualfmtcount - 1),)))
    self.format_map[fmtkey] = fmtobj
    self.format_list.append(fmtobj)
def __print_size_warning(ow, oh, w, h):
    """Print the image-resize warning a single time per process.

    The "printed already" flag is kept as an attribute on the function
    object itself, so repeated calls after the first are silent no-ops.
    """
    if getattr(__print_size_warning, 'has_printed', False):
        return
    print(('The image size needs to be a multiple of 4. The loaded image size was (%d, %d), so it was adjusted to (%d, %d). This adjustment will be done to all images whose sizes are not multiples of 4' % (ow, oh, w, h)))
    __print_size_warning.has_printed = True
class VoxelResBackBone8x(nn.Module):
    """Sparse-convolution voxel backbone with residual blocks and 8x downsampling.

    Produces an 'encoded_spconv_tensor' (stride 8) plus the four intermediate
    multi-scale feature maps for downstream heads.
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=0.001, momentum=0.01)
        # Spatial shape is (z, y, x); +1 on z -- presumably headroom for the
        # final (3,1,1) conv over height -- TODO confirm.
        self.sparse_shape = (grid_size[::(- 1)] + [1, 0, 0])
        self.conv_input = spconv.SparseSequential(spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), norm_fn(16), nn.ReLU())
        block = post_act_block
        # Four stages: channels 16 -> 32 -> 64 -> 128; stages 2-4 downsample by 2.
        self.conv1 = spconv.SparseSequential(SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'), SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'))
        self.conv2 = spconv.SparseSequential(block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'))
        self.conv3 = spconv.SparseSequential(block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'))
        self.conv4 = spconv.SparseSequential(block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'))
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Final conv compresses the height (z) dimension only.
        self.conv_out = spconv.SparseSequential(spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), norm_fn(128), nn.ReLU())
        self.num_point_features = 128

    def forward(self, batch_dict):
        """Encode voxel features; adds encoded tensor and multi-scale features to batch_dict."""
        (voxel_features, voxel_coords) = (batch_dict['voxel_features'], batch_dict['voxel_coords'])
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(features=voxel_features, indices=voxel_coords.int(), spatial_shape=self.sparse_shape, batch_size=batch_size)
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.conv_out(x_conv4)
        batch_dict.update({'encoded_spconv_tensor': out, 'encoded_spconv_tensor_stride': 8})
        batch_dict.update({'multi_scale_3d_features': {'x_conv1': x_conv1, 'x_conv2': x_conv2, 'x_conv3': x_conv3, 'x_conv4': x_conv4}})
        return batch_dict
def fractional(value: NumberOrString) -> str:
    """Render *value* as a human-friendly fraction, e.g. 1.5 -> '1 1/2'.

    Non-numeric input is returned unchanged (as a string); non-finite
    numbers are delegated to _format_not_finite.  The fractional part is
    approximated with a denominator of at most 1000.
    """
    try:
        number = float(value)
        if (not math.isfinite(number)):
            return _format_not_finite(number)
    except (TypeError, ValueError):
        # Not interpretable as a number: hand it back verbatim.
        return str(value)
    whole_number = int(number)
    frac = Fraction((number - whole_number)).limit_denominator(1000)
    numerator, denominator = frac.numerator, frac.denominator
    # Whole number with no remainder -> plain integer string.
    if (whole_number and (not numerator) and (denominator == 1)):
        return f'{whole_number:.0f}'
    # Pure fraction (no whole part).
    if (not whole_number):
        return f'{numerator:.0f}/{denominator:.0f}'
    # Mixed number: whole part plus fraction.
    return f'{whole_number:.0f} {numerator:.0f}/{denominator:.0f}'
def default_profile(monkeypatch):
    """Fixture: a QWebEngineProfile patched in as the module-wide default profile.

    NOTE(review): presumably decorated with @pytest.fixture upstream -- the
    decorator is not visible here; confirm.
    """
    profile = QtWebEngineCore.QWebEngineProfile()
    profile.setter = webenginesettings.ProfileSetter(profile)
    # Pretend the profile is persistent (not off-the-record).
    monkeypatch.setattr(profile, 'isOffTheRecord', (lambda : False))
    monkeypatch.setattr(webenginesettings, 'default_profile', profile)
    return profile
# NOTE(review): the next line looks like a stripped @pytest.mark.parametrize
# decorator -- as written it is a bare attribute access; confirm upstream.
.parametrize('namespace_name, repo_name, tag_names, expected', [('devtable', 'simple', ['latest'], 'latest'), ('devtable', 'simple', ['unknown', 'latest'], 'latest'), ('devtable', 'simple', ['unknown'], None)])
def test_find_matching_tag(namespace_name, repo_name, tag_names, expected, initialized_db):
    """find_matching_tag returns the first live tag from the candidates, or None."""
    repo = get_repository(namespace_name, repo_name)
    if (expected is not None):
        # Lookup must hit the database exactly once.
        with assert_query_count(1):
            found = find_matching_tag(repo, tag_names)
        assert (found is not None)
        assert (found.name == expected)
        # A matching tag must still be alive (no lifetime end set).
        assert (not found.lifetime_end_ms)
    else:
        with assert_query_count(1):
            assert (find_matching_tag(repo, tag_names) is None)
class F9_Firewall(FC3_Firewall):
    """Firewall kickstart command for F9: the --high/--medium options are removed."""

    removedKeywords = FC3_Firewall.removedKeywords
    removedAttrs = FC3_Firewall.removedAttrs

    def _getParser(self):
        parser = FC3_Firewall._getParser(self)
        # Both legacy security-level options went away at the F9 version marker.
        for option in ('--high', '--medium'):
            parser.remove_argument(option, version=F9)
        return parser
class MeanIoU():
    """Mean intersection-over-union metric aggregated across distributed workers.

    Accumulators live on the GPU (reset() calls .cuda()) and are summed with
    torch.distributed.all_reduce, so this assumes an initialized process
    group and CUDA -- confirm against the training harness.
    """

    def __init__(self, class_indices, ignore_label: int, label_str, name):
        # class_indices: label ids to evaluate; label_str: display name per class.
        self.class_indices = class_indices
        self.num_classes = len(class_indices)
        self.ignore_label = ignore_label
        self.label_str = label_str
        self.name = name

    def reset(self) -> None:
        """Zero the per-class seen/correct/predicted counters."""
        self.total_seen = torch.zeros(self.num_classes).cuda()
        self.total_correct = torch.zeros(self.num_classes).cuda()
        self.total_positive = torch.zeros(self.num_classes).cuda()

    def _after_step(self, outputs, targets):
        """Accumulate counts for one batch, skipping ignore-labelled points."""
        outputs = outputs[(targets != self.ignore_label)]
        targets = targets[(targets != self.ignore_label)]
        for (i, c) in enumerate(self.class_indices):
            self.total_seen[i] += torch.sum((targets == c)).item()
            self.total_correct[i] += torch.sum(((targets == c) & (outputs == c))).item()
            self.total_positive[i] += torch.sum((outputs == c)).item()

    def _after_epoch(self):
        """Reduce counters across workers, log per-class IoU, return mIoU in percent."""
        dist.all_reduce(self.total_seen)
        dist.all_reduce(self.total_correct)
        dist.all_reduce(self.total_positive)
        ious = []
        for i in range(self.num_classes):
            if (self.total_seen[i] == 0):
                # Class absent from ground truth: counted as perfect (IoU 1).
                ious.append(1)
            else:
                # IoU = TP / (TP + FN + FP) = correct / (seen + predicted - correct).
                cur_iou = (self.total_correct[i] / ((self.total_seen[i] + self.total_positive[i]) - self.total_correct[i]))
                ious.append(cur_iou.item())
        miou = np.mean(ious)
        logger = MMLogger.get_current_instance()
        logger.info(f'Validation per class iou {self.name}:')
        for (iou, label_str) in zip(ious, self.label_str):
            logger.info(('%s : %.2f%%' % (label_str, (iou * 100))))
        return (miou * 100)
class KnownValues(unittest.TestCase):
    """Regression tests for the 2-component numerical integrator (numint2c).

    Relies on module-level ``mol``/``mf`` fixtures defined elsewhere in this
    file; expected values are coarse fingerprint checks via lib.fp.
    The 'collinear' attribute selects the integration scheme:
    'c' = collinear, 'n' = non-collinear, 'm' = multi-collinear (mcfun).
    """

    def test_vxc_col(self):
        # Collinear XC potential (GGA functional).
        ni = numint2c.NumInt2C()
        ni.collinear = 'c'
        dm = mf.get_init_guess(mol, 'minao')
        (n, e, v) = ni.nr_vxc(mol, mf.grids, 'B88,', dm)
        self.assertAlmostEqual(n, 9., 5)
        self.assertAlmostEqual(e, (- 8.), 6)
        self.assertAlmostEqual(lib.fp(v), (- 2.), 8)

    def test_vxc_ncol(self):
        # Non-collinear XC potential (LDA only).
        ni = numint2c.NumInt2C()
        ni.collinear = 'n'
        dm = mf.get_init_guess(mol, 'minao')
        (n, e, v) = ni.nr_vxc(mol, mf.grids, 'LDA,', dm)
        self.assertAlmostEqual(n, 9., 5)
        self.assertAlmostEqual(e, (- 7.), 6)
        self.assertAlmostEqual(lib.fp(v), ((- 2.) + 0j), 8)

    # NOTE(review): looks like a stripped @unittest.skipIf(...) decorator --
    # as written this bare tuple has no effect; confirm against upstream.
    ((mcfun is None), 'mcfun library not found.')
    def test_vxc_mcol(self):
        # Multi-collinear XC potential via the mcfun library.
        ni = numint2c.NumInt2C()
        ni.collinear = 'm'
        ni.spin_samples = 14
        dm = mf.get_init_guess(mol, 'minao')
        (n, e, v) = ni.nr_vxc(mol, mf.grids, 'LDA,', dm)
        self.assertAlmostEqual(n, 9., 6)
        self.assertAlmostEqual(e, (- 7.), 6)
        self.assertAlmostEqual(lib.fp(v), ((- 2.) + 0j), 8)
        (n, e, v) = ni.nr_vxc(mol, mf.grids, 'B88,', dm)
        self.assertAlmostEqual(n, 9., 5)
        self.assertAlmostEqual(e, (- 8.), 6)
        self.assertAlmostEqual(lib.fp(v), ((- 2.) + 0j), 8)

    def test_fxc_col(self):
        # Collinear XC kernel contraction with a random trial density.
        ni = numint2c.NumInt2C()
        ni.collinear = 'c'
        dm = mf.get_init_guess(mol, 'minao')
        np.random.seed(10)
        dm1 = np.random.random(dm.shape)
        v = ni.nr_fxc(mol, mf.grids, 'B88,', dm, dm1)
        self.assertAlmostEqual(lib.fp(v), 1., 6)

    # NOTE(review): looks like a stripped @unittest.skipIf(...) decorator --
    # as written this bare tuple has no effect; confirm against upstream.
    ((mcfun is None), 'mcfun library not found.')
    def test_fxc_mcol(self):
        # Multi-collinear XC kernel (LDA and meta-GGA).
        ni = numint2c.NumInt2C()
        ni.collinear = 'm'
        ni.spin_samples = 14
        dm = mf.get_init_guess(mol, 'minao')
        np.random.seed(10)
        dm1 = np.random.random(dm.shape)
        v = ni.nr_fxc(mol, mf.grids, 'LDA,', dm, dm1)
        self.assertAlmostEqual(lib.fp(v), (1. + 0j), 6)
        v = ni.nr_fxc(mol, mf.grids, 'M06', dm, dm1)
        self.assertAlmostEqual(lib.fp(v), (0. + 0j), 6)

    def test_get_rho(self):
        # Density on the grid must agree between collinear and multi-collinear paths.
        ni = numint2c.NumInt2C()
        ni.collinear = 'c'
        dm = mf.get_init_guess(mol, 'minao')
        rho = ni.get_rho(mol, dm, mf.grids)
        self.assertAlmostEqual(lib.fp(rho), (- 361.), 8)
        ni.collinear = 'm'
        ni.spin_samples = 50
        rho = ni.get_rho(mol, dm, mf.grids)
        self.assertAlmostEqual(lib.fp(rho), (- 361.), 8)
class WaterBigBoxPBE0(unittest.TestCase):
    """TDDFT excitations of a water molecule in a large PBC box vs. the
    isolated molecule: the two must agree to within 0.1 eV.

    NOTE(review): setUpClass/tearDownClass take ``cls`` but carry no
    @classmethod decorator here -- presumably stripped; confirm upstream.
    """

    def setUpClass(cls):
        # Periodic calculation: water in a 15 Angstrom cubic box.
        cell = gto.Cell()
        cell.verbose = 4
        cell.output = '/dev/null'
        cell.atom = '\n    O          0.00000        0.00000        0.11779\n    H          0.00000        0.75545       -0.47116\n    H          0.00000       -0.75545       -0.47116\n    '
        cell.spin = 2
        cell.a = (np.eye(3) * 15)
        cell.basis = 'sto-3g'
        cell.build()
        xc = 'pbe0'
        mf = scf.UKS(cell).set(xc=xc).rs_density_fit(auxbasis='weigend')
        mf.with_df.omega = 0.1
        mf.kernel()
        cls.cell = cell
        cls.mf = mf
        # Molecular reference: same geometry/basis without periodic boundaries.
        mol = molgto.Mole()
        for key in ['verbose', 'output', 'atom', 'spin', 'basis']:
            setattr(mol, key, getattr(cell, key))
        mol.build()
        molmf = molscf.UKS(mol).set(xc=xc).density_fit(auxbasis=mf.with_df.auxbasis).run()
        cls.mol = mol
        cls.molmf = molmf
        cls.nstates = 5
        cls.nstates_test = 2

    def tearDownClass(cls):
        cls.cell.stdout.close()
        cls.mol.stdout.close()
        del cls.cell, cls.mf
        del cls.mol, cls.molmf

    def kernel(self, TD, **kwargs):
        """Run PBC and molecular TD calculations; their lowest excitation
        energies must agree to within 0.1 eV."""
        td = getattr(self.mf, TD)().set(nstates=self.nstates, **kwargs).run()
        # BUG FIX: the molecular reference must come from self.molmf; the
        # original built both objects from self.mf, so the assertion below
        # compared a calculation against itself and could never fail.
        moltd = getattr(self.molmf, TD)().set(nstates=self.nstates, **kwargs).run()
        self.assertTrue((abs(((td.e[:self.nstates_test] * unitev) - (moltd.e[:self.nstates_test] * unitev))).max() < 0.1))

    def test_tda(self):
        self.kernel('TDA')

    def test_tdhf(self):
        self.kernel('TDDFT')
def send_mime_email(mime_msg: MIMEMultipart, mail_from: str, mail_to: str, smtp_host: str, smtp_port: int, smtp_user: str, smtp_password: str, use_ssl: bool=True, use_tls: bool=False):
    """Send *mime_msg* from *mail_from* to *mail_to* via SMTP.

    Connection security: use_tls takes precedence (plain SMTP upgraded with
    STARTTLS), then use_ssl (implicit TLS via SMTP_SSL), otherwise plain SMTP.
    Login is attempted only when both user and password are provided.

    Raises whatever smtplib raises on connection/auth/send failure.
    """
    if use_tls:
        server = smtplib.SMTP(smtp_host, smtp_port)
        server.starttls()
    elif use_ssl:
        server = smtplib.SMTP_SSL(smtp_host, smtp_port)
    else:
        server = smtplib.SMTP(smtp_host, smtp_port)
    # BUG FIX: close the connection even when login/sendmail raises; the
    # original leaked the socket on any failure after connect.
    try:
        if (smtp_user and smtp_password):
            server.login(smtp_user, smtp_password)
        server.sendmail(mail_from, mail_to, mime_msg.as_string())
    finally:
        server.quit()
def get_initial_pos(nparticles, scale, dtype):
    """Place *nparticles* on a near-square [0, scale] x [0, scale] grid.

    Returns a tensor of shape (1, nparticles, 2) holding (x, y) pairs; when
    the grid has more cells than particles, the trailing cells are dropped.
    """
    # Choose a grid as close to square as possible.
    rows = int(nparticles ** 0.5)
    cols = int(np.ceil(nparticles / rows))
    xs = torch.linspace(0, scale, cols, dtype=dtype)
    ys = torch.linspace(0, scale, rows, dtype=dtype)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    # Flatten row-major and keep only the first nparticles cells.
    flat_y = grid_y.reshape((- 1))[:nparticles]
    flat_x = grid_x.reshape((- 1))[:nparticles]
    # Pair up as (x, y) and add a leading batch dimension.
    return torch.stack((flat_x, flat_y), dim=(- 1)).unsqueeze(0)
class LatentEncoder(nn.Module):
    """Encodes (x, y) context pairs into a global latent Gaussian and a sample.

    Pipeline: project concatenated pairs, refine with stacked self-attention,
    mean-pool over the context dimension, then emit (mu, log_sigma, z) where
    z is drawn via the reparameterization trick.
    """

    def __init__(self, num_hidden, num_latent, input_dim, num_self_attention_l):
        super(LatentEncoder, self).__init__()
        self.input_projection = Linear(input_dim, num_hidden)
        self.self_attentions = nn.ModuleList([Attention(num_hidden) for _ in range(num_self_attention_l)])
        self.penultimate_layer = Linear(num_hidden, num_hidden, w_init='relu')
        self.mu = Linear(num_hidden, num_latent)
        self.log_sigma = Linear(num_hidden, num_latent)

    def forward(self, x, y):
        # Project each concatenated (x, y) pair into the hidden space.
        rep = self.input_projection(t.cat([x, y], dim=(- 1)))
        # Refine with self-attention; each layer attends the context to itself.
        for attn in self.self_attentions:
            (rep, _) = attn(rep, rep, rep)
        # Mean-pool the context into a single vector per batch element.
        pooled = t.relu(self.penultimate_layer(rep.mean(dim=1)))
        mu = self.mu(pooled)
        log_sigma = self.log_sigma(pooled)
        # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, 1).
        std = t.exp((0.5 * log_sigma))
        z = t.randn_like(std).mul(std).add_(mu)
        return (mu, log_sigma, z)
def zaleplon_with_other_formula() -> GoalDirectedBenchmark:
    """'Zaleplon MPO' benchmark: geometric mean of similarity to zaleplon
    and matching the (different) molecular formula C19H17N3O2."""
    similarity = TanimotoScoringFunction('O=C(C)N(CC)C1=CC=CC(C2=CC=NC3=C(C=NN23)C#N)=C1', fp_type='ECFP4')
    isomer = IsomerScoringFunction('C19H17N3O2')
    objective = GeometricMeanScoringFunction([similarity, isomer])
    specification = uniform_specification(1, 10, 100)
    return GoalDirectedBenchmark(name='Zaleplon MPO', objective=objective, contribution_specification=specification)
def test_select_components():
    """select_components resolves names, groups (CLUSTER/CLIENT/ALL), '.' and exclusions."""
    from reana.reana_dev.utils import select_components
    from reana.config import REPO_LIST_ALL, REPO_LIST_CLIENT, REPO_LIST_CLUSTER
    for (input_value, output_expected) in (
        (['reana-job-controller'], ['reana-job-controller']),
        # BUG FIX: expected value was the garbled 'reana, '.
        (['reana-job-controller', 'reana'], ['reana-job-controller', 'reana']),
        (['.'], [os.path.basename(os.getcwd())]),
        (['CLUSTER'], REPO_LIST_CLUSTER),
        (['CLIENT'], REPO_LIST_CLIENT),
        (['ALL'], REPO_LIST_ALL),
        (['nonsense'], []),
        (['nonsense', 'reana'], ['reana']),
        (['ALL', 'reana'], REPO_LIST_ALL),
        (['CLUSTER', 'reana'], REPO_LIST_CLUSTER),
        (['ALL', 'CLUSTER', 'reana'], REPO_LIST_ALL),
    ):
        output_obtained = select_components(input_value)
        # BUG FIX: the original asserted `a.sort() == b.sort()`; list.sort()
        # returns None, so that compared None == None and could never fail.
        assert sorted(output_obtained) == sorted(output_expected)
    # Exclusion removes exactly the requested components.
    num_excluded = 2
    exclude_components = REPO_LIST_CLUSTER[:num_excluded]
    output_obtained = select_components(REPO_LIST_CLUSTER, exclude_components)
    assert (len(output_obtained) == (len(REPO_LIST_CLUSTER) - num_excluded))
    assert (not set(exclude_components).intersection(output_obtained))
def _parse_static_node_value(node):
import ast
from collections import OrderedDict
if isinstance(node, ast.Num):
value = node.n
elif isinstance(node, ast.Str):
value = node.s
elif isinstance(node, ast.List):
value = list(map(_parse_static_node_value, node.elts))
elif isinstance(node, ast.Tuple):
value = tuple(map(_parse_static_node_value, node.elts))
elif isinstance(node, ast.Dict):
keys = map(_parse_static_node_value, node.keys)
values = map(_parse_static_node_value, node.values)
value = OrderedDict(zip(keys, values))
elif isinstance(node, ast.NameConstant):
value = node.value
else:
print(node.__dict__)
raise TypeError('Cannot parse a static value from non-static node of type: {!r}'.format(type(node)))
return value |
def test_render_registry_fails():
    """Registry rejects bad argument types, duplicate registrations, and bad lookups."""
    r = gfx.renderers._base.RenderFunctionRegistry()
    r.register(Object1, Material1, foo1)
    # Non-class or wrong-base arguments are rejected for the object slot ...
    with raises(TypeError):
        r.register(4, Material1, foo1)
    with raises(TypeError):
        r.register(str, Material1, foo1)
    # ... and for the material slot ...
    with raises(TypeError):
        r.register(Object1, 4, foo1)
    with raises(TypeError):
        r.register(Object1, str, foo1)
    # ... and the render function must be callable.
    with raises(TypeError):
        r.register(Object1, Material1, 'not callable')
    # Re-registering the same (object, material) pair is an error.
    with raises(ValueError):
        r.register(Object1, Material1, foo1)
    # Only the one successful registration is stored.
    assert (len(r._store) == 1)
    assert (foo1 is r.get_render_function(Object1(Material1())))
    # Lookup with no material yields no function rather than raising.
    assert (None is r.get_render_function(Object1(None)))
    # Lookup arguments must be a world object with a material-like attribute.
    with raises(TypeError):
        r.get_render_function(3)
    with raises(TypeError):
        r.get_render_function(Object1(3))
# NOTE(review): the next line looks like a stripped decorator (presumably
# @_REGISTRY.register()) -- as written it is a bare call; confirm upstream.
_REGISTRY.register()
class CIFARSTL(DatasetBase):
    """CIFAR <-> STL domain-adaptation dataset.

    Expects <root>/cifar_stl/<domain>/<split>/<label>_<name>/<image> on disk;
    the class directory name encodes the integer label before the first '_'.
    """

    dataset_dir = 'cifar_stl'
    domains = ['cifar', 'stl']

    def __init__(self, cfg):
        root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.check_input_domains(cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS)
        # Labeled source train set, unlabeled target train set, target test set.
        train_x = self._read_data(cfg.DATASET.SOURCE_DOMAINS, split='train')
        train_u = self._read_data(cfg.DATASET.TARGET_DOMAINS, split='train')
        test = self._read_data(cfg.DATASET.TARGET_DOMAINS, split='test')
        super().__init__(train_x=train_x, train_u=train_u, test=test)

    def _read_data(self, input_domains, split='train'):
        """Collect Datum items for every image under each requested domain/split."""
        items = []
        for (domain, dname) in enumerate(input_domains):
            data_dir = osp.join(self.dataset_dir, dname, split)
            class_names = listdir_nohidden(data_dir)
            for class_name in class_names:
                class_dir = osp.join(data_dir, class_name)
                imnames = listdir_nohidden(class_dir)
                # Directory names are '<label>_<something>'.
                label = int(class_name.split('_')[0])
                for imname in imnames:
                    impath = osp.join(class_dir, imname)
                    item = Datum(impath=impath, label=label, domain=domain)
                    items.append(item)
        return items
def _partition_key(number_of_shards=None):
    """Return a Kinesis partition key: SHA-1 hex digest of the prefix plus a
    random suffix.

    With *number_of_shards* set, the suffix is a shard index in
    [0, number_of_shards), giving a bounded set of keys; otherwise a 256-bit
    random value is used.  (SHA-1 is fine here -- the hash only spreads
    records across shards, it is not security-sensitive.)
    """
    # DRY: compute the random suffix per mode, hash once.
    if (number_of_shards is not None):
        suffix = str(random.randrange(0, number_of_shards))
    else:
        suffix = str(random.getrandbits(256))
    return hashlib.sha1((KINESIS_PARTITION_KEY_PREFIX + suffix).encode('utf-8')).hexdigest()
def init_sensors():
    """Load the sensors config file, reconcile it with sensors linked to the
    dongle, persist the config if it was missing, and optionally publish
    Home Assistant discovery topics.

    Mutates the module-level SENSORS dict in place.
    """
    global SENSORS
    SENSORS = {}
    LOGGER.debug('Reading sensors configuration...')
    if os.path.isfile(os.path.join(CONFIG_PATH, SENSORS_CONFIG_FILE)):
        SENSORS = read_yaml_file(os.path.join(CONFIG_PATH, SENSORS_CONFIG_FILE))
        sensors_config_file_found = True
    else:
        LOGGER.info('No sensors config file found.')
        sensors_config_file_found = False
    # Ensure every configured sensor has an explicit invert_state flag.
    for sensor_mac in SENSORS:
        if (SENSORS[sensor_mac].get('invert_state') is None):
            SENSORS[sensor_mac]['invert_state'] = False
    # Ask the dongle which sensors are actually linked; add any unknown ones
    # to the config with empty metadata.
    try:
        result = WYZESENSE_DONGLE.List()
        LOGGER.debug(f'Linked sensors: {result}')
        if result:
            for sensor_mac in result:
                if valid_sensor_mac(sensor_mac):
                    if (SENSORS.get(sensor_mac) is None):
                        add_sensor_to_config(sensor_mac, None, None)
        else:
            LOGGER.warning(f'Sensor list failed with result: {result}')
    except TimeoutError:
        # Dongle did not answer in time; proceed with the config we have.
        pass
    # First run (no config file yet): persist whatever we discovered.
    if (not sensors_config_file_found):
        LOGGER.info('Writing Sensors Config File')
        write_yaml_file(os.path.join(CONFIG_PATH, SENSORS_CONFIG_FILE), SENSORS)
    # Announce each valid sensor over MQTT discovery when enabled.
    if CONFIG['hass_discovery']:
        for sensor_mac in SENSORS:
            if valid_sensor_mac(sensor_mac):
                send_discovery_topics(sensor_mac)
class Z3Visitor():
    """AST transform visitor that attaches Z3 constraint objects, derived from
    'Precondition' assertions, to each FunctionDef node."""

    def __init__(self):
        # Register our handler to run on every FunctionDef the visitor meets.
        visitor = TransformVisitor()
        visitor.register_transform(nodes.FunctionDef, self.set_function_def_z3_constraints)
        self.visitor = visitor

    def set_function_def_z3_constraints(self, node: nodes.FunctionDef):
        """Parse the function's preconditions into Z3 expressions and store
        them on node.z3_constraints (unparseable ones are skipped)."""
        # Map argument names to their (inferred) class names so ExprWrapper
        # can type the Z3 variables.
        types = {}
        annotations = node.args.annotations
        arguments = node.args.args
        for (ann, arg) in zip(annotations, arguments):
            if (ann is None):
                continue
            inferred = ann.inferred()
            if ((len(inferred) > 0) and (inferred[0] is not Uninferable)):
                if isinstance(inferred[0], nodes.ClassDef):
                    types[arg.name] = inferred[0].name
        preconditions = parse_assertions(node, parse_token='Precondition')
        z3_constraints = []
        for pre in preconditions:
            # Re-parse the precondition text into an AST expression node.
            pre = astroid.parse(pre).body[0]
            ew = ExprWrapper(pre, types)
            try:
                transformed = ew.reduce()
            except (Z3Exception, Z3ParseException):
                # Precondition is not expressible in Z3: silently skip it.
                transformed = None
            if (transformed is not None):
                z3_constraints.append(transformed)
        node.z3_constraints = z3_constraints
        return node
class TestGetKeyboardMapping(EndianTest):
    """Round-trip pack/unpack tests for the X11 GetKeyboardMapping request
    and its reply against known-good binary encodings."""

    def setUp(self):
        # Fixture data: request/reply arguments plus their expected wire bytes.
        self.req_args_0 = {'count': 207, 'first_keycode': 169}
        self.req_bin_0 = b'e\x00\x00\x02\xa9\xcf\x00\x00'
        # NOTE(review): the next line is syntactically invalid — the keysym
        # values inside the nested lists are missing (`[[, , ], ...]`),
        # almost certainly lost when this file was generated/extracted.
        # The original values must be restored from the test generator
        # before this module can even be imported.
        self.reply_args_0 = {'keysyms': [[, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ]], 'sequence_number': 48346}
        self.reply_bin_0 = b"\x01\x03\xbc\xda\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r\xd6.\nJ\x12Wj\x03In1v\xe5\xc6['\xee7\rI\xb3\xb5\x01V\xb3P\xdet:3kz\xa5hQ0\n\x0e\xf42\xc5\x92\x91x\x82\x1b\xc2[\x85\x9a\x10\x19b\xde\xc4\t\\xa3K\xc9\xdc\xdaXf\r\x9djP+\xd3PIz\x95\x1f\x15\xb0H\r\x0b\x1a\x16Y=\x9e\xc8b\xe0\xa7Z&\x9aB\xe2l\xd6h\xacW\x95\xcc\xdaL\xdaI\xa8g\x1b\x8aW\x0f~\xbbte\xea\xc5\xfb}\xee\x9c\x99HpzPd-e#\x05A\xc0\xdf,q,DD0\xff\x0c6\xd5\x174T\xac\xbb1/n\xdd\xdf\x08\xd3\x93\\xc3R\xbbR\xe2\xb2\x86+\xadv\x96\x01\x9e\x95\xe7l\xbe\xe4\xe65\xc4|\x1eiB#\xb6>\xbe\xed\xf6\r\x82L17}\xb1\xd8z\xfb\x16K.\\-vQZ\xdaLr\x1f!Dj\x13\x14\xc1:\xd8\xbd*Z\x89\x16\xfc\x13\x956\xc77\xbe\n\xf5\x16\x8b\xa7\xe4"

    def testPackRequest0(self):
        # Packing the request args must reproduce the reference bytes.
        bin = request.GetKeyboardMapping._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the reference bytes must recover the args, consuming all input.
        (args, remain) = request.GetKeyboardMapping._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)

    def testPackReply0(self):
        bin = request.GetKeyboardMapping._reply.to_binary(*(), **self.reply_args_0)
        self.assertBinaryEqual(bin, self.reply_bin_0)

    def testUnpackReply0(self):
        (args, remain) = request.GetKeyboardMapping._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.reply_args_0)
def _load_pre_prompt_dataset(data_path, augment_times):
    """Build a DatasetDict of observation prompts from a pickled file.

    Each observation is added `augment_times` times with randomized
    descriptors (augmentation) plus once unmodified; inputs are empty
    strings and outputs are the rendered observation prompts.
    """
    # SECURITY: pickle.load executes arbitrary code — data_path must be trusted.
    with open(data_path, 'rb') as f:
        data = pickle.load(f)
    data = data['observations']
    data_dict: Dict[(str, List[Any])] = {'input': [], 'output': [], 'route_descriptors': [], 'vehicle_descriptors': [], 'pedestrian_descriptors': [], 'ego_vehicle_descriptor': []}
    # Augmented passes: randomized descriptor copies of each observation.
    for _ in tqdm(range(augment_times), desc='Augment times'):
        for d in tqdm(data, desc='Processing data', leave=False):
            data_dict['input'].append('')
            data_dict['output'].append(make_observation_prompt(d))
            _append_descriptors(data_dict, _get_random_obs(d))
    # One unmodified pass over the raw observations.
    for d in data:
        data_dict['input'].append('')
        data_dict['output'].append(make_observation_prompt(d))
        _append_descriptors(data_dict, d)
    training_data = Dataset.from_dict(data_dict)
    dataset = DatasetDict(train=training_data)
    return dataset
def _init_placeholder(env):
    """Create the TF placeholders used by the trainer and return them in a
    dict keyed by '<name>_ph'."""
    action_dim = env.action_space.flat_dim
    obs_dim = env.observation_space.flat_dim
    # (dtype, shape) spec per placeholder; insertion order matches the
    # original creation order.
    specs = {
        'iteration': (tf.int64, None),
        'observations': (tf.float32, (None, obs_dim)),
        'next_observations': (tf.float32, (None, obs_dim)),
        'actions': (tf.float32, (None, action_dim)),
        'next_actions': (tf.float32, (None, action_dim)),
        'rewards': (tf.float32, (None,)),
        'terminals': (tf.float32, (None,)),
        'not_best': (tf.float32, (env.num_envs,)),
        'beta': (tf.float32, None),
    }
    return {
        f'{name}_ph': get_placeholder(name=name, dtype=dtype, shape=shape)
        for name, (dtype, shape) in specs.items()
    }
def _dfs(graph, room_corners, current, end, path, path_len, all_paths, all_lens, trial_num):
    """Depth-first search collecting every path from `current` to `end`.

    Complete paths are appended to `all_paths` with their lengths in
    `all_lens`. Branches are pruned when the accumulated length exceeds
    6 + 5 * trial_num or when the partial path clips a room corner.
    """
    if (current == end):
        all_paths.append(path)
        all_lens.append(float(path_len))
        return
    # Prune: the length budget grows with each retry (trial_num).
    if (path_len > (6 + (trial_num * 5))):
        return
    elif check_corner_on_path(current, path, room_corners):
        return
    for neighbour_node in graph[current].keys():
        dist = graph[current][neighbour_node]
        # Skip already-visited nodes and unreachable (infinite-weight) edges.
        if ((neighbour_node in path) or (dist == np.inf)):
            continue
        # NOTE: `path + [...]` rebinds `path` to a NEW list (no in-place
        # mutation), so lists stored in all_paths above are never modified
        # later; switching to append()/pop() would alias and corrupt them.
        path = (path + [neighbour_node])
        path_len += dist
        _dfs(graph, room_corners, neighbour_node, end, path, path_len, all_paths, all_lens, trial_num)
        path = path[:(- 1)]
        path_len -= dist
class QlArchX86(QlArchIntel):
    """32-bit x86 architecture wiring: Unicorn engine, register map,
    Capstone disassembler and Keystone assembler.

    Fixed: the original had bare `_property` statements — a garbled form
    of the property decorators — leaving these as plain methods, so
    attribute-style access (`arch.uc`) returned bound methods instead of
    the engine objects. Restored as lazily-cached properties so repeated
    accesses return the SAME engine/manager instance.
    """
    type = QL_ARCH.X86
    bits = 32

    @property
    def uc(self) -> Uc:
        # Create the Unicorn engine once and cache it; a fresh Uc per
        # access would detach the register manager from the emulator.
        if not hasattr(self, '_uc'):
            self._uc = Uc(UC_ARCH_X86, UC_MODE_32)
        return self._uc

    @property
    def regs(self) -> QlRegisterManager:
        if not hasattr(self, '_regs'):
            # Merge all x86 register groups into one lookup table.
            regs_map = dict(**x86_const.reg_map_8, **x86_const.reg_map_16, **x86_const.reg_map_32, **x86_const.reg_map_cr, **x86_const.reg_map_dr, **x86_const.reg_map_st, **x86_const.reg_map_misc)
            self._regs = QlRegisterManager(self.uc, regs_map, 'eip', 'esp')
        return self._regs

    @property
    def disassembler(self) -> Cs:
        if not hasattr(self, '_disassembler'):
            self._disassembler = Cs(CS_ARCH_X86, CS_MODE_32)
        return self._disassembler

    @property
    def assembler(self) -> Ks:
        if not hasattr(self, '_assembler'):
            self._assembler = Ks(KS_ARCH_X86, KS_MODE_32)
        return self._assembler
def save_checkpoint(ckpt, is_best, save_dir, model_name=''):
    """Serialize *ckpt* to ``<save_dir>/<model_name>.pt`` and, when
    *is_best* is true, also copy it to ``<save_dir>/best_ckpt.pt``.

    Args:
        ckpt: object to serialize with ``torch.save``.
        is_best: whether this checkpoint is the best so far.
        save_dir: target directory; created (with parents) if missing.
        model_name: basename (without extension) for the checkpoint file.
    """
    # exist_ok avoids the check-then-create race of the original
    # `osp.exists()` / `os.makedirs()` pair.
    os.makedirs(save_dir, exist_ok=True)
    filename = osp.join(save_dir, (model_name + '.pt'))
    torch.save(ckpt, filename)
    if is_best:
        best_filename = osp.join(save_dir, 'best_ckpt.pt')
        shutil.copyfile(filename, best_filename)
def test_repr_pyobjectsdef_pyfunction_without_associated_resource(project):
    """repr() of a PyFunction parsed from a plain string (no file resource)
    falls back to the '::func' dotted-path form with an object address."""
    code = 'def func(arg): pass'
    mod = libutils.get_string_module(project, code)
    obj = mod.get_attribute('func').pyobject
    assert isinstance(obj, pyobjectsdef.PyFunction)
    assert repr(obj).startswith('<rope.base.pyobjectsdef.PyFunction "::func" at 0x')
def test_multiple_column_references_from_previous_defined_cte():
    """Column lineage must resolve references through two chained CTEs
    (cte2 reads cte1) back to the physical source table tab1."""
    sql = 'WITH\ncte1 AS (SELECT a, b FROM tab1),\ncte2 AS (SELECT a, max(b) AS b_max, count(b) AS b_cnt FROM cte1 GROUP BY a)\nINSERT INTO tab2\nSELECT cte1.a, cte2.b_max, cte2.b_cnt FROM cte1 JOIN cte2\nWHERE cte1.a = cte2.a'
    assert_column_lineage_equal(sql, [(ColumnQualifierTuple('a', 'tab1'), ColumnQualifierTuple('a', 'tab2')), (ColumnQualifierTuple('b', 'tab1'), ColumnQualifierTuple('b_max', 'tab2')), (ColumnQualifierTuple('b', 'tab1'), ColumnQualifierTuple('b_cnt', 'tab2'))])
def _download_url(url):
    """Download *url* and return the response body as bytes.

    Retries up to MAX_RETRY_COUNT times on URL errors (logging each
    failure) and raises a generic Exception once all attempts fail.
    """
    req = urllib.request.Request(url, headers={'User-Agent': 'Quay (External Library Downloader)'})
    for index in range(0, MAX_RETRY_COUNT):
        try:
            # Fixed: close the HTTP response deterministically — the
            # original leaked the connection on the success path.
            with urllib.request.urlopen(req) as response:
                return response.read()
        except urllib.error.URLError:
            logger.exception('Got exception when trying to download URL %s (try #%s)', url, (index + 1))
    raise Exception('Aborted due to maximum retries reached')
class AnsiStatusFormatter(object):
    """Format a tweet status line with a per-user ANSI colour."""

    def __init__(self):
        # Stable mapping from screen name to an ANSI colour code.
        self._colourMap = ansi.ColourMap()

    def __call__(self, status, options):
        """Return the status rendered as one colourised line:
        time, colour-on, right-aligned screen name (16 cols), colour-off,
        then the (RT-corrected) status text."""
        colour = self._colourMap.colourFor(status['user']['screen_name'])
        return ('%s%s% 16s%s %s ' % (get_time_string(status, options), ansiFormatter.cmdColour(colour), status['user']['screen_name'], ansiFormatter.cmdReset(), align_text(replaceInStatus(correctRTStatus(status)))))
class AlexNetV1(_AlexNet):
    """AlexNet backbone variant v1 (SiamFC-style): five conv stages with a
    cumulative output stride of 8; conv5 has no BN/ReLU so its raw
    activations can serve as the embedding head."""
    # Total downsampling factor of the output feature map.
    output_stride = 8

    def __init__(self):
        super(AlexNetV1, self).__init__()
        # stage 1: 3->96, 11x11 s2, then 3x3 max-pool s2
        self.conv1 = nn.Sequential(nn.Conv2d(3, 96, 11, 2), _BatchNorm2d(96), nn.ReLU(inplace=True), nn.MaxPool2d(3, 2))
        # stage 2: grouped 5x5 conv (groups=2, original AlexNet split)
        self.conv2 = nn.Sequential(nn.Conv2d(96, 256, 5, 1, groups=2), _BatchNorm2d(256), nn.ReLU(inplace=True), nn.MaxPool2d(3, 2))
        self.conv3 = nn.Sequential(nn.Conv2d(256, 384, 3, 1), _BatchNorm2d(384), nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(nn.Conv2d(384, 384, 3, 1, groups=2), _BatchNorm2d(384), nn.ReLU(inplace=True))
        # stage 5: bare conv — no norm/activation on the output embedding.
        self.conv5 = nn.Sequential(nn.Conv2d(384, 256, 3, 1, groups=2))
class FrontierState(BaseState):
    """State class for the Frontier fork: wires the Frontier-specific
    computation, transaction context/executor, and account DB classes
    into the generic BaseState machinery."""
    computation_class: Type[ComputationAPI] = FrontierComputation
    transaction_context_class: Type[TransactionContextAPI] = FrontierTransactionContext
    account_db_class: Type[AccountDatabaseAPI] = AccountDB
    transaction_executor_class: Type[TransactionExecutorAPI] = FrontierTransactionExecutor
class memoized(object):
    """Decorator caching a function's results, keyed on the pickled
    (args, kwargs) pair; repeated calls with equal arguments return the
    cached value instead of recomputing."""

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args, **kwargs):
        # Pickling makes otherwise-unhashable arguments usable as dict keys.
        key = cPickle.dumps((args, kwargs))
        if key not in self.cache:
            self.cache[key] = self.func(*args, **kwargs)
        return self.cache[key]

    def __repr__(self):
        # Mirror the wrapped function's docstring.
        return self.func.__doc__

    def __get__(self, obj, objtype):
        # Descriptor protocol: support decorating instance methods by
        # pre-binding the instance as the first argument.
        return functools.partial(self.__call__, obj)
def main(args):
    """Print the table of contents of the input PDF, one line per entry."""
    pdf = get_input(args)
    toc = pdf.get_toc(max_depth=args.max_depth)
    for item in toc:
        # '*' leaf, '-' collapsed branch, '+' expanded branch.
        state = ('*' if (item.n_kids == 0) else ('-' if item.is_closed else '+'))
        # 1-based page number, or '?' when the entry has no target page.
        target = ('?' if (item.page_index is None) else (item.page_index + 1))
        print(((' ' * item.level) + ('[%s] %s -> %s # %s %s' % (state, item.title, target, pdfium_i.ViewmodeToStr.get(item.view_mode), round_list(item.view_pos, args.n_digits)))))
def test_pycodestyle(workspace):
    """pycodestyle diagnostics carry the expected code, severity and
    range for a set of known violations in DOC.

    Refactored from four copy-pasted assertion stanzas into one
    table-driven loop; the assertions are unchanged.
    """
    doc = Document(DOC_URI, workspace, DOC)
    diags = pycodestyle_lint.pylsp_lint(workspace, doc)
    assert all(((d['source'] == 'pycodestyle') for d in diags))
    # (message, code, (start line, start char), (end line, end char))
    expected = [
        ('W191 indentation contains tabs', 'W191', (3, 0), (3, 6)),
        ('W391 blank line at end of file', 'W391', (10, 0), (10, 1)),
        ("E201 whitespace after '('", 'E201', (2, 10), (2, 14)),
        ('E128 continuation line under-indented for visual indent', 'E128', (5, 1), (5, 10)),
    ]
    for msg, code, (start_line, start_char), (end_line, end_char) in expected:
        diag = [d for d in diags if (d['message'] == msg)][0]
        assert (diag['code'] == code)
        assert (diag['severity'] == lsp.DiagnosticSeverity.Warning)
        assert (diag['range']['start'] == {'line': start_line, 'character': start_char})
        assert (diag['range']['end'] == {'line': end_line, 'character': end_char})
def infer_type_arguments(type_vars: Sequence[TypeVarLikeType], template: Type, actual: Type, is_supertype: bool=False, skip_unsatisfied: bool=False) -> list[(Type | None)]:
    """Infer substitutions for *type_vars* that make *template* match
    *actual* — as a supertype when *is_supertype* is true, otherwise as
    a subtype. Unsolved variables are None (or dropped when
    *skip_unsatisfied*)."""
    direction = SUPERTYPE_OF if is_supertype else SUBTYPE_OF
    constraints = infer_constraints(template, actual, direction)
    return solve_constraints(type_vars, constraints, skip_unsatisfied=skip_unsatisfied)[0]
@pytest.mark.parametrize('blocking_enabled, method', [(True, 'auto'), (True, 'adblock'), (False, 'auto'), (False, 'adblock'), (False, 'both'), (False, 'hosts')])
def test_disabled_blocking_update(config_stub, tmp_path, caplog, host_blocker_factory, blocking_enabled, method):
    """With blocking disabled (or only partially enabled), an adblock
    update must complete without blocking any of the checked URLs.

    Fixed: the decorator line was garbled to a bare `.parametrize(...)`
    (a syntax error); restored the standard `@pytest.mark.parametrize`.
    """
    if (blocking_enabled and (method == 'auto')):
        pytest.importorskip('adblock')
    config_stub.val.content.blocking.hosts.lists = generic_blocklists(tmp_path)
    config_stub.val.content.blocking.enabled = blocking_enabled
    config_stub.val.content.blocking.method = method
    host_blocker = host_blocker_factory()
    downloads = host_blocker.adblock_update()
    # Drive every fake download to completion.
    while downloads._in_progress:
        current_download = downloads._in_progress[0]
        with caplog.at_level(logging.ERROR):
            current_download.successful = True
            current_download.finished.emit()
    host_blocker.read_hosts()
    for str_url in URLS_TO_CHECK:
        assert (not host_blocker._is_blocked(QUrl(str_url)))
def load_optimizer_state(optimizer: torch.optim.Optimizer, flat_metadata: Dict, flat_tensors: Sequence[torch.Tensor]):
    """Restore *optimizer* state from a flattened metadata list plus the
    matching tensor stream, re-nesting to the optimizer's own structure.

    Metadata entries of type 'tensor' are looked up in *flat_tensors* by
    index; entries of type 'value' contribute their literal value; any
    other entry is silently skipped.
    """
    restored = []
    for entry in flat_metadata:
        entry_type = entry.get('type')
        if entry_type == 'tensor' and isinstance(entry.get('index'), int):
            restored.append(flat_tensors[entry['index']])
        elif entry_type == 'value' and 'value' in entry:
            restored.append(entry['value'])
    with torch.no_grad():
        return optimizer.load_state_dict(nested_pack(restored, structure=optimizer.state_dict()))
def simxGetCollectionHandle(clientID, collectionName, operationMode):
    """Retrieve the handle of a named collection from the remote API
    server. Returns a (return_code, handle) tuple mirroring the C API.
    """
    handle = ct.c_int()
    # The underlying C binding expects bytes; encode str on Python 3.
    if ((sys.version_info[0] == 3) and (type(collectionName) is str)):
        collectionName = collectionName.encode('utf-8')
    return (c_GetCollectionHandle(clientID, collectionName, ct.byref(handle), operationMode), handle.value)
_module()  # NOTE(review): looks like a garbled registry decorator (e.g. `@BACKBONES.register_module()`) — restore from upstream; as written this raises NameError at import.
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone.

    Args:
        widen_factor: channel-width multiplier applied to every stage.
        strides: stride of the first block of each of the 7 stages.
        dilations: dilation of the first block of each stage.
        out_indices: stage indices (0-6) whose outputs are returned.
        frozen_stages: freeze conv1 and stages 1..frozen_stages (-1 = none).
        conv_cfg / norm_cfg / act_cfg: layer-construction configs.
        norm_eval: keep BatchNorm layers in eval mode during training.
        with_cp: use gradient checkpointing in the residual blocks.
    """
    # Each row: (expand_ratio, base output channels, number of blocks).
    arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], [6, 96, 3], [6, 160, 3], [6, 320, 1]]

    def __init__(self, widen_factor=1.0, strides=(1, 2, 2, 2, 1, 2, 1), dilations=(1, 1, 1, 1, 1, 1, 1), out_indices=(1, 2, 4, 6), frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), norm_eval=False, with_cp=False):
        super(MobileNetV2, self).__init__()
        self.widen_factor = widen_factor
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == len(self.arch_settings))
        self.out_indices = out_indices
        for index in out_indices:
            if (index not in range(0, 7)):
                # Fixed: the message previously claimed range(0, 8) while
                # the check uses range(0, 7).
                raise ValueError(f'the item in out_indices must in range(0, 7). But received {index}')
        if (frozen_stages not in range((- 1), 7)):
            raise ValueError(f'frozen_stages must be in range(-1, 7). But received {frozen_stages}')
        # (removed a redundant duplicate `self.out_indices = out_indices`)
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        # Stem: channel count rounded to a multiple of 8 after widening.
        self.in_channels = make_divisible((32 * widen_factor), 8)
        self.conv1 = ConvModule(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.layers = []
        for (i, layer_cfg) in enumerate(self.arch_settings):
            (expand_ratio, channel, num_blocks) = layer_cfg
            stride = self.strides[i]
            dilation = self.dilations[i]
            out_channels = make_divisible((channel * widen_factor), 8)
            inverted_res_layer = self.make_layer(out_channels=out_channels, num_blocks=num_blocks, stride=stride, dilation=dilation, expand_ratio=expand_ratio)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

    def make_layer(self, out_channels, num_blocks, stride, dilation, expand_ratio):
        """Stack `num_blocks` inverted-residual blocks; only the first
        block applies the stage's stride/dilation."""
        layers = []
        for i in range(num_blocks):
            layers.append(InvertedResidual(self.in_channels, out_channels, (stride if (i == 0) else 1), expand_ratio=expand_ratio, dilation=(dilation if (i == 0) else 1), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def init_weights(self, pretrained=None):
        """Load pretrained weights from a checkpoint path, or apply
        Kaiming/constant init when `pretrained` is None."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the stem and all stages, returning the feature map(s) at
        `out_indices` (a single tensor when only one index is given)."""
        x = self.conv1(x)
        outs = []
        for (i, layer_name) in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def _freeze_stages(self):
        """Put conv1 and stages 1..frozen_stages into no-grad/eval state."""
        if (self.frozen_stages >= 0):
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, (self.frozen_stages + 1)):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Switch train/eval mode, re-freeze stages, and keep BN layers
        in eval mode when norm_eval is set."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
class nnUNetTrainer_5epochs(nnUNetTrainer):
    """nnUNet trainer variant that trains for only 5 epochs (useful for
    quick debugging / integration-test runs)."""

    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool=True, device: torch.device=torch.device('cuda')):
        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
        # Override the default epoch budget set by the base trainer.
        self.num_epochs = 5
def main():
    """Entry point for (optionally distributed) pose-model training:
    parse args, resolve config/work dir/GPU flags, set up logging and
    seeding, build the model and datasets, then launch training."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # Configure multiprocessing/thread environment settings.
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Work dir priority: CLI flag > config value > ./work_dirs/<config stem>.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    # Resolve the (deprecated) GPU selection flags down to cfg.gpu_ids.
    if (args.gpus is not None):
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support single GPU mode in non-distributed training. Use `gpus=1` now.')
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. Because we only support single GPU mode in non-distributed training. Use the first GPU in `gpu_ids` now.')
    if ((args.gpus is None) and (args.gpu_ids is None)):
        cfg.gpu_ids = [args.gpu_id]
    if args.autoscale_lr:
        # Linear LR scaling rule relative to the 8-GPU baseline.
        cfg.optimizer['lr'] = ((cfg.optimizer['lr'] * len(cfg.gpu_ids)) / 8)
    if (args.launcher == 'none'):
        distributed = False
        if (len(cfg.gpu_ids) > 1):
            warnings.warn(f'We treat {cfg.gpu_ids} as gpu-ids, and reset to {cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in non-distribute training time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        (_, world_size) = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # Create the work dir and a timestamped log file inside it.
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'''Config:
{cfg.pretty_text}''')
    # Seed everything; optionally offset by rank for per-rank seeds.
    seed = init_random_seed(args.seed)
    seed = ((seed + dist.get_rank()) if args.diff_seed else seed)
    logger.info(f'Set random seed to {seed}, deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    model = build_posenet(cfg.model)
    datasets = [build_dataset(cfg.data.train)]
    if (len(cfg.workflow) == 2):
        # Validation workflow reuses the training pipeline.
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # Stamp checkpoints with the code version and the full config text.
        cfg.checkpoint_config.meta = dict(mmpose_version=(__version__ + get_git_hash(digits=7)), config=cfg.pretty_text)
    train_model(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
class Planner(object):
    """High-level route planner: converts source/target poses into driving
    commands (REACH_GOAL / LANE_FOLLOW / TURN_* / GO_STRAIGHT) using a
    CityTrack graph of the map."""

    def __init__(self, city_name):
        self._city_track = city_track.CityTrack(city_name)
        # Pending intersection commands for the currently computed route.
        self._commands = []

    def get_next_command(self, source, source_ori, target, target_ori):
        """Return the next driving command for an agent at `source` with
        orientation `source_ori`, bound for `target`.

        Raises RuntimeError when no route exists."""
        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)
        if self._city_track.is_at_goal(track_source, track_target):
            return REACH_GOAL
        # Recompute the route only when entering a new node away from an
        # intersection; between recomputations the cached commands apply.
        if (self._city_track.is_at_new_node(track_source) and self._city_track.is_away_from_intersection(track_source)):
            route = self._city_track.compute_route(track_source, source_ori, track_target, target_ori)
            if (route is None):
                raise RuntimeError('Impossible to find route')
            self._commands = self._route_to_commands(route)
            if self._city_track.is_far_away_from_route_intersection(track_source):
                return LANE_FOLLOW
            elif self._commands:
                return self._commands[0]
            else:
                return LANE_FOLLOW
        else:
            if self._city_track.is_far_away_from_route_intersection(track_source):
                return LANE_FOLLOW
            if self._commands:
                return self._commands[0]
            else:
                return LANE_FOLLOW

    def get_shortest_path_distance(self, source, source_ori, target, target_ori):
        """Return the route length in world units (0.0 when no route)."""
        distance = 0
        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)
        current_pos = track_source
        route = self._city_track.compute_route(track_source, source_ori, track_target, target_ori)
        if (route is None):
            return 0.0
        # Sum straight-line hops between consecutive route nodes, then
        # convert grid cells to world units via the two density factors.
        for node_iter in route:
            distance += sldist(node_iter, current_pos)
            current_pos = node_iter
        return ((distance * self._city_track.get_pixel_density()) * self._city_track.get_node_density())

    def is_there_posible_route(self, source, source_ori, target, target_ori):
        """True when a route exists between the projected endpoints.
        (Name typo kept — callers depend on it.)"""
        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)
        return (not (self._city_track.compute_route(track_source, source_ori, track_target, target_ori) is None))

    def test_position(self, source):
        """True when `source` projects to a node away from any intersection."""
        node_source = self._city_track.project_node(source)
        return self._city_track.is_away_from_intersection(node_source)

    def _route_to_commands(self, route):
        """Convert a node route into turn commands — one per intersection
        node — from the signed angle between incoming and outgoing legs."""
        commands_list = []
        for i in range(0, len(route)):
            if (route[i] not in self._city_track.get_intersection_nodes()):
                continue
            current = route[i]
            past = route[(i - 1)]
            # NOTE(review): route[i + 1] raises IndexError if the final
            # route node is an intersection — presumably routes never end
            # on one; confirm against CityTrack.compute_route.
            future = route[(i + 1)]
            past_to_current = np.array([(current[0] - past[0]), (current[1] - past[1])])
            current_to_future = np.array([(future[0] - current[0]), (future[1] - current[1])])
            angle = signal(current_to_future, past_to_current)
            # Negative angle => right turn, positive => left, ~0 => straight.
            if (angle < (- 0.1)):
                command = TURN_RIGHT
            elif (angle > 0.1):
                command = TURN_LEFT
            else:
                command = GO_STRAIGHT
            commands_list.append(command)
        return commands_list
def test_simple_for_with_surrounding_blocks() -> None:
    """CFG of a for/else loop with statements before and after: verify
    both the block partition and the edge list (the else-branch runs when
    the loop exhausts, then falls through to the trailing print)."""
    src = '\n    n = 10\n    for i in range(n):\n        print(i - 1)\n    else:\n        print(i + 1)\n    print(i)\n    '
    cfg = build_cfg(src)
    expected_blocks = [['n = 10', 'range(n)'], ['i'], ['print(i - 1)'], ['print(i + 1)'], ['print(i)'], []]
    assert (expected_blocks == _extract_blocks(cfg))
    expected_edges = [[['n = 10', 'range(n)'], ['i']], [['i'], ['print(i - 1)']], [['print(i - 1)'], ['i']], [['i'], ['print(i + 1)']], [['print(i + 1)'], ['print(i)']], [['print(i)'], []]]
    assert (expected_edges == _extract_edges(cfg))
class cvae_model_parser(fvae_model_parser):
    """Config parser for the CVAE model: extends the FVAE parser with the
    encoder convolution spec (`conv_enc`)."""

    def __init__(self, model_config_path):
        super(cvae_model_parser, self).__init__(model_config_path)
        # NOTE(review): the third positional argument to parser.get()
        # suggests a custom parser wrapper (stdlib ConfigParser.get takes
        # fallback keyword-only) — confirm against fvae_model_parser.
        self.config['conv_enc'] = parse_raw_conv_str(self.parser.get('model', 'conv_enc', ''))

    @staticmethod
    def write_config(config, f):
        """Write *config* via the parent writer, then append the conv_enc
        line.

        Fixed: the method takes no `self` and delegates through an
        unbound `super(...)` call, so it must be a @staticmethod — as
        written it would receive the instance as `config` when called on
        an object.
        """
        super(cvae_model_parser, cvae_model_parser).write_config(config, f)
        f.write('\n')
        for key in ['conv_enc']:
            f.write(('%s= %s\n' % (key.ljust(20), conv_conf_to_str(config[key]))))
def items(validator: Validator, items: Mapping[(Hashable, Any)], instance: Any, schema: Mapping[(Hashable, Any)]) -> Iterator[ValidationError]:
    """Apply the `items` subschema to every element of an array instance,
    yielding any validation errors; non-array instances are ignored."""
    if validator.is_type(instance, 'array'):
        for index, element in enumerate(instance):
            yield from validator.descend(element, items, path=index)
def CounterList():
    """Render a list of three counters, each with a '+1' button that
    increments only its own entry."""
    counters, set_counters = use_state([0, 0, 0])

    def make_increment_click_handler(index):
        # Factory binds `index` per item so every button targets its own
        # counter rather than the loop's final value.
        def handle_click(event):
            updated = list(counters)
            updated[index] = counters[index] + 1
            set_counters(updated)
        return handle_click

    list_items = []
    for index, count in enumerate(counters):
        increment_button = html.button({'on_click': make_increment_click_handler(index)}, '+1')
        list_items.append(html.li({'key': index}, count, increment_button))
    return html.ul(list_items)
def test_get_wavenumber_range(*args, **kwargs):
    """Exercise get_wavenumber_range over unit combinations: astropy
    quantities, explicit wunit strings, the Default('cm-1') sentinel, and
    every over-specified combination that must raise ValueError."""
    assert (get_wavenumber_range((1 * u.um), (10 * u.um), medium='vacuum', return_input_wunit=True) == (1000, 10000, 'nm_vac'))
    assert (get_wavenumber_range(wavenum_min=10, wavenum_max=20, wunit=Default('cm-1'), return_input_wunit=True) == (10, 20, 'cm-1'))
    assert np.isclose(get_wavenumber_range(wavelength_min=1, wavelength_max=2, medium='vacuum', wunit=Default('cm-1')), (5000000.0, .0)).all()  # NOTE(review): `(5000000.0, .0)` looks garbled — the second value should presumably be 10000000.0 (1 nm in vacuum); confirm against the original test.
    # Specifying more than one wavespace at once must raise.
    with pytest.raises(ValueError):
        get_wavenumber_range(wavenum_min=1000, wavenum_max=2000, wavelength_min=1, wavelength_max=2, medium='vacuum', wunit=Default('cm-1'))
    with pytest.raises(ValueError):
        get_wavenumber_range(wunit=Default('cm-1'))
    with pytest.raises(ValueError):
        get_wavenumber_range(wavenum_min=1, wavenum_max=2, wmin=10, wmax=20, wunit=Default('cm-1'))
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=10, wmax=20, wavelength_min=1, wavelength_max=2)
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=1, wmax=2, wavenum_min=10, wavenum_max=20, wavelength_min=100, wavelength_max=200, wunit=Default('cm-1'))
    assert (get_wavenumber_range(wmin=10, wmax=20, wunit=Default('cm-1'), return_input_wunit=True) == (10.0, 20.0, 'cm-1'))
    with pytest.raises(ValueError):
        get_wavenumber_range(wavenum_min=10, wavenum_max=20, wmin=(100 * (1 / u.cm)), wmax=(200 * (1 / u.cm)), wunit=Default('cm-1'))
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=(100 * (1 / u.cm)), wmax=(200 * (1 / u.cm)), wavelength_min=10, wavelength_max=20, wunit=Default('cm-1'))
    with pytest.raises(ValueError):
        get_wavenumber_range(wavenum_min=10, wavenum_max=20, wmin=(100 * (1 / u.cm)), wmax=(200 * (1 / u.cm)), wavelength_min=10, wavelength_max=20, wunit=Default('cm-1'))
    assert (get_wavenumber_range(wmin=(100 * (1 / u.cm)), wmax=(200 * (1 / u.cm)), wunit=Default('cm-1')) == (100.0, 200.0))
    assert np.isclose(get_wavenumber_range(wmin=(1 * u.cm), wmax=(2 * u.cm), medium='vacuum', wunit=Default('cm-1')), (0.5, 1.0)).all()
    # An explicit non-default wunit conflicting with explicit wavespace args must raise.
    with pytest.raises(ValueError):
        get_wavenumber_range(wavenum_min=1, wavenum_max=2, wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wavelength_min=1, wavelength_max=2, wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wavenum_min=1, wavenum_max=2, wavelength_min=10, wavelength_max=20, wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=1, wmax=2, wavenum_min=10, wavenum_max=20, wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=1, wmax=2, wavelength_min=10, wavelength_max=20, wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=1, wmax=2, wavenum_min=10, wavenum_max=20, wavelength_min=100, wavelength_max=200, wunit='cm')
    assert np.isclose(get_wavenumber_range(wmin=1, wmax=2, wunit='cm'), (0., 0.)).all()  # NOTE(review): `(0., 0.)` looks garbled — 1-2 cm should give ~(0.5, 1.0) cm-1 as asserted above; confirm against the original test.
    assert (get_wavenumber_range(wmin=1, wmax=2, wunit='cm-1') == (1.0, 2.0))
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=(1 * u.cm), wmax=(2 * u.cm), wavenum_min=10, wavenum_max=20, wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=(1 * u.cm), wmax=(2 * u.cm), wavelength_min=10, wavelength_max=20, wunit='cm')
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=(1 * u.cm), wmax=(2 * u.cm), wavenum_min=1, wavenum_max=2, wavelength_min=10, wavelength_max=20, wunit='cm')
    assert np.isclose(get_wavenumber_range(wmin=(1 * u.cm), wmax=(2 * u.cm), wunit='cm'), (0., 0.)).all()  # NOTE(review): expected value likely garbled (see note above).
    assert (get_wavenumber_range(wmin=(1 * (1 / u.cm)), wmax=(2 * (1 / u.cm)), wunit='cm-1') == (1.0, 2.0))
    assert np.isclose(get_wavenumber_range(wmin=(1 * u.cm), wmax=(2 * u.cm), wunit='cm'), (0., 0.)).all()  # NOTE(review): expected value likely garbled (see note above).
    assert np.isclose(get_wavenumber_range(wavelength_min=(1 * u.cm), wavelength_max=(2 * u.cm), wunit=Default('cm-1')), (0., 0.)).all()  # NOTE(review): expected value likely garbled (see note above).
    # Quantity wmin/wmax whose unit contradicts an explicit wunit must raise.
    with pytest.raises(ValueError):
        get_wavenumber_range(wmin=(1 * u.cm), wmax=(2 * u.cm), wunit='cm-1')
class _VIIRSCoefficients(_Coefficients):
    """Atmospheric-correction coefficient tables for VIIRS bands.

    COEFF_INDEX_MAP is keyed first by resolution (meters), then by either
    a WavelengthRange or a band name ('M05', 'I01', ...), mapping to the
    column index into each LUTS row for that band.
    """
    # NOTE(review): the physical meaning of each LUTS row (per-band
    # correction terms) is not evident from this chunk — confirm against
    # the _Coefficients base class before documenting further.
    LUTS = [np.array([0., 0.0015933, 0, 1.78644e-05, 0., 0., 0., 0., 0., 0., 0, 0, 0, 0, 0, 0]), np.array([0.812659, 0.832931, 1.0, 0.867785, 0.806816, 0.944958, 0.78812, 0.791204, 0.900564, 0.942907, 0, 0, 0, 0, 0, 0]), np.array([0.0433461, 0.0, 0.0178299, 0.0853012, 0, 0, 0, 0.0813531, 0, 0, 0.0663, 0.0836, 0.0485, 0.0395, 0.0119, 0.00263]), np.array([0.0435, 0.01582, 0.16176, 0.0974, 0.00369, 0.00132, 0.00033, 0.05373, 0.01561, 0.00129, 0.1131, 0.0994, 0.0446, 0.0416, 0.0286, 0.0155])]
    COEFF_INDEX_MAP = {1000: {WavelengthRange(0.662, 0.672, 0.682): 0, 'M05': 0, WavelengthRange(0.846, 0.865, 0.885): 1, 'M07': 1, WavelengthRange(0.478, 0.488, 0.498): 2, 'M03': 2, WavelengthRange(0.545, 0.555, 0.565): 3, 'M04': 3, WavelengthRange(1.23, 1.24, 1.25): 4, 'M08': 4, WavelengthRange(1.58, 1.61, 1.64): 5, 'M10': 5, WavelengthRange(2.225, 2.25, 2.275): 6, 'M11': 6}, 500: {WavelengthRange(0.6, 0.64, 0.68): 7, 'I01': 7, WavelengthRange(0.845, 0.865, 0.884): 8, 'I02': 8, WavelengthRange(1.58, 1.61, 1.64): 9, 'I03': 9}}
class InfoCriteria(Scorer):
    """Scorer computing an information criterion ('aic', 'bic', 'ebic')
    for a fitted estimator; returns the NEGATED criterion so that larger
    scores are better (sklearn scorer convention)."""

    def __init__(self, crit='ebic', gamma='default', zero_tol=1e-06):
        # Fixed: the original body was `pass`, so self.crit / self.gamma /
        # self.zero_tol were never stored and every later access (name(),
        # __call__) raised AttributeError.
        self.crit = crit
        self.gamma = gamma
        self.zero_tol = zero_tol

    def name(self):
        """Return the criterion name used by this scorer."""
        return self.crit

    def __call__(self, estimator, X, y, sample_weight=None, offsets=None):
        """Score `estimator` on (X, y) with the configured criterion.

        Raises NotImplementedError for unsupported options (sample
        weights, offsets, non-Lasso penalties, missing dof)."""
        if (sample_weight is not None):
            raise NotImplementedError('TODO: add')
        if (offsets is not None):
            raise NotImplementedError('Think through')
        if (not isinstance(estimator.fit_penalty_, Lasso)):
            raise NotImplementedError('Information criteria is currently only supported for entrywise sparse penalties.')
        # Total log-likelihood over all samples.
        log_lik = estimator.sample_log_liks(X=X, y=y, offsets=offsets).sum()
        n_samples = X.shape[0]
        if (self.crit in ['aic', 'bic']):
            dof = estimator.inferencer_.dof_
            if (dof is None):
                raise NotImplementedError('The estimator does not currentlysupport estimating the degrees of freedom.')
            if (self.crit == 'aic'):
                return (- aic(log_lik=log_lik, n_samples=n_samples, dof=dof))
            elif (self.crit == 'bic'):
                return (- bic(log_lik=log_lik, n_samples=n_samples, dof=dof))
        elif (self.crit == 'ebic'):
            # Extended BIC: uses the support size instead of the dof.
            n_support = count_support(estimator.coef_, zero_tol=self.zero_tol)
            n_features = estimator.inferencer_.X_shape_[1]
            return (- ebic(log_lik=log_lik, n_samples=n_samples, n_features=n_features, n_support=n_support, gamma=self.gamma, fit_intercept=estimator.fit_intercept))
        else:
            raise NotImplementedError("crit must be on of ['aic', 'bic', 'ebic'], not {}".format(self.crit))
def pytest_addoption(parser):
    """Register the interactive-test command line flags with pytest."""
    flags = [
        ('--non-interactive', '[Interactive tests only] Do not use interactive prompts. Skip tests that cannot validate or run without.'),
        ('--sanity', '[Interactive tests only] Do not use interactive prompts. Only skips tests that cannot finish without user intervention.'),
    ]
    for flag, help_text in flags:
        parser.addoption(flag, action='store_true', help=help_text)
def test_iterative_find_nets():
    """Net resolution must be iterative: w-x and y-z are merged into one
    net through the field-level x.a-y.a connection."""
    class Top(ComponentLevel3):
        def construct(s):
            s.w = Wire(SomeMsg)
            s.x = Wire(SomeMsg)
            s.y = Wire(SomeMsg)
            s.z = Wire(SomeMsg)
            # Whole-struct connections on both ends...
            connect(s.w, s.x)
            # ...bridged by a single-field connection in the middle.
            connect(s.x.a, s.y.a)
            connect(s.y, s.z)
            # NOTE(review): this update block appears to be missing its
            # `@update` (or `@s.update`) decorator — confirm against the
            # original PyMTL test.
            def up_wr_s_w():
                s.w = SomeMsg(12, 123)
    _test_model(Top)
def simplify_ops(ops):
    """Collapse adjacent edit operations in *ops* in place.

    An Insert immediately following a Modify span is folded into that
    Modify (tags joined with '+', labels concatenated). A run of Deletes
    immediately following a Modify span is folded into a combined
    'MOD_n+DEL_k' tag. 'Insert'/'Delete' lists emptied by the folding are
    removed from *ops*.

    Returns:
        The (mutated) *ops* dict, for call-chaining convenience.
    """
    ret = ops
    if (ret.get('Modify') and ret.get('Insert')):
        ins = ret['Insert']
        mod = ret['Modify']
        for i in range(len(mod)):
            # Position just past the modified span.
            idx = (mod[i]['pos'] + len(mod[i]['label']))
            # Scan inserts from the end so pops don't shift unvisited items.
            for j in range((len(ins) - 1), (- 1), (- 1)):
                if ((ins[j]['pos'] + 1) == idx):
                    mod[i]['tag'] += ''.join(['+', ins[j]['tag']])
                    mod[i]['label'] += ins[j]['label']
                    ins.pop(j)
        if (len(ins) == 0):
            ret.pop('Insert')
    if (ret.get('Modify') and ret.get('Delete')):
        dels = ret['Delete']
        mod = ret['Modify']
        for i in range(len(mod)):
            idx = (mod[i]['pos'] + len(mod[i]['label']))
            k = 0
            # Consume the run of consecutive deleted positions after the span.
            while (idx in dels):
                dels.remove(idx)
                idx += 1
                k += 1
            if (k > 0):
                mod[i]['tag'] = 'MOD_{}+DEL_{}'.format((k + len(mod[i]['label'])), k)
        if (len(dels) == 0):
            ret.pop('Delete')
    # Fixed: the original assigned `ret` but never returned it, leaving
    # callers with None despite the aliasing assignment at the top.
    return ret
def get_expired_tag(repository_id, tag_name):
    """Return the named tag in the repository if it exists and has already
    expired, else None.

    "Expired" means lifetime_end_ms is set (not NULL) and is at or before
    the current epoch timestamp.
    """
    try:
        # peewee: `>> None` is IS NULL, so `~(... >> None)` is IS NOT NULL.
        return Tag.select().where((Tag.name == tag_name), (Tag.repository == repository_id)).where((~ (Tag.lifetime_end_ms >> None))).where((Tag.lifetime_end_ms <= get_epoch_timestamp_ms())).get()
    except Tag.DoesNotExist:
        return None
def task_lists():
    """Yield every task_struct pointer in the kernel, via gdb.

    Generator for kernel-debugging scripts: starting from ``init_task`` it
    follows each process's ``thread_group`` list (inner loop) to yield every
    thread in the group, then advances along the global ``tasks`` list
    (outer loop) to the next group leader, stopping once the walk wraps
    back around to ``init_task``.
    """
    task_ptr_type = task_type.get_type().pointer()
    init_task = gdb.parse_and_eval('init_task').address
    t = g = init_task
    while True:
        while True:
            (yield t)
            # Next thread in the same thread group.
            t = utils.container_of(t['thread_group']['next'], task_ptr_type, 'thread_group')
            if (t == g):
                break
        # Next thread-group leader on the circular global task list.
        t = g = utils.container_of(g['tasks']['next'], task_ptr_type, 'tasks')
        if (t == init_task):
            return
def build_source(components, howcall):
    """Assemble the source text of an async runner from pipeline components.

    Each component string is stripped, converted to an auto-call expression
    by ``make_autocall``, and emitted as an assignment to ``SYMBOL`` on its
    own line; the generated ``_mario_runner`` returns ``SYMBOL``.  The
    runner's signature comes from ``howcall_to_howsig[howcall]``.
    """
    components = [c.strip() for c in components]
    components = [make_autocall(c, howcall) for c in components]
    # Indentation applied to each generated assignment line; must line up
    # with the dedented template below.
    indent = '        '
    lines = ''.join([f'''{indent}{SYMBOL} = {c}
''' for c in components])
    howsig = howcall_to_howsig[howcall]
    source = textwrap.dedent(f''' async def _mario_runner({howsig.value}):
{lines}
    return {SYMBOL}
    ''')
    return source
def check_valid(pos):
    """Validate that the positive tile counts in *pos* match the hex count.

    Sums the positive entries of the first 8 slots of ``pos.tiles`` and
    compares the total against ``pos.hex.count``.  Non-positive entries are
    normalised to 0 in place (side effect on ``pos.tiles``).

    Raises:
        Exception: if the positive tile total differs from ``pos.hex.count``.
    """
    board = pos.hex  # renamed local: the original shadowed the builtin hex()
    tiles = pos.tiles
    total = 0
    for i in range(8):
        if tiles[i] > 0:
            total += tiles[i]
        else:
            # Normalise negative placeholders to zero (mutates pos.tiles).
            tiles[i] = 0
    if total != board.count:
        raise Exception(('Invalid input. Expected %d tiles, got %d.' % (board.count, total)))
class SupervisedGraphSage(nn.Module):
    """Supervised node classifier on top of a GraphSAGE-style encoder.

    Projects encoder embeddings to class scores with a learned weight
    matrix and trains with cross-entropy.  ``enc`` must be callable as
    ``enc(nodes, full_nodes)`` returning an (embed_dim, num_nodes) matrix
    and expose an ``embed_dim`` attribute.
    """

    def __init__(self, num_classes, enc):
        super(SupervisedGraphSage, self).__init__()
        self.enc = enc
        self.xent = nn.CrossEntropyLoss()
        # (num_classes, embed_dim) projection from embeddings to scores.
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
        # xavier_uniform_ is the in-place, non-deprecated spelling of the
        # original init.xavier_uniform call; the initialisation is identical.
        init.xavier_uniform_(self.weight)

    def forward(self, nodes, full_nodes):
        """Return (num_nodes, num_classes) class scores for *nodes*."""
        embeds = self.enc(nodes, full_nodes)
        scores = self.weight.mm(embeds)
        return scores.t()

    def loss(self, nodes, full_nodes, labels):
        """Cross-entropy loss of predicted scores against *labels*."""
        scores = self.forward(nodes, full_nodes)
        return self.xent(scores, labels.squeeze())
class CT_LatentStyles(BaseOxmlElement):
    """`w:latentStyles` element: default behaviors for latent styles.

    Holds `w:lsdException` children that override the def* defaults for a
    particular named style.
    """
    lsdException = ZeroOrMore('w:lsdException', successors=())
    count = OptionalAttribute('w:count', ST_DecimalNumber)
    defLockedState = OptionalAttribute('w:defLockedState', ST_OnOff)
    defQFormat = OptionalAttribute('w:defQFormat', ST_OnOff)
    defSemiHidden = OptionalAttribute('w:defSemiHidden', ST_OnOff)
    defUIPriority = OptionalAttribute('w:defUIPriority', ST_DecimalNumber)
    defUnhideWhenUsed = OptionalAttribute('w:defUnhideWhenUsed', ST_OnOff)

    def bool_prop(self, attr_name):
        """Return the boolean value of attribute *attr_name*, False if absent."""
        value = getattr(self, attr_name)
        if value is None:
            return False
        return value

    def get_by_name(self, name):
        """Return the `w:lsdException` child having *name*, or None."""
        # BUGFIX: the predicate must test the w:name attribute (@w:name);
        # the previous '[:name="%s"]' is not valid XPath and never matched.
        found = self.xpath('w:lsdException[@w:name="%s"]' % name)
        if not found:
            return None
        return found[0]

    def set_bool_prop(self, attr_name, value):
        """Set attribute *attr_name* to the boolean of *value*."""
        setattr(self, attr_name, bool(value))
def main(args: argparse.Namespace):
    """Render a pre-split Wikipedia text dump into pixel examples and upload.

    Streams *args.data_path* line by line, accumulating lines into
    documents.  Three or more consecutive blank lines end a document, which
    is then rendered via ``process_doc``.  After a separator, a long first
    line (> 100 chars) is treated as body text with no title, a short one
    as the next article's title.  The final partial document is flushed and
    the rendered chunk pushed to the hub.
    """
    text_renderer = PyGameTextRenderer.from_pretrained(args.renderer_name_or_path, use_auth_token=args.auth_token)
    data = {'pixel_values': [], 'num_patches': []}
    dataset_stats = {'total_uploaded_size': 0, 'total_dataset_nbytes': 0, 'total_num_shards': 0, 'total_num_examples': 0, 'total_num_words': 0}
    # Usable pixel budget: full sequence minus two patches -- presumably
    # reserved for special/separator patches; confirm with the renderer.
    max_pixels = ((text_renderer.pixels_per_patch * text_renderer.max_seq_length) - (2 * text_renderer.pixels_per_patch))
    target_seq_length = max_pixels
    idx = 0
    newline_count = 0
    current_doc = ''
    doc_id = 0
    # First article title of an English Wikipedia dump; placeholder until a
    # real title line is seen.
    title = 'Anarchism'
    with open(args.data_path, 'r', encoding='utf-8') as f:
        for line in f:
            if ((len(line) > 0) and (not line.isspace())):
                if (newline_count >= 3):
                    # A run of >= 3 blank lines ended the previous document:
                    # log and render it, then start the next one.
                    num_examples = idx
                    num_words = dataset_stats['total_num_words']
                    logger.info(f'{doc_id}: {title}, target_seq_length={target_seq_length!r}px, num_examples={num_examples!r}, num_words={num_words!r}')
                    (idx, data, dataset_stats) = process_doc(args=args, text_renderer=text_renderer, idx=idx, data=data, dataset_stats=dataset_stats, doc=current_doc, target_seq_length=target_seq_length)
                    doc_id += 1
                    # Heuristic: long lines are body text, short lines are titles.
                    if (len(line.strip()) > 100):
                        current_doc = line.strip()
                        title = 'No title'
                    else:
                        current_doc = ''
                        title = line.strip()
                else:
                    # Continuation of the current document.
                    current_doc += f'''
{line.strip()}'''
                newline_count = 0
            else:
                newline_count += 1
    # Flush the last (possibly partial) document and upload everything.
    (idx, data, dataset_stats) = process_doc(args=args, text_renderer=text_renderer, idx=idx, data=data, dataset_stats=dataset_stats, doc=current_doc, target_seq_length=target_seq_length)
    push_rendered_chunk_to_hub(args, data, dataset_stats, idx)
    logger.info(f"Total num words in wikipedia: {dataset_stats['total_num_words']}")
class WarnTypoAccess(dict):
    """Dict that rejects lookups of the misspelled key 'specialty'.

    Both item access and ``get`` raise RuntimeError pointing the caller at
    the correct key name, 'speciality'.
    """

    @staticmethod
    def _reject_typo(key):
        # Shared guard for both lookup paths.
        if key == 'specialty':
            raise RuntimeError("You may be using the wrong spelling for 'speciality'; The correct key name is 'speciality', not 'specialty'.")

    def __getitem__(self, key):
        self._reject_typo(key)
        return super().__getitem__(key)

    def get(self, key, default=None):
        self._reject_typo(key)
        return super().get(key, default)
def hook_cmp(se: SymbolicExecutor, pstate: ProcessState, addr: int):
    """Try to flip the zero flag at a compare site to discover a new path.

    Asks the solver for an input that makes the symbolic ZF differ from its
    current concrete value; on SAT, a seed built from the model is enqueued
    for later exploration.
    """
    concrete_zf = pstate.cpu.zf
    symbolic_zf = pstate.read_symbolic_register(pstate.registers.zf)
    flip_constraint = symbolic_zf.getAst() != concrete_zf
    status, model = pstate.solve(flip_constraint)
    if status == SolverStatus.SAT:
        se.enqueue_seed(se.mk_new_seed_from_model(model))
# NOTE(review): the two lines below look like truncated pytest decorators
# (presumably '@pytest.mark.end_to_end()' and '@pytest.mark.parametrize(...)');
# confirm against the original test module.
.end_to_end()
.parametrize('file_or_folder', ['folder_a', 'folder_a/task_a.py', 'folder_b', 'folder_b/task_b.py'])
def test_passing_paths_via_configuration_file(tmp_path, file_or_folder):
    """Only tasks under the path named in [tool.pytask.ini_options] run."""
    config = f'''
    [tool.pytask.ini_options]
    paths = "{file_or_folder}"
    '''
    tmp_path.joinpath('pyproject.toml').write_text(textwrap.dedent(config))
    # Create two candidate folders, each with one passing task.
    for letter in ('a', 'b'):
        tmp_path.joinpath(f'folder_{letter}').mkdir()
        tmp_path.joinpath(f'folder_{letter}', f'task_{letter}.py').write_text('def task_passes(): pass')
    session = build(config=tmp_path.joinpath('pyproject.toml'))
    assert (session.exit_code == ExitCode.OK)
    # Only the configured path's single task is collected.
    assert (len(session.tasks) == 1)
class ArrayPredicate(SingleInputMixin, Filter):
    """Filter computed by applying a predicate callable to a term's data.

    ``op`` is called as ``op(data, *opargs)`` on the input term's array and
    the boolean result is AND-ed with the mask.
    """
    params = ('op', 'opargs')
    window_length = 0
    # NOTE(review): a bare call at class scope -- this looks like the body of
    # a truncated decorator (e.g. '@expect_types(term=Term, opargs=tuple)')
    # that belongs on __new__; confirm against the original module.
    _types(term=Term, opargs=tuple)
    def __new__(cls, term, op, opargs):
        # hash() raises TypeError if opargs is unhashable -- fail fast here
        # rather than later when params are hashed.
        hash(opargs)
        return super(ArrayPredicate, cls).__new__(ArrayPredicate, op=op, opargs=opargs, inputs=(term,), mask=term.mask)
    def _compute(self, arrays, dates, assets, mask):
        """Apply the stored predicate to the single input array, masked."""
        params = self.params
        data = arrays[0]
        return (params['op'](data, *params['opargs']) & mask)
    def graph_repr(self):
        """Short label for graph rendering, naming the predicate function."""
        return '{}:\\l op: {}.{}()'.format(type(self).__name__, self.params['op'].__module__, self.params['op'].__name__)
# NOTE(review): the lines below look like truncated click decorators
# (presumably '@...command(help=...)', three '@click.option(...)' calls and
# '@click.pass_context'); confirm against the original CLI module.
(help='Send alerts based on the results of a Safety scan.')
('--check-report', help='JSON output of Safety Check to work with.', type=click.File('r'), default=sys.stdin)
('--policy-file', type=SafetyPolicyFile(), default='.safety-policy.yml', help='Define the policy file to be used')
('--key', envvar='SAFETY_API_KEY', help="API Key for pyup.io's vulnerability database. Can be set as SAFETY_API_KEY environment variable.", required=True)
_context
def alert(ctx, check_report, policy_file, key):
    """CLI entry point: load a Safety Check JSON report and stash an Alert
    object on the click context for subcommands to use.

    Exits with status 1 on malformed JSON or a report missing 'report_meta'.
    """
    LOG.info('alert started')
    LOG.info(f'check_report is using stdin: {(check_report == sys.stdin)}')
    with check_report:
        try:
            safety_report = json.load(check_report)
        except json.decoder.JSONDecodeError as e:
            LOG.info('Error in the JSON report.')
            click.secho('Error decoding input JSON: {}'.format(e.msg), fg='red')
            sys.exit(1)
    # A valid Safety report always carries report_meta; reject anything else.
    if (not ('report_meta' in safety_report)):
        click.secho('You must pass in a valid Safety Check JSON report', fg='red')
        sys.exit(1)
    ctx.obj = Alert(report=safety_report, policy=(policy_file if policy_file else {}), key=key)
class HighResolutionNet(nn.Module):
    """HRNet (High-Resolution Network) backbone with optional classifier head.

    Builds a two-conv stride-2 stem, stage1 (``layer1``), then stages 2-4,
    each preceded by a transition layer that adapts/extends the set of
    parallel resolution branches.  The ``head`` argument selects the top:

    * ``'classification'`` -- incre + downsample modules, a 2048-channel
      final layer, global pooling and a linear classifier;
    * ``'incre'``          -- only the incre modules (num_features = 2048);
    * anything else        -- raw multi-scale features (num_features = 256).
    """

    def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, head='classification'):
        super(HighResolutionNet, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        # Stem: two 3x3 stride-2 convs -> overall stride 4 entering stage1.
        stem_width = cfg['STEM_WIDTH']
        self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM)
        self.act1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM)
        self.act2 = nn.ReLU(inplace=True)
        # Stage 1: a single residual layer on the highest-resolution branch.
        self.stage1_cfg = cfg['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = (block.expansion * num_channels)
        # Stages 2-4: each transition adapts channel counts / adds a branch,
        # then the stage fuses information across resolutions.
        self.stage2_cfg = cfg['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        self.stage3_cfg = cfg['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        self.stage4_cfg = cfg['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
        # Head selection (see class docstring).
        self.head = head
        self.head_channels = None
        if (head == 'classification'):
            self.num_features = 2048
            (self.incre_modules, self.downsamp_modules, self.final_layer) = self._make_head(pre_stage_channels)
            (self.global_pool, self.classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
        elif (head == 'incre'):
            self.num_features = 2048
            (self.incre_modules, _, _) = self._make_head(pre_stage_channels, True)
        else:
            self.incre_modules = None
            self.num_features = 256
        # Feature metadata for downstream feature-extraction wrappers.
        curr_stride = 2
        self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')]
        for (i, c) in enumerate((self.head_channels if self.head_channels else num_channels)):
            curr_stride *= 2
            # With a head, channels are the incre outputs (x4 expansion).
            c = ((c * 4) if self.head_channels else c)
            self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{(i + 1)}')]
        self.init_weights()

    def _make_head(self, pre_stage_channels, incre_only=False):
        """Build incre modules per branch, plus (unless *incre_only*) the
        downsample chain and final 1x1 conv that merge all branches into
        ``num_features`` channels."""
        head_block = Bottleneck
        self.head_channels = [32, 64, 128, 256]
        # One bottleneck layer per resolution branch to raise its width.
        incre_modules = []
        for (i, channels) in enumerate(pre_stage_channels):
            incre_modules.append(self._make_layer(head_block, channels, self.head_channels[i], 1, stride=1))
        incre_modules = nn.ModuleList(incre_modules)
        if incre_only:
            return (incre_modules, None, None)
        # Stride-2 convs that fold each branch into the next lower resolution.
        downsamp_modules = []
        for i in range((len(pre_stage_channels) - 1)):
            in_channels = (self.head_channels[i] * head_block.expansion)
            out_channels = (self.head_channels[(i + 1)] * head_block.expansion)
            downsamp_module = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))
            downsamp_modules.append(downsamp_module)
        downsamp_modules = nn.ModuleList(downsamp_modules)
        final_layer = nn.Sequential(nn.Conv2d(in_channels=(self.head_channels[3] * head_block.expansion), out_channels=self.num_features, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))
        return (incre_modules, downsamp_modules, final_layer)

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch transition modules between consecutive stages.

        Existing branches get a 3x3 conv when channel counts change (else
        Identity); each extra branch is produced by a chain of stride-2
        3x3 convs from the lowest-resolution previous branch.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(nn.Identity())
            else:
                conv3x3s = []
                for j in range(((i + 1) - num_branches_pre)):
                    inchannels = num_channels_pre_layer[(- 1)]
                    # Only the last conv in the chain changes channel count.
                    outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
                    conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), nn.BatchNorm2d(outchannels, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack *blocks* residual blocks, with a projection downsample on
        the first block when stride/channels change."""
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), momentum=_BN_MOMENTUM))
        layers = [block(inplanes, planes, stride, downsample)]
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
        """Build a stage as a chain of HighResolutionModules; returns the
        stage and the resulting per-branch channel counts."""
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # Only the last module may drop multi-scale output.
            reset_multi_scale_output = (multi_scale_output or (i < (num_modules - 1)))
            modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
            num_inchannels = modules[(- 1)].get_num_inchannels()
        return (nn.Sequential(*modules), num_inchannels)

    def init_weights(self):
        """Kaiming-normal init for convs; BN weights/biases to 1/0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def get_classifier(self):
        """Return the classifier module."""
        return self.classifier

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace pooling + classifier for a new number of classes."""
        self.num_classes = num_classes
        (self.global_pool, self.classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def stages(self, x) -> List[torch.Tensor]:
        """Run stage1-4, returning the list of multi-resolution feature maps.

        Each transition either passes branch i through (Identity) or derives
        a new branch from the lowest-resolution output of the prior stage.
        """
        x = self.layer1(x)
        xl = [t(x) for (i, t) in enumerate(self.transition1)]
        yl = self.stage2(xl)
        xl = [(t(yl[(- 1)]) if (not isinstance(t, nn.Identity)) else yl[i]) for (i, t) in enumerate(self.transition2)]
        yl = self.stage3(xl)
        xl = [(t(yl[(- 1)]) if (not isinstance(t, nn.Identity)) else yl[i]) for (i, t) in enumerate(self.transition3)]
        yl = self.stage4(xl)
        return yl

    def forward_features(self, x):
        """Stem + stages + classification-head merge to a single feature map."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.act2(x)
        yl = self.stages(x)
        # Fold the branches top-down: incre each branch, downsample-and-add.
        y = self.incre_modules[0](yl[0])
        for (i, down) in enumerate(self.downsamp_modules):
            y = (self.incre_modules[(i + 1)](yl[(i + 1)]) + down(y))
        y = self.final_layer(y)
        return y

    def forward(self, x):
        """Full classification forward pass: features -> pool -> classifier."""
        x = self.forward_features(x)
        x = self.global_pool(x)
        if (self.drop_rate > 0.0):
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x = self.classifier(x)
        return x
class RawTCPClient(Client):
    """RPC client speaking a raw record protocol over a plain TCP socket."""

    def __init__(self, host, prog, vers, port, open_timeout=5000):
        Client.__init__(self, host, prog, vers, port)
        # open_timeout is in milliseconds; default to 5000 ms when None.
        open_timeout = (open_timeout if (open_timeout is not None) else 5000)
        self.connect((0.001 * open_timeout))
        # Default per-call timeout in seconds.
        self.timeout = 4.0

    def make_call(self, proc, args, pack_func, unpack_func):
        """Issue an RPC call, deriving the socket timeout from the call args.

        For specific procedure numbers the caller-supplied timeout (ms) is
        read from a fixed argument position; other calls get a 4 s default.
        One extra second of slack is always added.
        NOTE(review): the proc -> argument-index mapping (11 -> args[1],
        12/22 -> args[2], 13..17 -> args[3]) is protocol-specific; confirm
        against the server's procedure table.
        """
        if (proc == 11):
            self.timeout = (args[1] / 1000.0)
        elif (proc in (12, 22)):
            self.timeout = (args[2] / 1000.0)
        elif (proc in (13, 14, 15, 16, 17)):
            self.timeout = (args[3] / 1000.0)
        else:
            self.timeout = 4.0
        self.timeout += 1.0
        return super(RawTCPClient, self).make_call(proc, args, pack_func, unpack_func)

    def connect(self, timeout=5.0):
        """Open the TCP socket to (host, port); raise RPCError on failure."""
        logger.debug('RawTCPClient: connecting to socket at (%s, %s)', self.host, self.port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if (not _connect(self.sock, self.host, self.port, timeout)):
            raise RPCError("can't connect to server")

    def close(self):
        """Close the underlying socket."""
        logger.debug('RawTCPClient: closing socket')
        self.sock.close()

    def do_call(self):
        """Send the packed call and read replies until the matching xid.

        Replies with an xid lower than the last sent are stale responses to
        earlier (timed-out) calls and are skipped; a higher xid is an error.
        """
        call = self.packer.get_buf()
        _sendrecord(self.sock, call, timeout=self.timeout)
        try:
            # Procedure 3 presumably replies in multiple records -- confirm
            # against the protocol; other packers may lack a proc attribute.
            min_packages = int((self.packer.proc == 3))
            logger.debug(('RawTCPClient: procedure type %i' % self.packer.proc))
        except AttributeError:
            min_packages = 0
        while True:
            reply = _recvrecord(self.sock, self.timeout, min_packages=min_packages)
            u = self.unpacker
            u.reset(reply)
            (xid, verf) = u.unpack_replyheader()
            if (xid == self.lastxid):
                return
            elif (xid < self.lastxid):
                # Stale reply from a previous call; keep reading.
                continue
            else:
                raise RPCError(('wrong xid in reply %r instead of %r' % (xid, self.lastxid)))
def check_lockstring(accessing_obj, lockstring, no_superuser_bypass=False, default=False, access_type=None):
    """Evaluate *lockstring* for *accessing_obj* via a shared module handler.

    Lazily creates the module-level LockHandler (bound to a dummy object)
    on first use, then delegates the check to it.
    """
    global _LOCK_HANDLER
    if not _LOCK_HANDLER:
        # First call: build the singleton handler.
        _LOCK_HANDLER = LockHandler(_ObjDummy())
    return _LOCK_HANDLER.check_lockstring(
        accessing_obj,
        lockstring,
        no_superuser_bypass=no_superuser_bypass,
        default=default,
        access_type=access_type,
    )
# NOTE(review): the line below looks like a truncated pytest decorator
# (presumably '@pytest.mark.unit()'); confirm against the original module.
.unit()
def test_captureresult() -> None:
    """CaptureResult behaves like an (out, err) named tuple: length, field
    access, unpacking, indexing, equality and ordering against plain
    tuples, hashing, count/index, iteration and _replace."""
    cr = CaptureResult('out', 'err')
    assert (len(cr) == 2)
    assert (cr.out == 'out')
    assert (cr.err == 'err')
    (out, err) = cr
    assert (out == 'out')
    assert (err == 'err')
    assert (cr[0] == 'out')
    assert (cr[1] == 'err')
    assert (cr == cr)
    assert (cr == CaptureResult('out', 'err'))
    assert (cr != CaptureResult('wrong', 'err'))
    # Equality, ordering and hashing interoperate with plain tuples.
    assert (cr == ('out', 'err'))
    assert (cr != ('out', 'wrong'))
    assert (hash(cr) == hash(CaptureResult('out', 'err')))
    assert (hash(cr) == hash(('out', 'err')))
    assert (hash(cr) != hash(('out', 'wrong')))
    assert (cr < ('z',))
    assert (cr < ('z', 'b'))
    assert (cr < ('z', 'b', 'c'))
    assert (cr.count('err') == 1)
    assert (cr.count('wrong') == 0)
    assert (cr.index('err') == 1)
    with pytest.raises(ValueError):
        assert (cr.index('wrong') == 0)
    assert (next(iter(cr)) == 'out')
    assert (cr._replace(err='replaced') == ('out', 'replaced'))
# NOTE(review): this class declares annotated fields but no __init__; it was
# presumably decorated with @dataclass and the decorator is missing here --
# confirm against the original module.
class DataCollatorCTCWithPadding():
    """Collator that dynamically pads CTC audio inputs and text labels.

    Examples whose ``input_values`` exceed ``max_length`` are dropped.
    Label padding positions are replaced with -100 so they are ignored by
    the CTC loss.
    """
    # Processor that pads both the audio inputs and the tokenized labels.
    processor: AutoProcessor
    # Padding strategy passed through to processor.pad().
    padding: Union[(bool, str)] = 'longest'
    # Optional hard cap on input length; longer examples are skipped.
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__(self, features: List[Dict[(str, Union[(List[int], torch.Tensor)])]]) -> Dict[(str, torch.Tensor)]:
        """Split features into inputs/labels, pad each, and mask label padding."""
        input_features = []
        label_features = []
        for feature in features:
            # Silently drop over-long inputs rather than truncating them.
            if (self.max_length and (feature['input_values'].shape[(- 1)] > self.max_length)):
                continue
            input_features.append({'input_values': feature['input_values']})
            label_features.append({'input_ids': feature['labels']})
        batch = self.processor.pad(input_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
        # NOTE(review): as_target_processor() is the legacy context manager
        # for label padding in transformers; confirm library version.
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(label_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors='pt')
        # Replace padding token positions with -100 so CTC loss ignores them.
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), (- 100))
        batch['labels'] = labels
        return batch
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.