code stringlengths 281 23.7M |
|---|
def ql_syscall_kernelrpc_mach_vm_map_trap(ql, target, address, size, mask, flags, cur_protection):
    """Emulate the Mach _kernelrpc_mach_vm_map trap.

    Carves a fresh, page-aligned region out of the emulated process's
    vmmap area, maps it, and writes the chosen start address back through
    the `address` out-pointer as a little-endian u64.
    Returns KERN_SUCCESS.
    """
    ql.log.debug(('[mach] mach vm map trap(target: 0x%x, address: 0x%x, size: 0x%x, mask: 0x%x, flag: 0x%x, cur_protect: 0x%x)' % (target, address, size, mask, flags, cur_protection)))
    # Bump the allocation cursor up to the next mask-aligned boundary.
    misalignment = ql.os.macho_vmmap_end & mask
    if misalignment > 0:
        ql.os.macho_vmmap_end -= misalignment
        ql.os.macho_vmmap_end += mask + 1
    region_start = page_align_end(ql.os.macho_vmmap_end, PAGE_SIZE)
    region_end = page_align_end(region_start + size, PAGE_SIZE)
    ql.os.macho_vmmap_end = region_end
    ql.mem.map(region_start, region_end - region_start)
    # Report the allocated address back to the guest.
    ql.mem.write(address, struct.pack('<Q', region_start))
    return KERN_SUCCESS
class PropertyInfo(object):
    """Metadata record describing a single property exposed by `host`.

    Captures the property's name, type, documentation, optional enum of
    allowed values, grouping, visibility and default, and registers itself
    with the host, which hands back the property's key.
    """

    def __init__(self, host, name, tp, doc='', enum=None, getter=propGet, group='Base', internal=False, duplicate=False, default=None):
        # NOTE(review): `getter` defaults to the module-level `propGet`
        # (defined elsewhere in this file) -- the default is captured once
        # at class-definition time, as usual for Python defaults.
        self.Name = name
        self.Type = tp
        self.Group = group
        self.Doc = doc
        self.Enum = enum
        # Bind the plain getter function to this instance so `self.get()`
        # calls it with this PropertyInfo as its first argument.
        self.get = getter.__get__(self, self.__class__)
        self.Internal = internal
        self.Default = default
        # Registration returns the key the host assigned to this property;
        # `duplicate` presumably controls whether a name clash is allowed.
        self.Key = host.addPropertyInfo(self, duplicate)
class Client(object):
    """Basic pysnow HTTP client.

    Targets a ServiceNow server addressed either by `instance` (short
    instance name) or `host` (full hostname), authenticating with
    user/password credentials or a caller-supplied requests session.
    Usable as a context manager; `close()` shuts the session down.
    """

    def __init__(self, instance=None, host=None, user=None, password=None, raise_on_empty=None, request_params=None, use_ssl=True, session=None):
        # `instance` and `host` are two mutually exclusive ways of naming
        # the server.  NOTE(review): this truthiness-based check only fires
        # when both values are truthy (e.g. host='' would slip through);
        # presumably callers always pass non-empty strings.
        if ((host and instance) is not None):
            raise InvalidUsage("Arguments 'instance' and 'host' are mutually exclusive, you cannot use both.")
        if (type(use_ssl) is not bool):
            raise InvalidUsage("Argument 'use_ssl' must be of type bool")
        # `raise_on_empty` is deprecated: None means "use the default
        # (True)"; an explicit bool still works but emits a warning.
        if (raise_on_empty is None):
            self.raise_on_empty = True
        elif (type(raise_on_empty) is bool):
            warnings.warn('The use of the `raise_on_empty` argument is deprecated and will be removed in a future release.', DeprecationWarning)
            self.raise_on_empty = raise_on_empty
        else:
            raise InvalidUsage("Argument 'raise_on_empty' must be of type bool")
        if (not (host or instance)):
            raise InvalidUsage("You must supply either 'instance' or 'host'")
        # OAuthClient manages its own credentials, so only the plain client
        # validates user/password-vs-session here.
        if (not isinstance(self, pysnow.OAuthClient)):
            if ((not (user and password)) and (not session)):
                raise InvalidUsage('You must supply either username and password or a session object')
            elif ((user and session) is not None):
                # NOTE(review): truthiness-based again -- fires only when
                # both `user` and `session` are truthy.
                raise InvalidUsage('Provide either username and password or a session, not both.')
        self.parameters = ParamsBuilder()
        if (request_params is not None):
            # Deprecated argument, merged into `self.parameters` for
            # backwards compatibility.
            warnings.warn('The use of the `request_params` argument is deprecated and will be removed in a future release. Please use Client.parameters instead.', DeprecationWarning)
            self.parameters.add_custom(request_params)
        self.request_params = (request_params or {})
        self.instance = instance
        self.host = host
        self._user = user
        self._password = password
        self.use_ssl = use_ssl
        self.base_url = URLBuilder.get_base_url(use_ssl, instance, host)
        # OAuthClient builds its session later, once tokens are available.
        if (not isinstance(self, pysnow.OAuthClient)):
            self.session = self._get_session(session)
        else:
            self.session = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()

    def close(self):
        """Close the underlying requests session."""
        self.session.close()

    def _get_session(self, session):
        """Return `session` if given, else a new basic-auth session; the
        JSON and user-agent headers pysnow expects are applied either way."""
        if (not session):
            logger.debug(('(SESSION_CREATE) User: %s' % self._user))
            s = requests.Session()
            s.auth = HTTPBasicAuth(self._user, self._password)
        else:
            logger.debug(('(SESSION_CREATE) Object: %s' % session))
            s = session
        s.headers.update({'content-type': 'application/json', 'accept': 'application/json', 'User-Agent': 'pysnow'})
        return s

    def _legacy_request(self, method, table, **kwargs):
        """Build a deprecated LegacyRequest; the warning names the caller
        via the interpreter stack."""
        warnings.warn(('`%s` is deprecated and will be removed in a future release. Please use `resource()` instead.' % inspect.stack()[1][3]), DeprecationWarning)
        return LegacyRequest(method, table, request_params=self.request_params, raise_on_empty=self.raise_on_empty, session=self.session, instance=self.instance, base_url=self.base_url, **kwargs)

    def resource(self, api_path=None, base_path='/api/now', chunk_size=None, **kwargs):
        """Validate both path components and return a Resource bound to
        this client's session and parameters (chunk_size defaults to 8192)."""
        for path in [api_path, base_path]:
            URLBuilder.validate_path(path)
        return Resource(api_path=api_path, base_path=base_path, parameters=self.parameters, chunk_size=(chunk_size or 8192), session=self.session, base_url=self.base_url, **kwargs)

    def query(self, table, **kwargs):
        """Deprecated: GET records from `table` via the legacy request API."""
        return self._legacy_request('GET', table, **kwargs)

    def insert(self, table, payload, **kwargs):
        """Deprecated: POST `payload` into `table` via the legacy request API."""
        r = self._legacy_request('POST', table, **kwargs)
        return r.insert(payload)
def inline_comments_in_inp(filepath, overwrite=False):
    """Rewrite an SWMM .inp file so stand-alone ';' comment lines are
    appended inline to the following data line.

    Writes `<name>_unGUI.inp` next to the input; with `overwrite=True` the
    original is replaced by the rewritten file.

    Comment lines that mostly match the current section's column headers
    (more than 75% overlap) are treated as GUI-written column-header
    comments and dropped.

    BUG FIX: section headers were detected with `'[' and (']' in line)`,
    which Python evaluates as just `']' in line` ('[' is always truthy);
    both brackets are now required.
    """
    newfilename = (os.path.splitext(os.path.basename(filepath))[0] + '_unGUI.inp')
    newfilepath = os.path.join(os.path.dirname(filepath), newfilename)
    allheaders = get_inp_sections_details(filepath)
    with open(filepath) as oldf:
        with open(newfilepath, 'w') as new:
            comment_concat = []
            # Start in the first known section until a header line is seen.
            current_section = list(allheaders.keys())[0]
            for line in oldf:
                if ('[' in line) and (']' in line):
                    current_section = line.strip()
                stripped = line.strip()
                if len(stripped) > 1:
                    if (stripped[0] == ';') and (stripped[:2] != ';;'):
                        # Candidate user comment: measure its word overlap
                        # with the section's column headers.
                        words = line.split()
                        hdrs = allheaders[current_section]['columns']
                        perc_match_to_header = (float(len([x for x in words if (x in hdrs)])) / float(len(hdrs)))
                        if perc_match_to_header <= 0.75:
                            comment_concat.append(stripped)
                        # else: looks like a column-header comment -- drop it.
                    else:
                        # Data line: append any accumulated comments inline,
                        # then reset the accumulator.
                        comment_string = ''
                        if comment_concat:
                            comment_string = ' '.join(comment_concat)
                        new.write((stripped + comment_string) + '\n')
                        comment_concat = []
                else:
                    # Blank / single-character lines pass through unchanged.
                    new.write(line)
    if overwrite:
        os.remove(filepath)
        os.rename(newfilepath, filepath)
class TruetypeInfo():
    """Parse names, metrics, kerning tables and character maps out of a
    TrueType font file.

    The file is mmap'ed read-only; each table is parsed lazily on first
    access and cached on the instance.  Call close() when done (also
    invoked from __del__).
    """

    # 'name' table name IDs keyed by a friendly alias.
    _name_id_lookup = {'copyright': 0, 'family': 1, 'subfamily': 2, 'identifier': 3, 'name': 4, 'version': 5, 'postscript': 6, 'trademark': 7, 'manufacturer': 8, 'designer': 9, 'description': 10, 'vendor-url': 11, 'designer-url': 12, 'license': 13, 'license-url': 14, 'preferred-family': 16, 'preferred-subfamily': 17, 'compatible-name': 18, 'sample': 19}
    # Platform IDs shared by the 'name' and 'cmap' tables.
    _platform_id_lookup = {'unicode': 0, 'macintosh': 1, 'iso': 2, 'microsoft': 3, 'custom': 4}
    # Python codec name by platform-specific encoding ID.
    _microsoft_encoding_lookup = {1: 'utf_16_be', 2: 'shift_jis', 4: 'big5', 6: 'johab', 10: 'utf_16_be'}
    _macintosh_encoding_lookup = {0: 'mac_roman'}

    def __init__(self, filename):
        """mmap `filename` read-only and load the table directory plus the
        mandatory 'head' and 'hhea' tables."""
        assert filename, 'must provide a font file name'
        length = os.stat(filename).st_size
        self._fileno = os.open(filename, os.O_RDONLY)
        # POSIX mmap signature when available, Windows-style otherwise.
        if hasattr(mmap, 'MAP_SHARED'):
            self._data = mmap.mmap(self._fileno, length, mmap.MAP_SHARED, mmap.PROT_READ)
        else:
            self._data = mmap.mmap(self._fileno, length, None, mmap.ACCESS_READ)
        self._closed = False
        offsets = _read_offset_table(self._data, 0)
        self._tables = {}
        for table in _read_table_directory_entry.array(self._data, offsets.size, offsets.num_tables):
            self._tables[table.tag] = table
        # Lazy caches, filled by the get_* accessors below.
        self._names = None
        self._horizontal_metrics = None
        self._character_advances = None
        self._character_kernings = None
        self._glyph_kernings = None
        self._character_map = None
        self._glyph_map = None
        self._font_selection_flags = None
        self.header = _read_head_table(self._data, self._tables['head'].offset)
        self.horizontal_header = _read_horizontal_header(self._data, self._tables['hhea'].offset)

    def get_font_selection_flags(self):
        """Return the OS/2 fsSelection bitfield (cached)."""
        # BUG FIX: compare against None so a legitimate flags value of 0 is
        # cached instead of re-read from the table on every call.
        if (self._font_selection_flags is None):
            OS2_table = _read_OS2_table(self._data, self._tables['OS/2'].offset)
            self._font_selection_flags = OS2_table.fs_selection
        return self._font_selection_flags

    def is_bold(self):
        """True when the fsSelection BOLD bit (0x20) is set."""
        return bool((self.get_font_selection_flags() & 32))

    def is_italic(self):
        """True when the fsSelection ITALIC bit (0x01) is set."""
        return bool((self.get_font_selection_flags() & 1))

    def get_names(self):
        """Return {(platform_id, name_id): [(encoding_id, language_id,
        raw bytes), ...]} from the 'name' table (cached)."""
        if self._names:
            return self._names
        naming_table = _read_naming_table(self._data, self._tables['name'].offset)
        name_records = _read_name_record.array(self._data, (self._tables['name'].offset + naming_table.size), naming_table.count)
        # String storage begins at string_offset relative to the table start.
        storage = (naming_table.string_offset + self._tables['name'].offset)
        self._names = {}
        for record in name_records:
            value = self._data[(record.offset + storage):((record.offset + storage) + record.length)]
            key = (record.platform_id, record.name_id)
            value = (record.encoding_id, record.language_id, value)
            if (key not in self._names):
                self._names[key] = []
            self._names[key].append(value)
        return self._names

    def get_name(self, name, platform=None, languages=None):
        """Return the decoded 'name' table entry, or None if absent.

        `name` may be an alias from _name_id_lookup or a raw name ID, and
        `platform` an alias or raw ID.  Without a platform, Microsoft
        entries are tried first, then Macintosh.
        """
        names = self.get_names()
        if (type(name) == str):
            name = self._name_id_lookup[name]
        if (not platform):
            for platform in ('microsoft', 'macintosh'):
                value = self.get_name(name, platform, languages)
                if value:
                    return value
        if (type(platform) == str):
            platform = self._platform_id_lookup[platform]
        if (not ((platform, name) in names)):
            return None
        if (platform == 3):
            encodings = self._microsoft_encoding_lookup
            if (not languages):
                # Default to the English LCIDs (US, UK, AU, CA, NZ, IE).
                languages = (1033, 2057, 3081, 4105, 5129, 6153)
        elif (platform == 1):
            # BUG FIX: was `self.__macintosh_encoding_lookup`, which
            # name-mangles to `_TruetypeInfo__macintosh_encoding_lookup`
            # and raised AttributeError on every Macintosh lookup.
            encodings = self._macintosh_encoding_lookup
            if (not languages):
                languages = (0,)
        for record in names[(platform, name)]:
            if ((record[1] in languages) and (record[0] in encodings)):
                decoder = codecs.getdecoder(encodings[record[0]])
                return decoder(record[2])[0]
        return None

    def get_horizontal_metrics(self):
        """Return the 'hmtx' long_hor_metric array (cached)."""
        if (not self._horizontal_metrics):
            ar = _read_long_hor_metric.array(self._data, self._tables['hmtx'].offset, self.horizontal_header.number_of_h_metrics)
            self._horizontal_metrics = ar
        return self._horizontal_metrics

    def get_character_advances(self):
        """Return {character: advance in em units}; the first glyph mapped
        to a character wins (cached)."""
        if self._character_advances:
            return self._character_advances
        ga = self.get_glyph_advances()
        gmap = self.get_glyph_map()
        self._character_advances = {}
        for i in range(len(ga)):
            if ((i in gmap) and (not (gmap[i] in self._character_advances))):
                self._character_advances[gmap[i]] = ga[i]
        return self._character_advances

    def get_glyph_advances(self):
        """Return per-glyph advance widths normalized to em units."""
        hm = self.get_horizontal_metrics()
        return [(float(m.advance_width) / self.header.units_per_em) for m in hm]

    def get_character_kernings(self):
        """Return {(left_char, right_char): kerning} derived from the glyph
        kerning table (cached)."""
        if (not self._character_kernings):
            gmap = self.get_glyph_map()
            kerns = self.get_glyph_kernings()
            self._character_kernings = {}
            for (pair, value) in kerns.items():
                (lglyph, rglyph) = pair
                lchar = (((lglyph in gmap) and gmap[lglyph]) or None)
                rchar = (((rglyph in gmap) and gmap[rglyph]) or None)
                # Skip pairs where either glyph has no character mapping.
                if (lchar and rchar):
                    self._character_kernings[(lchar, rchar)] = value
        return self._character_kernings

    def get_glyph_kernings(self):
        """Return {(left_glyph, right_glyph): kerning in em units} from the
        'kern' table (cached)."""
        if self._glyph_kernings:
            return self._glyph_kernings
        header = _read_kern_header_table(self._data, self._tables['kern'].offset)
        offset = (self._tables['kern'].offset + header.size)
        kernings = {}
        for i in range(header.n_tables):
            header = _read_kern_subtable_header(self._data, offset)
            # Only horizontal, non-minimum, non-perpendicular subtables apply.
            if ((header.coverage & header.horizontal_mask) and (not (header.coverage & header.minimum_mask)) and (not (header.coverage & header.perpendicular_mask))):
                if ((header.coverage & header.format_mask) == 0):
                    self._add_kernings_format0(kernings, (offset + header.size))
            offset += header.length
        self._glyph_kernings = kernings
        return kernings

    def _add_kernings_format0(self, kernings, offset):
        """Accumulate format-0 kerning pairs (scaled to em units) into
        `kernings`; repeated pairs from multiple subtables are summed."""
        header = _read_kern_subtable_format0(self._data, offset)
        kerning_pairs = _read_kern_subtable_format0Pair.array(self._data, (offset + header.size), header.n_pairs)
        for pair in kerning_pairs:
            if ((pair.left, pair.right) in kernings):
                kernings[(pair.left, pair.right)] += (pair.value / float(self.header.units_per_em))
            else:
                kernings[(pair.left, pair.right)] = (pair.value / float(self.header.units_per_em))

    def get_glyph_map(self):
        """Return {glyph_index: character}, the inverse of the character
        map; the first character mapped to a glyph wins (cached)."""
        if self._glyph_map:
            return self._glyph_map
        cmap = self.get_character_map()
        self._glyph_map = {}
        for (ch, glyph) in cmap.items():
            if (not (glyph in self._glyph_map)):
                self._glyph_map[glyph] = ch
        return self._glyph_map

    def get_character_map(self):
        """Return {character: glyph_index} from the first Microsoft/Unicode
        (platform 3, encoding 1) format-4 'cmap' subtable (cached)."""
        if self._character_map:
            return self._character_map
        cmap = _read_cmap_header(self._data, self._tables['cmap'].offset)
        records = _read_cmap_encoding_record.array(self._data, (self._tables['cmap'].offset + cmap.size), cmap.num_tables)
        self._character_map = {}
        for record in records:
            if ((record.platform_id == 3) and (record.encoding_id == 1)):
                offset = (self._tables['cmap'].offset + record.offset)
                format_header = _read_cmap_format_header(self._data, offset)
                if (format_header.format == 4):
                    self._character_map = self._get_character_map_format4(offset)
                    break
        return self._character_map

    def _get_character_map_format4(self, offset):
        """Decode a format-4 ("segment mapping to delta values") cmap
        subtable into {character: glyph_index}."""
        header = _read_cmap_format4Header(self._data, offset)
        seg_count = (header.seg_count_x2 // 2)
        array_size = struct.calcsize(f'>{seg_count}H')
        # Four parallel uint16 arrays; the +2 skips the reservedPad field
        # that follows endCount.
        end_count = self._read_array(f'>{seg_count}H', (offset + header.size))
        start_count = self._read_array(f'>{seg_count}H', (((offset + header.size) + array_size) + 2))
        id_delta = self._read_array(f'>{seg_count}H', ((((offset + header.size) + array_size) + 2) + array_size))
        id_range_offset_address = (((((offset + header.size) + array_size) + 2) + array_size) + array_size)
        id_range_offset = self._read_array(f'>{seg_count}H', id_range_offset_address)
        character_map = {}
        for i in range(0, seg_count):
            if (id_range_offset[i] != 0):
                if (id_range_offset[i] == 65535):
                    # 0xFFFF marks an unused segment.
                    continue
                for c in range(start_count[i], (end_count[i] + 1)):
                    # Glyph index lives in glyphIdArray, addressed relative
                    # to this idRangeOffset entry (per the spec).
                    addr = (((id_range_offset[i] + (2 * (c - start_count[i]))) + id_range_offset_address) + (2 * i))
                    g = struct.unpack('>H', self._data[addr:(addr + 2)])[0]
                    if (g != 0):
                        character_map[chr(c)] = ((g + id_delta[i]) % 65536)
            else:
                # Zero idRangeOffset: glyph = char code + delta (mod 2^16).
                for c in range(start_count[i], (end_count[i] + 1)):
                    g = ((c + id_delta[i]) % 65536)
                    if (g != 0):
                        character_map[chr(c)] = g
        return character_map

    def _read_array(self, format, offset):
        """Unpack struct `format` from the mapped data at `offset`."""
        size = struct.calcsize(format)
        return struct.unpack(format, self._data[offset:(offset + size)])

    def close(self):
        """Release the mmap and the file descriptor."""
        self._data.close()
        os.close(self._fileno)
        self._closed = True

    def __del__(self):
        if (not self._closed):
            self.close()
def test_select_column_wildcard_with_qualifier():
    """A wildcard qualified by either the table name or its alias must
    resolve to tab2.* flowing into tab1.*."""
    expected = [(ColumnQualifierTuple('*', 'tab2'), ColumnQualifierTuple('*', 'tab1'))]
    qualified_queries = (
        'INSERT INTO tab1\nSELECT tab2.*\nFROM tab2 a\n INNER JOIN tab3 b\n ON a.id = b.id',
        'INSERT INTO tab1\nSELECT a.*\nFROM tab2 a\n INNER JOIN tab3 b\n ON a.id = b.id',
    )
    for sql in qualified_queries:
        assert_column_lineage_equal(sql, expected)
class AttrVI_ATTR_USB_BULK_OUT_PIPE(RangeAttribute):
    """VI_ATTR_USB_BULK_OUT_PIPE attribute for USB RAW resources."""

    # Applies only to raw USB interface sessions.
    resources = [(constants.InterfaceType.usb, 'RAW')]

    py_name = ''

    visa_name = 'VI_ATTR_USB_BULK_OUT_PIPE'

    visa_type = 'ViInt16'

    default = NotAvailable

    # Readable, writable, and settable per-session (local).
    read = True
    write = True
    local = True

    # Valid endpoint numbers are 1..15; -1 is additionally accepted.
    min_value = 1
    max_value = 15
    values = [-1]
class RoundRectItem(QGraphicsObject):
    """Rounded-rectangle graphics item with a drop shadow, a diagonal
    color gradient (or palette fill when `fill` is set) and an optional
    pixmap drawn at its centre."""

    def __init__(self, bounds, color, parent=None):
        super(RoundRectItem, self).__init__(parent)
        # When True, paint with the palette's window brush instead of the
        # gradient (exposed as the `fill` Qt property below).
        self.fillRect = False
        self.bounds = QRectF(bounds)
        self.pix = QPixmap()
        # Diagonal gradient from `color` (top-left) to a darker shade
        # (bottom-right).
        self.gradient = QLinearGradient()
        self.gradient.setStart(self.bounds.topLeft())
        self.gradient.setFinalStop(self.bounds.bottomRight())
        self.gradient.setColorAt(0, color)
        self.gradient.setColorAt(1, color.darker(200))
        self.setCacheMode(QGraphicsItem.ItemCoordinateCache)

    def setFill(self, fill):
        self.fillRect = fill
        # Repaint with the new brush.
        self.update()

    def fill(self):
        return self.fillRect
    # Qt property wiring: must follow the getter/setter definitions above.
    fill = pyqtProperty(bool, fill, setFill)

    def paint(self, painter, option, widget):
        # Soft drop shadow: translucent black rounded rect offset by (2, 2).
        painter.setPen(Qt.NoPen)
        painter.setBrush(QColor(0, 0, 0, 64))
        painter.drawRoundedRect(self.bounds.translated(2, 2), 25.0, 25.0)
        if self.fillRect:
            painter.setBrush(QApplication.palette().brush(QPalette.Window))
        else:
            painter.setBrush(self.gradient)
        painter.setPen(QPen(Qt.black, 1))
        painter.drawRoundedRect(self.bounds, 25.0, 25.0)
        # Draw the pixmap scaled up and centred on the item origin.
        if (not self.pix.isNull()):
            painter.scale(1.95, 1.95)
            painter.drawPixmap(((- self.pix.width()) / 2), ((- self.pix.height()) / 2), self.pix)

    def boundingRect(self):
        # Expanded by (2, 2) so the offset drop shadow is repainted too.
        return self.bounds.adjusted(0, 0, 2, 2)

    def pixmap(self):
        return QPixmap(self.pix)

    def setPixmap(self, pixmap):
        self.pix = QPixmap(pixmap)
        self.update()
# NOTE(review): this bare call looks like the remnant of a stripped
# `@with_fixtures(...)` decorator -- as written its result is discarded at
# import time; confirm against the original source.
_fixtures(WebFixture, LargeFileUploadInputFixture)
def test_queueing_async_uploads(web_fixture, large_file_upload_input_fixture):
    """While a large upload is in flight, a second file queues at 0%
    progress; once the first completes, both files end up listed and
    uploaded."""
    fixture = large_file_upload_input_fixture
    fixture.run_hook_after = True
    web_fixture.reahl_server.set_app(fixture.new_wsgi_app(enable_js=True))
    browser = web_fixture.driver_browser
    browser.open('/')
    assert (not fixture.file_was_uploaded(fixture.file_to_upload1.name))
    assert (not fixture.uploaded_file_is_listed(fixture.file_to_upload1.name))
    # Keep the server busy in the background so the second upload has to
    # queue behind the (simulated, slow) first one.
    with web_fixture.reahl_server.in_background(wait_till_done_serving=False):
        browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload1.name, wait_for_ajax=False)
        browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload2.name, wait_for_ajax=False)
    # First upload's progress bar reads 100%, the queued one reads 0%.
    progress1 = browser.get_attribute('//ul/li[1]/progress', 'value')
    assert (progress1 == '100')
    progress2 = browser.get_attribute('//ul/li[2]/progress', 'value')
    assert (progress2 == '0')
    # Let the simulated large upload finish, then both should be complete.
    fixture.simulate_large_file_upload_done()
    browser.wait_for(fixture.uploaded_file_is_listed, fixture.file_to_upload2.name)
    assert fixture.uploaded_file_is_listed(fixture.file_to_upload1.name)
    assert fixture.uploaded_file_is_listed(fixture.file_to_upload2.name)
    assert fixture.file_was_uploaded(fixture.file_to_upload1.name)
    assert fixture.file_was_uploaded(fixture.file_to_upload2.name)
def get_xritdecompress_outfile(stdout):
    """Extract the decompressed-file name from xRITDecompress output.

    `stdout` is an iterable of bytes lines of the form b'key: value'.
    Scanning stops at the first line without a b':' separator or at the
    b'Decompressed file' entry; returns the file name as bytes, or b''
    when none was found.
    """
    result = b''
    for raw_line in stdout:
        fields = raw_line.split(b':', 1)
        if len(fields) != 2:
            # A line without a key/value separator ends the scan.
            break
        key, value = (part.strip() for part in fields)
        if key == b'Decompressed file':
            result = value
            break
    return result
def _convert_configs_values_to_bool(dictionary: Dict):
for (key, value) in dictionary.items():
if (value == 'True'):
dictionary[key] = True
elif (value == 'False'):
dictionary[key] = False
elif isinstance(value, List):
for item in value:
if isinstance(item, Dict):
_convert_configs_values_to_bool(item)
elif isinstance(value, Dict):
_convert_configs_values_to_bool(value)
else:
pass |
def main():
    """Build the configured recognizer and print its FLOPs/params for a
    dummy input of the requested shape (requires a CUDA device)."""
    args = parse_args()
    # Accept 1/2/4/5 shape values: a bare edge length, an (H, W) pair, or
    # a full 4D (image) / 5D (video clip) input shape.
    if (len(args.shape) == 1):
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif (len(args.shape) == 2):
        input_shape = ((1, 3) + tuple(args.shape))
    elif (len(args.shape) in (4, 5)):
        # Merged the previously duplicated 4- and 5-element branches.
        input_shape = tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    model = build_recognizer(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    model = model.cuda()
    model.eval()
    # The complexity hook needs a single-tensor forward; recognizers
    # expose that as `forward_dummy`.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # BUG FIX: message previously read "currently not currently supported".
        raise NotImplementedError('FLOPs counter is currently not supported with {}'.format(model.__class__.__name__))
    (flops, params) = get_model_complexity_info(model, input_shape)
    split_line = ('=' * 30)
    print(f'''{split_line}
Input shape: {input_shape}
Flops: {flops}
Params: {params}
{split_line}''')
    print('!!!Please be cautious if you use the results in papers. You may need to check if all ops are supported and verify that the flops computation is correct.')
def find_most_similar_index(str_list, target_str):
    """Return the index of the string in `str_list` most similar to
    `target_str`, as scored by `str_similarity`.

    Returns None when `str_list` is empty or no candidate scores above 0.

    Fixes: the loop variable shadowed the builtin `str`, and the unused
    local `most_similar_str` has been removed.
    """
    most_similar_index = None
    highest_similarity = 0
    for i, candidate in enumerate(str_list):
        similarity = str_similarity(candidate, target_str)
        # Strictly greater: ties keep the earliest candidate.
        if similarity > highest_similarity:
            most_similar_index = i
            highest_similarity = similarity
    return most_similar_index
def _infer_content_types_from_paths(paths: List[str], content_type_provider: Callable[([str], ContentType)]) -> Dict[(ContentType, List[str])]:
content_type_to_paths = defaultdict(list)
for path in paths:
if (not path.endswith('/')):
content_type_to_paths[content_type_provider(path)].append(path)
return content_type_to_paths |
class egg_info(InfoCommon, Command):
    """setuptools command that creates/refreshes the .egg-info directory."""

    description = "create a distribution's .egg-info directory"

    # distutils option table: (long name, short name, help text).
    user_options = [('egg-base=', 'e', 'directory containing .egg-info directories (default: top of the source tree)'), ('tag-date', 'd', 'Add date stamp (e.g. ) to version number'), ('tag-build=', 'b', 'Specify explicit tag to add to version number'), ('no-date', 'D', "Don't include date stamp [default]")]

    boolean_options = ['tag-date']
    negative_opt = {'no-date': 'tag-date'}

    def initialize_options(self):
        # distutils hook: declare every option before parsing begins.
        self.egg_base = None
        self.egg_name = None
        self.egg_info = None
        self.egg_version = None
        self.ignore_egg_info_in_manifest = False

    def tag_svn_revision(self):
        # Deprecated no-op kept for backwards compatibility.
        pass
    # NOTE(review): the bare expression below looks like the remnant of a
    # stripped `@<property>.setter` decorator (upstream setuptools exposes
    # tag_svn_revision as a deprecated no-op property); as written it would
    # raise NameError at class-creation time -- confirm against upstream.
    _svn_revision.setter
    def tag_svn_revision(self, value):
        pass

    def save_version_info(self, filename):
        """Persist the effective tag settings to `filename` (setup.cfg) so
        later egg_info runs reproduce the same version."""
        egg_info = collections.OrderedDict()
        # Write empty-string if no tag, else the tag; ordering matters
        # because tag_build is used first by other tag handlers.
        egg_info['tag_build'] = self.tags()
        egg_info['tag_date'] = 0
        edit_config(filename, dict(egg_info=egg_info))

    def finalize_options(self):
        # distutils hook: compute derived values after option parsing.
        self.egg_name = self.name
        self.egg_version = self.tagged_version()
        parsed_version = packaging.version.Version(self.egg_version)
        try:
            # Validate name+version by round-tripping through a Requirement;
            # '===' pins non-standard versions exactly.
            is_version = isinstance(parsed_version, packaging.version.Version)
            spec = ('%s==%s' if is_version else '%s===%s')
            packaging.requirements.Requirement((spec % (self.egg_name, self.egg_version)))
        except ValueError as e:
            raise distutils.errors.DistutilsOptionError(('Invalid distribution name or version syntax: %s-%s' % (self.egg_name, self.egg_version))) from e
        if (self.egg_base is None):
            dirs = self.distribution.package_dir
            self.egg_base = (dirs or {}).get('', os.curdir)
        self.ensure_dirname('egg_base')
        self.egg_info = (_normalization.filename_component(self.egg_name) + '.egg-info')
        if (self.egg_base != os.curdir):
            self.egg_info = os.path.join(self.egg_base, self.egg_info)
        # Make the tagged version visible to the rest of the build.
        self.distribution.metadata.version = self.egg_version
        # If pip patched a Distribution object in, keep its version in sync
        # so "setup.py develop"-style installs see the tagged version.
        pd = self.distribution._patched_dist
        key = (getattr(pd, 'key', None) or getattr(pd, 'name', None))
        if ((pd is not None) and (key == self.egg_name.lower())):
            pd._version = self.egg_version
            pd._parsed_version = packaging.version.Version(self.egg_version)
            self.distribution._patched_dist = None

    def _get_egg_basename(self, py_version=PY_MAJOR, platform=None):
        """Compute filename for this egg, e.g. name-version-pyX.Y[-plat]."""
        return _egg_basename(self.egg_name, self.egg_version, py_version, platform)

    def write_or_delete_file(self, what, filename, data, force=False):
        """Write `data` to `filename`, or delete `filename` when `data` is
        empty; warns instead of deleting when data is None and not forced."""
        if data:
            self.write_file(what, filename, data)
        elif os.path.exists(filename):
            if ((data is None) and (not force)):
                log.warn('%s not set in setup(), but %s exists', what, filename)
                return
            else:
                self.delete_file(filename)

    def write_file(self, what, filename, data):
        """Write UTF-8 `data` to `filename`; no-op under --dry-run."""
        log.info('writing %s to %s', what, filename)
        data = data.encode('utf-8')
        if (not self.dry_run):
            f = open(filename, 'wb')
            f.write(data)
            f.close()

    def delete_file(self, filename):
        """Remove `filename`; no-op under --dry-run."""
        log.info('deleting %s', filename)
        if (not self.dry_run):
            os.unlink(filename)

    def run(self):
        self.mkpath(self.egg_info)
        # Touch the directory so consumers can tell egg_info was refreshed.
        try:
            os.utime(self.egg_info, None)
        except OSError as e:
            msg = f"Cannot update time stamp of directory '{self.egg_info}'"
            raise distutils.errors.DistutilsFileError(msg) from e
        # Let every registered egg_info.writers entry point emit its file.
        for ep in metadata.entry_points(group='egg_info.writers'):
            writer = ep.load()
            writer(self, ep.name, os.path.join(self.egg_info, ep.name))
        # Stale native_libs.txt from older setuptools is removed.
        nl = os.path.join(self.egg_info, 'native_libs.txt')
        if os.path.exists(nl):
            self.delete_file(nl)
        self.find_sources()

    def find_sources(self):
        """Generate SOURCES.txt via the manifest maker."""
        manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt')
        mm = manifest_maker(self.distribution)
        mm.ignore_egg_info_dir = self.ignore_egg_info_in_manifest
        mm.manifest = manifest_filename
        mm.run()
        self.filelist = mm.filelist
class TestConfigPath():
    """Validation tests for VersionBuildHook's `config_path` option."""

    def test_correct(self, isolation):
        config = {'path': 'foo/bar.py'}
        hook = VersionBuildHook(str(isolation), config, None, None, '', '')
        # Chained equality: reading the property twice checks both the
        # value and that repeated access stays consistent (caching).
        assert (hook.config_path == hook.config_path == 'foo/bar.py')

    def test_missing(self, isolation):
        # An empty path must be rejected as a missing required option.
        config = {'path': ''}
        hook = VersionBuildHook(str(isolation), config, None, None, '', '')
        with pytest.raises(ValueError, match='Option `path` for build hook `version` is required'):
            _ = hook.config_path

    def test_not_string(self, isolation):
        # A non-string path must raise TypeError.
        config = {'path': 9000}
        hook = VersionBuildHook(str(isolation), config, None, None, '', '')
        with pytest.raises(TypeError, match='Option `path` for build hook `version` must be a string'):
            _ = hook.config_path
def test_log1mexp_deprecation_warnings():
    """Positive inputs must trigger the FutureWarnings, while the
    `negative_input=True` path must be completely silent and produce the
    same value -- for both the numpy and the symbolic implementations."""
    with pytest.warns(FutureWarning, match='pymc.math.log1mexp_numpy will expect a negative input'):
        res_pos = log1mexp_numpy(2)
    # Escalate any warning to an error: the negative-input path must not warn.
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        res_neg = log1mexp_numpy((- 2), negative_input=True)
    with pytest.warns(FutureWarning, match='pymc.math.log1mexp will expect a negative input'):
        res_pos_at = log1mexp(2).eval()
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        res_neg_at = log1mexp((- 2), negative_input=True).eval()
    # All four paths must agree numerically.
    assert np.isclose(res_pos, res_neg)
    assert np.isclose(res_pos_at, res_neg)
    assert np.isclose(res_neg_at, res_neg)
# NOTE(review): stray bare name -- this appears to be the remnant of the
# tf.contrib `@add_arg_scope` decorator lost during extraction; confirm
# against the original source.
_arg_scope
def layer_norm(inputs, center=True, scale=True, activation_fn=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, begin_norm_axis=1, begin_params_axis=(- 1), scope=None):
    """Layer normalization (tf.contrib style).

    Normalizes `inputs` over axes [begin_norm_axis, rank) and applies a
    learned offset (`beta`, when `center`) and scale (`gamma`, when
    `scale`) of shape inputs.shape[begin_params_axis:].  Optionally applies
    `activation_fn` and registers the result in `outputs_collections`.
    Raises ValueError for unknown rank, out-of-range axes, or a
    params-shape that is not fully defined.
    """
    with variable_scope.variable_scope(scope, 'LayerNorm', [inputs], reuse=reuse) as sc:
        inputs = ops.convert_to_tensor(inputs)
        inputs_shape = inputs.shape
        inputs_rank = inputs_shape.ndims
        if (inputs_rank is None):
            raise ValueError(('Inputs %s has undefined rank.' % inputs.name))
        dtype = inputs.dtype.base_dtype
        # Normalize a negative begin_norm_axis the usual Python-index way.
        if (begin_norm_axis < 0):
            begin_norm_axis = (inputs_rank + begin_norm_axis)
        if ((begin_params_axis >= inputs_rank) or (begin_norm_axis >= inputs_rank)):
            raise ValueError(('begin_params_axis (%d) and begin_norm_axis (%d) must be < rank(inputs) (%d)' % (begin_params_axis, begin_norm_axis, inputs_rank)))
        # beta/gamma are broadcast over the leading (un-normalized) axes.
        params_shape = inputs_shape[begin_params_axis:]
        if (not params_shape.is_fully_defined()):
            raise ValueError(('Inputs %s: shape(inputs)[%s:] is not fully defined: %s' % (inputs.name, begin_params_axis, inputs_shape)))
        (beta, gamma) = (None, None)
        if center:
            beta_collections = utils.get_variable_collections(variables_collections, 'beta')
            beta = variables.model_variable('beta', shape=params_shape, dtype=dtype, initializer=init_ops.zeros_initializer(), collections=beta_collections, trainable=trainable)
        if scale:
            gamma_collections = utils.get_variable_collections(variables_collections, 'gamma')
            gamma = variables.model_variable('gamma', shape=params_shape, dtype=dtype, initializer=init_ops.ones_initializer(), collections=gamma_collections, trainable=trainable)
        # Moments over the normalized axes, keeping dims for broadcasting.
        norm_axes = list(range(begin_norm_axis, inputs_rank))
        (mean, variance) = nn.moments(inputs, norm_axes, keep_dims=True)
        # fp16 needs a much larger epsilon to stay representable.
        variance_epsilon = (1e-12 if (dtype != dtypes.float16) else 0.001)
        outputs = nn.batch_normalization(inputs, mean, variance, offset=beta, scale=gamma, variance_epsilon=variance_epsilon)
        outputs.set_shape(inputs_shape)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
class CenterCrop_iBims1(object):
    """Transform for iBims-1 samples: center-crop the image and every mask
    to `size_image`, then resize the depth/edge/mask channels (but not the
    RGB image) down to `size_depth`."""

    def __init__(self, size_image, size_depth):
        # Both sizes are (width, height) pairs.
        self.size_image = size_image
        self.size_depth = size_depth

    def __call__(self, sample):
        # Unpack all channels of an iBims-1 sample dict; the *_paras plane
        # parameters and calibration are passed through untouched.
        (image, depth, edges, calib, mask_invalid, mask_transp, mask_wall, mask_wall_paras, mask_table, mask_table_paras, mask_floor, mask_floor_paras) = (sample['image'], sample['depth'], sample['edges'], sample['calib'], sample['mask_invalid'], sample['mask_transp'], sample['mask_wall'], sample['mask_wall_paras'], sample['mask_table'], sample['mask_table_paras'], sample['mask_floor'], sample['mask_floor_paras'])
        # Same center crop for the image and every aligned channel.
        image = self.centerCrop(image, self.size_image)
        depth = self.centerCrop(depth, self.size_image)
        edges = self.centerCrop(edges, self.size_image)
        mask_invalid = self.centerCrop(mask_invalid, self.size_image)
        mask_transp = self.centerCrop(mask_transp, self.size_image)
        mask_wall = self.centerCrop(mask_wall, self.size_image)
        mask_table = self.centerCrop(mask_table, self.size_image)
        mask_floor = self.centerCrop(mask_floor, self.size_image)
        # Ground-truth channels are downsampled to the prediction size.
        (ow, oh) = self.size_depth
        depth = depth.resize((ow, oh))
        edges = edges.resize((ow, oh))
        mask_invalid = mask_invalid.resize((ow, oh))
        mask_transp = mask_transp.resize((ow, oh))
        mask_wall = mask_wall.resize((ow, oh))
        mask_table = mask_table.resize((ow, oh))
        mask_floor = mask_floor.resize((ow, oh))
        return {'image': image, 'depth': depth, 'edges': edges, 'calib': calib, 'mask_invalid': mask_invalid, 'mask_transp': mask_transp, 'mask_wall': mask_wall, 'mask_wall_paras': mask_wall_paras, 'mask_table': mask_table, 'mask_table_paras': mask_table_paras, 'mask_floor': mask_floor, 'mask_floor_paras': mask_floor_paras}

    def centerCrop(self, image, size):
        """Center-crop a PIL-style image to `size` = (width, height);
        returns the image unchanged when it already matches."""
        (w1, h1) = image.size
        (tw, th) = size
        if ((w1 == tw) and (h1 == th)):
            return image
        x1 = int(round(((w1 - tw) / 2.0)))
        y1 = int(round(((h1 - th) / 2.0)))
        image = image.crop((x1, y1, (tw + x1), (th + y1)))
        return image
def test_expect_rho(all_qevo):
    """expect_super on a random vectorized density matrix must agree with
    QobjEvo.expect at every test time, to within 1e-14."""
    # Random complex vector of length N*N, offset by (1 + 1j) so the state
    # is never degenerate-small.
    vec = _data.dense.fast_from_numpy(((np.random.rand((N * N)) + 1) + (1j * np.random.rand((N * N)))))
    # The same state as an N x N operator, for the Qobj-based expect path.
    mat = _data.column_unstack_dense(vec, N)
    qobj = Qobj(mat)
    op = liouvillian(all_qevo)
    for t in TESTTIMES:
        Qo1 = op(t)
        assert (abs((_data.expect_super(Qo1.data, vec) - op.expect(t, qobj))) < 1e-14)
def merge_turns(turns):
    """Merge overlapping speaker turns within each (file_id, speaker_id).

    NOTE(review): `groupby` only groups *consecutive* elements, so `turns`
    is presumably already sorted/grouped by (file_id, speaker_id) --
    confirm at the call sites.
    """
    new_turns = []
    for ((file_id, speaker_id), speaker_turns) in groupby(turns, (lambda x: (x.file_id, x.speaker_id))):
        speaker_turns = list(speaker_turns)
        # An interval tree merges overlapping [onset, offset) spans cheaply.
        speaker_it = IntervalTree.from_tuples([(turn.onset, turn.offset) for turn in speaker_turns])
        n_turns_pre = len(speaker_it)
        speaker_it.merge_overlaps()
        n_turns_post = len(speaker_it)
        # Only rebuild (and warn) when merging actually removed intervals.
        if (n_turns_post < n_turns_pre):
            speaker_turns = []
            for intrvl in speaker_it:
                speaker_turns.append(Turn(intrvl.begin, intrvl.end, speaker_id=speaker_id, file_id=file_id))
            speaker_turns = sorted(speaker_turns, key=(lambda x: (x.onset, x.offset)))
            warn(('Merging overlapping speaker turns. FILE: %s, SPEAKER: %s n_turns_pre: %s n_turns_post: %s' % (file_id, speaker_id, str(n_turns_pre), str(n_turns_post))))
        new_turns.extend(speaker_turns)
    return new_turns
class PegasusConfig(PretrainedConfig):
    """Configuration for the Pegasus encoder-decoder model.

    Stores model hyper-parameters (layer counts, dimensions, dropout
    rates, special token ids) and forwards the token-id /
    encoder-decoder settings to PretrainedConfig.
    """

    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    # Canonical-name aliases resolved through PretrainedConfig.
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, classifier_dropout=0.0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        # scale_embedding=True multiplies embeddings by sqrt(d_model).
        self.scale_embedding = scale_embedding
        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)

    # BUG FIX: these accessors mirror the aliases declared in
    # `attribute_map` and are `@property` in upstream transformers; the
    # decorators appear to have been stripped during extraction, which
    # left plain bound methods shadowing the attribute aliases.
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class OptionalPackagesTestCase(unittest.TestCase):
    """Tests for OptionalPackageRequirementError construction."""

    def test_exception(self):
        # A known optional package yields the canned "missing packages" text.
        ex = OptionalPackageRequirementError('python-magic')
        self.assertTrue(str(ex).startswith('The following packages are missing'))
        # An unknown package name must be rejected outright.
        self.assertRaises(ValueError, OptionalPackageRequirementError, 'PackageThatNotFoundInRequirements-optional.txt')
class Solution():
    """LeetCode 938: sum of BST node values within an inclusive range."""

    def __init__(self):
        # Running sum shared across the recursive calls.
        self.total = 0

    def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
        """Return the sum of all node values v with L <= v <= R.

        Bug fixed: the original used a bare `return` (i.e. None) when
        called on an empty tree despite the `-> int` annotation; it now
        returns the running total (0 for a fresh instance).  The redundant
        `if root:` re-check after the emptiness guard was also removed.
        """
        if not root:
            return self.total
        if L <= root.val <= R:
            self.total += root.val
        self.rangeSumBST(root.left, L, R)
        self.rangeSumBST(root.right, L, R)
        return self.total
def insert_projects_table(file: Path, *, projects: Sequence[Project], input_filename: str, include_info: bool=True):
    """Regenerate the projects table between the START/END markers in `file`.

    Everything between '<!-- START bin/projects.py -->' and
    '<!-- END bin/projects.py -->' is replaced by a freshly rendered table
    plus a do-not-edit note pointing at `input_filename`.
    """
    text = file.read_text()
    projects_table = render_projects(projects, include_info=include_info, dest_path=file)
    start_str = '<!-- START bin/projects.py -->\n'
    start = text.find(start_str)
    # Script-style guards: both markers must exist in the target file.
    assert (start != (- 1))
    end = text.find('<!-- END bin/projects.py -->\n')
    assert (end != (- 1))
    generated_note = f"<!-- this section is generated by bin/projects.py. Don't edit it directly, instead, edit {input_filename} -->"
    # Keep everything up to (and including) the START marker and from the
    # END marker onward; only the middle is regenerated.
    new_text = f'''{text[:(start + len(start_str))]}
{generated_note}
{projects_table}
{text[end:]}'''
    file.write_text(new_text)
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
    """Serve *filename* from below *root* with correct HTTP semantics.

    Handles mime-type guessing, charset for text types, Content-Disposition
    for downloads, If-Modified-Since (304) and single-range requests
    (206/416). Returns an HTTPResponse or HTTPError; does not raise for
    missing or unreadable files.
    """
    root = (os.path.abspath(root) + os.sep)
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    headers = dict()
    # Security: the resolved path must remain inside root (no '..' escapes).
    if (not filename.startswith(root)):
        return HTTPError(403, 'Access denied.')
    if ((not os.path.exists(filename)) or (not os.path.isfile(filename))):
        return HTTPError(404, 'File does not exist.')
    if (not os.access(filename, os.R_OK)):
        return HTTPError(403, 'You do not have permission to access this file.')
    if (mimetype == 'auto'):
        (mimetype, encoding) = mimetypes.guess_type(filename)
        # guess_type may also report a transfer encoding (e.g. gzip).
        if encoding:
            headers['Content-Encoding'] = encoding
    if mimetype:
        # Only text/* types get an explicit charset appended.
        if ((mimetype[:5] == 'text/') and charset and ('charset' not in mimetype)):
            mimetype += ('; charset=%s' % charset)
        headers['Content-Type'] = mimetype
    if download:
        # download=True keeps the file's own name; a string value overrides it.
        download = os.path.basename((filename if (download == True) else download))
        headers['Content-Disposition'] = ('attachment; filename="%s"' % download)
    stats = os.stat(filename)
    headers['Content-Length'] = clen = stats.st_size
    lm = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(stats.st_mtime))
    headers['Last-Modified'] = lm
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(';')[0].strip())
    if ((ims is not None) and (ims >= int(stats.st_mtime))):
        # Client cache is current: reply 304 with headers only.
        headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
        return HTTPResponse(status=304, **headers)
    # HEAD requests get an empty body; otherwise the open file object.
    body = ('' if (request.method == 'HEAD') else open(filename, 'rb'))
    headers['Accept-Ranges'] = 'bytes'
    ranges = request.environ.get('HTTP_RANGE')
    if ('HTTP_RANGE' in request.environ):
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if (not ranges):
            return HTTPError(416, 'Requested Range Not Satisfiable')
        # Only the first requested range is honored (no multipart ranges).
        (offset, end) = ranges[0]
        headers['Content-Range'] = ('bytes %d-%d/%d' % (offset, (end - 1), clen))
        headers['Content-Length'] = str((end - offset))
        if body:
            body = _file_iter_range(body, offset, (end - offset))
        return HTTPResponse(body, status=206, **headers)
    return HTTPResponse(body, **headers)
def cli_main(modify_parser: Optional[Callable[([argparse.ArgumentParser], None)]]=None) -> None:
    """Parse training CLI args, optionally start a plasma store and the CUDA
    profiler, then launch distributed training."""
    train_parser = options.get_training_parser()
    parsed_args = options.parse_args_and_arch(train_parser, modify_parser=modify_parser)
    cfg = convert_namespace_to_omegaconf(parsed_args)
    # Only the master rank echoes the full argument namespace.
    if distributed_utils.is_master(cfg.distributed_training):
        print(parsed_args)
    if cfg.common.use_plasma_view:
        server = PlasmaStore(path=cfg.common.plasma_path)
        logger.info(f'Started plasma server pid {server.server.pid} {cfg.common.plasma_path}')
    if not parsed_args.profile:
        distributed_utils.call_main(cfg, main)
    else:
        # Wrap training in NVTX emission so nvprof/nsight can annotate it.
        with torch.cuda.profiler.profile():
            with torch.autograd.profiler.emit_nvtx():
                distributed_utils.call_main(cfg, main)
class Generator(Object):
    """Base class for scenario generators.

    Generators form a parent/child hierarchy (built by `update_hierarchy`) and
    derive deterministic per-object random seeds from a configured base seed
    plus a hash of their own serialized state.
    """
    seed = Int.T(optional=True, help='Random seed for a reproducible scenario.')

    def __init__(self, **kwargs):
        Object.__init__(self, **kwargs)
        self._seed = None  # lazily computed effective seed (see get_seed)
        self._parent = None  # filled in by update_hierarchy()
        self.update_hierarchy()
        self._retry_offset = 0  # bumped by retry() to vary hash() and seeds

    def retry(self):
        """Invalidate cached seeds here and in all children, changing the hash."""
        self.clear()
        self._retry_offset += 1
        for val in self.T.ivals(self):
            if isinstance(val, Generator):
                val.retry()

    def clear(self):
        # Drop the cached seed so the next get_seed() recomputes it.
        self._seed = None

    def hash(self):
        """Stable hex digest of the serialized state plus the retry offset."""
        return hashlib.sha1((self.dump() + ('\n\n%i' % self._retry_offset)).encode('utf8')).hexdigest()

    def get_seed_offset(self):
        # Fold the digest into [0, N) to decorrelate sibling generators.
        return (int(self.hash(), base=16) % N)

    def update_hierarchy(self, parent=None):
        """Record *parent* and propagate down to nested generators (also inside lists)."""
        self._parent = parent
        for val in self.T.ivals(self):
            if isinstance(val, Generator):
                val.update_hierarchy(parent=self)
            elif isinstance(val, list):
                for el in val:
                    if isinstance(el, Generator):
                        el.update_hierarchy(parent=self)

    def get_root(self):
        """Return the topmost generator of the hierarchy."""
        if (self._parent is None):
            return self
        else:
            return self._parent.get_root()

    def get_seed(self):
        """Effective seed plus per-object hash offset.

        seed is None: inherit from the parent, or draw randomly at the root.
        seed == 0: always draw a fresh random seed, even with a parent.
        Otherwise: use the configured value. The result is cached in _seed.
        """
        if (self._seed is None):
            if (self.seed is None):
                if (self._parent is not None):
                    self._seed = self._parent.get_seed()
                else:
                    self._seed = num.random.randint(N)
            elif (self.seed == 0):
                self._seed = num.random.randint(N)
            else:
                self._seed = self.seed
        return (self._seed + self.get_seed_offset())

    def get_rstate(self, i):
        """Deterministic RandomState for sub-stream *i* of this generator."""
        return num.random.RandomState(int((self.get_seed() + i)))

    def get_center_latlon(self):
        # Delegates upward; the root generator is expected to override.
        return self._parent.get_center_latlon()

    def get_radius(self):
        # Delegates upward; the root generator is expected to override.
        return self._parent.get_radius()

    def get_stations(self):
        # Default: no stations; station-producing subclasses override this.
        return []
class _ThreadSafeQueue(Generic[_Type]):
    """Asyncio queue that can be fed from threads other than the event loop's.

    `put` may be called off-loop (it marshals onto the captured loop) and
    drops values that are already pending; `get` must run on the owning loop.
    """

    def __init__(self) -> None:
        # Loop captured at construction; put() schedules onto it thread-safely.
        self._loop = get_running_loop()
        self._queue: Queue[_Type] = Queue()
        # Values enqueued but not yet consumed; used to de-duplicate puts.
        self._pending: set[_Type] = set()

    def put(self, value: _Type) -> None:
        # NOTE(review): the membership test and add are not atomic; presumably
        # only one producer thread calls put() at a time -- confirm.
        if (value not in self._pending):
            self._pending.add(value)
            self._loop.call_soon_threadsafe(self._queue.put_nowait, value)

    async def get(self) -> _Type:
        # Await the next value, then allow it to be re-queued by future puts.
        value = (await self._queue.get())
        self._pending.remove(value)
        return value
class TestStrategy(Algo):
    """Demo strategy: on every 10th tick it exits an open position, waits on
    pending orders, or submits a bracket order in a random direction."""

    count = 0

    def on_start(self):
        # Reset the tick counter at session start.
        self.count = 0

    def on_quote(self, instrument):
        pass

    def on_orderbook(self, instrument):
        pass

    def on_fill(self, instrument, order):
        pass

    def on_tick(self, instrument):
        self.count += 1
        # Only act on every 10th tick.
        if self.count % 10 != 0:
            return
        tick = instrument.get_ticks(lookback=1, as_dict=True)
        if instrument.positions['position']:
            print(instrument.symbol, 'still in position. Exiting...')
            instrument.exit()
            return
        if instrument.pending_orders:
            print(instrument.symbol, 'has a pending order. Wait...')
            return
        direction = random.choice(['BUY', 'SELL'])
        print(instrument.symbol, 'not in position. Sending a bracket ', direction, 'order...')
        last_price = tick['last']
        # Symmetric bracket: target and stop are 0.5 away from the last price.
        if direction == 'BUY':
            target, stoploss = last_price + 0.5, last_price - 0.5
        else:
            target, stoploss = last_price - 0.5, last_price + 0.5
        instrument.order(direction, 1, limit_price=last_price, target=target, initial_stop=stoploss, trail_stop_at=0, trail_stop_by=0, expiry=5)
        self.record(take_action=1)

    def on_bar(self, instrument):
        latest_bar = instrument.get_bars(lookback=1, as_dict=True)
        print('BAR:', latest_bar)
class OutputChangeNotify(rq.Event):
    """RandR OutputChangeNotify event: reports changes to an output's CRTC,
    mode, rotation, connection state or subpixel order.
    """
    # Event code; assigned when the extension is initialized.
    _code = None
    # Wire layout of the event, in protocol order.
    _fields = rq.Struct(rq.Card8('type'), rq.Card8('sub_code'), rq.Card16('sequence_number'), rq.Card32('timestamp'), rq.Card32('config_timestamp'), rq.Window('window'), rq.Card32('output'), rq.Card32('crtc'), rq.Card32('mode'), rq.Card16('rotation'), rq.Card8('connection'), rq.Card8('subpixel_order'))
def debounce(interval_s, keyed_by=None):
    """Decorator factory: delay calls to the wrapped function until
    *interval_s* seconds pass without another call.

    When *keyed_by* names a parameter, calls are debounced independently per
    value of that parameter; otherwise all calls share one timer. The wrapped
    function runs on a `threading.Timer` thread; `debounced` returns None.

    Fix: the original had a stray no-op `(func)` statement where
    `@functools.wraps(func)` belongs, so wrapped functions lost their
    metadata (`__name__`, docstring).
    """
    import functools

    def wrapper(func):
        timers = {}
        lock = threading.Lock()

        @functools.wraps(func)
        def debounced(*args, **kwargs):
            sig = inspect.signature(func)
            call_args = sig.bind(*args, **kwargs)
            # One timer per key; unkeyed functions share the single None key.
            key = call_args.arguments[keyed_by] if keyed_by else None

            def run():
                with lock:
                    del timers[key]
                return func(*args, **kwargs)

            with lock:
                old_timer = timers.get(key)
                if old_timer:
                    # A newer call supersedes the pending one.
                    old_timer.cancel()
                timer = threading.Timer(interval_s, run)
                timers[key] = timer
                timer.start()
        return debounced
    return wrapper
class AutoFeatureExtractorTest(unittest.TestCase):
    """Integration tests for AutoProcessor resolution: hub shortcuts, local
    directories, config hints, dynamic (remote-code) classes, and manual
    registration of custom processor classes."""

    # Minimal vocabulary used when a tokenizer file must be written on the fly.
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    def test_processor_from_model_shortcut(self):
        # A hub model id resolves straight to a Wav2Vec2Processor.
        processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        # Saving model config + processor locally and reloading keeps the class.
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h')
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        # A feature-extractor config plus a vocab file on disk is sufficient.
        with tempfile.TemporaryDirectory() as tmpdirname:
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, 'vocab.json'))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        # Remove processor_class from the tokenizer config: the feature
        # extractor config alone must still identify the processor class.
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h')
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            processor.save_pretrained(tmpdirname)
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), 'r') as f:
                config_dict = json.load(f)
            config_dict.pop('processor_class')
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), 'w') as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        # Symmetric case: drop processor_class from the feature-extractor
        # config; the tokenizer config must still identify the processor.
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h')
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            processor.save_pretrained(tmpdirname)
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), 'r') as f:
                config_dict = json.load(f)
            config_dict.pop('processor_class')
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), 'w') as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        # processor_class declared on the model config (with an empty feature
        # extractor config) is enough to resolve the processor.
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class='Wav2Vec2Processor')
            model_config.save_pretrained(tmpdirname)
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, 'vocab.json'))
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), 'w') as f:
                f.write('{}')
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # trust_remote_code loads processor/extractor/tokenizer classes shipped
        # with the repo itself.
        processor = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor', trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, 'NewProcessor')
        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')
            # use_fast=False must fall back to the slow remote tokenizer.
            processor = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor', trust_remote_code=True, use_fast=False)
            tokenizer = processor.tokenizer
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
        else:
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')

    def test_new_processor_registration(self):
        # Register custom config/extractor/tokenizer/processor classes, then
        # verify save/load round-trips through AutoProcessor. The finally
        # block removes the registrations so other tests are unaffected.
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Re-registering an already-mapped config must fail.
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, 'vocab.txt')
                with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                    vocab_writer.write(''.join([(x + '\n') for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)
            processor = CustomProcessor(feature_extractor, tokenizer)
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if ('custom' in CONFIG_MAPPING._extra_content):
                del CONFIG_MAPPING._extra_content['custom']
            if (CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content):
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if (CustomConfig in TOKENIZER_MAPPING._extra_content):
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if (CustomConfig in PROCESSOR_MAPPING._extra_content):
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
def conv2d(inputs, filters, kernel_size, strides, activation, is_training, scope):
    """Conv2D + batch-norm (+ optional activation) block under *scope*.

    Fix: when *activation* was None the original returned the raw conv
    output, silently discarding the batch-normalized tensor it had just
    computed; the normalized tensor is now returned in both cases.
    """
    with tf.variable_scope(scope):
        conv2d_output = tf.layers.conv2d(inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding='same')
        batch_norm_output = tf.layers.batch_normalization(conv2d_output, training=is_training, name='batch_norm')
        if (activation is not None):
            return activation(batch_norm_output)
        return batch_norm_output
def crop_img(img, meters_ahead=40, meters_behind=10, meters_left=25, meters_right=25, resolution=0.1):
    """Crop *img* to the agent-centric window given in meters; returns uint8."""
    # Side length (in pixels) of the square working area that can contain
    # the requested crop in any direction.
    largest_extent_m = max([meters_ahead, meters_behind, meters_left, meters_right]) * 2
    side_px = int(largest_extent_m / resolution)
    row_crop, col_crop = get_crops(meters_ahead, meters_behind, meters_left, meters_right, resolution, side_px)
    return img[row_crop, col_crop].astype('uint8')
def populate_storage_for_gc():
    """Fixture helper: write a dummy blob for every ImageStorage row, verify
    each exists, then yield to the test."""
    preferred = storage.preferred_locations[0]
    payload = b'hello world'
    for storage_row in ImageStorage.select():
        blob_path = storage.blob_path(storage_row.content_checksum)
        storage.put_content({preferred}, blob_path, payload)
        assert storage.exists({preferred}, blob_path)
    yield
def _execute_scenario(feature: Feature, scenario: Scenario, request: FixtureRequest) -> None:
    """Run every step of *scenario* in order, firing the pytest-bdd lifecycle
    hooks before/after and on step-lookup failure."""
    __tracebackhide__ = True
    hook = request.config.hook
    hook.pytest_bdd_before_scenario(request=request, feature=feature, scenario=scenario)
    try:
        for step in scenario.steps:
            context = get_step_function(request=request, step=step)
            if context is None:
                # No matching step definition: notify plugins, then fail.
                exc = exceptions.StepDefinitionNotFoundError(f'Step definition is not found: {step}. Line {step.line_number} in scenario "{scenario.name}" in the feature "{scenario.feature.filename}"')
                hook.pytest_bdd_step_func_lookup_error(request=request, feature=feature, scenario=scenario, step=step, exception=exc)
                raise exc
            _execute_step_function(request, scenario, step, context)
    finally:
        # The after-scenario hook runs even when a step raised.
        hook.pytest_bdd_after_scenario(request=request, feature=feature, scenario=scenario)
def ReinstallProtocolInterface(context, params):
    """EFI ReinstallProtocolInterface: replace an installed protocol interface
    on a handle. Returns EFI_NOT_FOUND if the handle or protocol is unknown."""
    handle = params['Handle']
    protocols_on_handle = context.protocols.get(handle)
    if protocols_on_handle is None:
        return EFI_NOT_FOUND
    protocol = params['Protocol']
    if protocol not in protocols_on_handle:
        return EFI_NOT_FOUND
    protocols_on_handle[protocol] = params['NewInterface']
    return EFI_SUCCESS
@pytest.mark.parametrize('q', [quantize(symmetric=True, initialized=True), quantize(symmetric=False, initialized=True), quantize_dequantize(symmetric=True, initialized=True), quantize_dequantize(symmetric=False, initialized=True)])
def test_forward(q: Union[(Quantize, QuantizeDequantize)], x: torch.Tensor):
    """Quantizer forward must match the reference backend implementation.

    Fix: the decorator line was truncated to `.parametrize(...)` (a syntax
    error); restored the full `@pytest.mark.parametrize` form.
    """
    output = q(x)
    if isinstance(q, Quantize):
        expected_output = get_backend().quantize(x, q.get_scale(), q.get_offset(), q.bitwidth)
    else:
        expected_output = get_backend().quantize_dequantize(x, q.get_scale(), q.get_offset(), q.bitwidth)
    assert torch.allclose(output, expected_output)
def add_precedence(plist):
    """Register yacc precedence declarations from *plist* into the global
    Precedence table.

    Each entry is ('left'|'right'|'nonassoc', term, ...); level increases
    with list position. Returns the number of errors, or -1 for an invalid
    associativity keyword.

    Fixes: `Precedence.has_key(t)` is Python 2 only -- replaced with the
    `in` operator; the bare `except:` is narrowed to `except Exception` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    plevel = 0
    error = 0
    for p in plist:
        plevel += 1
        try:
            prec = p[0]
            terms = p[1:]
            if (prec != 'left') and (prec != 'right') and (prec != 'nonassoc'):
                sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
                return (- 1)
            for t in terms:
                if t in Precedence:
                    sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
                    error += 1
                    continue
                Precedence[t] = (prec, plevel)
        except Exception:
            # Malformed entry (e.g. not indexable): count it and continue.
            sys.stderr.write('yacc: Invalid precedence table.\n')
            error += 1
    return error
class MultivariateNormalLikelihood(GaussianLikelihood):
    """Gaussian likelihood with a low-rank-plus-diagonal training-noise
    covariance: Sigma = F F^T + diag(noise), with F of shape (num_train, rank).

    Fixes relative to the original:
    - restored the mangled `@property`/setter decorators on `noise` and
      `noise_covar` (the second bare `def noise(self, value)` shadowed the
      getter, and the stray `_covar.setter` line raised NameError at class
      creation);
    - store `rank` on the instance (`noise_covar` reads `self.rank`, which
      was never assigned);
    - `noise_covar` referenced a nonexistent `task_noise_covar_factor`; it
      now uses the registered `noise_covar_factor` parameter.
    """

    def __init__(self, num_train: int, rank: int=1, batch_shape=torch.Size(), noise_covar_prior=None, noise_prior=None, noise_constraint=None):
        Likelihood.__init__(self)
        self.num_train = num_train
        self.rank = rank
        self.register_parameter(name='noise_covar_factor', parameter=torch.nn.Parameter(torch.randn(*batch_shape, num_train, rank)))
        if (noise_covar_prior is not None):
            self.register_prior('ErrorCovariancePrior', noise_covar_prior, (lambda m: m._eval_covar_matrix))
        self.register_parameter(name='raw_noise', parameter=torch.nn.Parameter(torch.zeros(*batch_shape, num_train)))
        if (noise_constraint is None):
            noise_constraint = Positive()
        self.register_constraint('raw_noise', noise_constraint)
        if (noise_prior is not None):
            self.register_prior('raw_noise_prior', noise_prior, (lambda m: m.noise))

    @property
    def noise(self):
        """Diagonal noise: the constrained transform of `raw_noise`."""
        return self.raw_noise_constraint.transform(self.raw_noise)

    def _set_noise(self, value: torch.Tensor) -> None:
        if (not torch.is_tensor(value)):
            value = torch.as_tensor(value).to(self.raw_noise)
        self.initialize(raw_noise=self.raw_noise_constraint.inverse_transform(value))

    @noise.setter
    def noise(self, value):
        self._set_noise(value)

    @property
    def noise_covar(self):
        """Low-rank factorized part of the covariance, F F^T (no diagonal)."""
        if (self.rank > 0):
            return self.noise_covar_factor.matmul(self.noise_covar_factor.transpose((- 1), (- 2)))
        else:
            raise AttributeError('Cannot retrieve task noises when covariance is diagonal.')

    @noise_covar.setter
    def noise_covar(self, value):
        if (self.rank > 0):
            self.noise_covar_factor.data = pivoted_cholesky(value, max_iter=self.rank)
        else:
            raise AttributeError('Cannot set non-diagonal task noises when covariance is diagonal.')

    def _eval_covar_matrix(self):
        # Dense training-noise covariance: F F^T + diag(noise).
        covar_factor = self.noise_covar_factor
        noise = self.noise.unsqueeze((- 1))
        D = (noise * torch.eye(self.num_train, dtype=noise.dtype, device=noise.device))
        return (covar_factor.matmul(covar_factor.transpose((- 1), (- 2))) + D)

    def marginal(self, function_dist, *params, **kwargs):
        """Add the noise covariance to the latent-function distribution."""
        (mean, covar) = (function_dist.mean, function_dist.lazy_covariance_matrix)
        if (self.training and (mean.shape[(- 1)] == self.num_train)):
            # Training on the full train set: use the dense low-rank + diag term.
            covar = (covar + self._eval_covar_matrix())
        elif (covar.shape[:(- 1)] == self.noise.shape):
            covar = covar.add_diag(self.noise)
        elif (covar.shape[:(- 1)] == self.noise.shape[1:]):
            covar = (covar.evaluate() + torch.diag_embed(self.noise))
        else:
            covar = covar.add_diag(self.noise[(..., 0)].unsqueeze((- 1)))
        return function_dist.__class__(mean, covar)

    def forward(self, function_samples, *params, **kwargs):
        if (self.training and (function_samples.shape[(- 1)] == self.num_train)):
            return MultivariateNormal(function_samples, self._eval_covar_matrix())
        else:
            # Outside training (or off-train-set), treat noise as independent.
            noise = self.noise.view(*self.noise.shape[:(- 1)], *function_samples.shape[(- 2):])
            return base_distributions.Independent(base_distributions.Normal(function_samples, noise.sqrt()), 1)
class Data(aslib.Data):
    """Boot-timing data container: tracks the kernel and user phases, their
    initcalls (treated as 'devices' for the timeline), and attached ftrace
    callgraphs."""
    # Class-level defaults; __init__ replaces the mutable ones per instance.
    dmesg = {}            # phase name -> {list, start, end, row, order, color}
    start = 0.0
    end = 0.0
    dmesgtext = []        # raw dmesg lines
    testnumber = 0
    idstr = ''
    html_device_id = 0    # monotonically increasing id for HTML elements
    valid = False
    tUserMode = 0.0       # kernel -> user mode transition time
    boottime = ''
    phases = ['kernel', 'user']
    do_one_initcall = False

    def __init__(self, num):
        self.testnumber = num
        self.idstr = 'a'
        self.dmesgtext = []
        # Fresh per-instance phase dict; start/end of -1.0 mean "unset".
        self.dmesg = {'kernel': {'list': dict(), 'start': (- 1.0), 'end': (- 1.0), 'row': 0, 'order': 0, 'color': 'linear-gradient(to bottom, #fff, #bcf)'}, 'user': {'list': dict(), 'start': (- 1.0), 'end': (- 1.0), 'row': 0, 'order': 1, 'color': '#fff'}}

    def deviceTopology(self):
        # Boot graphs carry no device-topology information.
        return ''

    def newAction(self, phase, name, pid, start, end, ret, ulen):
        """Register one initcall in *phase*; returns the (possibly
        de-duplicated) name actually stored."""
        self.html_device_id += 1
        devid = ('%s%d' % (self.idstr, self.html_device_id))
        list = self.dmesg[phase]['list']
        length = (- 1.0)
        if ((start >= 0) and (end >= 0)):
            length = (end - start)
        # Disambiguate repeated initcall names: name, name[2], name[3], ...
        i = 2
        origname = name
        while (name in list):
            name = ('%s[%d]' % (origname, i))
            i += 1
        list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid, 'length': length, 'row': 0, 'id': devid, 'ret': ret, 'ulen': ulen}
        return name

    def deviceMatch(self, pid, cg):
        """Attach callgraph *cg* to the initcall (same pid) whose time span it
        covers or falls inside; returns the device name or '' if none."""
        if ((cg.end - cg.start) == 0):
            return ''
        # NOTE(review): iterates `data.phases` (module-level), not
        # `self.phases` -- looks like a reference to a global Data instance;
        # confirm against the full tool source.
        for p in data.phases:
            list = self.dmesg[p]['list']
            for devname in list:
                dev = list[devname]
                if (pid != dev['pid']):
                    continue
                if (cg.name == 'do_one_initcall'):
                    # The callgraph encloses the device span: primary match.
                    if ((cg.start <= dev['start']) and (cg.end >= dev['end']) and (dev['length'] > 0)):
                        dev['ftrace'] = cg
                        self.do_one_initcall = True
                        return devname
                # The callgraph lies inside the device span: secondary trace.
                elif ((cg.start > dev['start']) and (cg.end < dev['end'])):
                    if ('ftraces' not in dev):
                        dev['ftraces'] = []
                    dev['ftraces'].append(cg)
                    return devname
        return ''

    def printDetails(self):
        """Verbose-print timeline metadata and per-phase initcall counts."""
        sysvals.vprint('Timeline Details:')
        sysvals.vprint((' Host: %s' % sysvals.hostname))
        sysvals.vprint((' Kernel: %s' % sysvals.kernel))
        sysvals.vprint((' Test time: %s' % sysvals.testtime))
        sysvals.vprint((' Boot time: %s' % self.boottime))
        for phase in self.phases:
            dc = len(self.dmesg[phase]['list'])
            sysvals.vprint(('%9s mode: %.3f - %.3f (%d initcalls)' % (phase, (self.dmesg[phase]['start'] * 1000), (self.dmesg[phase]['end'] * 1000), dc)))
def evaluate(preds, golds, entity_path):
    """Compute entity F1 (macro and micro, per domain) plus exact-match
    accuracy for KVR-style dialogue predictions.

    preds: generated response strings; golds: dicts holding the reference
    response, per-domain gold entity lists and the KB; entity_path: path to
    the global entity ontology JSON. Returns a summary dict of F1 scores.
    """
    print('STARTING EVALUATION')
    (acc, total) = (0, 0)
    # Maps evaluation domain -> key of the gold entity list in each sample.
    domain2kvr_name_domain = {'all': 'ent_index', 'calendar': 'ent_idx_cal', 'navigate': 'ent_idx_nav', 'weather': 'ent_idx_wet'}
    F1_pred = {'all': 0, 'calendar': 0, 'navigate': 0, 'weather': 0}
    F1_count = {'all': 0, 'calendar': 0, 'navigate': 0, 'weather': 0}
    TP = {'all': 0, 'calendar': 0, 'navigate': 0, 'weather': 0}
    FP = {'all': 0, 'calendar': 0, 'navigate': 0, 'weather': 0}
    FN = {'all': 0, 'calendar': 0, 'navigate': 0, 'weather': 0}
    with open(entity_path) as f:
        global_entity = json.load(f)
        global_entity_type = {}
        global_entity_list = []
        # Flatten the entity ontology; 'poi' entries are attribute dicts.
        for key in global_entity.keys():
            if (key != 'poi'):
                entity_arr = [item.lower().replace(' ', '_') for item in global_entity[key]]
                global_entity_list += entity_arr
                for entity in entity_arr:
                    global_entity_type[entity] = key
            else:
                for item in global_entity['poi']:
                    entity_arr = [item[k].lower().replace(' ', '_') for k in item.keys()]
                    global_entity_list += entity_arr
                    for key in item:
                        global_entity_type[item[key].lower().replace(' ', '_')] = key
        global_entity_list = list(set(global_entity_list))
    for (pred, gold) in zip(preds, golds):
        pred_sent = pred.lstrip().rstrip()
        gold_sent = gold['response'].lstrip().rstrip()
        # Accumulate precision/recall counts for every domain on each sample.
        for (domain, kvr_name) in domain2kvr_name_domain.items():
            (single_tp, single_fp, single_fn, single_f1, count) = compute_prf(gold[kvr_name], pred_sent.split(), global_entity_list, gold['kb_arr'])
            F1_pred[domain] += single_f1
            F1_count[domain] += count
            TP[domain] += single_tp
            FP[domain] += single_fp
            FN[domain] += single_fn
        total += 1
        # Exact string match counts toward accuracy.
        if (gold_sent == pred_sent):
            acc += 1
    acc_score = (acc / float(total))
    summary = {}
    # Macro F1: mean of per-sample F1 over samples that had gold entities.
    for domain in domain2kvr_name_domain.keys():
        summary['{}_F1_macro'.format(domain)] = (F1_pred[domain] / float(F1_count[domain]))
    P_score = {}
    R_score = {}
    # Micro F1: from globally accumulated TP/FP/FN counts.
    for domain in domain2kvr_name_domain.keys():
        P_score[domain] = ((TP[domain] / float((TP[domain] + FP[domain]))) if ((TP[domain] + FP[domain]) != 0) else 0)
        R_score[domain] = ((TP[domain] / float((TP[domain] + FN[domain]))) if ((TP[domain] + FN[domain]) != 0) else 0)
        summary['{}_F1_micro'.format(domain)] = compute_F1(P_score[domain], R_score[domain])
    return summary
def test_lookup__doesnt_exist(requests_mock):
    """Unknown controlled-term ids fall back to the stringified id as the term."""
    requests_mock.get(f'{API_V1}/controlled_terms', json=SAMPLE_DATA['get_controlled_terms'], status_code=200)
    client = iNatClient()
    pending = [Annotation(controlled_attribute_id=term_id) for term_id in [12, 999]]
    resolved = client.annotations.lookup(pending)
    assert len(resolved) == 2
    # id 12 exists in the fixture; 999 does not and keeps its numeric string.
    assert resolved[0].term == 'Plant Phenology'
    assert resolved[1].term == '999'
class Rectangles(rq.Request):
    """X Shape extension 'Rectangles' request: set or modify a window's shape
    region from a list of rectangles.
    """
    # Wire layout of the request, in protocol order; opcode 1 = ShapeRectangles.
    _request = rq.Struct(rq.Card8('opcode'), rq.Opcode(1), rq.RequestLength(), OP('operation'), KIND('destination_kind'), rq.Card8('ordering'), rq.Pad(1), rq.Window('destination_window'), rq.Int16('x_offset'), rq.Int16('y_offset'), rq.List('rectangles', structs.Rectangle, pad=0))
class Task2DatasetConCat(BaseDataset):
    """Dataset that concatenates the query encoding with selected product
    columns into a single token sequence per (query, product) example."""

    def __getitem__(self, index) -> Tuple:
        """Return (feature dict, esci label tensor, meta dict) for *index*."""
        (query_id, idx) = self.samples[index]
        product_id = self.database[self.split_dataset][query_id]['product_id'][idx]
        example_id = self.database[self.split_dataset][query_id]['example_id'][idx]
        dataset = torch.tensor([self.database[self.split_dataset][query_id]['dataset'][idx]], dtype=torch.long)[None]
        esci_label = torch.tensor([self.database[self.split_dataset][query_id]['esci_label'][idx]], dtype=torch.long)
        query_encode = _process_encoding(self.database[self.split_dataset][query_id]['query'], encode_map=self.cfg.model.encode)
        # Concatenate query tokens then each used product column's tokens,
        # recording the start offset of every appended segment.
        input_ids = [query_encode]
        input_ids_pos = [1]
        for name in self.used_col:
            if (name == 'product_id'):
                input_ids.append(_process_encoding(product_id, self.cfg.model.encode, name, self.token_map))
            else:
                arr = self.database['product_catalogue'][product_id][name]
                input_ids.append(_process_encoding(arr, self.cfg.model.encode, name, self.token_map))
            input_ids_pos.append(sum((len(x) for x in input_ids[:(- 1)])))
        input_ids = torch.cat(input_ids)
        # Clamp segment offsets that fall beyond the truncation limit to a
        # still-valid earlier offset (or the last in-bounds index).
        for i in range(len(input_ids_pos)):
            if (input_ids_pos[i] >= self.max_length):
                if (input_ids_pos[(- 2)] < self.max_length):
                    input_ids_pos[i] = input_ids_pos[(- 2)]
                elif (input_ids_pos[1] < self.max_length):
                    input_ids_pos[i] = input_ids_pos[1]
                else:
                    input_ids_pos[i] = (self.max_length - 1)
        input_ids_pos = torch.tensor(input_ids_pos, dtype=torch.long)[None]
        if (len(input_ids) > self.max_length):
            # Truncate but preserve the original final token (e.g. [SEP]).
            tail = input_ids[(- 1)]
            input_ids = input_ids[:self.max_length]
            input_ids[(- 1)] = tail
        token_type_ids = torch.zeros_like(input_ids)
        attention_mask = torch.ones_like(input_ids)
        # NOTE: 'speical_token_pos' spelling is part of the runtime contract.
        feature = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask, 'speical_token_pos': input_ids_pos, 'extra': dataset}
        meta = {'product_id': product_id, 'query_id': query_id, 'example_id': example_id, 'pad_token_id': self.cfg.model.pad_token_id, 'sample_length': self.sample_length[query_id]}
        return (feature, esci_label, meta)
def collate_fn(batch: List) -> dict:
    """Collate (feature, label, meta[, kd]) samples into padded batch tensors."""
    pad_token_id = batch[0][2]['pad_token_id']
    feats = [sample[0] for sample in batch]
    features = {
        'input_ids': pad_sequence([f['input_ids'] for f in feats], batch_first=True, padding_value=pad_token_id),
        'token_type_ids': pad_sequence([f['token_type_ids'] for f in feats], batch_first=True),
        'attention_mask': pad_sequence([f['attention_mask'] for f in feats], batch_first=True),
        # '[sic]' key spelling kept -- consumers look it up by this exact name.
        'speical_token_pos': torch.cat([f['speical_token_pos'] for f in feats]),
        'extra': torch.cat([f['extra'] for f in feats]),
    }
    label = torch.cat([sample[1] for sample in batch])
    meta = {
        'product_id': [sample[2]['product_id'] for sample in batch],
        'example_id': [sample[2]['example_id'] for sample in batch],
        'query_id': [sample[2]['query_id'] for sample in batch],
        'sample_length': torch.tensor([sample[2]['sample_length'] for sample in batch], dtype=torch.float),
    }
    output = {'features': features, 'label': label, 'meta': meta}
    # A fourth element per sample carries knowledge-distillation targets.
    if len(batch[0]) == 4:
        output['kd'] = torch.stack([sample[3] for sample in batch])
    return output
def test_multiple_variables_merge_override(testdir, file_format):
    """Later variables files are merged over earlier ones: existing keys under
    'capabilities' survive while new keys are added."""
    testdir.makepyfile("\n    def test(variables):\n        assert variables['capabilities']['browser'] == 'Firefox'\n        assert variables['capabilities']['browser_version'] == '53.0'\n        assert variables['capabilities']['debug'] == 'true'\n    ")
    layered_variables = [{'capabilities': {'browser': 'Firefox', 'browser_version': '53.0'}}, {'capabilities': {'debug': 'true'}}]
    outcome = run(testdir, file_format, variables=layered_variables)
    assert outcome.ret == 0
class RCAB(nn.Module):
    """Residual Channel Attention Block: two convolutions (optional batch norm,
    activation after the first), channel attention, and a residual skip."""

    def __init__(self, conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(RCAB, self).__init__()
        layers = []
        for layer_idx in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            # Activation only between the two convolutions.
            if layer_idx == 0:
                layers.append(act)
        layers.append(CALayer(n_feat, reduction))
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale  # kept for API compatibility (unused in forward)

    def forward(self, x):
        residual = self.body(x)
        return residual + x
class POS(TokenClassificationTask):
    """Universal Dependencies POS-tagging task over CoNLL-U formatted files."""

    def read_examples_from_file(self, data_dir, mode: Union[(Split, str)]) -> List[InputExample]:
        """Parse `{mode}.txt` in *data_dir* into InputExamples (one per sentence)."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            for sentence in parse_incr(f):
                words = [token['form'] for token in sentence]
                labels = [token['upos'] for token in sentence]
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write one line per sentence as `form (gold|pred) ...` pairs."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            predictions = preds_list[example_id]
            line = ''
            for token in sentence:
                line += f"{token['form']} ({token['upos']}|{predictions.pop(0)}) "
            writer.write(line + '\n')
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read labels from *path* (one per line) or fall back to the UPOS set."""
        if not path:
            return ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X']
        with open(path, 'r') as f:
            return f.read().splitlines()
class BundlerManager():
    """Registry of bundler classes, keyed by their lower-cased name."""

    def __init__(self) -> None:
        # Imported lazily to avoid a circular import at module load time.
        from poetry_plugin_bundle.bundlers.venv_bundler import VenvBundler
        self._bundler_classes: dict[(str, type[Bundler])] = {}
        self.register_bundler_class(VenvBundler)

    def bundler(self, name: str) -> Bundler:
        """Instantiate the bundler registered under *name* (case-insensitive)."""
        key = name.lower()
        if key not in self._bundler_classes:
            raise BundlerManagerError(f'The bundler class "{name}" does not exist.')
        return self._bundler_classes[key]()

    def register_bundler_class(self, bundler_class: type[Bundler]) -> BundlerManager:
        """Register *bundler_class* under its declared name; returns self."""
        if not bundler_class.name:
            raise BundlerManagerError('A bundler class must have a name')
        key = bundler_class.name.lower()
        if key in self._bundler_classes:
            raise BundlerManagerError(f'A bundler class with the name "{bundler_class.name}" already exists.')
        self._bundler_classes[key] = bundler_class
        return self
def remote_sync(local_dir, remote_dir, protocol):
    """Sync *local_dir* to *remote_dir* via 's3' or 'fsspec'.

    Returns the chosen backend's result, or False (after logging an error)
    for an unrecognized protocol.
    """
    logging.info('Starting remote sync.')
    if protocol == 's3':
        return remote_sync_s3(local_dir, remote_dir)
    if protocol == 'fsspec':
        return remote_sync_fsspec(local_dir, remote_dir)
    logging.error('Remote protocol not known')
    return False
def parse_args():
    """Build and evaluate the CLI for PASCAL Context dataset preparation."""
    arg_parser = argparse.ArgumentParser(description='Initialize PASCAL Context dataset.', epilog='Example: python prepare_pcontext.py', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--download-dir', default=None, help='dataset directory on disk')
    return arg_parser.parse_args()
def processForClause(c, table, prior_lcs, prior_globs):
    """Evaluate a query 'for' clause over each tuple of *table*, yielding the
    input tuples extended with the clause variables.

    c: clause with .expr (iterable-producing expression), .vars (bound
    variable names) and .unpack (destructuring pattern for multi-var form).
    WARNING: uses eval() on clause-derived expressions; only safe for
    trusted query input.
    """
    new_schema = None
    comp_expr = compile(c.expr.lstrip(), '<string>', 'eval')
    for t in table:
        if (not new_schema):
            # Extend the incoming schema once with the clause variables,
            # appended after the existing columns.
            new_schema = dict(t.schema)
            for (i, v) in enumerate(c.vars):
                new_schema[v] = (len(t.schema) + i)
        # The clause expression sees prior locals plus the tuple's fields.
        lcs = dict(prior_lcs)
        lcs.update(t.getDict())
        vals = eval(comp_expr, prior_globs, lcs)
        if (len(c.vars) == 1):
            # Single variable: append each produced value directly.
            for v in vals:
                new_t_data = list(t.tuple)
                new_t_data.append(v)
                new_t = PQTuple(new_t_data, new_schema)
                (yield new_t)
        else:
            # Multiple variables: destructure each value via the clause's
            # unpack pattern, then append the components in order.
            for v in vals:
                unpack_expr = ('[ %s for %s in [ __v ]]' % ((('(' + ','.join(c.vars)) + ')'), c.unpack))
                unpacked_vals = eval(unpack_expr, prior_globs, {'__v': v})
                new_t_data = list(t.tuple)
                for tv in unpacked_vals[0]:
                    new_t_data.append(tv)
                new_t = PQTuple(new_t_data, new_schema)
                (yield new_t)
def get_files_from_regex(path):
    """Expand a glob-style *path* (its basename is the pattern) into the list
    of matching file paths, matched case-insensitively."""
    folder = dirname(path) or '.'
    glob_pattern = basename(path)
    # fnmatch.translate converts the shell glob into a regular expression.
    matcher = compile(translate(glob_pattern), IGNORECASE)
    return [join(folder, entry) for entry in os.listdir(folder) if matcher.fullmatch(entry)]
class ScaledDotProductAttention(nn.Module):
    """Attention(Q, K, V) = softmax(Q K^T / temperature) V, with dropout
    applied to the attention weights."""

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        if mask is not None:
            # Masked positions become -inf so softmax gives them zero weight.
            scores = scores.masked_fill(mask, (- np.inf))
        weights = self.dropout(self.softmax(scores))
        output = torch.bmm(weights, v)
        return (output, weights)
def flow_model(args, in_channels):
    """Build a normalizing flow over *in_channels* features as a stack of
    `args.coupling_layers` AllInOne coupling blocks."""
    print('Normalizing Flow => Feature Dimension: ', in_channels)
    coder = Ff.SequenceINN(in_channels)
    for _ in range(args.coupling_layers):
        coder.append(Fm.AllInOneBlock, subnet_constructor=subnet_fc, affine_clamping=args.clamp_alpha, global_affine_type='SOFTPLUS', permute_soft=True)
    return coder
def test_return_value_consistency():
    """memory_usage returns a list of samples, or a float with max_usage=True,
    for both process and callable targets.

    Fix: `type(x) == T` comparisons replaced with `isinstance`, the idiomatic
    type check (also tolerant of subclasses).
    """
    pid_mem_list = memory_usage(timeout=1)
    assert isinstance(pid_mem_list, list), 'Memory usage of process should be a list'
    pid_mem_max = memory_usage(timeout=1, max_usage=True)
    assert isinstance(pid_mem_max, float), 'Max memory usage of process should be a number'
    func_mem_list = memory_usage((some_func, (42,), dict(a=42)))
    assert isinstance(func_mem_list, list), 'Memory usage of callable should be a list'
    func_mem_max = memory_usage((some_func, (42,), dict(a=42)), max_usage=True)
    assert isinstance(func_mem_max, float), 'Max memory usage of callable should be a number'
def test_run_model_from_poa(sapm_dc_snl_ac_system, location, total_irrad):
    """ModelChain.run_model_from_poa reproduces the reference AC output."""
    chain = ModelChain(sapm_dc_snl_ac_system, location,
                       aoi_model='no_loss', spectral_model='no_loss')
    result = chain.run_model_from_poa(total_irrad)
    expected = pd.Series(np.array([149.280238, 96.678385]), index=total_irrad.index)
    assert_series_equal(result.results.ac, expected)
def set_datapipes_seed(datapipes: List[DataPipe], seed_generator: SeedGenerator, distributed_shared: bool) -> None:
    """Seed every random DataPipe in *datapipes*.

    When *distributed_shared* is true the shared (rank-identical) seed is
    used so all distributed workers agree; otherwise each pipe receives
    its own freshly generated seed.
    """
    for pipe in datapipes:
        if not _is_random_datapipe(pipe):
            continue
        seed = (seed_generator.generate_shared_seed()
                if distributed_shared
                else seed_generator.generate_seed())
        pipe.set_seed(seed)
def copy_data(input_file, destination_dir, num_threads, tmp_destination_dir):
    """Copy *input_file* (a file or a directory) into *destination_dir*.

    An empty or None destination is normalized to None (no directory is
    created). Returns ``(output_file, output_dir)`` from the underlying
    copy helper; raises RuntimeError when the input is neither a file nor
    a directory.
    """
    logging.info(f'Creating directory: {destination_dir}')
    if destination_dir is None or destination_dir == '':
        destination_dir = None
    else:
        makedir(destination_dir)
    if PathManager.isfile(input_file):
        output_file, output_dir = copy_file(input_file, destination_dir, tmp_destination_dir)
    elif PathManager.isdir(input_file):
        output_file, output_dir = copy_dir(input_file, destination_dir, num_threads)
    else:
        raise RuntimeError('The input_file is neither a file nor a directory')
    return (output_file, output_dir)
def _process_target_sentence(tokens: List[str], origin_sentence: str, target_sentence: str, max_length: int, label_map: dict, tokenizer: BertTokenizer, cls_token_at_end: Optional[bool]=False):
    """Build padded NER label ids aligned to the WordPiece tokens.

    The target sentence marks entities as ``<text:TAG>`` where TAG is a
    three-letter class. Character offsets of every entity are recovered
    from the whitespace-stripped target, mapped onto token start offsets,
    and emitted as B-/I-/O labels; the sequence is then truncated to
    ``max_length``, wrapped with CLS/SEP, padded, and mapped to ids.

    NOTE(review): assumes each annotation wrapper contributes exactly 6
    extra characters ('<', ':', 3-letter tag, '>') — confirm against the
    annotation format used by the dataset.
    """
    # WordPiece emits '[UNK]' for text it cannot split; in that case fall
    # back to the basic (whitespace/punctuation) token so character
    # offsets stay aligned with the original sentence.
    if ('[UNK]' in tokens):
        processed_tokens = []
        basic_tokens = tokenizer.basic_tokenizer.tokenize(origin_sentence)
        for basic_token in basic_tokens:
            current_tokens = tokenizer.tokenize(basic_token)
            if ('[UNK]' in current_tokens):
                processed_tokens.append(basic_token)
            else:
                processed_tokens.extend(current_tokens)
    else:
        processed_tokens = tokens
    # prefix_sum_of_token_start_index[i] = character offset (spaces
    # ignored) at which token i starts; '##' continuation markers do not
    # consume source characters.  NOTE: `sum` shadows the builtin.
    (prefix_sum_of_token_start_index, sum) = ([0], 0)
    for (i, token) in enumerate(processed_tokens):
        if token.startswith('##'):
            sum += (len(token) - 2)
        else:
            sum += len(token)
        prefix_sum_of_token_start_index.append(sum)
    # Entities look like <text:TAG>; match against the target with all
    # spaces removed so offsets are space-free like the prefix sums above.
    regex_ner = re.compile('<(.+?):[A-Z]{3}>')
    regex_filter_res = regex_ner.finditer(target_sentence.replace(' ', ''))
    list_of_ner_tag = []
    list_of_ner_text = []
    list_of_tuple_ner_start_end = []
    count_of_match = 0
    for match_item in regex_filter_res:
        # Tag is the 3 letters just before the closing '>'.
        ner_tag = match_item[0][(- 4):(- 1)]
        ner_text = match_item[1]
        # Each earlier match added 6 wrapper chars; subtract them to get
        # offsets in the unannotated text.
        start_index = (match_item.start() - (6 * count_of_match))
        end_index = ((match_item.end() - 6) - (6 * count_of_match))
        list_of_ner_tag.append(ner_tag)
        list_of_ner_text.append(ner_text)
        list_of_tuple_ner_start_end.append((start_index, end_index))
        count_of_match += 1
    # Walk tokens in offset order, emitting B- for the first token of an
    # entity, I- for its continuation, and O elsewhere.
    label_sequence = []
    entity_index = 0
    is_entity_still_B = True
    for tup in zip(processed_tokens, prefix_sum_of_token_start_index):
        (token, index) = tup
        if (entity_index < len(list_of_tuple_ner_start_end)):
            (start, end) = list_of_tuple_ner_start_end[entity_index]
            if (end < index):
                # Current entity is fully behind us — advance to the next
                # one (clamped at the last entity).
                is_entity_still_B = True
                entity_index = ((entity_index + 1) if ((entity_index + 1) < len(list_of_tuple_ner_start_end)) else entity_index)
                (start, end) = list_of_tuple_ner_start_end[entity_index]
            if ((start <= index) and (index < end)):
                entity_tag = list_of_ner_tag[entity_index]
                if (is_entity_still_B is True):
                    entity_tag = ('B-' + entity_tag)
                    label_sequence.append(entity_tag)
                    is_entity_still_B = False
                else:
                    entity_tag = ('I-' + entity_tag)
                    label_sequence.append(entity_tag)
            else:
                is_entity_still_B = True
                entity_tag = 'O'
                label_sequence.append(entity_tag)
        else:
            entity_tag = 'O'
            label_sequence.append(entity_tag)
    # Reserve two slots for the special tokens, then attach them in the
    # layout the downstream model expects (CLS first, or CLS/SEP at end).
    label_sequence = label_sequence[:(max_length - 2)]
    if cls_token_at_end:
        label_sequence = (label_sequence + [NER_CLS_TOKEN, NER_SEP_TOKEN])
    else:
        label_sequence = (([NER_CLS_TOKEN] + label_sequence) + [NER_SEP_TOKEN])
    # Right-pad to max_length and convert labels to ids.
    pad_length = max((max_length - len(label_sequence)), 0)
    pad_sequence = ([NER_PAD_TOKEN] * pad_length)
    label_sequence += pad_sequence
    label_ids = [label_map[label] for label in label_sequence]
    return label_ids
def derivatives_in_prolate_spheroidal_coordinates():
    """Demo: print derivatives of generic fields in prolate spheroidal
    coordinates using a normalized galgebra frame. Output via print/Fmt
    side effects only; returns None.
    """
    a = symbols('a', real=True)
    coords = (xi, eta, phi) = symbols('xi eta phi', real=True)
    # Build the geometric algebra from the Cartesian embedding
    # x = a sinh(xi) sin(eta) cos(phi), y = a sinh(xi) sin(eta) sin(phi),
    # z = a cosh(xi) cos(eta), with the frame vectors normalized.
    (ps3d, er, eth, ephi) = Ga.build('e_xi e_eta e_phi', X=[(((a * sinh(xi)) * sin(eta)) * cos(phi)), (((a * sinh(xi)) * sin(eta)) * sin(phi)), ((a * cosh(xi)) * cos(eta))], coords=coords, norm=True)
    grad = ps3d.grad
    # Generic scalar, vector and bivector fields of the coordinates.
    f = ps3d.mv('f', 'scalar', f=True)
    A = ps3d.mv('A', 'vector', f=True)
    B = ps3d.mv('B', 'bivector', f=True)
    print('#Derivatives in Prolate Spheroidal Coordinates')
    print('f =', f)
    print('A =', A)
    print('B =', B)
    print('grad*f =', (grad * f))
    print('grad|A =', (grad | A))
    # Fmt(3, ...) pretty-prints; the returned values are discarded.
    ((- ps3d.i) * (grad ^ A)).Fmt(3, '-I*(grad^A)')
    (grad ^ B).Fmt(3, 'grad^B')
    return
class Solution():
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Return True if *ransomNote* can be assembled from *magazine*.

        Each magazine letter may be used at most once. Uses
        collections.Counter instead of a hand-rolled count dict.
        """
        from collections import Counter
        # Counter subtraction drops non-positive counts, so the result is
        # empty exactly when the magazine covers every required letter.
        return not (Counter(ransomNote) - Counter(magazine))
class webvision_dataloader():
    """Factory for WebVision train/test and ImageNet-val DataLoaders.

    ``run(mode)`` builds the dataset and loader for the requested mode:
    'warmup' (shuffled training set), 'test' (WebVision val) or
    'imagenet' (ImageNet val). Other modes return None.
    """

    def __init__(self, batch_size, num_class, num_workers, root_dir, log):
        self.batch_size = batch_size
        self.num_class = num_class
        self.num_workers = num_workers
        self.root_dir = root_dir
        self.log = log
        # Training: random crop/flip augmentation; evaluation transforms
        # use a deterministic center crop. All normalize with ImageNet stats.
        self.transform_train = transforms.Compose([transforms.Resize(320), transforms.RandomResizedCrop(299), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        self.transform_test = transforms.Compose([transforms.Resize(320), transforms.CenterCrop(299), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        self.transform_imagenet = transforms.Compose([transforms.Resize(320), transforms.CenterCrop(299), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

    def run(self, mode, pred=None, prob=None):
        """Return the DataLoader for *mode*.

        ``pred``/``prob`` are accepted for API compatibility but unused
        here. (They previously defaulted to mutable lists — a Python
        anti-pattern — and now default to None.)
        """
        if (mode == 'warmup'):
            all_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='all', num_class=self.num_class)
            trainloader = DataLoader(dataset=all_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
            return trainloader
        elif (mode == 'test'):
            test_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='test', num_class=self.num_class)
            # Evaluation uses a much larger batch since no gradients are kept.
            test_loader = DataLoader(dataset=test_dataset, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
            return test_loader
        elif (mode == 'imagenet'):
            imagenet_val = imagenet_dataset(root_dir=self.root_dir, transform=self.transform_imagenet, num_class=self.num_class)
            imagenet_loader = DataLoader(dataset=imagenet_val, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
            return imagenet_loader
def sbml_translator(input_file, output_file=None, convention_file=None, naming_conventions=None, user_structures=None, molecule_id=False, atomize=False, pathway_commons=False, verbose=False):
    """Run the BioNetGen SBML-to-BNGL translator (atomizer) on *input_file*.

    Keyword arguments mirror the atomizer CLI flags. When *output_file* is
    None, the input path with a ``.bngl`` extension is used. Returns the
    output file path; raises SbmlTranslationError when the subprocess
    exits with a non-zero return code.
    """
    logger = get_logger(__name__, log_level=verbose)
    sbmltrans_bin = pf.get_path('atomizer')
    sbmltrans_args = [sbmltrans_bin, '-i', input_file]
    if (output_file is None):
        output_file = (os.path.splitext(input_file)[0] + '.bngl')
    sbmltrans_args.extend(['-o', output_file])
    if convention_file:
        sbmltrans_args.extend(['-c', convention_file])
    if naming_conventions:
        sbmltrans_args.extend(['-n', naming_conventions])
    if user_structures:
        sbmltrans_args.extend(['-u', user_structures])
    if molecule_id:
        sbmltrans_args.append('-id')
    if atomize:
        sbmltrans_args.append('-a')
    if pathway_commons:
        sbmltrans_args.append('-p')
    logger.debug(('sbmlTranslator command: ' + ' '.join(sbmltrans_args)))
    p = subprocess.Popen(sbmltrans_args, cwd=os.getcwd(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if (logger.getEffectiveLevel() <= EXTENDED_DEBUG):
        # BUG FIX: Popen pipes yield *bytes* lines, so the previous
        # '\n'.join(...) over bytes raised TypeError whenever extended
        # debugging was active. Decode each line; readline already keeps
        # trailing newlines, so concatenate without a separator.
        output = ''.join(line.decode('utf-8') for line in iter(p.stdout.readline, b''))
        if output:
            logger.log(EXTENDED_DEBUG, ('sbmlTranslator output:\n\n' + output))
    # communicate() collects any remaining output and waits for exit.
    (p_out, p_err) = p.communicate()
    if p.returncode:
        raise SbmlTranslationError(((p_out.decode('utf-8') + '\n') + p_err.decode('utf-8')))
    return output_file
def DecodeBase58Check(psz: Union[(bytes, str)]) -> bytes:
    """Base58-decode *psz* and verify its trailing 4-byte sha256d checksum.

    Returns the payload (decoded bytes minus the checksum); raises
    InvalidChecksum when the checksum does not match.
    """
    decoded = base_decode(psz, base=58)
    payload, csum_found = decoded[:-4], decoded[-4:]
    csum_calculated = sha256d(payload)[:4]
    if (csum_calculated == csum_found):
        return payload
    raise InvalidChecksum(f'calculated {csum_calculated.hex()}, found {csum_found.hex()}')
# NOTE(review): the bare `_tf` statement below looks like a mangled
# decorator (presumably `@require_tf` or similar) — confirm against the
# original upstream source.
_tf
class TFXLMModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-API tests for the TensorFlow XLM model family.

    Delegates the actual model construction and checks to
    TFXLMModelTester; class lists are empty when TensorFlow is absent.
    """
    # Model classes exercised by the shared TFModelTesterMixin tests.
    all_model_classes = ((TFXLMModel, TFXLMWithLMHeadModel, TFXLMForSequenceClassification, TFXLMForQuestionAnsweringSimple, TFXLMForTokenClassification, TFXLMForMultipleChoice) if is_tf_available() else ())
    all_generative_model_classes = ((TFXLMWithLMHeadModel,) if is_tf_available() else ())
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        # Helpers that build tiny configs/inputs for each check below.
        self.model_tester = TFXLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def test_model_from_pretrained(self):
        # Only the first archive entry is downloaded to keep the test fast.
        for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def test_tc_bit_defers():
    """Responses to truncated (TC-bit) queries are deferred until the
    final packet of the multi-packet query arrives.

    Registers three services, sends an oversized PTR query split into 4
    packets, and checks that the responder accumulates the first packets
    in ``_deferred`` (with a timer armed) and clears both once the last
    packet — the one without the TC bit — is processed.
    """
    zc = Zeroconf(interfaces=['127.0.0.1'])
    _wait_for_start(zc)
    type_ = '_tcbitdefer._tcp.local.'
    name = 'knownname'
    name2 = 'knownname2'
    name3 = 'knownname3'
    registration_name = f'{name}.{type_}'
    registration2_name = f'{name2}.{type_}'
    registration3_name = f'{name3}.{type_}'
    desc = {'path': '/~paulsm/'}
    server_name = 'ash-2.local.'
    server_name2 = 'ash-3.local.'
    server_name3 = 'ash-4.local.'
    info = r.ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton('10.0.1.2')])
    info2 = r.ServiceInfo(type_, registration2_name, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton('10.0.1.2')])
    info3 = r.ServiceInfo(type_, registration3_name, 80, 0, 0, desc, server_name3, addresses=[socket.inet_aton('10.0.1.2')])
    zc.registry.async_add(info)
    zc.registry.async_add(info2)
    zc.registry.async_add(info3)
    protocol = zc.engine.protocols[0]
    now = r.current_time_millis()
    _clear_cache(zc)
    # Build a query large enough to be split into multiple packets: the
    # repeated known answers force truncation.
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    for _ in range(300):
        generated.add_answer_at_time(info.dns_pointer(), now)
        generated.add_answer_at_time(info2.dns_pointer(), now)
        generated.add_answer_at_time(info3.dns_pointer(), now)
    packets = generated.packets()
    assert (len(packets) == 4)
    expected_deferred = []
    source_ip = '203.0.113.13'
    # Packet 1: should be queued, not answered.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert (protocol._deferred[source_ip] == expected_deferred)
    assert (source_ip in protocol._timers)
    # Packet 2: also queued.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert (protocol._deferred[source_ip] == expected_deferred)
    assert (source_ip in protocol._timers)
    # Re-sending the same packet must not queue a duplicate.
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert (protocol._deferred[source_ip] == expected_deferred)
    assert (source_ip in protocol._timers)
    # Packet 3: still deferred.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert (protocol._deferred[source_ip] == expected_deferred)
    assert (source_ip in protocol._timers)
    # Final packet (no TC bit): the accumulated query is answered and the
    # deferred state plus its timer are cleaned up.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert (source_ip not in protocol._deferred)
    assert (source_ip not in protocol._timers)
    zc.unregister_service(info)
    zc.close()
def test_text_battery_charging(monkeypatch):
    """A charging battery renders as '^ <pct>% <h:mm> <power> W'."""
    status = BatteryStatus(state=BatteryState.CHARGING, percent=0.5, power=15.0, time=1729)
    with monkeypatch.context() as ctx:
        ctx.setattr(battery, 'load_battery', dummy_load_battery(status))
        widget = Battery()
        assert widget.poll() == '^ 50% 0:28 15.00 W'
def get_display_opts(options, argv=sys.argv):
    """Parse X resources and command-line options for an Xlib app.

    Returns ``(display, appname, resource_db, leftover_argv)`` where the
    resource DB merges the root window's RESOURCE_MANAGER property with
    the command-line options (command line wins).
    """
    from Xlib import display, Xatom
    import os
    appname = os.path.splitext(os.path.basename(argv[0]))[0]
    optdb = ResourceDB()
    leftargv = optdb.getopt(appname, argv[1:], options)
    dname = optdb.get(appname + '.display', appname + '.Display', None)
    d = display.Display(dname)
    rdbstring = d.screen(0).root.get_full_property(Xatom.RESOURCE_MANAGER, Xatom.STRING)
    data = rdbstring.value if rdbstring else None
    db = ResourceDB(string=data)
    db.update(optdb)
    return (d, appname, db, leftargv)
def get_compiled_3_regular_maxcut_circuit(problem: ThreeRegularProblem, device: cirq.Device, gammas: Sequence[float], betas: Sequence[float]) -> Tuple[(List[cirq.Qid], cirq.Circuit, List[cirq.Qid])]:
    """Route a QAOA MaxCut circuit for *problem* onto *device*, append a
    measurement of all final qubits (key 'z'), and compile for Sycamore.

    Returns ``(initial_qubits, compiled_circuit, final_qubits)``.
    """
    (initial_qubits, circuit, final_qubits) = get_routed_3_regular_maxcut_circuit(problem_graph=problem.graph, device=device, gammas=gammas, betas=betas)
    circuit.append(cirq.measure(*final_qubits, key='z'))
    # Rewrite gates into the Sycamore-native gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sycamore')
    return (initial_qubits, circuit, final_qubits)
class F27_Url(F18_Url):
    """Kickstart ``url`` command for Fedora 27.

    Extends F18 by adding ``--metalink`` as a third mutually-exclusive
    installation source alongside ``--url`` and ``--mirrorlist``.
    """
    removedKeywords = F18_Url.removedKeywords
    removedAttrs = F18_Url.removedAttrs
    def __init__(self, *args, **kwargs):
        F18_Url.__init__(self, *args, **kwargs)
        # New in F27: metalink source URL.
        self.metalink = kwargs.get('metalink', None)
        # Exactly one of url/mirrorlist/metalink may be supplied.
        self.exclusive_required_options.append(('metalink', '--metalink'))
    def __eq__(self, other):
        if (not F18_Url.__eq__(self, other)):
            return False
        return (self.metalink == other.metalink)
    def __str__(self):
        # Regenerate the kickstart line; only one source option is emitted.
        retval = KickstartCommand.__str__(self)
        if (not self.seen):
            return retval
        retval += '# Use network installation\n'
        if self.url:
            retval += ('url --url="%s"' % self.url)
        elif self.mirrorlist:
            retval += ('url --mirrorlist="%s"' % self.mirrorlist)
        elif self.metalink:
            retval += ('url --metalink="%s"' % self.metalink)
        if self.proxy:
            retval += (' --proxy="%s"' % self.proxy)
        if self.noverifyssl:
            retval += ' --noverifyssl'
        return (retval + '\n')
    def _getParser(self):
        # Re-register --url with updated help text and add --metalink.
        op = F18_Url._getParser(self)
        op.add_argument('--url', version=F27, help='\n                            Only one of the --url, --mirrorlist or --metalink can\n                            be specified.'.replace('                            ', ' ') if False else '\n Only one of the --url, --mirrorlist or --metalink can\n be specified.')
        op.add_argument('--metalink', metavar='URL', version=F27, help='\n The metalink URL to install from. Variable substitution\n is done for $releasever and $basearch in the url.')
        return op
class RetinaNetLossComputation(RPNLossComputation):
    """Computes RetinaNet losses: sigmoid focal loss for classification
    and smooth-L1 for box regression (positives only).
    """
    def __init__(self, proposal_matcher, box_coder, generate_labels_func, sigmoid_focal_loss, bbox_reg_beta=0.11, regress_norm=1.0):
        self.proposal_matcher = proposal_matcher
        self.box_coder = box_coder
        self.box_cls_loss_func = sigmoid_focal_loss
        self.bbox_reg_beta = bbox_reg_beta
        # Target fields copied from ground truth during matching.
        self.copied_fields = ['labels']
        self.generate_labels_func = generate_labels_func
        # Anchors between the fg/bg IoU thresholds are ignored.
        self.discard_cases = ['between_thresholds']
        self.regress_norm = regress_norm
    def __call__(self, anchors, box_cls, box_regression, targets):
        """Return ``(cls_loss, reg_loss)`` for one batch.

        *anchors* is a list (per image) of lists (per feature level) of
        BoxLists; predictions are flattened across levels before the loss.
        """
        anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
        (labels, regression_targets) = self.prepare_targets(anchors, targets)
        N = len(labels)
        (box_cls, box_regression) = concat_box_prediction_layers(box_cls, box_regression)
        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)
        # Regression loss only over positive (foreground) anchors.
        pos_inds = torch.nonzero((labels > 0)).squeeze(1)
        retinanet_regression_loss = (smooth_l1_loss(box_regression[pos_inds], regression_targets[pos_inds], beta=self.bbox_reg_beta, size_average=False) / max(1, (pos_inds.numel() * self.regress_norm)))
        labels = labels.int()
        # Normalize the focal loss by (num positives + num images).
        retinanet_cls_loss = (self.box_cls_loss_func(box_cls, labels) / (pos_inds.numel() + N))
        return (retinanet_cls_loss, retinanet_regression_loss)
def ex_config():
    # Sacred config function: every local variable below becomes an
    # experiment config entry (sacred captures them from this scope), so
    # the code must stay as plain assignments.
    num_epochs = 20
    patience = 100
    batch_size = 32
    latent_dim = 64
    # SOM grid dimensions (rows x cols).
    som_dim = [8, 8]
    learning_rate = 0.0005
    # Loss-weighting hyperparameters.
    alpha = 1.0
    beta = 0.9
    gamma = 1.8
    tau = 1.4
    decay_factor = 0.9
    # Unique run name: experiment, latent/SOM dims, date, short uuid.
    name = ex.get_experiment_info()['name']
    ex_name = '{}_{}_{}-{}_{}_{}'.format(name, latent_dim, som_dim[0], som_dim[1], str(date.today()), uuid.uuid4().hex[:5])
    logdir = '../logs/{}'.format(ex_name)
    modelpath = '../models/{}/{}.ckpt'.format(ex_name, ex_name)
    interactive = True
    data_set = 'MNIST_data'
    save_model = False
    time_series = True
    mnist = True
def duration(entry, option_key='Duration', **kwargs):
    """Parse a duration string like ``"1w 2d 3h 4m 5s"`` into a timedelta.

    Each whitespace-separated token is an integer followed by a unit
    suffix: s, m, h, d, w, or y (a year counts as 365 days). Units are
    matched case-insensitively and repeated units accumulate.

    Raises ValueError (mentioning *option_key*) for unrecognized tokens.

    Fixes: the original wrote ``seconds = (+ int(...))`` — unary plus,
    not ``+=`` — so repeated units overwrote each other; and only the
    's' unit was matched case-insensitively.
    """
    seconds = minutes = hours = days = weeks = 0
    for interval in entry.split(' '):
        token = interval.lower()
        if _re.match('^[\\d]+s$', token):
            seconds += int(token.rstrip('s'))
        elif _re.match('^[\\d]+m$', token):
            minutes += int(token.rstrip('m'))
        elif _re.match('^[\\d]+h$', token):
            hours += int(token.rstrip('h'))
        elif _re.match('^[\\d]+d$', token):
            days += int(token.rstrip('d'))
        elif _re.match('^[\\d]+w$', token):
            weeks += int(token.rstrip('w'))
        elif _re.match('^[\\d]+y$', token):
            days += (int(token.rstrip('y')) * 365)
        else:
            raise ValueError(f"Could not convert section '{interval}' to a {option_key}.")
    # timedelta(days, seconds, microseconds, milliseconds, minutes, hours, weeks)
    return _dt.timedelta(days, seconds, 0, 0, minutes, hours, weeks)
def test_main_no_spec(capsys: pytest.CaptureFixture[str]) -> None:
    """Running with no arguments exits with code 2 and an argparse error."""
    with pytest.raises(SystemExit) as excinfo:
        find_extra_reqs.main(arguments=[])
    expected_code = 2
    assert excinfo.value.code == expected_code
    captured = capsys.readouterr()
    assert captured.err.endswith('error: no source files or directories specified\n')
class DiamondShifted(unittest.TestCase):
    """TDDFT/TDHF excitation-energy tests for a carbon diamond cell at a
    shifted k-point, using a minimal basis and range-separated density
    fitting (pyscf).
    """
    # NOTE(review): setUpClass/tearDownClass take `cls` but carry no
    # @classmethod decorator here — confirm against the original source;
    # unittest expects these to be classmethods.
    def setUpClass(cls):
        cell = gto.Cell()
        cell.verbose = 4
        cell.output = '/dev/null'
        cell.atom = 'C 0 0 0; C 0. 0. 0.'
        cell.a = '\n 1. 1. 0.\n 0. 1. 1.\n 1. 0. 1.\n '
        cell.pseudo = 'gth-hf-rev'
        # Minimal s+p basis on carbon to keep the SCF/TDDFT cheap.
        cell.basis = {'C': [[0, (0.8, 1.0)], [1, (1.0, 1.0)]]}
        cell.precision = 1e-10
        cell.build()
        # Arbitrary shifted (non-gamma) k-point.
        kpt = np.asarray([0.3721, 0.2077, 0.1415])
        mf = scf.RHF(cell, kpt).rs_density_fit(auxbasis='weigend').run()
        cls.cell = cell
        cls.mf = mf
        cls.nstates = 5
        cls.nstates_test = 2
    def tearDownClass(cls):
        cls.cell.stdout.close()
        del cls.cell, cls.mf
    def kernel(self, TD, ref, **kwargs):
        # Run the requested TD method and compare the first excitation
        # energies (in eV) against the reference values to 4 decimals.
        td = getattr(self.mf, TD)().set(nstates=self.nstates, **kwargs).run()
        self.assertAlmostEqual(abs(((td.e[:self.nstates_test] * unitev) - ref)).max(), 0, 4)
    def test_tda_singlet(self):
        ref = [12., 13.]
        self.kernel('TDA', ref)
    def test_tda_triplet(self):
        ref = [8., 9.]
        self.kernel('TDA', ref, singlet=False)
    def test_tdhf_singlet(self):
        ref = [12., 13.]
        self.kernel('TDHF', ref)
    def test_tdhf_triplet(self):
        ref = [3., 7.]
        self.kernel('TDHF', ref, singlet=False)
def main():
    """Fine-tune a BERT classifier on a text-classification task.

    Parses CLI arguments, builds the task processor/tokenizer/model,
    optionally applies layer-wise (discriminative) learning rates, then
    trains and evaluates per epoch, writing predictions and metrics into
    ``output_dir``.
    """
    # ---- CLI arguments -------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
    parser.add_argument('--bert_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained BERT model. \nThis specifies the model architecture.')
    parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
    parser.add_argument('--vocab_file', default=None, type=str, required=True, help='The vocabulary file that the BERT model was trained on.')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints will be written.')
    parser.add_argument('--init_checkpoint', default=None, type=str, help='Initial checkpoint (usually from a pre-trained BERT model).')
    parser.add_argument('--do_lower_case', default=False, action='store_true', help='Whether to lower case the input text. True for uncased models, False for cased models.')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
    parser.add_argument('--do_train', default=False, action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', default=False, action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--discr', default=False, action='store_true', help='Whether to do discriminative fine-tuning.')
    parser.add_argument('--train_batch_size', default=32, type=int, help='Total batch size for training.')
    parser.add_argument('--eval_batch_size', default=8, type=int, help='Total batch size for eval.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--save_checkpoints_steps', default=1000, type=int, help='How often to save the model checkpoint.')
    parser.add_argument('--no_cuda', default=False, action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--accumulate_gradients', type=int, default=1, help='Number of steps to accumulate gradient on (divide the batch_size and accumulate)')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumualte before performing a backward/update pass.')
    parser.add_argument('--frozen_bert', default=False, action='store_true', help='frozen the gradient of bert encoder')
    parser.add_argument('--layers', type=int, nargs='+', default=[(- 2)], help='choose the layers that used for downstream tasks, -2 means use pooled output, -1 means all layer,else means the detail layers. default is -2')
    parser.add_argument('--num_datas', default=None, type=int, help='the number of data examples')
    parser.add_argument('--num_test_datas', default=None, type=int, help='the number of data examples')
    parser.add_argument('--pooling_type', default=None, type=str, choices=[None, 'mean', 'max'])
    parser.add_argument('--trunc_medium', type=int, default=(- 2), help='choose the trunc ways, -2 means choose the first seq_len tokens, -1 means choose the last seq_len tokens, 0 means choose the first (seq_len // 2) and the last(seq_len // 2). other positive numbers k mean the first k tokens and the last (seq_len - k) tokens')
    parser.add_argument('--layer_learning_rate', type=float, nargs='+', default=([2e-05] * 12), help='learning rate in each group')
    parser.add_argument('--layer_learning_rate_decay', type=float, default=0.95)
    args = parser.parse_args()
    # Registry of dataset-specific example processors.
    processors = {'ag': AGNewsProcessor, 'ag_sep': AGNewsProcessor_sep, 'ag_sep_aug': AGNewsProcessor_sep_aug, 'imdb': IMDBProcessor, 'imdb_t_m': IMDBProcessor_trunc_medium, 'imdb_sep': IMDBProcessor_sep, 'imdb_sep_aug': IMDBProcessor_sep_aug, 'trec': Trec_Processor}
    # ---- Device / distributed setup ------------------------------------
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        n_gpu = torch.cuda.device_count()
    else:
        # Distributed: one process per GPU, NCCL backend.
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info('device %s n_gpu %d distributed training %r', device, n_gpu, bool((args.local_rank != (- 1))))
    if (args.accumulate_gradients < 1):
        raise ValueError('Invalid accumulate_gradients parameter: {}, should be >= 1'.format(args.accumulate_gradients))
    # Effective per-step batch size shrinks with accumulation.
    args.train_batch_size = int((args.train_batch_size / args.accumulate_gradients))
    # ---- Reproducibility -----------------------------------------------
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(args.seed)
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    bert_config = BertConfig.from_json_file(args.bert_config_file)
    if (args.max_seq_length > bert_config.max_position_embeddings):
        raise ValueError('Cannot use sequence length {} because the BERT model was only trained up to sequence length {}'.format(args.max_seq_length, bert_config.max_position_embeddings))
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)):
        raise ValueError('Output directory ({}) already exists and is not empty.'.format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)
    # ---- Task data ------------------------------------------------------
    task_name = args.task_name.lower()
    if (task_name not in processors):
        raise ValueError(('Task not found: %s' % task_name))
    processor = processors[task_name]()
    label_list = processor.get_labels()
    tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir, data_num=args.num_datas)
        num_train_steps = int(((len(train_examples) / args.train_batch_size) * args.num_train_epochs))
    # ---- Model ----------------------------------------------------------
    model = BertForSequenceClassification(bert_config, len(label_list), args.layers, pooling=args.pooling_type)
    if (args.init_checkpoint is not None):
        # Load only the encoder weights from the pre-trained checkpoint.
        model.bert.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'))
    model.to(device)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
    elif (n_gpu > 1):
        model = torch.nn.DataParallel(model)
    # ---- Optimizer parameter groups -------------------------------------
    # Bias/LayerNorm-style params get no weight decay.
    no_decay = ['bias', 'gamma', 'beta']
    if args.discr:
        # Discriminative fine-tuning: per-transformer-layer learning rates,
        # either given explicitly or decayed geometrically from one base lr.
        if (len(args.layer_learning_rate) > 1):
            groups = [(f'layer.{i}.', args.layer_learning_rate[i]) for i in range(12)]
        else:
            lr = args.layer_learning_rate[0]
            groups = [(f'layer.{i}.', (lr * pow(args.layer_learning_rate_decay, (11 - i)))) for i in range(12)]
        group_all = [f'layer.{i}.' for i in range(12)]
        no_decay_optimizer_parameters = []
        decay_optimizer_parameters = []
        for (g, l) in groups:
            decay_optimizer_parameters.append({'params': [p for (n, p) in model.named_parameters() if ((not any(((nd in n) for nd in no_decay))) and any(((nd in n) for nd in [g])))], 'weight_decay_rate': 0.01, 'lr': l})
            no_decay_optimizer_parameters.append({'params': [p for (n, p) in model.named_parameters() if (any(((nd in n) for nd in no_decay)) and any(((nd in n) for nd in [g])))], 'weight_decay_rate': 0.0, 'lr': l})
        # Parameters outside the 12 encoder layers use the default lr.
        group_all_parameters = [{'params': [p for (n, p) in model.named_parameters() if ((not any(((nd in n) for nd in no_decay))) and (not any(((nd in n) for nd in group_all))))], 'weight_decay_rate': 0.01}, {'params': [p for (n, p) in model.named_parameters() if (any(((nd in n) for nd in no_decay)) and (not any(((nd in n) for nd in group_all))))], 'weight_decay_rate': 0.0}]
        optimizer_parameters = ((no_decay_optimizer_parameters + decay_optimizer_parameters) + group_all_parameters)
    else:
        optimizer_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay_rate': 0.01}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay_rate': 0.0}]
    if args.frozen_bert:
        # Train only the classification head.
        for p in model.bert.parameters():
            p.requires_grad = False
    optimizer = BERTAdam(optimizer_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_steps)
    global_step = 0
    # ---- Eval data (prepared up-front; evaluated after every epoch) ------
    eval_examples = processor.get_dev_examples(args.data_dir, data_num=args.num_test_datas)
    eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, trunc_medium=args.trunc_medium)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size, shuffle=False)
    # ---- Training loop ---------------------------------------------------
    if args.do_train:
        train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, trunc_medium=args.trunc_medium)
        logger.info('***** Running training *****')
        logger.info('  Num examples = %d', len(train_examples))
        logger.info('  Batch size = %d', args.train_batch_size)
        logger.info('  Num steps = %d', num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if (args.local_rank == (- 1)):
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        epoch = 0
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            epoch += 1
            model.train()
            tr_loss = 0
            (nb_tr_examples, nb_tr_steps) = (0, 0)
            for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')):
                batch = tuple((t.to(device) for t in batch))
                (input_ids, input_mask, segment_ids, label_ids) = batch
                (loss, _) = model(input_ids, segment_ids, input_mask, label_ids)
                if (n_gpu > 1):
                    # DataParallel returns one loss per GPU.
                    loss = loss.mean()
                if (args.gradient_accumulation_steps > 1):
                    loss = (loss / args.gradient_accumulation_steps)
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                # Optimizer step only every `gradient_accumulation_steps` batches.
                if (((step + 1) % args.gradient_accumulation_steps) == 0):
                    optimizer.step()
                    model.zero_grad()
                    global_step += 1
            # ---- Per-epoch evaluation ------------------------------------
            model.eval()
            (eval_loss, eval_accuracy) = (0, 0)
            (nb_eval_steps, nb_eval_examples) = (0, 0)
            # Predictions for this epoch are written to results_ep<N>.txt.
            with open(os.path.join(args.output_dir, (('results_ep' + str(epoch)) + '.txt')), 'w') as f:
                for (input_ids, input_mask, segment_ids, label_ids) in tqdm(eval_dataloader, desc='Evaluation'):
                    input_ids = input_ids.to(device)
                    input_mask = input_mask.to(device)
                    segment_ids = segment_ids.to(device)
                    label_ids = label_ids.to(device)
                    with torch.no_grad():
                        (tmp_eval_loss, logits) = model(input_ids, segment_ids, input_mask, label_ids)
                    logits = logits.detach().cpu().numpy()
                    label_ids = label_ids.to('cpu').numpy()
                    outputs = np.argmax(logits, axis=1)
                    for output in outputs:
                        f.write((str(output) + '\n'))
                    tmp_eval_accuracy = np.sum((outputs == label_ids))
                    eval_loss += tmp_eval_loss.mean().item()
                    eval_accuracy += tmp_eval_accuracy
                    nb_eval_examples += input_ids.size(0)
                    nb_eval_steps += 1
            eval_loss = (eval_loss / nb_eval_steps)
            eval_accuracy = (eval_accuracy / nb_eval_examples)
            result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'global_step': global_step, 'loss': (tr_loss / nb_tr_steps)}
            output_eval_file = os.path.join(args.output_dir, (('eval_results_ep' + str(epoch)) + '.txt'))
            print('output_eval_file=', output_eval_file)
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info('  %s = %s', key, str(result[key]))
                    writer.write(('%s = %s\n' % (key, str(result[key]))))
class Conv3x3(nn.Module):
    """3x3 convolution followed by normalization and in-place ReLU.

    The normalization layer is produced by the project-level ``get_norm``
    factory from the ``bn_norm`` identifier.
    """

    def __init__(self, in_channels, out_channels, bn_norm, stride=1, groups=1):
        super(Conv3x3, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=stride, padding=1, bias=False, groups=groups)
        self.bn = get_norm(bn_norm, out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> norm -> activation, fused into a single expression.
        return self.relu(self.bn(self.conv(x)))
class panZoomDisplay(QWidget):
    """Miniature pan/zoom map for the camera preview.

    Draws the full sensor area ('ScalerCropMaximum') as an outline together
    with the currently selected ScalerCrop rectangle; the user pans by
    dragging the mouse and zooms with the wheel. Reads and writes the
    module-level ``picam2`` and ``scaler_crop`` globals.

    NOTE(review): the property decorators were garbled in the original source
    (a bare ``_level.setter`` expression, which raises NameError at class
    creation); restored here as a standard property/setter pair.
    """
    updated = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.setMinimumSize(201, 151)
        # Widget pixels per sensor pixel (sensor width mapped onto 200 px).
        self.scale = 200 / picam2.camera_properties['ScalerCropMaximum'][2]
        self.zoom_level_ = 1.0
        self.max_zoom = 7.0
        self.zoom_step = 0.1

    @property
    def zoom_level(self):
        """Current zoom factor (1.0 = whole sensor visible)."""
        return self.zoom_level_

    @zoom_level.setter
    def zoom_level(self, val):
        # Only recompute the crop when the value actually changes.
        if val != self.zoom_level:
            self.zoom_level_ = val
            self.setZoom()

    def setZoomLevel(self, val):
        """Slot-style helper so external widgets/signals can set the zoom."""
        self.zoom_level = val

    def paintEvent(self, event):
        painter = QPainter()
        painter.begin(self)
        full_img = picam2.camera_properties['ScalerCropMaximum']
        self.scale = 200 / full_img[2]
        # Outline of the full sensor area, translated to the widget origin.
        scaled_full_img = [int(i * self.scale) for i in full_img]
        origin = scaled_full_img[:2]
        scaled_full_img[:2] = [0, 0]
        painter.drawRect(*scaled_full_img)
        # Outline of the current crop, expressed relative to the same origin.
        scaled_scaler_crop = [int(i * self.scale) for i in scaler_crop]
        scaled_scaler_crop[0] -= origin[0]
        scaled_scaler_crop[1] -= origin[1]
        painter.drawRect(*scaled_scaler_crop)
        painter.end()
        self.updated.emit()

    def draw_centered(self, pos):
        """Re-center the crop on ``pos`` (widget pixels), clamped to the sensor."""
        global scaler_crop
        center = [int(i / self.scale) for i in pos]
        full_img = picam2.camera_properties['ScalerCropMaximum']
        w = scaler_crop[2]
        h = scaler_crop[3]
        x = center[0] - w // 2 + picam2.camera_properties['ScalerCropMaximum'][0]
        y = center[1] - h // 2 + picam2.camera_properties['ScalerCropMaximum'][1]
        new_scaler_crop = [x, y, w, h]
        # Clamp so the crop rectangle stays inside the full sensor area.
        new_scaler_crop[1] = max(new_scaler_crop[1], full_img[1])
        new_scaler_crop[1] = min(new_scaler_crop[1], full_img[1] + full_img[3] - new_scaler_crop[3])
        new_scaler_crop[0] = max(new_scaler_crop[0], full_img[0])
        new_scaler_crop[0] = min(new_scaler_crop[0], full_img[0] + full_img[2] - new_scaler_crop[2])
        scaler_crop = tuple(new_scaler_crop)
        picam2.controls.ScalerCrop = scaler_crop
        self.update()

    def mouseMoveEvent(self, event):
        pos = event.pos()
        pos = (pos.x(), pos.y())
        self.draw_centered(pos)

    def setZoom(self):
        """Resize the crop around its current center according to zoom_level."""
        global scaler_crop
        # Clamp the zoom level to [1, max_zoom] (assignments go through the setter).
        if self.zoom_level < 1:
            self.zoom_level = 1.0
        if self.zoom_level > self.max_zoom:
            self.zoom_level = self.max_zoom
        factor = 1.0 / self.zoom_level
        full_img = picam2.camera_properties['ScalerCropMaximum']
        current_center = (scaler_crop[0] + scaler_crop[2] // 2, scaler_crop[1] + scaler_crop[3] // 2)
        w = int(factor * full_img[2])
        h = int(factor * full_img[3])
        x = current_center[0] - w // 2
        y = current_center[1] - h // 2
        new_scaler_crop = [x, y, w, h]
        new_scaler_crop[1] = max(new_scaler_crop[1], full_img[1])
        new_scaler_crop[1] = min(new_scaler_crop[1], full_img[1] + full_img[3] - new_scaler_crop[3])
        new_scaler_crop[0] = max(new_scaler_crop[0], full_img[0])
        new_scaler_crop[0] = min(new_scaler_crop[0], full_img[0] + full_img[2] - new_scaler_crop[2])
        scaler_crop = tuple(new_scaler_crop)
        picam2.controls.ScalerCrop = scaler_crop
        self.update()

    def wheelEvent(self, event):
        zoom_dir = np.sign(event.angleDelta().y())
        self.zoom_level += zoom_dir * self.zoom_step
        self.setZoom()
def _segmentation_evaluation_old(args: SharedArgs, dataset: Dataset, label_map: Optional[LabelMap], results_dir: Path) -> Optional[SegmentationEvaluation]:
    """Run the legacy (SoccerNet) camera-segmentation evaluation.

    Returns None when no label map is available or when the per-video
    segmentation result files are not all present under ``results_dir``.
    """
    # Both preconditions short-circuit exactly like the original two guards.
    if not (label_map and _segmentation_results_available(results_dir, dataset.video_data)):
        return None
    logging.info('Running old version (SoccerNet) of segmentation evaluation')
    segmentations = [
        np.load(str(_segmentation_path(results_dir / video_datum.relative_path)))
        for video_datum in dataset.video_data
    ]
    list_games = GamePathsReader.read_game_list_v2_camera_segmentation(Path(args.splits_dir), args.test_split)
    evaluation = create_segmentation_evaluation_old(segmentations, label_map, list_games, args.labels_dir, args.frame_rate)
    logging.info(f'Segmentation mean IOU (old version): {evaluation.mean_iou}')
    return evaluation
def optimize(instance, max_time=10000, time_limit=100, threads=1):
    """Solve a job-shop scheduling instance with CP-SAT, minimizing makespan.

    Args:
        instance: problem object exposing ``tasks``, ``machines`` and ``jobs``;
            each task has ``name``, ``length`` and optional ``next_task``.
        max_time: scheduling horizon (upper bound for all start/end variables).
        time_limit: solver wall-clock limit in seconds.
        threads: number of parallel search workers.

    Returns:
        Solution populated with the solved start/end time of every task.
    """
    model = cp_model.CpModel()
    start_vars = dict()
    end_vars = dict()
    durations = dict()
    interval_vars = dict()
    for task in instance.tasks:
        start_vars[task] = model.NewIntVar(0, max_time, 'start' + task.name)
        end_vars[task] = model.NewIntVar(0, max_time, 'end' + task.name)
        durations[task] = task.length
        interval_vars[task] = model.NewIntervalVar(start_vars[task], durations[task], end_vars[task], 'interval' + task.name)
    # Precedence: a task must finish before its successor starts.
    for task in instance.tasks:
        if task.next_task:
            model.Add(start_vars[task.next_task] >= end_vars[task])
    # BUG FIX: one NoOverlap constraint per machine. The original added the
    # identical constraint once per task on the machine (inner loop also
    # shadowed `task`), needlessly bloating the model.
    for machine in instance.machines:
        model.AddNoOverlap([interval_vars[task] for task in machine.tasks])
    # Makespan objective: the latest end time over all tasks of all jobs.
    obj_var = model.NewIntVar(0, max_time, 'makespan')
    model.AddMaxEquality(obj_var, [end_vars[task] for job in instance.jobs for task in job.tasks])
    model.Minimize(obj_var)
    cb = cp_model.ObjectiveSolutionPrinter()
    solver = cp_model.CpSolver()
    solver.parameters.max_time_in_seconds = time_limit
    solver.parameters.num_search_workers = threads
    status = solver.SolveWithSolutionCallback(model, cb)
    solution = Solution(instance)
    for job in instance.jobs:
        for task in job.tasks:
            print(task.name)
            print('Start: %f' % solver.Value(start_vars[task]))
            print('End: %f' % solver.Value(end_vars[task]))
            solution.add(task, solver.Value(start_vars[task]), solver.Value(end_vars[task]))
    return solution
def parse_interval_string(interval, delimiter='-'):
    """Parse an age-interval string such as ``"1d-2w"`` into seconds.

    Each endpoint is one or more digit groups with an optional unit suffix
    from ``s m h d w y`` (case-insensitive). A single endpoint (no delimiter)
    is treated as both min and max; a missing unit is inherited from the
    other endpoint, or defaults to seconds when both are missing.

    Args:
        interval: the interval string to parse.
        delimiter: separator between the two endpoints (default '-').

    Returns:
        (min_seconds, max_seconds), or (None, None) if the string is invalid.
    """
    numbers = '[0-9]'
    age_types = '[smhdwy]'
    agetypes_re = re.compile(age_types, re.IGNORECASE)
    age_spec = '(?:%s+%s?)+' % (numbers, age_types)
    agespec_re = re.compile(age_spec, re.IGNORECASE)
    period_re = re.compile('^%s( *%s *%s?)?$' % (age_spec, delimiter, age_spec), re.IGNORECASE)
    if period_re.match(interval) is None:
        return (None, None)
    period_list = agespec_re.findall(interval)
    if len(period_list) == 1:
        # Single endpoint: use it for both min and max.
        period_list.append(period_list[-1])
    if len(agetypes_re.findall(period_list[0])) == 0 and len(agetypes_re.findall(period_list[1])) == 0:
        # Neither endpoint has a unit: default both to seconds.
        period_list[0] = period_list[0] + 's'
        period_list[1] = period_list[1] + 's'
    elif len(agetypes_re.findall(period_list[0])) == 0:
        # Inherit the (last) unit of the other endpoint.
        period_list[0] = period_list[0] + agetypes_re.findall(period_list[1])[-1]
    elif len(agetypes_re.findall(period_list[1])) == 0:
        period_list[1] = period_list[1] + agetypes_re.findall(period_list[0])[-1]
    if len(agetypes_re.findall(period_list[0])) == 0:
        age_types_found = agetypes_re.findall(period_list[1])
        # BUG FIX: the original tested len(age_types) — the 8-character pattern
        # string, always > 1 — so this branch unconditionally returned
        # (None, None). It must inspect the units actually found.
        if len(age_types_found) > 1:
            return (None, None)
        period_list[0] = period_list[0] + age_types_found[0]
    elif len(agetypes_re.findall(period_list[1])) == 0:
        age_types_found = agetypes_re.findall(period_list[0])
        if len(age_types_found) > 1:
            return (None, None)
        period_list[1] = period_list[1] + age_types_found[0]
    min_seconds = get_seconds_from_age(period_list[0])
    max_seconds = get_seconds_from_age(period_list[1])
    return (min_seconds, max_seconds)
class TAG_List(TAG, list):
    """NBT TAG_List: a length-prefixed sequence of unnamed tags of one type."""
    id = 9

    def __init__(self, name: str, data: list) -> None:
        TAG.__init__(self, name)
        list.__init__(self, data)

    def pack_data(self) -> bytes:
        """Serialize the payload: element tag id (byte), count (int), payloads."""
        if len(self) > 0:
            return BufferUtil.pack('b', self[0].id) + BufferUtil.pack('i', len(self)) + b''.join([t.pack_data() for t in self])
        # Empty list: element id 0 (TAG_End) and zero length.
        return BufferUtil.pack('b', 0) + BufferUtil.pack('i', 0)

    @staticmethod
    def unpack_data(buf) -> list:
        """Read a list payload from *buf* and return the decoded elements.

        NOTE(review): ``@staticmethod`` restored — the method takes ``buf`` as
        its first argument and is invoked as ``tag.unpack_data(buf)`` on class
        objects below, i.e. it was written as a static method.
        """
        tag = TYPES[buf.unpack('b')]
        length = buf.unpack('i')
        out = []
        for _ in range(length):
            out.append(tag(None, tag.unpack_data(buf)))
        return out

    def pretty(self, indent: int = 0) -> str:
        """Human-readable multi-line rendering of the list and its elements."""
        tab = (' ' * 4) * indent
        nl = ',\n'
        return f'''TAG_List("{self.name}"): [
{nl.join([t.pretty((indent + 1)) for t in self])}
{tab}]'''
class KnownValues(unittest.TestCase):
    """Regression tests pinning RKS/RHF TDA and TDDFT excitation energies
    (converted to eV via the 27.2114 Hartree->eV factor) and the A/B response
    matrices against known fingerprints and reference values.

    NOTE(review): the ``@unittest.skipIf`` decorators for the xcfun-dependent
    tests were garbled in the original source (left as bare tuples such as
    ``(has_xcfun, 'xcfun library not found.')``) and have been restored.
    """

    def test_nohbrid_lda(self):
        td = rks.CasidaTDDFT(mf_lda)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -41., 5)
        ref = [9., 9., 14., 30., 30.]
        self.assertAlmostEqual(abs(es - ref).max(), 0, 5)

    def test_nohbrid_b88p86(self):
        td = rks.CasidaTDDFT(mf_bp86)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -40., 4)
        # Cross-check against direct diagonalization of the (A, B) problem.
        a, b = td.get_ab()
        ref = diagonalize(a, b, nroots=5) * 27.2114
        self.assertAlmostEqual(abs(es - ref).max(), 0, 7)

    def test_tddft_lda(self):
        td = rks.TDDFT(mf_lda)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -41., 4)

    def test_tddft_b88p86(self):
        td = rks.TDDFT(mf_bp86)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -40., 4)

    def test_tddft_b3lyp(self):
        td = rks.TDDFT(mf_b3lyp)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -41., 4)
        a, b = td.get_ab()
        ref = diagonalize(a, b, nroots=5) * 27.2114
        self.assertAlmostEqual(abs(es - ref).max(), 0, 7)

    def test_tda_b3lypg(self):
        mf = dft.RKS(mol)
        mf.xc = 'b3lypg'
        mf.grids.prune = None
        mf.scf()
        td = rks.TDA(mf)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -41., 4)

    def test_tda_lda(self):
        td = rks.TDA(mf_lda)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -41., 4)

    @unittest.skipIf(not has_xcfun, 'xcfun library not found.')
    def test_tddft_b3lyp_xcfun(self):
        with lib.temporary_env(dft.numint.NumInt, libxc=dft.xcfun):
            td = rks.TDDFT(mf_b3lyp)
            es = td.kernel(nstates=5)[0] * 27.2114
        ref = [9., 9., 15., 30., 30.]
        self.assertAlmostEqual(abs(es - ref).max(), 0, 5)

    @unittest.skipIf(not has_xcfun, 'xcfun library not found.')
    def test_tda_b3lyp_xcfun(self):
        with lib.temporary_env(dft.numint.NumInt, libxc=dft.xcfun):
            td = rks.TDA(mf_b3lyp)
            es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -41., 5)

    @unittest.skipIf(not has_xcfun, 'xcfun library not found.')
    def test_tda_lda_xcfun(self):
        mf = dft.RKS(mol)
        mf.xc = 'lda,vwn'
        mf.grids.prune = None
        mf._numint.libxc = dft.xcfun
        mf.scf()
        td = rks.TDA(mf)
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -41., 5)
        ref = [9., 9., 15.]
        self.assertAlmostEqual(abs(es[:3] - ref).max(), 0, 5)

    def test_tda_b3lyp_triplet(self):
        td = rks.TDA(mf_b3lyp)
        td.singlet = False
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -40., 5)
        td.analyze()

    def test_tda_lda_triplet(self):
        td = rks.TDA(mf_lda)
        td.singlet = False
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -39., 5)
        ref = [9.0139312, 9.0139312, 12.]
        self.assertAlmostEqual(abs(es[:3] - ref).max(), 0, 4)

    def test_tddft_b88p86_triplet(self):
        td = rks.TDDFT(mf_bp86)
        td.singlet = False
        es = td.kernel(nstates=5)[0] * 27.2114
        ref = [9., 9., 12., 29., 29.]
        self.assertAlmostEqual(abs(es - ref).max(), 0, 4)
        # Cross-check against the full (A,B) eigenproblem built from the UKS
        # variant: assemble the spin-blocked A/B, diagonalize the RPA matrix,
        # and pick the triplet roots.
        utd = mf_bp86.to_uks().TDDFT()
        nocc = mf_bp86.mol.nelectron // 2
        nvir = mf_bp86.mo_energy.size - nocc
        nov = nocc * nvir
        shape = (nov, nov)
        a, b = utd.get_ab()
        a_aa, a_ab, a_bb = a
        b_aa, b_ab, b_bb = b
        a = numpy.block([[a_aa.reshape(shape), a_ab.reshape(shape)],
                         [a_ab.reshape(shape).T, a_bb.reshape(shape)]])
        b = numpy.block([[b_aa.reshape(shape), b_ab.reshape(shape)],
                         [b_ab.reshape(shape).T, b_bb.reshape(shape)]])
        h = numpy.block([[a, b], [-b.conj(), -a.conj()]])
        e = numpy.linalg.eig(numpy.asarray(h))[0]
        ref = numpy.sort(e[e.real > 0])[[0, 1, 4, 6, 7]] * 27.2114
        self.assertAlmostEqual(abs(es - ref).max(), 0, 4)

    def test_tda_rsh(self):
        mol = gto.M(atom='H 0 0 0.6; H 0 0 0', basis='6-31g')
        mf = dft.RKS(mol)
        mf.xc = 'wb97'
        e = mf.kernel()
        self.assertAlmostEqual(e, -1., 8)
        e_td = mf.TDA().set(nstates=5).kernel()[0]
        ref = [16., 27., 49.4665691]
        self.assertAlmostEqual(abs(e_td * nist.HARTREE2EV - ref).max(), 0, 4)

    def test_tda_m06l_singlet(self):
        td = mf_m06l.TDA()
        es = td.kernel(nstates=5)[0] * 27.2114
        self.assertAlmostEqual(lib.fp(es), -42., 4)
        ref = [10., 10., 16.]
        self.assertAlmostEqual(abs(es[:3] - ref).max(), 0, 4)

    def test_ab_hf(self):
        # Verify matrix-free TDA/TDHF operators against explicit A/B matrices.
        a, b = rhf.get_ab(mf)
        fock = mf.get_hcore() + mf.get_veff()
        ftda = rhf.gen_tda_operation(mf, fock, singlet=True)[0]
        ftdhf = rhf.gen_tdhf_operation(mf, singlet=True)[0]
        nocc = numpy.count_nonzero(mf.mo_occ == 2)
        nvir = numpy.count_nonzero(mf.mo_occ == 0)
        numpy.random.seed(2)
        x, y = xy = numpy.random.random((2, nocc, nvir))
        ax = numpy.einsum('iajb,jb->ia', a, x)
        self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc, nvir)).max(), 0, 5)
        ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
        ab2 = -numpy.einsum('iajb,jb->ia', b, x)
        ab2 -= numpy.einsum('iajb,jb->ia', a, y)
        abxy_ref = ftdhf([xy]).reshape(2, nocc, nvir)
        self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 9)
        self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 9)

    def test_ab_lda(self):
        mf = mf_lda
        a, b = rhf.get_ab(mf)
        ftda = rhf.gen_tda_operation(mf, singlet=True)[0]
        ftdhf = rhf.gen_tdhf_operation(mf, singlet=True)[0]
        nocc = numpy.count_nonzero(mf.mo_occ == 2)
        nvir = numpy.count_nonzero(mf.mo_occ == 0)
        numpy.random.seed(2)
        x, y = xy = numpy.random.random((2, nocc, nvir))
        ax = numpy.einsum('iajb,jb->ia', a, x)
        self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc, nvir)).max(), 0, 9)
        ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
        ab2 = -numpy.einsum('iajb,jb->ia', b, x)
        ab2 -= numpy.einsum('iajb,jb->ia', a, y)
        abxy_ref = ftdhf([xy]).reshape(2, nocc, nvir)
        self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 9)
        self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 9)

    def test_ab_b3lyp(self):
        mf = mf_b3lyp
        a, b = rks.TDDFT(mf).get_ab()
        ftda = rhf.gen_tda_operation(mf, singlet=None)[0]
        ftdhf = rhf.gen_tdhf_operation(mf, singlet=True)[0]
        nocc = numpy.count_nonzero(mf.mo_occ == 2)
        nvir = numpy.count_nonzero(mf.mo_occ == 0)
        numpy.random.seed(2)
        x, y = xy = numpy.random.random((2, nocc, nvir))
        ax = numpy.einsum('iajb,jb->ia', a, x)
        self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc, nvir)).max(), 0, 9)
        ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
        ab2 = -numpy.einsum('iajb,jb->ia', b, x)
        ab2 -= numpy.einsum('iajb,jb->ia', a, y)
        abxy_ref = ftdhf([xy]).reshape(2, nocc, nvir)
        self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 9)
        self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 9)

    def test_ab_mgga(self):
        mf = mf_m06l
        a, b = rks.TDDFT(mf).get_ab()
        ftda = rhf.gen_tda_operation(mf, singlet=None)[0]
        ftdhf = rhf.gen_tdhf_operation(mf, singlet=True)[0]
        nocc = numpy.count_nonzero(mf.mo_occ == 2)
        nvir = numpy.count_nonzero(mf.mo_occ == 0)
        numpy.random.seed(2)
        x, y = xy = numpy.random.random((2, nocc, nvir))
        ax = numpy.einsum('iajb,jb->ia', a, x)
        self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc, nvir)).max(), 0, 9)
        ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
        ab2 = -numpy.einsum('iajb,jb->ia', b, x)
        ab2 -= numpy.einsum('iajb,jb->ia', a, y)
        abxy_ref = ftdhf([xy]).reshape(2, nocc, nvir)
        self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 9)
        self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 9)

    def test_nto(self):
        mf = scf.RHF(mol).run()
        td = rks.TDA(mf).run(nstates=5)
        w, nto = td.get_nto(state=3)
        self.assertAlmostEqual(w[0], 0., 7)
        self.assertAlmostEqual(lib.fp(w), 0., 7)
        w, nto = td.get_nto(state=0)
        self.assertAlmostEqual(w[0], 0., 7)
        self.assertAlmostEqual(lib.fp(w), 0., 7)
        # Repeat with point-group symmetry enabled.
        pmol = mol.copy(deep=False)
        pmol.symmetry = True
        pmol.build(0, 0)
        mf = scf.RHF(pmol).run()
        td = rks.TDA(mf).run(nstates=3)
        w, nto = td.get_nto(state=-1)
        self.assertAlmostEqual(w[0], 0., 7)
        self.assertAlmostEqual(lib.fp(w), 0., 7)

    def test_analyze(self):
        f = td_hf.oscillator_strength(gauge='length')
        self.assertAlmostEqual(lib.fp(f), -0., 5)
        f = td_hf.oscillator_strength(gauge='velocity', order=2)
        self.assertAlmostEqual(lib.fp(f), -0., 5)
        # Capture logger.note calls to check the printed excitation summary.
        note_args = []
        def temp_logger_note(rec, msg, *args):
            note_args.append(args)
        with lib.temporary_env(lib.logger.Logger, note=temp_logger_note):
            td_hf.analyze()
        ref = [(), (1, 11., 104., 0.), (2, 11., 104., 0.), (3, 16., 74., 0.)]
        self.assertAlmostEqual(abs(numpy.hstack(ref) - numpy.hstack(note_args)).max(), 0, 4)
        self.assertEqual(td_hf.nroots, td_hf.nstates)
        self.assertAlmostEqual(lib.fp(td_hf.e_tot - mf.e_tot), 0., 5)

    def test_init(self):
        # The TD factory functions must dispatch to the right class for
        # HF, pure-DFT and hybrid(=HF) references.
        hf = scf.RHF(mol)
        ks = scf.RKS(mol)
        kshf = scf.RKS(mol).set(xc='HF')
        self.assertTrue(isinstance(tdscf.TDA(hf), tdscf.rhf.TDA))
        self.assertTrue(isinstance(tdscf.TDA(ks), tdscf.rks.TDA))
        self.assertTrue(isinstance(tdscf.TDA(kshf), tdscf.rks.TDA))
        self.assertTrue(isinstance(tdscf.RPA(hf), tdscf.rhf.TDHF))
        self.assertTrue(isinstance(tdscf.RPA(ks), tdscf.rks.TDDFTNoHybrid))
        self.assertTrue(isinstance(tdscf.RPA(kshf), tdscf.rks.TDDFT))
        self.assertTrue(isinstance(tdscf.TDDFT(hf), tdscf.rhf.TDHF))
        self.assertTrue(isinstance(tdscf.TDDFT(ks), tdscf.rks.TDDFTNoHybrid))
        self.assertTrue(isinstance(tdscf.TDDFT(kshf), tdscf.rks.TDDFT))
        self.assertRaises(RuntimeError, tdscf.rks.dRPA, hf)
        self.assertTrue(isinstance(tdscf.dRPA(kshf), tdscf.rks.dRPA))
        self.assertTrue(isinstance(tdscf.dRPA(ks), tdscf.rks.dRPA))
        self.assertRaises(RuntimeError, tdscf.rks.dTDA, hf)
        self.assertTrue(isinstance(tdscf.dTDA(kshf), tdscf.rks.dTDA))
        self.assertTrue(isinstance(tdscf.dTDA(ks), tdscf.rks.dTDA))
        kshf.xc = ''
        self.assertTrue(isinstance(tdscf.dTDA(kshf), tdscf.rks.dTDA))
        self.assertTrue(isinstance(tdscf.dRPA(kshf), tdscf.rks.dRPA))

    def test_tda_with_wfnsym(self):
        pmol = mol.copy()
        pmol.symmetry = True
        pmol.build(0, 0)
        mf = dft.RKS(pmol).run()
        td = rks.TDA(mf)
        td.wfnsym = 'A2'
        es = td.kernel(nstates=3)[0]
        # Only two A2 roots exist, so one requested state is dropped.
        self.assertTrue(len(es) == 2)
        self.assertAlmostEqual(lib.fp(es), 2., 5)
        note_args = []
        def temp_logger_note(rec, msg, *args):
            note_args.append(args)
        with lib.temporary_env(lib.logger.Logger, note=temp_logger_note):
            td.analyze()
        ref = [(), (1, 'A2', 38., 32., 0.0), (2, 'A2', 38., 31., 0.0)]
        self.assertEqual(note_args[1][1], 'A2')
        self.assertEqual(note_args[2][1], 'A2')
        self.assertAlmostEqual(abs(numpy.append(ref[1][2:], ref[2][2:]) - numpy.append(note_args[1][2:], note_args[2][2:])).max(), 0, 4)

    def test_tdhf_with_wfnsym(self):
        pmol = mol.copy()
        pmol.symmetry = True
        pmol.build()
        mf = scf.RHF(pmol).run()
        td = rhf.TDHF(mf)
        td.wfnsym = 'A2'
        td.nroots = 3
        es = td.kernel()[0]
        self.assertAlmostEqual(lib.fp(es), 2., 5)
        td.analyze()

    def test_tddft_with_wfnsym(self):
        pmol = mol.copy()
        pmol.symmetry = True
        pmol.build()
        mf = dft.RKS(pmol).run()
        td = rks.CasidaTDDFT(mf)
        td.wfnsym = 'A2'
        td.nroots = 3
        es = td.kernel()[0]
        self.assertTrue(len(es) == 2)
        self.assertAlmostEqual(lib.fp(es), 2., 5)
        td.analyze()

    def test_scanner(self):
        # as_scanner() must be idempotent.
        td_scan = td_hf.as_scanner().as_scanner()
        td_scan.nroots = 3
        td_scan(mol)
        self.assertAlmostEqual(lib.fp(td_scan.e), 0., 5)

    def test_transition_multipoles(self):
        self.assertAlmostEqual(abs(lib.fp(td_hf.transition_dipole()[2])), 0., 5)
        self.assertAlmostEqual(abs(lib.fp(td_hf.transition_quadrupole()[2])), 0., 5)
        self.assertAlmostEqual(abs(lib.fp(td_hf.transition_octupole()[2])), 2., 5)
        self.assertAlmostEqual(abs(lib.fp(td_hf.transition_velocity_dipole()[2])), 0., 5)
        self.assertAlmostEqual(abs(lib.fp(td_hf.transition_magnetic_dipole()[2])), 0, 5)
        self.assertAlmostEqual(abs(lib.fp(td_hf.transition_magnetic_quadrupole()[2])), 0., 5)

    def test_dRPA(self):
        td = rks.dRPA(mf_lda)
        td._scf.xc = ''
        es = td.kernel(nstates=5)[0]
        self.assertAlmostEqual(lib.fp(es[:3]), 0., 5)
        ref = [10., 10., 15., 30., 30.]
        self.assertAlmostEqual(abs(es * 27.2114 - ref).max(), 0, 5)

    def test_dTDA(self):
        td = rks.dTDA(mf_lda)
        td._scf.xc = ''
        es = td.kernel(nstates=3)[0]
        self.assertAlmostEqual(lib.fp(es), 0., 5)
        td = rks.dTDA(mf_lda)
        es = td.kernel(nstates=5)[0]
        self.assertAlmostEqual(lib.fp(es[:3]), 0., 5)
        ref = [10., 10., 16., 30.7120363, 30.7120363]
        self.assertAlmostEqual(abs(es * 27.2114 - ref).max(), 0, 5)

    def test_reset(self):
        # reset(mol1) must propagate through the SOSCF wrapper to the inner SCF.
        mol1 = gto.M(atom='C')
        td = scf.RHF(mol).newton().TDHF()
        td.reset(mol1)
        self.assertTrue(td.mol is mol1)
        self.assertTrue(td._scf.mol is mol1)
        self.assertTrue(td._scf._scf.mol is mol1)

    @unittest.skipIf(not has_xcfun, 'xcfun library not found.')
    def test_custom_rsh(self):
        mol = gto.M(atom='H 0 0 0.6; H 0 0 0', basis='6-31g')
        mf = dft.RKS(mol)
        mf._numint.libxc = dft.xcfun
        mf.xc = 'camb3lyp'
        mf.omega = 0.2
        e = mf.kernel()
        self.assertAlmostEqual(e, -1., 8)
        e_td = mf.TDDFT().kernel()[0]
        ref = [16., 28., 49.]
        self.assertAlmostEqual(abs(e_td * nist.HARTREE2EV - ref).max(), 0, 4)

    def test_symmetry_init_guess(self):
        mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', output='/dev/null', symmetry=True)
        td = mol.RHF.run().TDA().run(nstates=1)
        self.assertAlmostEqual(td.e[0], 0., 7)
@pytest.mark.parametrize('history_num_frames', [1, 2, 3, 4])
@pytest.mark.parametrize('dataset_cls', [EgoDataset, AgentDataset])
def test_non_zero_history(history_num_frames: int, dataset_cls: Callable, zarr_dataset: ChunkedDataset, dmg: LocalDataManager, cfg: dict) -> None:
    """Each rasterized sample must stack 2 channels per frame for the current
    frame plus ``history_num_frames`` history frames.

    NOTE(review): the two ``@pytest.mark.parametrize`` decorators were garbled
    in the original source (leading ``.parametrize`` lines) and are restored
    here; the file already imports pytest for them.
    """
    cfg['model_params']['history_num_frames'] = history_num_frames
    rast_params = cfg['raster_params']
    rast_params['map_type'] = 'box_debug'
    rasterizer = build_rasterizer(cfg, dmg)
    dataset = dataset_cls(cfg, zarr_dataset, rasterizer, None)
    indexes = [0, 1, 10, -1]
    for idx in indexes:
        data = dataset[idx]
        # box_debug rasterization yields 2 channels (agents + ego) per frame.
        assert data['image'].shape == (2 * (history_num_frames + 1), *rast_params['raster_size'])
class ConvLayer(nn.Module):
    """Distilling block: 1-D convolution over the feature axis with circular
    padding, batch norm, ELU, then stride-2 max pooling (roughly halving the
    sequence length). Input and output are laid out as (batch, length, channels).
    """

    def __init__(self, c_in):
        super(ConvLayer, self).__init__()
        self.downConv = nn.Conv1d(in_channels=c_in, out_channels=c_in, kernel_size=3, padding=2, padding_mode='circular')
        self.norm = nn.BatchNorm1d(c_in)
        self.activation = nn.ELU()
        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        # (B, L, C) -> (B, C, L) for the Conv1d/pooling stack, then back.
        pooled = self.maxPool(self.activation(self.norm(self.downConv(x.permute(0, 2, 1)))))
        return pooled.transpose(1, 2)
@dataclass
class DataTrainingArguments:
    """Arguments describing the data used for training and evaluation.

    NOTE(review): the ``@dataclass`` decorator was missing in the original
    source even though every attribute uses ``dataclasses.field``; restored so
    instances are constructible and ``__post_init__`` validation runs.
    """
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_seq_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. If passed, sequences longer than this will be truncated, sequences shorter will be padded.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to the maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})

    def __post_init__(self):
        # Validate data file extensions as soon as the arguments are built.
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
def test_new_style():
    # Golden-master check: the Shape introspected from the attrs class `NewStyle`
    # must match field-by-field — input params (incl. the kw-only 'd' and the
    # private '_b' exposed as param 'b'), defaults/factories, per-field metadata,
    # and the output accessors for every attribute.
    assert (get_attrs_shape(NewStyle) == Shape(input=InputShape(constructor=NewStyle, kwargs=None, fields=(InputField(type=int, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=str, id='_b', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=int, id='d', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=int, id='e', default=DefaultValue(1), is_required=False, metadata=MappingProxyType({}), original=ANY), InputField(type=int, id='f', default=DefaultValue(2), is_required=False, metadata=MappingProxyType({}), original=ANY), InputField(type=list, id='g', default=DefaultFactory(list), is_required=False, metadata=MappingProxyType({}), original=ANY), InputField(type=str, id='h', default=DefaultValue(''), is_required=False, metadata=MappingProxyType({'meta': 'data'}), original=ANY), InputField(type=int, id='i', default=DefaultValue(3), is_required=False, metadata=MappingProxyType({}), original=ANY), InputField(type=int, id='j', default=DefaultFactoryWithSelf(int_factory_with_self), is_required=False, metadata=MappingProxyType({}), original=ANY)), params=(Param(field_id='a', name='a', kind=ParamKind.POS_OR_KW), Param(field_id='_b', name='b', kind=ParamKind.POS_OR_KW), Param(field_id='e', name='e', kind=ParamKind.POS_OR_KW), Param(field_id='f', name='f', kind=ParamKind.POS_OR_KW), Param(field_id='g', name='g', kind=ParamKind.POS_OR_KW), Param(field_id='h', name='h', kind=ParamKind.POS_OR_KW), Param(field_id='i', name='i', kind=ParamKind.POS_OR_KW), Param(field_id='j', name='j', kind=ParamKind.POS_OR_KW), Param(field_id='d', name='d', kind=ParamKind.KW_ONLY)), overriden_types=frozenset({'a', '_b', 'd', 'e', 'f', 'g', 'h', 'i', 'j'})), output=OutputShape(fields=(OutputField(type=int, id='a', default=NoDefault(), accessor=create_attr_accessor('a', is_required=True), metadata=MappingProxyType({}), original=ANY), 
OutputField(type=str, id='_b', default=NoDefault(), accessor=create_attr_accessor('_b', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=str, id='c', default=NoDefault(), accessor=create_attr_accessor('c', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=int, id='d', default=NoDefault(), accessor=create_attr_accessor('d', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=int, id='e', default=DefaultValue(1), accessor=create_attr_accessor('e', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=int, id='f', default=DefaultValue(2), accessor=create_attr_accessor('f', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=list, id='g', default=DefaultFactory(list), accessor=create_attr_accessor('g', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=str, id='h', default=DefaultValue(''), accessor=create_attr_accessor('h', is_required=True), metadata=MappingProxyType({'meta': 'data'}), original=ANY), OutputField(type=int, id='i', default=DefaultValue(3), accessor=create_attr_accessor('i', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=int, id='j', default=DefaultFactoryWithSelf(int_factory_with_self), accessor=create_attr_accessor('j', is_required=True), metadata=MappingProxyType({}), original=ANY)), overriden_types=frozenset({'a', '_b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'}))))
class _PrefetchData():
def __init__(self, source_datapipe, buffer_size: int):
self.run_prefetcher: bool = True
self.prefetch_buffer: Deque = deque()
self.buffer_size: int = buffer_size
self.source_datapipe = source_datapipe
self.stop_iteration: bool = False
self.paused: bool = False |
def test_generate_range_error():
    """generate_range_error picks the right wording ('at least', exact, range)
    and pluralization for each (min, max) arity pair."""
    cases = [
        (1, constants.INFINITY, 'expected at least 1 argument'),
        (2, constants.INFINITY, 'expected at least 2 arguments'),
        (1, 1, 'expected 1 argument'),
        (2, 2, 'expected 2 arguments'),
        (0, 1, 'expected 0 to 1 argument'),
        (0, 2, 'expected 0 to 2 arguments'),
    ]
    for lo, hi, expected in cases:
        assert generate_range_error(lo, hi) == expected
class TestTransformerPhaser(unittest.TestCase):
    """Checks that Transformer.phaser() builds the expected sox effect argument
    list for each parameter and rejects out-of-range values."""

    def test_default(self):
        transformer = new_transformer()
        transformer.phaser()
        self.assertEqual(
            ['phaser', '0.800000', '0.740000', '3.000000', '0.400000', '0.500000', '-s'],
            transformer.effects)
        self.assertEqual(['phaser'], transformer.effects_log)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_gain_in_valid(self):
        transformer = new_transformer()
        transformer.phaser(gain_in=0.5)
        self.assertEqual(
            ['phaser', '0.500000', '0.740000', '3.000000', '0.400000', '0.500000', '-s'],
            transformer.effects)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_gain_in_invalid(self):
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.phaser(gain_in=0)

    def test_gain_out_valid(self):
        transformer = new_transformer()
        transformer.phaser(gain_out=1.0)
        self.assertEqual(
            ['phaser', '0.800000', '1.000000', '3.000000', '0.400000', '0.500000', '-s'],
            transformer.effects)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_gain_out_invalid(self):
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.phaser(gain_out=1.1)

    def test_delay_valid(self):
        transformer = new_transformer()
        transformer.phaser(delay=5)
        self.assertEqual(
            ['phaser', '0.800000', '0.740000', '5.000000', '0.400000', '0.500000', '-s'],
            transformer.effects)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_delay_invalid(self):
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.phaser(delay=None)

    def test_decay_valid(self):
        transformer = new_transformer()
        transformer.phaser(decay=0.1)
        self.assertEqual(
            ['phaser', '0.800000', '0.740000', '3.000000', '0.100000', '0.500000', '-s'],
            transformer.effects)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_decay_invalid(self):
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.phaser(decay=0.0)

    def test_speed_valid(self):
        transformer = new_transformer()
        transformer.phaser(speed=2)
        self.assertEqual(
            ['phaser', '0.800000', '0.740000', '3.000000', '0.400000', '2.000000', '-s'],
            transformer.effects)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_speed_invalid(self):
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.phaser(speed=-1)

    def test_modulation_shape_valid(self):
        transformer = new_transformer()
        transformer.phaser(modulation_shape='triangular')
        # Triangular modulation swaps the trailing '-s' flag for '-t'.
        self.assertEqual(
            ['phaser', '0.800000', '0.740000', '3.000000', '0.400000', '0.500000', '-t'],
            transformer.effects)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_modulation_shape_invalid(self):
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.phaser(modulation_shape='square')
@pytest.mark.parametrize('map_variables', [True, False])
@pytest.mark.parametrize('endpoint,function,params,json_response', [('forecast/radiation_and_weather', pvlib.iotools.get_solcast_forecast, dict(api_key='1234', latitude=-33.856784, longitude=51.215297, hours='5', period='PT1H', output_parameters='dni'), {'forecast': [{'dni': 0, 'period_end': '2023-12-13T01:00:00.0000000Z', 'period': 'PT1H'}, {'dni': 1, 'period_end': '2023-12-13T02:00:00.0000000Z', 'period': 'PT1H'}, {'dni': 2, 'period_end': '2023-12-13T03:00:00.0000000Z', 'period': 'PT1H'}, {'dni': 3, 'period_end': '2023-12-13T04:00:00.0000000Z', 'period': 'PT1H'}, {'dni': 4, 'period_end': '2023-12-13T05:00:00.0000000Z', 'period': 'PT1H'}, {'dni': 5, 'period_end': '2023-12-13T06:00:00.0000000Z', 'period': 'PT1H'}]})])
def test_get_solcast_forecast(requests_mock, endpoint, function, params, json_response, map_variables):
    """The forecast fetcher must return the mocked payload as a DataFrame,
    variable-mapped through _solcast2pvlib when map_variables is True.

    NOTE(review): the ``@pytest.mark.parametrize`` decorators and the
    ``mock_url`` f-string literal were truncated in the original source. The
    URL below is the Solcast data API base used by pvlib.iotools.solcast
    (requests_mock registered without a query string matches any query) —
    confirm against the upstream test before relying on it.
    """
    mock_url = f"https://api.solcast.com.au/data/{endpoint}"
    requests_mock.get(mock_url, json=json_response)
    expected = pd.DataFrame.from_dict(json_response[list(json_response.keys())[0]])
    if map_variables:
        expected = pvlib.iotools.solcast._solcast2pvlib(expected)
    pd.testing.assert_frame_equal(function(**params, map_variables=map_variables)[0], expected)
class Synapse_AMOS(Dataset):
    """Synapse/AMOS CT segmentation dataset.

    Loads (image, label) volume pairs for the ids returned by ``read_list``.
    Validation splits are preloaded and cached in memory; training splits are
    read lazily per item. Images are clipped to the window [-75, 275]
    (presumably Hounsfield units — confirm against the data pipeline) and
    min-max normalized to [0, 1].
    """

    def __init__(self, split='train', repeat=None, transform=None, unlabeled=False, is_val=False, task='synapse', num_cls=1):
        self.ids_list = read_list(split, task=task)
        self.repeat = repeat
        self.task = task
        # Default epoch length: one pass over the id list.
        if self.repeat is None:
            self.repeat = len(self.ids_list)
        print('total {} datas'.format(self.repeat))
        self.transform = transform
        self.unlabeled = unlabeled
        self.num_cls = num_cls
        self._weight = None
        self.is_val = is_val
        if self.is_val:
            # Validation data is read once up front and cached by id.
            self.data_list = {}
            for data_id in tqdm(self.ids_list):
                image, label = read_data(data_id, task=task)
                self.data_list[data_id] = (image, label)

    def __len__(self):
        # `repeat` controls the nominal epoch length, not the number of cases.
        return self.repeat

    def _get_data(self, data_id):
        """Return (id, image, label), served from the cache when validating."""
        if self.is_val:
            image, label = self.data_list[data_id]
        else:
            image, label = read_data(data_id, task=self.task)
        return data_id, image, label

    def __getitem__(self, index):
        # Wrap around so `repeat` may exceed the number of distinct cases.
        index = index % len(self.ids_list)
        data_id = self.ids_list[index]
        _, image, label = self._get_data(data_id)
        if self.unlabeled:
            # NOTE: zeroes the label in place; for cached validation data this
            # also zeroes the cached array (idempotent, but shared).
            label[:] = 0
        # Clip to the intensity window, then min-max normalize to [0, 1].
        image = image.clip(min=-75, max=275)
        lo, hi = image.min(), image.max()
        if hi > lo:
            image = (image - lo) / (hi - lo)
        else:
            # BUG FIX: a constant volume previously divided by zero and
            # produced NaNs; map it to all zeros instead.
            image = image - lo
        sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample
def _create_completion(model: str, messages: list, stream: bool, temperature: float=0.7, **kwargs):
    """Stream a chat completion from the remote provider, yielding decoded text chunks.

    NOTE(review): the requests.post() call below lost its URL literal during
    extraction (the string is unterminated), so this block is not syntactically
    valid as written — the provider endpoint must be restored. Also note that
    `stream` and `temperature` are accepted but never used here.
    """
    headers = {'Content-Type': 'application/json', 'Accept': '*/*', 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4'}
    data = {'messages': messages, 'model': model}
    response = requests.post(' json=data, stream=True)
    print(response)
    # Forward the raw streamed chunks to the caller as UTF-8 text.
    for token in response.iter_content(chunk_size=None):
        (yield token.decode('utf-8'))
# NOTE(review): the line below is a decorator whose '@pytest.fixture' prefix was
# lost during extraction — it should read "@pytest.fixture(scope='module')".
(scope='module')
def chat_permissions():
    # Module-scoped fixture: a ChatPermissions object with every permission enabled.
    return ChatPermissions(can_send_messages=True, can_send_polls=True, can_send_other_messages=True, can_add_web_page_previews=True, can_change_info=True, can_invite_users=True, can_pin_messages=True, can_manage_topics=True, can_send_audios=True, can_send_documents=True, can_send_photos=True, can_send_videos=True, can_send_video_notes=True, can_send_voice_notes=True)
class CarveFileSystem(MountpointFileSystemMixin, LoopbackFileSystemMixin, FileSystem):
    """File-system handler that recovers ("carves") files from a volume by
    running the external *photorec* tool, writing results under a dedicated
    mountpoint.
    """
    type = 'carve'

    def __init__(self, volume, freespace=True):
        # freespace: when True, photorec also scans unallocated (free) space.
        super().__init__(volume)
        self.freespace = freespace

    # NOTE(review): the bare expression below looks like a decorator that lost
    # its '@'/wrapper during extraction (presumably a "photorec must be
    # installed" dependency guard on mount()) — confirm against upstream.
    (dependencies.photorec)
    def mount(self):
        """Run photorec against the volume into a fresh 'carve' mountpoint.

        Raises SubsystemError when the photorec invocation fails; the
        mountpoint is cleared before re-raising.
        """
        self._make_mountpoint(suffix='carve')
        if (not self.volume.slot):
            # No partition slot: photorec must address the data through a
            # loopback device, creating one if it does not exist yet.
            loopback = self.volume.loopback
            if (loopback is None):
                self._find_loopback()
                loopback = self.loopback
            try:
                _util.check_call_(['photorec', '/d', (self.mountpoint + os.sep), '/cmd', loopback, (('freespace,' if self.freespace else '') + 'search')])
            except Exception as e:
                logger.exception('Failed carving the volume.')
                self._clear_mountpoint()
                raise SubsystemError(e)
        else:
            # Slotted volume: address the partition by slot number on the raw
            # image path instead of a loopback device.
            try:
                _util.check_call_(['photorec', '/d', (self.mountpoint + os.sep), '/cmd', self.volume.get_raw_path(), ((str(self.volume.slot) + (',freespace' if self.freespace else '')) + ',search')])
            except Exception as e:
                logger.exception('Failed carving the volume.')
                self._clear_mountpoint()
                raise SubsystemError(e)

    def unmount(self, allow_lazy=False):
        """Delete the carve output directory, then defer to the parent unmount."""
        if (self.mountpoint is not None):
            logger.debug('Clearing out {}'.format(self.mountpoint))
            # Carved files are copies, so the whole tree can simply be removed.
            shutil.rmtree(self.mountpoint)
            self.mountpoint = None
        super().unmount(allow_lazy)
def learn_density(threshold, use_threshold, distribution, train_set_x, train_set_y, test_set_x, test_set_y):
    """Train the staged learned-NN density index for the given key distribution.

    Returns the index produced by ``hybrid_training``, or None when the
    distribution has no parameter pool.
    """
    set_data_type(distribution)
    # Dispatch table replaces the if/elif ladder; unknown distributions fall through.
    pools = {
        Distribution.RANDOM: ParameterPool.RANDOM,
        Distribution.LOGNORMAL: ParameterPool.LOGNORMAL,
        Distribution.EXPONENTIAL: ParameterPool.EXPONENTIAL,
        Distribution.NORMAL: ParameterPool.NORMAL,
    }
    pool = pools.get(distribution)
    if pool is None:
        return
    parameter = pool.value
    stage_set = parameter.stage_set
    # Second-stage width scales with the store size (one model per 10k entries).
    stage_set[1] = int(STORE_NUMBER / 10000)
    print('start Learned NN')
    print('Start Train')
    start_time = time.time()
    trained_index = hybrid_training(threshold, use_threshold, stage_set,
                                    parameter.core_set, parameter.train_step_set,
                                    parameter.batch_size_set, parameter.learning_rate_set,
                                    parameter.keep_ratio_set,
                                    train_set_x, train_set_y, test_set_x, test_set_y)
    learn_time = time.time() - start_time
    print('Build Learned NN time %f ' % learn_time)
    print('end Learned NN')
    return trained_index
class ConvBnReLU3D(nn.Module):
    """3-D convolution followed by batch normalization and an in-place ReLU.

    The convolution carries no bias, since the following BatchNorm3d layer
    would cancel it out anyway.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBnReLU3D, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm3d(out_channels)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply conv -> batch norm -> ReLU to a (N, C, D, H, W) tensor."""
        features = self.conv(x)
        normalized = self.bn(features)
        return self.activation(normalized)
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(q k^T / temperature) @ v.

    Args (to forward): q, k, v of shape (batch, heads, len, d); optional
    boolean `mask` broadcastable to the (batch, heads, len_q, len_k) score
    matrix, where False marks positions to exclude.
    Returns (output, attn): the attended values and the attention weights.
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature  # scaling factor, typically sqrt(d_k)
        self.dropout = nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask=None):
        attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
        if mask is not None:
            # BUG FIX: the original filled masked positions with -0.0, which is
            # a no-op under softmax (e^0 keeps masked positions fully weighted).
            # Fill with a large negative score so softmax drives them to ~0.
            attn = attn.masked_fill(~mask, -1e9)
        attn = self.dropout(F.softmax(attn, dim=-1))
        output = torch.matmul(attn, v)
        return output, attn
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.