def _parse_datetimes(request: WSGIRequest) -> Tuple[datetime, datetime]:
    params = request.GET if request.method == 'GET' else request.POST
    startdate = params.get('startdate')
    starttime = params.get('starttime')
    enddate = params.get('enddate')
    endtime = params.get('endtime')
    if not startdate or not starttime or not enddate or not endtime:
        raise ValueError('All fields are required')
    start = dateparse.parse_datetime(startdate + 'T' + starttime)
    end = dateparse.parse_datetime(enddate + 'T' + endtime)
    if start is None or end is None:
        raise ValueError('invalid start-/endtime given')
    if start >= end:
        raise ValueError('start has to be before end')
    start = timezone.make_aware(start)
    end = timezone.make_aware(end)
    return (start, end)

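# Hedged usage sketch for _parse_datetimes, assuming a configured Django
# project; RequestFactory builds a WSGIRequest carrying the four expected
# query parameters.
from django.test import RequestFactory

factory = RequestFactory()
request = factory.get('/', {
    'startdate': '2024-05-01', 'starttime': '09:00',
    'enddate': '2024-05-01', 'endtime': '17:00',
})
start, end = _parse_datetimes(request)  # timezone-aware datetimes
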
def test_guess_cmake_lexer_from_header():
    headers = [
        'CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR)',
        'cmake_minimum_required(version 3.13) # CMake version check',
        ' CMAKE_MINIMUM_REQUIRED\t( VERSION 2.6 FATAL_ERROR ) ',
    ]
    for header in headers:
        code = '\n'.join([header, 'project(example)', 'set(CMAKE_CXX_STANDARD 14)', 'set(SOURCE_FILES main.cpp)', 'add_executable(example ${SOURCE_FILES})'])
        lexer = guess_lexer(code)
        assert lexer.__class__.__name__ == 'CMakeLexer', 'header must be detected as CMake: %r' % header

def test_oauth_token_auth():
    # URL restored as a placeholder; the original literal was stripped from
    # this dump.
    gl = Gitlab('http://localhost', oauth_token='oauth_token', api_version='4')
    p = PreparedRequest()
    p.prepare(url=gl.url, auth=gl._auth)
    assert gl.private_token is None
    assert gl.oauth_token == 'oauth_token'
    assert gl.job_token is None
    assert isinstance(gl._auth, OAuthTokenAuth)
    assert gl._auth.token == 'oauth_token'
    assert p.headers['Authorization'] == 'Bearer oauth_token'
    assert 'PRIVATE-TOKEN' not in p.headers
    assert 'JOB-TOKEN' not in p.headers

@patch('pypyr.retries.random.uniform', side_effect=[11, 12, 13])
@patch('time.sleep')
def test_retry_all_substitutions_backoff_jitter_list(mock_sleep, mock_random):
    rd = RetryDecorator({'max': '{k3[1][k031]}', 'sleep': '{k2}', 'backoff': '{k6}', 'jrc': '{k4}', 'sleepMax': '{k5}'})
    context = Context({'k1': False, 'k2': [0.3, 0.2, 0.1], 'k3': [0, {'k031': 4, 'k032': False}], 'k4': 2, 'k5': 0.25, 'k6': 'jitter', 'step_count': 0})

    def mock_step(context):
        context['step_count'] += 1
        if context['step_count'] != 4:
            raise ValueError()

    rd.retry_loop(context, mock_step)
    assert context['retryCounter'] == 4
    assert rd.retry_counter == 4
    assert context['step_count'] == 4
    assert mock_sleep.mock_calls == [call(11), call(12), call(13)]
    assert mock_random.mock_calls == [call(0.5, 0.25), call(0.4, 0.2), call(0.2, 0.1)]

def get_acis_prism(latitude, longitude, start, end, map_variables=True, url='https://data.rcc-acis.org/GridData', **kwargs):
    # URL default restored from pvlib's ACIS GridData endpoint; the literal
    # was stripped from this dump.
    elems = [{'name': 'pcpn', 'interval': 'dly', 'units': 'mm'}, {'name': 'maxt', 'interval': 'dly', 'units': 'degreeC'}, {'name': 'mint', 'interval': 'dly', 'units': 'degreeC'}, {'name': 'avgt', 'interval': 'dly', 'units': 'degreeC'}, {'name': 'cdd', 'interval': 'dly', 'units': 'degreeC'}, {'name': 'hdd', 'interval': 'dly', 'units': 'degreeC'}, {'name': 'gdd', 'interval': 'dly', 'units': 'degreeC'}]
    params = {'loc': f'{longitude},{latitude}', 'grid': '21', 'elems': elems, 'meta': ['ll', 'elev']}
    df, meta = _get_acis(start, end, params, map_variables, url, **kwargs)
    df = df.replace(-999, np.nan)
    return df, meta

class NSDR(Unfolding_Loss):
    def __init__(self, window_length, hop_length, **kwargs):
        super().__init__(window_length, hop_length)

    def criterion(self, target_signal_hat, target_signal):
        # Project the estimate onto the target, then score the negative ratio
        # of target energy to distortion energy (negative SDR).
        s_target = ((target_signal_hat * target_signal).sum(-1, keepdims=True) + 1e-08) / ((target_signal ** 2).sum(axis=-1, keepdims=True) + 1e-08) * target_signal
        distortion = target_signal_hat - s_target
        loss = -((s_target ** 2).sum(-1) + 1e-08) / ((distortion ** 2).sum(-1) + 1e-08)
        return loss.mean()

@dataclass
class CollaborationArguments(AveragerArguments, CollaborativeOptimizerArguments, BaseTrainingArguments):
    # @dataclass restored on the assumption it was stripped with the other
    # decorators in this dump; the field() defaults require it.
    statistics_expiration: float = field(default=600, metadata={'help': 'Statistics will be removed if not updated in this many seconds'})
    endpoint: Optional[str] = field(default=None, metadata={'help': "This node's IP for inbound connections, used when running from behind a proxy"})

def test_cache_reportheader_external_abspath(pytester: Pytester, tmp_path_factory: TempPathFactory) -> None:
    external_cache = tmp_path_factory.mktemp('test_cache_reportheader_external_abspath_abs')
    pytester.makepyfile('def test_hello(): pass')
    pytester.makeini('\n [pytest]\n cache_dir = {abscache}\n '.format(abscache=external_cache))
    result = pytester.runpytest('-v')
    result.stdout.fnmatch_lines([f'cachedir: {external_cache}'])

def HANP_Miner(filename, mingap, maxgap, minsup, output_filename='result_file.txt'):
clear_mem()
read_file(filename)
cannum = 0
compnum = 0
global S
global ww
global candidate
begin_time = time_now()
min_freItem()
f_level = 1
gen_candidate(f_level)
while (len(candidate) != 0):
for can in candidate:
cannum += 1
occnum = 0
hupval = 0
p = can
for s in p:
hupval = (hupval + my_dict[s])
compnum += 1
for t in range(NumbS):
if (len(sDB[t].S) > 0):
S = sDB[t].S
deal_range(p, maxgap, mingap)
num = 0
if ((ptn_len + 1) <= len(S)):
nettree = [0 for i in range((ptn_len + 1))]
num = create_nettree(nettree)
del nettree
occnum = (occnum + num)
hupval = ((occnum * hupval) / len(p))
if (hupval >= minsup):
freArr[f_level].append(p)
canArr[f_level].append(p)
unum[ww] = hupval
ww += 1
else:
uphupval = (occnum * 6)
if (uphupval >= minsup):
canArr[f_level].append(p)
f_level += 1
candidate.clear()
gen_candidate(f_level)
end_time = time_now()
output(f_level, begin_time, end_time, compnum, filename, output_filename)

class BoxToMaskTestOptions(BoxToMaskOptions):
    def initialize(self):
        BoxToMaskOptions.initialize(self)
        self.parser.add_argument('--ntest', type=int, default=float('inf'))
        self.parser.add_argument('--results_dir', type=str, default='results/')
        self.parser.add_argument('--aspect_ratio', type=float, default=1.0)
        self.parser.add_argument('--phase', type=str, default='test')
        self.parser.add_argument('--which_epoch', type=str, default='latest')
        self.parser.add_argument('--how_many', type=int, default=50)
        self.parser.add_argument('--num_samples', type=int, default=1)
        self.parser.add_argument('--gendata_dir', type=str, default='gen_ae_512p')
        self.parser.add_argument('--gtdata_dir', type=str, default='gt_512p')
        self.isTrain = False

class TempMsg(object):
    def __init__(self, senders=None, receivers=None, channels=None, message='', header='', type='', lockstring='', hide_from=None):
        self.senders = (senders and make_iter(senders)) or []
        self.receivers = (receivers and make_iter(receivers)) or []
        self.channels = (channels and make_iter(channels)) or []
        self.type = type
        self.header = header
        self.message = message
        self.lock_storage = lockstring
        self.hide_from = (hide_from and make_iter(hide_from)) or []
        self.date_created = timezone.now()

    @property
    def locks(self):
        return LockHandler(self)

    def __str__(self):
        senders = ','.join(obj.key for obj in self.senders)
        receivers = ','.join([('[%s]' % obj.key) for obj in self.channels] + [obj.key for obj in self.receivers])
        return '%s->%s: %s' % (senders, receivers, crop(self.message, width=40))

    def remove_sender(self, sender):
        for o in make_iter(sender):
            try:
                self.senders.remove(o)
            except ValueError:
                pass

    def remove_receiver(self, receiver):
        for o in make_iter(receiver):
            try:
                # fixed: originally removed from self.senders (copy-paste bug)
                self.receivers.remove(o)
            except ValueError:
                pass

    def access(self, accessing_obj, access_type='read', default=False):
        return self.locks.check(accessing_obj, access_type=access_type, default=default)

def blas_header_text():
blas_code = ''
if (not config.blas__ldflags):
current_filedir = dirname(__file__)
blas_common_filepath = os.path.join(current_filedir, 'c_code', 'alt_blas_common.h')
blas_template_filepath = os.path.join(current_filedir, 'c_code', 'alt_blas_template.c')
common_code = ''
sblas_code = ''
dblas_code = ''
with open(blas_common_filepath) as code:
common_code = code.read()
with open(blas_template_filepath) as code:
template_code = code.read()
sblas_code = (template_code % {'float_type': 'float', 'float_size': 4, 'npy_float': 'NPY_FLOAT32', 'precision': 's'})
dblas_code = (template_code % {'float_type': 'double', 'float_size': 8, 'npy_float': 'NPY_FLOAT64', 'precision': 'd'})
if ((not common_code) or (not template_code)):
raise OSError('Unable to load NumPy implementation of BLAS functions from C source files.')
blas_code += common_code
blas_code += sblas_code
blas_code += dblas_code
header = '\n extern "C"\n {\n\n void xerbla_(char*, void *);\n\n //\n /* Level 1 */\n //\n\n /* Single Precision */\n\n void srot_(const int*, float *, const int*, float *, const int*, const float *, const float *);\n void srotg_(float *,float *,float *,float *);\n void srotm_( const int*, float *, const int*, float *, const int*, const float *);\n void srotmg_(float *,float *,float *,const float *, float *);\n void sswap_( const int*, float *, const int*, float *, const int*);\n void scopy_( const int*, const float *, const int*, float *, const int*);\n void saxpy_( const int*, const float *, const float *, const int*, float *, const int*);\n float sdot_(const int*, const float *, const int*, const float *, const int*);\n void sdot_sub_(const int*, const float *, const int*, const float *, const int*, float *);\n void sdsdot_sub_( const int*, const float *, const float *, const int*, const float *, const int*, float *);\n void sscal_( const int*, const float *, float *, const int*);\n void snrm2_sub_( const int*, const float *, const int*, float *);\n void sasum_sub_( const int*, const float *, const int*, float *);\n void isamax_sub_( const int*, const float * , const int*, const int*);\n\n /* Double Precision */\n\n void drot_(const int*, double *, const int*, double *, const int*, const double *, const double *);\n void drotg_(double *,double *,double *,double *);\n void drotm_( const int*, double *, const int*, double *, const int*, const double *);\n void drotmg_(double *,double *,double *,const double *, double *);\n void dswap_( const int*, double *, const int*, double *, const int*);\n void dcopy_( const int*, const double *, const int*, double *, const int*);\n void daxpy_( const int*, const double *, const double *, const int*, double *, const int*);\n void dswap_( const int*, double *, const int*, double *, const int*);\n double ddot_(const int*, const double *, const int*, const double *, const int*);\n void dsdot_sub_(const int*, const float *, const int*, const float *, const int*, double *);\n void ddot_sub_( const int*, const double *, const int*, const double *, const int*, double *);\n void dscal_( const int*, const double *, double *, const int*);\n void dnrm2_sub_( const int*, const double *, const int*, double *);\n void dasum_sub_( const int*, const double *, const int*, double *);\n void idamax_sub_( const int*, const double * , const int*, const int*);\n\n /* Single Complex Precision */\n\n void cswap_( const int*, void *, const int*, void *, const int*);\n void ccopy_( const int*, const void *, const int*, void *, const int*);\n void caxpy_( const int*, const void *, const void *, const int*, void *, const int*);\n void cswap_( const int*, void *, const int*, void *, const int*);\n void cdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\n void cdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\n void cscal_( const int*, const void *, void *, const int*);\n void icamax_sub_( const int*, const void *, const int*, const int*);\n void csscal_( const int*, const float *, void *, const int*);\n void scnrm2_sub_( const int*, const void *, const int*, float *);\n void scasum_sub_( const int*, const void *, const int*, float *);\n\n /* Double Complex Precision */\n\n void zswap_( const int*, void *, const int*, void *, const int*);\n void zcopy_( const int*, const void *, const int*, void *, const int*);\n void zaxpy_( const int*, const void *, const void *, const int*, void *, const int*);\n void zswap_( 
const int*, void *, const int*, void *, const int*);\n void zdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\n void zdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\n void zdscal_( const int*, const double *, void *, const int*);\n void zscal_( const int*, const void *, void *, const int*);\n void dznrm2_sub_( const int*, const void *, const int*, double *);\n void dzasum_sub_( const int*, const void *, const int*, double *);\n void izamax_sub_( const int*, const void *, const int*, const int*);\n\n //\n /* Level 2 */\n //\n\n /* Single Precision */\n\n void sgemv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void sgbmv_(char*, const int*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void ssymv_(char*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void ssbmv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void sspmv_(char*, const int*, const float *, const float *, const float *, const int*, const float *, float *, const int*);\n void strmv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\n void stbmv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\n void strsv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\n void stbsv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\n void stpmv_( char*, char*, char*, const int*, const float *, float *, const int*);\n void stpsv_( char*, char*, char*, const int*, const float *, float *, const int*);\n void sger_( const int*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\n void ssyr_(char*, const int*, const float *, const float *, const int*, float *, const int*);\n void sspr_(char*, const int*, const float *, const float *, const int*, float *);\n void sspr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *);\n void ssyr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\n\n /* Double Precision */\n\n void dgemv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void dgbmv_(char*, const int*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void dsymv_(char*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void dsbmv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void dspmv_(char*, const int*, const double *, const double *, const double *, const int*, const double *, double *, const int*);\n void dtrmv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);\n void dtbmv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\n void dtrsv_( char*, char*, char*, const int*, 
const double *, const int*, double *, const int*);\n void dtbsv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\n void dtpmv_( char*, char*, char*, const int*, const double *, double *, const int*);\n void dtpsv_( char*, char*, char*, const int*, const double *, double *, const int*);\n void dger_( const int*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\n void dsyr_(char*, const int*, const double *, const double *, const int*, double *, const int*);\n void dspr_(char*, const int*, const double *, const double *, const int*, double *);\n void dspr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *);\n void dsyr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\n\n /* Single Complex Precision */\n\n void cgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void cgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void chemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void chbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void chpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);\n void ctrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\n void ctbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\n void ctpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\n void ctrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\n void ctbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\n void ctpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\n void cgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\n void cgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\n void cher_(char*, const int*, const float *, const void *, const int*, void *, const int*);\n void cher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\n void chpr_(char*, const int*, const float *, const void *, const int*, void *);\n void chpr2_(char*, const int*, const float *, const void *, const int*, const void *, const int*, void *);\n\n /* Double Complex Precision */\n\n void zgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void zgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void zhemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void zhbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\n void zhpmv_(char*, const int*, const 
void *, const void *, const void *, const int*, const void *, void *, const int*);\n void ztrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\n void ztbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\n void ztpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\n void ztrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\n void ztbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\n void ztpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\n void zgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\n void zgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\n void zher_(char*, const int*, const double *, const void *, const int*, void *, const int*);\n void zher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\n void zhpr_(char*, const int*, const double *, const void *, const int*, void *);\n void zhpr2_(char*, const int*, const double *, const void *, const int*, const void *, const int*, void *);\n\n //\n /* Level 3 */\n //\n\n /* Single Precision */\n\n void sgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void ssymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void ssyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\n void ssyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void strmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\n void strsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\n\n /* Double Precision */\n\n void dgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void dsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void dsyrk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\n void dsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void dtrmm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\n void dtrsm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\n\n /* Single Complex Precision */\n\n void cgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void csymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void 
chemm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void csyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\n void cherk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\n void csyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void cher2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\n void ctrmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\n void ctrsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\n\n /* Double Complex Precision */\n\n void zgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void zsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void zhemm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void zsyrk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\n void zherk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\n void zsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void zher2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\n void ztrmm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\n void ztrsm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\n\n }\n '
if detect_macos_sdot_bug():
if detect_macos_sdot_bug.fix_works:
header += textwrap.dedent(' extern "C" float cblas_sdot(int, float*, int, float*, int);\n static float sdot_(int* Nx, float* x, int* Sx, float* y, int* Sy)\n {\n return cblas_sdot(*Nx, x, *Sx, y, *Sy);\n }\n ')
else:
header += textwrap.dedent(' static float sdot_(int* Nx, float* x, int* Sx, float* y, int* Sy)\n {\n fprintf(stderr,\n "FATAL: The implementation of BLAS SDOT "\n "routine in your system has a bug that "\n "makes it return wrong results.\\n"\n "You can work around this bug by using a "\n "different BLAS library, or disabling BLAS\\n");\n assert(0);\n }\n ')
return (header + blas_code)

def generate_ann(root_path, split, image_infos, preserve_vertical, format):
print('Cropping images...')
dst_image_root = osp.join(root_path, 'crops', split)
ignore_image_root = osp.join(root_path, 'ignores', split)
if (split == 'training'):
dst_label_file = osp.join(root_path, f'train_label.{format}')
elif (split == 'val'):
dst_label_file = osp.join(root_path, f'val_label.{format}')
mmcv.mkdir_or_exist(dst_image_root)
mmcv.mkdir_or_exist(ignore_image_root)
lines = []
for image_info in image_infos:
index = 1
src_img_path = osp.join(root_path, 'imgs', image_info['file_name'])
image = mmcv.imread(src_img_path)
src_img_root = image_info['file_name'].split('.')[0]
for anno in image_info['anno_info']:
word = anno['word']
dst_img = crop_img(image, anno['bbox'], 0, 0)
(h, w, _) = dst_img.shape
dst_img_name = f'{src_img_root}_{index}.png'
index += 1
if (min(dst_img.shape) == 0):
continue
if ((not preserve_vertical) and ((h / w) > 2) and (split == 'training')):
dst_img_path = osp.join(ignore_image_root, dst_img_name)
else:
dst_img_path = osp.join(dst_image_root, dst_img_name)
mmcv.imwrite(dst_img, dst_img_path)
if (format == 'txt'):
lines.append(f'{osp.basename(dst_image_root)}/{dst_img_name} {word}')
elif (format == 'jsonl'):
lines.append(json.dumps({'filename': f'{osp.basename(dst_image_root)}/{dst_img_name}', 'text': word}, ensure_ascii=False))
else:
raise NotImplementedError
list_to_file(dst_label_file, lines)

class DiffDB(ProductionCommand):
    keyword = 'diffdb'

    def assemble(self):
        super(DiffDB, self).assemble()
        self.parser.add_argument('-s', '--output_sql', action='store_true', dest='output_sql', help='show differences as sql')

    def execute(self, args):
        super().execute(args)
        with self.context, self.sys_control.auto_connected():
            changes = self.sys_control.diff_db(output_sql=args.output_sql)
            if not changes:
                print('No difference detected')

def write_pkg_info(self, base_dir):
    temp = ''
    final = os.path.join(base_dir, 'PKG-INFO')
    try:
        # Write to a temp file first, then atomically replace the target.
        with NamedTemporaryFile('w', encoding='utf-8', dir=base_dir, delete=False) as f:
            temp = f.name
            self.write_pkg_file(f)
        permissions = stat.S_IMODE(os.lstat(temp).st_mode)
        os.chmod(temp, permissions | stat.S_IRGRP | stat.S_IROTH)
        os.replace(temp, final)
    finally:
        if temp and os.path.exists(temp):
            os.remove(temp)

def convertLDAmallet(dataDir='data/topic_models/SemevalA/', filename='state.mallet.gz'):
def extract_params(statefile):
with gzip.open(statefile, 'r') as state:
params = [x.decode('utf8').strip() for x in state.readlines()[1:3]]
return (list(params[0].split(':')[1].split(' ')), float(params[1].split(':')[1]))
def state_to_df(statefile):
return pd.read_csv(statefile, compression='gzip', sep=' ', skiprows=[1, 2])
params = extract_params(os.path.join(dataDir, filename))
alpha = [float(x) for x in params[0][1:]]
beta = params[1]
df = state_to_df(os.path.join(dataDir, filename))
df['type'] = df.type.astype(str)
docs = df.groupby('#doc')['type'].count().reset_index(name='doc_length')
vocab = df['type'].value_counts().reset_index()
vocab.columns = ['type', 'term_freq']
vocab = vocab.sort_values(by='type', ascending=True)
import sklearn.preprocessing
def pivot_and_smooth(df, smooth_value, rows_variable, cols_variable, values_variable):
matrix = df.pivot(index=rows_variable, columns=cols_variable, values=values_variable).fillna(value=0)
matrix = (matrix.values + smooth_value)
normed = sklearn.preprocessing.normalize(matrix, norm='l1', axis=1)
return pd.DataFrame(normed)
phi_df = df.groupby(['topic', 'type'])['type'].count().reset_index(name='token_count')
phi_df = phi_df.sort_values(by='type', ascending=True)
phi = pivot_and_smooth(phi_df, beta, 'topic', 'type', 'token_count')
theta_df = df.groupby(['#doc', 'topic'])['topic'].count().reset_index(name='topic_count')
theta = pivot_and_smooth(theta_df, alpha, '#doc', 'topic', 'topic_count')
data = {'topic_term_dists': phi, 'doc_topic_dists': theta, 'doc_lengths': list(docs['doc_length']), 'vocab': list(vocab['type']), 'term_frequency': list(vocab['term_freq'])}
return data

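# Hedged follow-up: the returned dict's keys match pyLDAvis' prepare() keyword
# arguments, so (assuming pyLDAvis is installed) the topic model can be
# visualized directly.
import pyLDAvis

data = convertLDAmallet()  # default dataDir/filename from above
vis = pyLDAvis.prepare(**data)
pyLDAvis.save_html(vis, 'lda_vis.html')
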
def run_louvain(gfile, gamma, nruns, weight=None, node_subset=None, attribute=None, output_dictionary=False):
np.random.seed()
g = ig.Graph.Read_GraphMLz(gfile)
if (node_subset != None):
if (attribute == None):
gdel = node_subset
else:
gdel = [i for (i, val) in enumerate(g.vs[attribute]) if (val not in node_subset)]
g.delete_vertices(gdel)
if (weight is True):
weight = 'weight'
outparts = []
for i in range(nruns):
rand_perm = list(np.random.permutation(g.vcount()))
rperm = rev_perm(rand_perm)
gr = g.permute_vertices(rand_perm)
rp = louvain.find_partition(gr, louvain.RBConfigurationVertexPartition, weights=weight, resolution_parameter=gamma)
A = get_sum_internal_edges(rp, weight)
P = get_expected_edges(rp, weight, directed=g.is_directed())
outparts.append({'partition': permute_vector(rperm, rp.membership), 'resolution': gamma, 'orig_mod': rp.quality(), 'int_edges': A, 'exp_edges': P})
if (not output_dictionary):
return PartitionEnsemble(graph=g, listofparts=outparts)
else:
return outparts

class MinLeverage(AccountControl):
    @expect_types(__funcname='MinLeverage', min_leverage=(int, float), deadline=datetime)
    @expect_bounded(__funcname='MinLeverage', min_leverage=(0, None))
    def __init__(self, min_leverage, deadline):
        super(MinLeverage, self).__init__(min_leverage=min_leverage, deadline=deadline)
        self.min_leverage = min_leverage
        self.deadline = deadline

    def validate(self, _portfolio, account, algo_datetime, _algo_current_data):
        if algo_datetime > self.deadline and account.leverage < self.min_leverage:
            self.fail()

class HeadphoneMonitorPlugin(EventPlugin):
PLUGIN_ID = 'HeadphoneMonitor'
PLUGIN_NAME = _('Pause on Headphone Unplug')
PLUGIN_DESC = _('Pauses in case headphones get unplugged and unpauses in case they get plugged in again.')
PLUGIN_ICON = Icons.MEDIA_PLAYBACK_PAUSE
def enabled(self):
self._was_paused = False
self._do_act = False
self._mon = HeadphoneMonitor()
self._mon.connect('action', self._changed)
self._mon.start()
def _changed(self, mon, action):
if (action == HeadphoneAction.DISCONNECTED):
print_d('Headphones disconnected')
if self._do_act:
do_act = self._do_act
self._was_paused = app.player.paused
app.player.paused = True
self._do_act = do_act
elif (action == HeadphoneAction.CONNECTED):
print_d('Headphones connected')
if self._do_act:
do_act = self._do_act
app.player.paused = self._was_paused
self._do_act = do_act
def disabled(self):
self._mon.stop()
del self._mon
def plugin_on_paused(self):
self._do_act = False
def plugin_on_unpaused(self):
self._do_act = self._mon.is_connected()

def get_context_templates(model, tok):
global CONTEXT_TEMPLATES_CACHE
if (CONTEXT_TEMPLATES_CACHE is None):
CONTEXT_TEMPLATES_CACHE = ([['{}']] + [[(f.replace('{', ' ').replace('}', ' ') + '. {}') for f in generate_fast(model, tok, ['The', 'Therefore', 'Because', 'I', 'You'], n_gen_per_prompt=(n_gen // 5), max_out_len=length)] for (length, n_gen) in [(10, 5)]])
print(f'Cached context templates {CONTEXT_TEMPLATES_CACHE}')
return CONTEXT_TEMPLATES_CACHE

class OutputLayerFunction(Function):
    @staticmethod
    def forward(ctx, dimension, metadata, input_features):
        output_features = input_features.new()
        ctx.metadata_ = metadata
        ctx.dimension = dimension
        sparseconvnet.SCN.OutputLayer_updateOutput(metadata, input_features.contiguous(), output_features)
        return output_features

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = grad_output.new()
        grad_output = grad_output.contiguous()
        sparseconvnet.SCN.OutputLayer_updateGradInput(ctx.metadata_, grad_input, grad_output.contiguous())
        return (None, None, grad_input)

class FixedOptionPolicy(object):
def __init__(self, base_policy, num_skills, z):
self._z = z
self._base_policy = base_policy
self._num_skills = num_skills
def reset(self):
pass
def get_action(self, obs):
aug_obs = concat_obs_z(obs, self._z, self._num_skills)
return self._base_policy.get_action(aug_obs)
def get_distribution_for(self, obs_t, reuse=False):
shape = [tf.shape(obs_t)[0]]
z = tf.tile([self._z], shape)
z_one_hot = tf.one_hot(z, self._num_skills, dtype=obs_t.dtype)
aug_obs_t = tf.concat([obs_t, z_one_hot], axis=1)
return self._base_policy.get_distribution_for(aug_obs_t, reuse=reuse)

def _set_thing_style(caller, raw_string, **kwargs):
room = caller.location
options = caller.attributes.get('options', category=room.tagcategory, default={})
options['things_style'] = kwargs.get('value', 2)
caller.attributes.add('options', options, category=room.tagcategory)
return (None, kwargs)

class TestCorrelation():
def _test_correlation(self, dtype=torch.float):
layer = Correlation(max_displacement=0)
input1 = torch.tensor(_input1, dtype=dtype).cuda()
input2 = torch.tensor(_input2, dtype=dtype).cuda()
input1.requires_grad = True
input2.requires_grad = True
out = layer(input1, input2)
out.backward(torch.ones_like(out))
gt_out = torch.tensor(_gt_out, dtype=dtype)
assert_equal_tensor(out.cpu(), gt_out)
assert_equal_tensor(input1.grad.detach().cpu(), input2.cpu())
assert_equal_tensor(input2.grad.detach().cpu(), input1.cpu())
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires CUDA support')
def test_correlation(self):
self._test_correlation(torch.float)
self._test_correlation(torch.double)
self._test_correlation(torch.half)

class _SofMarker(_Marker):
    def __init__(self, marker_code, offset, segment_length, px_width, px_height):
        super(_SofMarker, self).__init__(marker_code, offset, segment_length)
        self._px_width = px_width
        self._px_height = px_height

    @classmethod
    def from_stream(cls, stream, marker_code, offset):
        segment_length = stream.read_short(offset)
        px_height = stream.read_short(offset, 3)
        px_width = stream.read_short(offset, 5)
        return cls(marker_code, offset, segment_length, px_width, px_height)

    @property
    def px_height(self):
        return self._px_height

    @property
    def px_width(self):
        return self._px_width

def get_image_processor_config(pretrained_model_name_or_path: Union[(str, os.PathLike)], cache_dir: Optional[Union[(str, os.PathLike)]]=None, force_download: bool=False, resume_download: bool=False, proxies: Optional[Dict[(str, str)]]=None, use_auth_token: Optional[Union[(bool, str)]]=None, revision: Optional[str]=None, local_files_only: bool=False, **kwargs):
resolved_config_file = get_file_from_repo(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
if (resolved_config_file is None):
logger.info('Could not locate the image processor configuration file, will try to use the model config instead.')
return {}
with open(resolved_config_file, encoding='utf-8') as reader:
return json.load(reader)

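# Hedged usage sketch (fetches from the Hugging Face Hub; the model id is an
# illustrative example, and an empty dict is returned when the checkpoint has
# no image-processor config).
config = get_image_processor_config('google/vit-base-patch16-224')
print(config.get('image_mean'), config.get('image_std'))
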
def adaptive_isotropic_gaussian_kernel(xs, ys, h_min=0.001):
(Kx, D) = xs.get_shape().as_list()[(- 2):]
(Ky, D2) = ys.get_shape().as_list()[(- 2):]
assert (D == D2)
leading_shape = tf.shape(xs)[:(- 2)]
diff = (tf.expand_dims(xs, (- 2)) - tf.expand_dims(ys, (- 3)))
if (LooseVersion(tf.__version__) <= LooseVersion('1.5.0')):
dist_sq = tf.reduce_sum((diff ** 2), axis=(- 1), keep_dims=False)
else:
dist_sq = tf.reduce_sum((diff ** 2), axis=(- 1), keepdims=False)
input_shape = tf.concat((leading_shape, [(Kx * Ky)]), axis=0)
(values, _) = tf.nn.top_k(input=tf.reshape(dist_sq, input_shape), k=(((Kx * Ky) // 2) + 1), sorted=True)
medians_sq = values[(..., (- 1))]
h = (medians_sq / np.log(Kx))
h = tf.maximum(h, h_min)
h = tf.stop_gradient(h)
h_expanded_twice = tf.expand_dims(tf.expand_dims(h, (- 1)), (- 1))
kappa = tf.exp(((- dist_sq) / h_expanded_twice))
h_expanded_thrice = tf.expand_dims(h_expanded_twice, (- 1))
kappa_expanded = tf.expand_dims(kappa, (- 1))
kappa_grad = ((((- 2) * diff) / h_expanded_thrice) * kappa_expanded)
return {'output': kappa, 'gradient': kappa_grad}

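# Toy usage sketch for the kernel above (shapes only; values are random).
# The bandwidth is set per batch by the median heuristic h = med^2 / log(Kx).
import tensorflow as tf

xs = tf.random.normal([16, 3])  # Kx = 16 particles in 3-D
ys = tf.random.normal([16, 3])  # Ky = 16 particles in 3-D
k = adaptive_isotropic_gaussian_kernel(xs, ys)
# k['output'] has shape [16, 16]; k['gradient'] has shape [16, 16, 3]
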
def convert():
source = (BASE / 'scratch_projects')
target = (BASE / 'correct_results')
for file in source.iterdir():
if file.is_dir():
for f in file.iterdir():
if (f.is_file() and (f.suffix == '.sb3')):
path = f.as_posix()
dest = (target / file.stem).as_posix()
converter = Converter(path, dest)
converter.convert()
else:
print(f'Skipping {file}')
return True

class SdistBuilderConfig(BuilderConfig):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.__core_metadata_constructor: (Callable[(..., str)] | None) = None
self.__strict_naming: (bool | None) = None
self.__support_legacy: (bool | None) = None
@property
def core_metadata_constructor(self) -> Callable[..., str]:
if (self.__core_metadata_constructor is None):
core_metadata_version = self.target_config.get('core-metadata-version', DEFAULT_METADATA_VERSION)
if (not isinstance(core_metadata_version, str)):
message = f'Field `tool.hatch.build.targets.{self.plugin_name}.core-metadata-version` must be a string'
raise TypeError(message)
constructors = get_core_metadata_constructors()
if (core_metadata_version not in constructors):
message = f"Unknown metadata version `{core_metadata_version}` for field `tool.hatch.build.targets.{self.plugin_name}.core-metadata-version`. Available: {', '.join(sorted(constructors))}"
raise ValueError(message)
self.__core_metadata_constructor = constructors[core_metadata_version]
return self.__core_metadata_constructor
@property
def strict_naming(self) -> bool:
if (self.__strict_naming is None):
if ('strict-naming' in self.target_config):
strict_naming = self.target_config['strict-naming']
if (not isinstance(strict_naming, bool)):
message = f'Field `tool.hatch.build.targets.{self.plugin_name}.strict-naming` must be a boolean'
raise TypeError(message)
else:
strict_naming = self.build_config.get('strict-naming', True)
if (not isinstance(strict_naming, bool)):
message = 'Field `tool.hatch.build.strict-naming` must be a boolean'
raise TypeError(message)
self.__strict_naming = strict_naming
return self.__strict_naming
@property
def support_legacy(self) -> bool:
if (self.__support_legacy is None):
self.__support_legacy = bool(self.target_config.get('support-legacy', False))
return self.__support_legacy

class FP16_Optimizer(object):
def __init__(self, init_optimizer, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True):
if (not torch.cuda.is_available()):
raise SystemError('Cannot use fp16 without CUDA.')
self.verbose = verbose
self.optimizer = init_optimizer
self.fp16_groups = []
self.fp32_from_fp16_groups = []
self.fp32_from_fp32_groups = []
for (i, param_group) in enumerate(self.optimizer.param_groups):
self.maybe_print('FP16_Optimizer processing param group {}:'.format(i))
fp16_params_this_group = []
fp32_params_this_group = []
fp32_from_fp16_params_this_group = []
for (i, param) in enumerate(param_group['params']):
if param.requires_grad:
if (param.type() == 'torch.cuda.HalfTensor'):
self.maybe_print('FP16_Optimizer received torch.cuda.HalfTensor with {}'.format(param.size()))
fp16_params_this_group.append(param)
master_param = param.detach().clone().float()
master_param.requires_grad = True
param_group['params'][i] = master_param
fp32_from_fp16_params_this_group.append(master_param)
if (param in self.optimizer.state):
self.optimizer.state[master_param] = self.optimizer.state.pop(param)
elif (param.type() == 'torch.cuda.FloatTensor'):
self.maybe_print('FP16_Optimizer received torch.cuda.FloatTensor with {}'.format(param.size()))
fp32_params_this_group.append(param)
param_group['params'][i] = param
else:
raise TypeError('Wrapped parameters must be either torch.cuda.FloatTensor or torch.cuda.HalfTensor. Received {}'.format(param.type()))
self.fp16_groups.append(fp16_params_this_group)
self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group)
self.fp32_from_fp32_groups.append(fp32_params_this_group)
self.optimizer.load_state_dict(self.optimizer.state_dict())
if dynamic_loss_scale:
self.dynamic_loss_scale = True
if (dynamic_loss_args is not None):
self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)
else:
self.loss_scaler = DynamicLossScaler()
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
self.clip_grad_norm = clip_grad_norm
def maybe_print(self, msg):
if self.verbose:
print(msg)
def __getstate__(self):
raise RuntimeError('FP16_Optimizer should be serialized using state_dict().')
def __setstate__(self, state):
raise RuntimeError('FP16_Optimizer should be deserialized using load_state_dict().')
def zero_grad(self, set_grads_to_None=False):
for group in self.optimizer.param_groups:
for p in group['params']:
if set_grads_to_None:
p.grad = None
elif (p.grad is not None):
p.grad.detach_()
p.grad.zero_()
for fp16_group in self.fp16_groups:
for param in fp16_group:
if set_grads_to_None:
param.grad = None
elif (param.grad is not None):
param.grad.detach_()
param.grad.zero_()
def _check_overflow(self):
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
for group in self.fp32_from_fp32_groups:
for param in group:
params.append(param)
self.overflow = self.loss_scaler.has_overflow(params)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
def _master_params_to_model_params(self):
for (fp16_group, fp32_from_fp16_group) in zip(self.fp16_groups, self.fp32_from_fp16_groups):
master_params_to_model_params(fp16_group, fp32_from_fp16_group)
def _model_grads_to_master_grads(self):
for (fp16_group, fp32_from_fp16_group) in zip(self.fp16_groups, self.fp32_from_fp16_groups):
model_grads_to_master_grads(fp16_group, fp32_from_fp16_group)
def _downscale_master(self):
if (self.loss_scale != 1.0):
for group in self.optimizer.param_groups:
for param in group['params']:
if (param.grad is not None):
param.grad.data.mul_((1.0 / self.loss_scale))
def clip_master_grads(self, max_norm, norm_type=2):
if (not self.overflow):
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return self.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return (- 1)
def state_dict(self):
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
return state_dict
def load_state_dict(self, state_dict):
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
for (current_group, saved_group) in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
for (current, saved) in zip(current_group, saved_group):
current.data.copy_(saved.data)
def step(self, closure=None):
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
print('OVERFLOW! Skipping step. Attempted loss scale: {}, reducing to {}'.format(scale, self.loss_scale))
return
if (closure is not None):
retval = self._step_with_closure(closure)
else:
retval = self.optimizer.step()
self._master_params_to_model_params()
return retval
def _step_with_closure(self, closure):
def wrapped_closure():
if self.first_closure_call_this_step:
self.first_closure_call_this_step = False
else:
self._master_params_to_model_params()
temp_loss = closure()
while self.overflow:
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
print('OVERFLOW within closure! Skipping step. Attempted loss scale: {}, reducing to {}'.format(scale, self.loss_scale))
temp_loss = closure()
return temp_loss
retval = self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
return retval
def backward(self, loss, update_master_grads=True, retain_graph=False):
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
if update_master_grads:
self.update_master_grads()
def update_master_grads(self):
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow:
return
self._model_grads_to_master_grads()
self._downscale_master()
def inspect_master_grad_data(self):
if self.overflow:
print('Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. Gradients are currently invalid (may be inf, nan, or stale). Returning None.')
return None
else:
master_grads_data = []
for param_group in self.optimizer.param_groups:
master_grads_this_group = []
for param in param_group['params']:
if (param.grad is not None):
master_grads_this_group.append(param.grad.data)
else:
master_grads_this_group.append(None)
master_grads_data.append(master_grads_this_group)
return master_grads_data
def _get_loss_scale(self):
return self.loss_scaler.loss_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)

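# Hedged usage sketch of the wrapper above; `model`, `criterion`, and `loader`
# are assumed to exist, with the model already converted to half precision.
optimizer = FP16_Optimizer(torch.optim.SGD(model.parameters(), lr=0.001),
                           dynamic_loss_scale=True)
for inputs, targets in loader:
    optimizer.zero_grad()
    loss = criterion(model(inputs), targets)
    optimizer.backward(loss)  # scales the loss and populates fp32 master grads
    optimizer.step()          # unscales, steps, and copies back to fp16 params
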
def main():
parser = HfArgumentParser((DataTrainingArguments, TeacherModelArguments, StudentModelArguments, DistillTrainingArguments), description=DESCRIPTION)
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(data_args, teacher_args, student_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(data_args, teacher_args, student_args, training_args) = parser.parse_args_into_dataclasses()
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif (last_checkpoint is not None):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
logger.warning(f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
if is_main_process(training_args.local_rank):
utils.logging.set_verbosity_info()
utils.logging.enable_default_handler()
utils.logging.enable_explicit_format()
if (training_args.local_rank != (- 1)):
raise ValueError('Distributed training is not currently supported.')
if (training_args.tpu_num_cores is not None):
raise ValueError('TPU acceleration is not currently supported.')
logger.info(f'Training/evaluation parameters {training_args}')
set_seed(training_args.seed)
examples = read_lines(data_args.data_file)
class_names = read_lines(data_args.class_names_file)
logger.info('Generating predictions from zero-shot teacher model')
teacher_soft_preds = get_teacher_predictions(teacher_args.teacher_name_or_path, examples, class_names, teacher_args.hypothesis_template, teacher_args.teacher_batch_size, teacher_args.temperature, teacher_args.multi_label, data_args.use_fast_tokenizer, training_args.no_cuda, training_args.fp16)
dataset = Dataset.from_dict({'text': examples, 'labels': teacher_soft_preds})
logger.info('Initializing student model')
model = AutoModelForSequenceClassification.from_pretrained(student_args.student_name_or_path, num_labels=len(class_names))
tokenizer = AutoTokenizer.from_pretrained(student_args.student_name_or_path, use_fast=data_args.use_fast_tokenizer)
model.config.id2label = dict(enumerate(class_names))
model.config.label2id = {label: i for (i, label) in enumerate(class_names)}
dataset = dataset.map(tokenizer, input_columns='text')
dataset.set_format('torch')
def compute_metrics(p, return_outputs=False):
preds = p.predictions.argmax((- 1))
proxy_labels = p.label_ids.argmax((- 1))
return {'agreement': (preds == proxy_labels).mean().item()}
trainer = DistillationTrainer(model=model, tokenizer=tokenizer, args=training_args, train_dataset=dataset, compute_metrics=compute_metrics)
if training_args.do_train:
logger.info('Training student model on teacher predictions')
trainer.train()
if training_args.do_eval:
agreement = trainer.evaluate(eval_dataset=dataset)['eval_agreement']
logger.info(f'Agreement of student and teacher predictions: {(agreement * 100):0.2f}%')
trainer.save_model()

class TestExportModels(unittest.TestCase):
def test_export_multihead_attention(self):
module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
def test_incremental_state_multihead_attention(self):
module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module1 = torch.jit.script(module1)
module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module2 = torch.jit.script(module2)
state = {}
state = module1.set_incremental_state(state, 'key', {'a': torch.tensor([1])})
state = module2.set_incremental_state(state, 'key', {'a': torch.tensor([2])})
v1 = module1.get_incremental_state(state, 'key')['a']
v2 = module2.get_incremental_state(state, 'key')['a']
self.assertEqual(v1, 1)
self.assertEqual(v2, 2)
def test_positional_embedding(self):
module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding(embedding_dim=8, padding_idx=1)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
@unittest.skipIf(torch.__version__ < '1.6.0', 'Targeting OSS scriptability for the 1.6 release')
def test_export_transformer(self):
(task, parser) = get_dummy_task_and_parser()
TransformerModel.add_args(parser)
args = parser.parse_args([])
model = TransformerModel.build_model(args, task)
scripted = torch.jit.script(model)
_test_save_and_load(scripted)
@unittest.skipIf(torch.__version__ < '1.6.0', 'Targeting OSS scriptability for the 1.6 release')
def test_export_transformer_no_token_pos_emb(self):
(task, parser) = get_dummy_task_and_parser()
TransformerModel.add_args(parser)
args = parser.parse_args([])
args.no_token_positional_embeddings = True
model = TransformerModel.build_model(args, task)
scripted = torch.jit.script(model)
_test_save_and_load(scripted)

class Command(BaseCommand):
help = 'Create dataset'
def add_arguments(self, parser):
parser.add_argument('cnt_parts', type=int)
parser.add_argument('percent', type=int)
parser.add_argument('items_folder', type=str)
parser.add_argument('add_folder', type=str)
def handle(self, *args, **options):
items_data = []
items_data.extend(load_data_from_folder(options['add_folder']))
items_data.extend(load_data_from_folder(options['items_folder']))
random.shuffle(items_data)
items_cnt = len(items_data)
train_size = math.ceil((items_cnt * (options['percent'] / 100)))
test_size = (items_cnt - train_size)
train_part_size = math.ceil((train_size / options['cnt_parts']))
test_part_size = math.ceil((test_size / options['cnt_parts']))
train_set = items_data[:train_size]
test_set = items_data[train_size:]
for part in range(options['cnt_parts']):
train_name = f'train_{train_part_size}_{part}.json'
test_name = f'test_{test_part_size}_{part}.json'
save_dataset(train_set[(part * train_part_size):((part + 1) * train_part_size)], train_name)
save_dataset(test_set[(part * test_part_size):((part + 1) * test_part_size)], test_name)

class TTRBase(TTR):
name = 'TTRBase'
def __init__(self, source, alpha: float=0.15, beta: float=0.8, epsilon: float=1e-05):
super().__init__(source, alpha, beta, epsilon)
self.p = dict()
self.r = {source: 1.0}
self._vis = set()
def push(self, node, edges: list, **kwargs):
if (self.r.get(node) is None):
self.r[node] = 0
r = self.r[node]
self.r[node] = 0
self._self_push(node, r)
self._forward_push(node, edges, r)
self._backward_push(node, edges, r)
if (node not in self._vis):
self._vis.add(node)
(yield from edges)
def _self_push(self, node, r):
self.p[node] = (self.p.get(node, 0) + (self.alpha * r))
def _forward_push(self, node, edges: list, r):
out_edges = list()
for e in edges:
if (e['from'] == node):
out_edges.append(e)
out_edges_cnt = len(out_edges)
for e in out_edges:
inc = (((((1 - self.alpha) * self.beta) * r) / out_edges_cnt) if (out_edges_cnt > 0) else 0)
self.r[e['to']] = (self.r.get(e['to'], 0) + inc)
def _backward_push(self, node, edges: list, r):
in_edges = list()
for e in edges:
if (e['to'] == node):
in_edges.append(e)
in_edges_cnt = len(in_edges)
for e in in_edges:
inc = (((((1 - self.alpha) * (1 - self.beta)) * r) / in_edges_cnt) if (in_edges_cnt > 0) else 0)
self.r[e['from']] = (self.r.get(e['from'], 0) + inc)
def pop(self):
(node, r) = (None, self.epsilon)
for (_node, _r) in self.r.items():
if (_r > r):
(node, r) = (_node, _r)
return (dict(node=node, residual=r) if (node is not None) else None)

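# Hedged driver sketch: pop() returns the highest-residual node (or None once
# all residuals fall below epsilon); push() is a generator, so its updates run
# when the yielded edges are consumed. `get_edges(node)` is a hypothetical
# accessor returning the node's incident edge dicts ({'from': ..., 'to': ...}).
ttr = TTRBase(source='a')
while (item := ttr.pop()) is not None:
    node = item['node']
    for _ in ttr.push(node, get_edges(node)):
        pass
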
def batch_list_collate(collate_fn):
def collate_task(task):
if isinstance(task, TorchDataset):
return collate_fn([task[idx] for idx in range(len(task))])
elif isinstance(task, OrderedDict):
return OrderedDict([(key, collate_task(subtask)) for (key, subtask) in task.items()])
else:
raise NotImplementedError()
def _collate_fn(batch):
batch = [collate_task(task) for task in batch]
assert isinstance(batch[0], OrderedDict)
keys = list(batch[0].keys())
out_dict = OrderedDict()
for key in keys:
out_dict[key] = [x[key] for x in batch]
return out_dict
return _collate_fn

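# Hedged usage sketch: wrap a standard collate function for batches whose
# items are OrderedDicts of sub-datasets (a meta-learning style layout is
# assumed; `meta_dataset` is hypothetical).
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate

loader = DataLoader(meta_dataset, batch_size=4,
                    collate_fn=batch_list_collate(default_collate))
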
class ToTensor(object):
def __init__(self):
self.to_tensor = torchvision.transforms.ToTensor()
def __call__(self, sample):
sample['image'] = self.to_tensor(sample['image'])
sal_ = self.to_tensor(sample['sal']).squeeze().long()
if (len(sal_.shape) == 3):
sample['sal'] = sal_[0]
else:
sample['sal'] = sal_
return sample
def __str__(self):
return 'ToTensor'

def collect_frames(frame: FrameType) -> List[str]:
callstack = []
optional_frame: Optional[FrameType] = frame
while (optional_frame is not None):
callstack.append(frame_format(optional_frame))
optional_frame = optional_frame.f_back
callstack.reverse()
return callstack

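# Hedged usage sketch: capture the current stack, outermost caller first.
# frame_format (referenced above but not shown) is assumed to render one
# frame as a string.
import inspect

current = inspect.currentframe()
if current is not None:
    for line in collect_frames(current):
        print(line)
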
class AuthKeyExchange(object):
def __init__(self, privkey, onSuccess):
self.privkey = privkey
self.state = STATE_NONE
self.r = None
self.encgx = None
self.hashgx = None
self.ourKeyid = 1
self.theirPubkey = None
self.theirKeyid = 1
self.enc_c = None
self.enc_cp = None
self.mac_m1 = None
self.mac_m1p = None
self.mac_m2 = None
self.mac_m2p = None
self.sessionId = None
self.sessionIdHalf = False
self.dh = DH()
self.onSuccess = onSuccess
self.gy = None
self.extraKey = None
self.lastmsg = None
def startAKE(self):
self.r = long_to_bytes(getrandbits(128), 16)
gxmpi = pack_mpi(self.dh.pub)
self.hashgx = SHA256(gxmpi)
self.encgx = AESCTR(self.r).encrypt(gxmpi)
self.state = STATE_AWAITING_DHKEY
return proto.DHCommit(self.encgx, self.hashgx)
def handleDHCommit(self, msg):
self.encgx = msg.encgx
self.hashgx = msg.hashgx
self.state = STATE_AWAITING_REVEALSIG
return proto.DHKey(long_to_bytes(self.dh.pub))
def handleDHKey(self, msg):
if (self.state == STATE_AWAITING_DHKEY):
self.gy = bytes_to_long(msg.gy)
if (not check_group(self.gy)):
logger.error('Invalid g**y received: %r', self.gy)
return
self.createAuthKeys()
aesxb = self.calculatePubkeyAuth(self.enc_c, self.mac_m1)
self.state = STATE_AWAITING_SIG
self.lastmsg = proto.RevealSig(self.r, aesxb, b'')
self.lastmsg.mac = SHA256HMAC160(self.mac_m2, self.lastmsg.getMacedData())
return self.lastmsg
elif (self.state == STATE_AWAITING_SIG):
logger.info('received DHKey while not awaiting DHKEY')
if (msg.gy == self.gy):
logger.info('resending revealsig')
return self.lastmsg
else:
logger.info('bad state for DHKey')
def handleRevealSig(self, msg):
if (self.state != STATE_AWAITING_REVEALSIG):
logger.error('bad state for RevealSig')
raise InvalidParameterError
self.r = msg.rkey
gxmpi = AESCTR(self.r).decrypt(self.encgx)
if (SHA256(gxmpi) != self.hashgx):
logger.error("Hashes don't match")
logger.info('r=%r, hashgx=%r, computed hash=%r, gxmpi=%r', self.r, self.hashgx, SHA256(gxmpi), gxmpi)
raise InvalidParameterError
self.gy = read_mpi(gxmpi)[0]
self.createAuthKeys()
if (msg.mac != SHA256HMAC160(self.mac_m2, msg.getMacedData())):
logger.error("HMACs don't match")
logger.info('mac=%r, mac_m2=%r, data=%r', msg.mac, self.mac_m2, msg.getMacedData())
raise InvalidParameterError
self.checkPubkeyAuth(self.enc_c, self.mac_m1, msg.encsig)
aesxb = self.calculatePubkeyAuth(self.enc_cp, self.mac_m1p)
self.sessionIdHalf = True
self.onSuccess(self)
self.ourKeyid = 0
self.state = STATE_NONE
cmpmac = (struct.pack(b'!I', len(aesxb)) + aesxb)
return proto.Signature(aesxb, SHA256HMAC160(self.mac_m2p, cmpmac))
def handleSignature(self, msg):
if (self.state != STATE_AWAITING_SIG):
logger.error('bad state (%d) for Signature', self.state)
raise InvalidParameterError
if (msg.mac != SHA256HMAC160(self.mac_m2p, msg.getMacedData())):
logger.error("HMACs don't match")
raise InvalidParameterError
self.checkPubkeyAuth(self.enc_cp, self.mac_m1p, msg.encsig)
self.sessionIdHalf = False
self.onSuccess(self)
self.ourKeyid = 0
self.state = STATE_NONE
def createAuthKeys(self):
s = pow(self.gy, self.dh.priv, DH_MODULUS)
sbyte = pack_mpi(s)
self.sessionId = SHA256((b'\x00' + sbyte))[:8]
enc = SHA256((b'\x01' + sbyte))
self.enc_c = enc[:16]
self.enc_cp = enc[16:]
self.mac_m1 = SHA256((b'\x02' + sbyte))
self.mac_m2 = SHA256((b'\x03' + sbyte))
self.mac_m1p = SHA256((b'\x04' + sbyte))
self.mac_m2p = SHA256((b'\x05' + sbyte))
self.extraKey = SHA256((b'\xff' + sbyte))
def calculatePubkeyAuth(self, key, mackey):
pubkey = self.privkey.serializePublicKey()
buf = pack_mpi(self.dh.pub)
buf += pack_mpi(self.gy)
buf += pubkey
buf += struct.pack(b'!I', self.ourKeyid)
MB = self.privkey.sign(SHA256HMAC(mackey, buf))
buf = pubkey
buf += struct.pack(b'!I', self.ourKeyid)
buf += MB
return AESCTR(key).encrypt(buf)
def checkPubkeyAuth(self, key, mackey, encsig):
auth = AESCTR(key).decrypt(encsig)
(self.theirPubkey, auth) = PK.parsePublicKey(auth)
(receivedKeyid, auth) = proto.unpack(b'!I', auth)
if (receivedKeyid == 0):
raise InvalidParameterError
authbuf = pack_mpi(self.gy)
authbuf += pack_mpi(self.dh.pub)
authbuf += self.theirPubkey.serializePublicKey()
authbuf += struct.pack(b'!I', receivedKeyid)
if (self.theirPubkey.verify(SHA256HMAC(mackey, authbuf), auth) is False):
raise InvalidParameterError
self.theirKeyid = receivedKeyid |
def fuse_module(m):
last_conv = None
last_conv_name = None
for (name, child) in m.named_children():
if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
if (last_conv is None):
continue
fused_conv = fuse_conv_bn(last_conv, child)
m._modules[last_conv_name] = fused_conv
m._modules[name] = nn.Identity()
last_conv = None
elif isinstance(child, nn.Conv2d):
last_conv = child
last_conv_name = name
else:
fuse_module(child)
return m |
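# Hedged usage sketch (assumes a fuse_conv_bn helper like the one referenced
# above, e.g. mmcv's). Fusing folds each BatchNorm into the preceding Conv2d,
# so in eval mode the fused model should reproduce the original outputs while
# skipping the separate normalization pass.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 16, 16)
    before = model(x)
    after = fuse_module(model)(x)
print(torch.allclose(before, after, atol=1e-5))  # expected: True |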
class ArtifactStash():
def __enter__(self):
self.tmpdir = None
file_names = [VersionFN, BindingsFN, LibnameForSystem[Host.system]]
self.files = [fp for fp in [(ModuleDir_Raw / fn) for fn in file_names] if fp.exists()]
if (len(self.files) == 0):
return
self.tmpdir = tempfile.TemporaryDirectory(prefix='pypdfium2_artifact_stash_')
self.tmpdir_path = Path(self.tmpdir.name)
for fp in self.files:
shutil.move(fp, self.tmpdir_path)
def __exit__(self, *_):
if (self.tmpdir is None):
return
for fp in self.files:
shutil.move((self.tmpdir_path / fp.name), ModuleDir_Raw)
self.tmpdir.cleanup() |
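# Hedged usage sketch: VersionFN, BindingsFN, LibnameForSystem, Host and
# ModuleDir_Raw come from the surrounding pypdfium2 setup code. The context
# manager moves any pre-built artifacts out of the module directory for the
# duration of the block and restores them afterwards, even if the body raises.
with ArtifactStash():
    pass  # e.g. run a clean, source-only build here |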
def test_hook_auto_num_workers_none(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch, monkeypatch_3_cpus) -> None:
from xdist.plugin import pytest_cmdline_main as check_options
monkeypatch.delenv('PYTEST_XDIST_AUTO_NUM_WORKERS', raising=False)
pytester.makeconftest('\ndef pytest_xdist_auto_num_workers():\n    return None\n')
config = pytester.parseconfigure('-nauto')
check_options(config)
assert (config.getoption('numprocesses') == 3)
monkeypatch.setenv('PYTEST_XDIST_AUTO_NUM_WORKERS', '5')
config = pytester.parseconfigure('-nauto')
check_options(config)
assert (config.getoption('numprocesses') == 5) |
class Host(ObjectDefinition):
object_type = 'host'
objects = ObjectFetcher('host')
def acknowledge(self, sticky=1, notify=1, persistent=0, author='pynag', comment='acknowledged by pynag', recursive=False, timestamp=None):
if (timestamp is None):
timestamp = int(time.time())
if (recursive is True):
pass
pynag.Control.Command.acknowledge_host_problem(host_name=self.host_name, sticky=sticky, notify=notify, persistent=persistent, author=author, comment=comment, timestamp=timestamp, command_file=config.get_cfg_value('command_file'))
def downtime(self, start_time=None, end_time=None, trigger_id=0, duration=7200, author=None, comment='Downtime scheduled by pynag', recursive=False):
if (self.register == '0'):
raise ModelError('Cannot schedule a downtime for unregistered object')
if (not self.host_name):
raise ModelError('Cannot schedule a downtime for host with no host_name')
if (start_time is None):
start_time = time.time()
if (duration is None):
duration = 7200
duration = int(duration)
if (end_time is None):
end_time = (start_time + duration)
if (author is None):
author = getpass.getuser()
arguments = {'host_name': self.host_name, 'start_time': start_time, 'end_time': end_time, 'fixed': '1', 'trigger_id': trigger_id, 'duration': duration, 'author': author, 'comment': comment}
if (recursive is True):
pynag.Control.Command.schedule_host_svc_downtime(**arguments)
else:
pynag.Control.Command.schedule_host_downtime(**arguments)
def get_effective_services(self):
get_object = (lambda x: Service.objects.get_by_id(x, cache_only=True))
list_of_shortnames = sorted(ObjectRelations.host_services[self.host_name])
services = list(map(get_object, list_of_shortnames))
for hg in self.get_effective_hostgroups():
services += hg.get_effective_services()
return services
def get_effective_contacts(self):
get_object = (lambda x: Contact.objects.get_by_shortname(x, cache_only=True))
list_of_shortnames = sorted(ObjectRelations.host_contacts[self.host_name])
return list(map(get_object, list_of_shortnames))
def get_effective_contact_groups(self):
get_object = (lambda x: Contactgroup.objects.get_by_shortname(x, cache_only=True))
list_of_shortnames = sorted(ObjectRelations.host_contact_groups[self.host_name])
return list(map(get_object, list_of_shortnames))
def get_effective_hostgroups(self):
get_object = (lambda x: Hostgroup.objects.get_by_shortname(x, cache_only=True))
list_of_shortnames = sorted(ObjectRelations.host_hostgroups[self.host_name])
return list(map(get_object, list_of_shortnames))
def get_effective_network_parents(self, recursive=False):
if (self['parents'] is None):
return []
results = []
parents = self['parents'].split(',')
for parent_name in parents:
results.append(self.objects.get_by_name(parent_name, cache_only=True))
if (recursive is True):
grandparents = []
for i in results:
grandparents.append(i.get_effective_network_parents(recursive=True))
results += grandparents
return results
def get_effective_network_children(self, recursive=False):
if (self.host_name is None):
return []
children = self.objects.filter(parents__has_field=self.host_name)
if (recursive is True):
for child in children:
children += child.get_effective_network_children(recursive=True)
return children
def delete(self, recursive=False, cleanup_related_items=True):
if ((recursive is True) and self.host_name):
for service in Service.objects.filter(host_name=self.host_name, hostgroup_name__exists=False):
service.delete(recursive=recursive, cleanup_related_items=cleanup_related_items)
for service in Service.objects.filter(host_name__has_field=self.host_name):
service.attribute_removefield('host_name', self.host_name)
service.save()
if ((cleanup_related_items is True) and self.host_name):
hostgroups = Hostgroup.objects.filter(members__has_field=self.host_name)
dependenciesAndEscalations = ObjectDefinition.objects.filter(host_name__has_field=self.host_name, object_type__isnot='host')
services = Service.objects.filter(host_name__has_field=self.host_name)
for i in hostgroups:
i.attribute_removefield('members', self.host_name)
i.save()
for i in dependenciesAndEscalations:
i.attribute_removefield('host_name', self.host_name)
if ((i.get_attribute('object_type').endswith('escalation') or i.get_attribute('object_type').endswith('dependency')) and (recursive is True) and i.attribute_is_empty('host_name') and i.attribute_is_empty('hostgroup_name')):
i.delete(recursive=recursive, cleanup_related_items=cleanup_related_items)
else:
i.save()
for service in services:
service.attribute_removefield('host_name', self.host_name)
service.save()
dependencies = ObjectDefinition.objects.filter(dependent_host_name__has_field=self.host_name)
for i in dependencies:
i.attribute_removefield('dependent_host_name', self.host_name)
if (i.get_attribute('object_type').endswith('dependency') and (recursive is True) and i.attribute_is_empty('dependent_host_name') and i.attribute_is_empty('dependent_hostgroup_name')):
i.delete(recursive=recursive, cleanup_related_items=cleanup_related_items)
else:
i.save()
return super(self.__class__, self).delete(recursive=recursive, cleanup_related_items=cleanup_related_items)
def get_related_objects(self):
result = super(self.__class__, self).get_related_objects()
if (self['host_name'] is not None):
tmp = Service.objects.filter(host_name=self['host_name'])
for i in tmp:
result.append(i)
return result
def get_effective_check_command(self):
c = self.check_command
if (not c):
raise KeyError(None)
check_command = c.split('!')[0]
return Command.objects.get_by_shortname(check_command, cache_only=True)
def get_current_status(self, status=None):
if (not status):
status = pynag.Parsers.status_dat.StatusDat(cfg_file=cfg_file)
host = status.get_hoststatus(self.host_name)
return host
def copy(self, recursive=False, filename=None, **args):
copies = [ObjectDefinition.copy(self, recursive=recursive, filename=filename, **args)]
if ((recursive is True) and ('host_name' in args)):
for i in self.get_effective_services():
copies.append(i.copy(filename=filename, host_name=args.get('host_name')))
return copies
def _do_relations(self):
super(self.__class__, self)._do_relations()
hg = AttributeList(self.hostgroups)
for i in hg.fields:
ObjectRelations.host_hostgroups[self.host_name].add(i)
ObjectRelations.hostgroup_hosts[i].add(self.host_name)
cg = AttributeList(self.contact_groups)
for i in cg.fields:
ObjectRelations.host_contact_groups[self.host_name].add(i)
ObjectRelations.contactgroup_hosts[i].add(self.get_id())
contacts = AttributeList(self.contacts)
for i in contacts.fields:
ObjectRelations.host_contacts[self.host_name].add(i)
ObjectRelations.contact_hosts[i].add(self.get_id())
if self.check_command:
command_name = self.check_command.split('!')[0]
ObjectRelations.command_service[self.host_name].add(command_name)
def add_to_hostgroup(self, hostgroup_name):
hostgroup = Hostgroup.objects.get_by_shortname(hostgroup_name)
return _add_object_to_group(self, hostgroup)
def remove_from_hostgroup(self, hostgroup_name):
hostgroup = Hostgroup.objects.get_by_shortname(hostgroup_name)
return _remove_object_from_group(self, hostgroup)
def add_to_contactgroup(self, contactgroup):
return _add_to_contactgroup(self, contactgroup)
def remove_from_contactgroup(self, contactgroup):
return _remove_from_contactgroup(self, contactgroup)
def rename(self, shortname):
old_name = self.get_shortname()
super(Host, self).rename(shortname)
for i in Service.objects.filter(host_name__has_field=old_name):
i.attribute_replacefield('host_name', old_name, shortname)
i.save()
for i in Hostgroup.objects.filter(members__has_field=old_name):
i.attribute_replacefield('members', old_name, shortname)
i.save() |
def construct_groups(sources: list[BuildSource], separate: (bool | list[tuple[(list[str], (str | None))]]), use_shared_lib: bool) -> emitmodule.Groups:
if (separate is True):
groups: emitmodule.Groups = [([source], None) for source in sources]
elif isinstance(separate, list):
groups = []
used_sources = set()
for (files, name) in separate:
group_sources = [src for src in sources if (src.path in files)]
groups.append((group_sources, name))
used_sources.update(group_sources)
unused_sources = [src for src in sources if (src not in used_sources)]
if unused_sources:
groups.extend([([source], None) for source in unused_sources])
else:
groups = [(sources, None)]
for (i, (group, name)) in enumerate(groups):
if (use_shared_lib and (not name)):
name = group_name([source.module for source in group])
groups[i] = (group, name)
return groups |
def test_get_query_text_handles_parameters_pq(s1_product: SentinelOne):
sdate = datetime.now()
edate = (sdate - timedelta(days=7))
s1_product._pq = True
s1_product._queries = {Tag('valueA'): [Query(sdate, edate, 'endpoint.name', 'contains', '"dc01"')]}
assert (s1_product._get_query_text() == [(Tag('valueA', data=None), 'endpoint.name contains "dc01"')]) |
class ExportCommand(BaseExportCommand):
def handle(self) -> int:
if self.poetry.config.get('warnings.export'):
self.line_error("Warning: poetry-plugin-export will not be installed by default in a future version of Poetry.\nIn order to avoid a breaking change and make your automation forward-compatible, please install poetry-plugin-export explicitly. See for details on how to install a plugin.\nTo disable this warning run 'poetry config warnings.export false'.", style='warning')
return super().handle() |
def train(model, loaders, optimizer, n_epoch=200, max_step=0, log_every=0, eval_every=0, save_dir=None, writer=None, metrics=['loss']):
log.info('training...')
recorder = Recorder(metrics)
best_eval_loss = 10.0
step = 0
for epoch in range(n_epoch):
log.info('Epoch: {:03d}'.format(epoch))
for batch in loaders['dev']:
if ((max_step > 0) and (step >= max_step)):
break
model.train()
optimizer.zero_grad()
(batch_size, metric_values) = model.loss(batch, metrics)
loss = metric_values[0]
loss.backward()
optimizer.step()
step += 1
recorder.record(batch_size, metric_values)
def log_records(split, metric2avg):
log_str = ('step: %03dk (%s)' % ((step // 1000), split))
for (metric, avg) in metric2avg.items():
log_str += (', %s: %.3f' % (metric, avg))
if writer:
writer.add_scalar(('%s/%s' % (split, metric)), avg, step)
log.info(log_str)
if ((log_every > 0) and ((step % log_every) == 0)):
metric2avg = recorder.report_avg()
log_records('dev', metric2avg)
recorder.reset()
if ((eval_every > 0) and ((step % eval_every) == 0)):
metric2avg = evaluate(model, loaders['val'], metrics)
log_records('val', metric2avg)
loss = metric2avg['loss']
if (save_dir and (loss < best_eval_loss)):
best_eval_loss = loss
torch.save(model.state_dict(), os.path.join(save_dir, 'model_best.pt')) |
def pink(N, state=None):
state = (np.random.RandomState() if (state is None) else state)
uneven = (N % 2)
X = (state.randn((((N // 2) + 1) + uneven)) + (1j * state.randn((((N // 2) + 1) + uneven))))
S = np.sqrt((np.arange(len(X)) + 1.0))
y = irfft((X / S)).real
if uneven:
y = y[:(- 1)]
return normalize(y) |
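# Hedged usage sketch (normalize and irfft are assumed to come from the
# module's own imports, e.g. numpy.fft.irfft). Dividing the white spectrum X
# by S = sqrt(f) gives power ~ 1/f, which is exactly pink noise.
import numpy as np

rng = np.random.RandomState(0)
y = pink(44100, state=rng)  # one second at 44.1 kHz
print(y.shape)              # (44100,) |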
def _process_encoding(arr: ndarray, encode_map: dict, name='query', token_map: Optional[dict]=None) -> Tensor:
arr = np.array(arr)
if (name == 'query'):
arr = np.insert(arr, 1, encode_map[name])
elif (name == 'product_id'):
arr = str(arr)[2:(- 1)]
arr = [token_map[x] for x in arr]
arr = (([encode_map[name]] + arr) + [token_map['sep']])
elif (name == 'index'):
arr = str(arr[0])
arr = [token_map[x] for x in arr]
arr = (([encode_map[name]] + arr) + [token_map['sep']])
else:
arr[0] = encode_map[name]
tensor = torch.tensor(arr, dtype=torch.long)
return tensor |
@dataclass(frozen=True)
class OrConstraint(AbstractConstraint):
constraints: Tuple[(AbstractConstraint, ...)]
def apply(self) -> Iterable[Constraint]:
grouped = [self._group_constraints(cons) for cons in self.constraints]
(left, *rest) = grouped
for (varname, constraints) in left.items():
if all(((varname in group) for group in rest)):
constraints = [self._constraint_from_list(varname, constraints), *[self._constraint_from_list(varname, group[varname]) for group in rest]]
(yield Constraint(varname, ConstraintType.one_of, True, list(set(constraints))))
def _constraint_from_list(self, varname: VarnameWithOrigin, constraints: Sequence[Constraint]) -> Constraint:
if (len(constraints) == 1):
return constraints[0]
else:
return Constraint(varname, ConstraintType.all_of, True, constraints)
def _group_constraints(self, abstract_constraint: AbstractConstraint) -> Dict[(VarnameWithOrigin, List[Constraint])]:
by_varname = defaultdict(list)
for constraint in abstract_constraint.apply():
by_varname[constraint.varname].append(constraint)
return by_varname
def invert(self) -> AndConstraint:
return AndConstraint(tuple((cons.invert() for cons in self.constraints)))
@classmethod
def make(cls, constraints: Iterable[AbstractConstraint]) -> AbstractConstraint:
processed = {}
for cons in constraints:
if isinstance(cons, OrConstraint):
for subcons in cons.constraints:
processed[id(subcons)] = subcons
continue
processed[id(cons)] = cons
final = []
for constraint in processed.values():
if isinstance(constraint, AndConstraint):
if any(((id(subcons) in processed) for subcons in constraint.constraints)):
continue
elif isinstance(constraint, Constraint):
inverted = id(constraint.invert())
if (inverted in processed):
continue
final.append(constraint)
if (not final):
return NULL_CONSTRAINT
if (len(final) == 1):
(cons,) = final
return cons
return cls(tuple(final))
def __str__(self) -> str:
children = ' OR '.join(map(str, self.constraints))
return f'({children})' |
def batch_norm(input, is_training=True, momentum=0.9, epsilon=2e-05, in_place_update=True, name='batch_norm'):
if in_place_update:
return tf.contrib.layers.batch_norm(input, decay=momentum, center=True, scale=True, epsilon=epsilon, updates_collections=None, is_training=is_training, scope=name)
else:
return tf.contrib.layers.batch_norm(input, decay=momentum, center=True, scale=True, epsilon=epsilon, is_training=is_training, scope=name) |
def _check_method_and_attr_name(node_type: str, name: str) -> List[str]:
error_msgs = []
if (not (_is_in_snake_case(name) or (name.startswith('__') and _is_in_snake_case(name[2:])))):
error_msgs.append(f"""{node_type.capitalize()} name "{name}" should be in snake_case format. {node_type.capitalize()} names should be lowercase, with words separated by underscores. A single leading underscore can be used to denote a private {node_type} while a double leading underscore invokes Python's name-mangling rules.""")
return error_msgs |
class TestVariableNameValue(TestNameCheckVisitorBase):
@assert_passes()
def test(self):
from typing import Any, NewType
Uid = NewType('Uid', int)
def name_ends_with_uid(uid):
return uid
def some_func() -> Any:
return 42
def test(self, uid: Uid):
assert_is_value(uid, NewTypeValue(Uid))
assert_is_value(name_ends_with_uid, KnownValue(name_ends_with_uid))
uid = some_func()
assert_is_value(uid, VariableNameValue(['uid']))
another_uid = 'hello'
assert_is_value(another_uid, KnownValue('hello'))
d = {'uid': self}
assert_is_value(d['uid'], VariableNameValue(['uid']))
assert_is_value(self.uid, VariableNameValue(['uid'])) |
class _OSA_module(nn.Module):
def __init__(self, in_ch, stage_ch, concat_ch, layer_per_block, module_name, SE=False, identity=False, depthwise=False, with_cp=True):
super(_OSA_module, self).__init__()
self.identity = identity
self.depthwise = depthwise
self.isReduced = False
self.use_checkpoint = with_cp
self.layers = nn.ModuleList()
in_channel = in_ch
if (self.depthwise and (in_channel != stage_ch)):
self.isReduced = True
self.conv_reduction = nn.Sequential(OrderedDict(conv1x1(in_channel, stage_ch, '{}_reduction'.format(module_name), '0')))
for i in range(layer_per_block):
if self.depthwise:
self.layers.append(nn.Sequential(OrderedDict(dw_conv3x3(stage_ch, stage_ch, module_name, i))))
else:
self.layers.append(nn.Sequential(OrderedDict(conv3x3(in_channel, stage_ch, module_name, i))))
in_channel = stage_ch
in_channel = (in_ch + (layer_per_block * stage_ch))
self.concat = nn.Sequential(OrderedDict(conv1x1(in_channel, concat_ch, module_name, 'concat')))
self.ese = eSEModule(concat_ch)
def _forward(self, x):
identity_feat = x
output = []
output.append(x)
if (self.depthwise and self.isReduced):
x = self.conv_reduction(x)
for layer in self.layers:
x = layer(x)
output.append(x)
x = torch.cat(output, dim=1)
xt = self.concat(x)
xt = self.ese(xt)
if self.identity:
xt = (xt + identity_feat)
return xt
def forward(self, x):
if (self.use_checkpoint and self.training):
xt = cp.checkpoint(self._forward, x)
else:
xt = self._forward(x)
return xt |
class TransitionLogAdmin(admin.ModelAdmin):
actions = None
date_hierarchy = 'timestamp'
list_display = ('modified_object', 'transition', 'from_state', 'to_state', 'user', 'timestamp')
list_filter = ('content_type', 'transition')
readonly_fields = ('user', 'modified_object', 'transition', 'timestamp')
search_fields = ('transition', 'user__username')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return (request.method == 'GET')
def has_delete_permission(self, request, obj=None):
return False |
class BrokenRepoTest(unittest.TestCase):
def makerp(self, path):
return rpath.RPath(Globals.local_connection, path)
def makeext(self, path):
return self.root.new_index(tuple(path.split('/')))
def testDuplicateMetadataTimestamp(self):
test_base_rp = self.makerp(abs_test_dir).append('dupl_meta_time')
re_init_rpath_dir(test_base_rp)
source_rp = test_base_rp.append('source')
target_rp = test_base_rp.append('target')
source_rp.mkdir()
for suffix in range(1, 15):
source_rp.append(('file%02d' % suffix)).touch()
rdiff_backup(1, 1, source_rp.__fspath__(), target_rp.__fspath__(), current_time=(suffix * 10000))
rb_data_rp = target_rp.append('rdiff-backup-data')
files_list = sorted(filter((lambda x: x.startswith(b'mirror_metadata.')), rb_data_rp.listdir()))
meta_snapshot_rp = rb_data_rp.append(files_list[8])
meta_dupldiff_rp = rb_data_rp.append(files_list[8].replace(b'.snapshot.gz', b'.diff.gz'))
rpath.copy(meta_snapshot_rp, meta_dupldiff_rp)
rdiff_backup(1, 1, target_rp.__fspath__(), None, extra_options=b'regress')
source_rp.append('file15').touch()
rdiff_backup(1, 1, source_rp.__fspath__(), target_rp.__fspath__(), current_time=(15 * 10000), expected_ret_code=Globals.RET_CODE_ERR)
rdiff_backup(1, 1, target_rp.__fspath__(), None, expected_ret_code=Globals.RET_CODE_ERR, extra_options=b'regress')
rdiff_backup(1, 1, target_rp.__fspath__(), None, extra_options=(b'regress', b'--allow-duplicate-timestamps'), expected_ret_code=Globals.RET_CODE_WARN)
rdiff_backup(1, 1, target_rp.__fspath__(), None, extra_options=(b'--force', b'remove', b'increments', b'--older-than', b'100000'))
source_rp.append('file16').touch()
rdiff_backup(1, 1, source_rp.__fspath__(), target_rp.__fspath__(), current_time=(16 * 10000)) |
def test_semicircle():
m = folium.Map([30.0, 0.0], zoom_start=3)
sc1 = plugins.SemiCircle((34, (- 43)), radius=400000, arc=300, direction=20, color='red', fill_color='red', opacity=0, popup='Direction - 20 degrees, arc 300 degrees')
sc2 = plugins.SemiCircle((46, (- 30)), radius=400000, start_angle=10, stop_angle=50, color='red', fill_color='red', opacity=0, popup='Start angle - 10 degrees, Stop angle - 50 degrees')
m.add_child(sc1)
m.add_child(sc2)
m._repr_html_()
out = normalize(m._parent.render())
script = '<script src="
assert (script in out)
tmpl_sc1 = Template('\n var {{ this.get_name() }} = L.semiCircle(\n {{ this.location|tojson }},\n {{ this.options | tojson }}\n )\n .setDirection{{ this.direction }}\n .addTo({{ this._parent.get_name() }});\n ')
tmpl_sc2 = Template('\n var {{ this.get_name() }} = L.semiCircle(\n {{ this.location|tojson }},\n {{ this.options | tojson }}\n )\n .addTo({{ this._parent.get_name() }});\n ')
assert (normalize(tmpl_sc1.render(this=sc1)) in out)
assert (normalize(tmpl_sc2.render(this=sc2)) in out)
bounds = m.get_bounds()
assert (bounds == [[34, (- 43)], [46, (- 30)]]), bounds |
def sandia(v_dc, p_dc, inverter):
Paco = inverter['Paco']
Pnt = inverter['Pnt']
Pso = inverter['Pso']
power_ac = _sandia_eff(v_dc, p_dc, inverter)
power_ac = _sandia_limits(power_ac, p_dc, Paco, Pnt, Pso)
if isinstance(p_dc, pd.Series):
power_ac = pd.Series(power_ac, index=p_dc.index)
return power_ac |
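# Hedged usage sketch: `inverter` is a hypothetical mapping carrying the Sandia
# model coefficients read above and inside _sandia_eff (Paco, Pnt, Pso, ...),
# e.g. one record of a CEC/Sandia inverter parameter table. With pandas Series
# inputs, the returned AC power keeps the DC power's index.
import pandas as pd

times = pd.date_range('2020-06-01 10:00', periods=3, freq='h')
v_dc = pd.Series([400.0, 410.0, 405.0], index=times)
p_dc = pd.Series([1800.0, 2200.0, 2000.0], index=times)
power_ac = sandia(v_dc, p_dc, inverter)  # `inverter` supplied by the caller |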
def __do_unlink(ql: Qiling, absvpath: str) -> int:
def __has_opened_fd(hpath: str) -> bool:
opened_fds = (ql.os.fd[i] for i in range(NR_OPEN) if (ql.os.fd[i] is not None))
f = next((fd for fd in opened_fds if (getattr(fd, 'name', '') == hpath)), None)
return ((f is not None) and (not f.closed))
hpath = ql.os.path.virtual_to_host_path(absvpath)
if ql.os.fs_mapper.has_mapping(absvpath):
if __has_opened_fd(hpath):
return (- 1)
ql.os.fs_mapper.remove_mapping(absvpath)
else:
if (not ql.os.path.is_safe_host_path(hpath)):
raise PermissionError(f'unsafe path: {hpath}')
def __ok_to_remove(hpath: str) -> bool:
path = pathlib.Path(hpath)
return any((path.is_block_device(), path.is_fifo(), path.is_socket(), path.is_symlink()))
if (__has_opened_fd(hpath) and (not __ok_to_remove(hpath))):
return (- 1)
try:
os.unlink(hpath)
except OSError:
return (- 1)
return 0 |
class BaseHash(object):
algo = namedtuple('algo', ['crypt_id', 'salt_size', 'implicit_rounds', 'salt_exact', 'implicit_ident'])
algorithms = {'md5_crypt': algo(crypt_id='1', salt_size=8, implicit_rounds=None, salt_exact=False, implicit_ident=None), 'bcrypt': algo(crypt_id='2b', salt_size=22, implicit_rounds=12, salt_exact=True, implicit_ident='2b'), 'sha256_crypt': algo(crypt_id='5', salt_size=16, implicit_rounds=535000, salt_exact=False, implicit_ident=None), 'sha512_crypt': algo(crypt_id='6', salt_size=16, implicit_rounds=656000, salt_exact=False, implicit_ident=None)}
def __init__(self, algorithm):
self.algorithm = algorithm |
def alltoall(sendbuf, split_recvbuf=False):
if isinstance(sendbuf, numpy.ndarray):
mpi_dtype = comm.bcast(sendbuf.dtype.char)
sendbuf = numpy.asarray(sendbuf, mpi_dtype, 'C')
nrow = sendbuf.shape[0]
ncol = (sendbuf.size // nrow)
segsize = ((((nrow + pool.size) - 1) // pool.size) * ncol)
sdispls = numpy.arange(0, (pool.size * segsize), segsize)
sdispls[(sdispls > sendbuf.size)] = sendbuf.size
scounts = numpy.append((sdispls[1:] - sdispls[:(- 1)]), (sendbuf.size - sdispls[(- 1)]))
else:
assert (len(sendbuf) == pool.size)
mpi_dtype = comm.bcast(sendbuf[0].dtype.char)
sendbuf = [numpy.asarray(x, mpi_dtype).ravel() for x in sendbuf]
scounts = numpy.asarray([x.size for x in sendbuf])
sdispls = numpy.append(0, numpy.cumsum(scounts[:(- 1)]))
sendbuf = numpy.hstack(sendbuf)
rcounts = numpy.asarray(comm.alltoall(scounts))
rdispls = numpy.append(0, numpy.cumsum(rcounts[:(- 1)]))
recvbuf = numpy.empty(sum(rcounts), dtype=mpi_dtype)
comm.Alltoallv([sendbuf.ravel(), scounts, sdispls, mpi_dtype], [recvbuf.ravel(), rcounts, rdispls, mpi_dtype])
if split_recvbuf:
return [recvbuf[p0:(p0 + c)] for (p0, c) in zip(rdispls, rcounts)]
else:
return recvbuf |
def CNN(include_top=True):
model = Sequential()
model.add(Convolution2D(96, kernel_size=(7, 7), strides=(2, 2), input_shape=IMSIZE, data_format='channels_last'))
print('Output shape:', model.output_shape)
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Convolution2D(256, kernel_size=(5, 5), strides=(2, 2), data_format='channels_last'))
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
print('Output shape:', model.output_shape)
model.add(Convolution2D(512, kernel_size=(3, 3), data_format='channels_last'))
model.add(Activation('relu'))
model.add(Convolution2D(512, kernel_size=(3, 3), data_format='channels_last'))
model.add(Activation('relu'))
print('Output shape:', model.output_shape)
model.add(Convolution2D(512, kernel_size=(3, 3), data_format='channels_last'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.9))
model.add(Dense(2048))
model.add(Activation('relu'))
model.add(Dropout(0.9))
model.add(Flatten())
print('Output shape:', model.output_shape)
if include_top:
model.add(Dense(N_CLASSES, activation='softmax', kernel_regularizer=regularizers.l2(0.01), bias_regularizer=regularizers.l2(0.01)))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
weights_dir = 'CNN_weights.h5'
if os.path.exists(weights_dir):
model.load_weights(weights_dir)
print('===Load weights')
print(model.summary())
return model |
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
bottle_planes = ((planes * cardinality) // 32)
self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality)
self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if (residual is None):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out |
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, hidden_width):
super(Critic, self).__init__()
self.l1 = nn.Linear(((state_dim + action_dim) + 1), hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.l3 = nn.Linear(hidden_width, 1)
def forward(self, s, a):
q = F.relu(self.l1(torch.cat([s, a], 1)))
q = F.relu(self.l2(q))
q = self.l3(q)
return q |
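# Hedged usage sketch: note the first layer is sized state_dim + action_dim + 1,
# so this critic expects one extra scalar feature concatenated onto the action
# (presumably appended by the original caller).
import torch

critic = Critic(state_dim=4, action_dim=2, hidden_width=64)
s = torch.randn(8, 4)
a = torch.randn(8, 2 + 1)  # action plus the extra scalar feature
print(critic(s, a).shape)  # torch.Size([8, 1]) |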
def decode_from_string(encoded_value: str, annotation: Any) -> Union[(Dict[(Any, Any)], List[Any], None)]:
if (not encoded_value):
return None
value_type = annotation
value_origin = typing_inspect.get_origin(value_type)
if (value_origin is dict):
return _decode_string_to_dict(encoded_value, value_type)
elif (value_origin is list):
return _decode_string_to_list(encoded_value, value_type)
else:
raise ValueError('Unknown') |
def _add_hotspot_context(context: Dict[(str, Any)]) -> None:
context['hotspot_enabled'] = False
try:
if (subprocess.call(['/usr/local/sbin/raveberry/hotspot_enabled']) != 0):
context['hotspot_enabled'] = True
with open('/etc/hostapd/hostapd_protected.conf', encoding='utf-8') as hostapd_file:
for line in hostapd_file:
line = line.strip()
if line.startswith('ssid'):
hotspot_ssid = line.split('=')[1]
if line.startswith('wpa_passphrase'):
hotspot_password = line.split('=')[1]
device = util.get_devices()[(- 1)]
ip = util.ip_of_device(device)
url = f'
context['hotspot_ssid'] = hotspot_ssid
context['hotspot_password'] = hotspot_password
context['hotspot_wifi_qr'] = _qr_path(f'WIFI:S:{hotspot_ssid};T:WPA;P:{hotspot_password};;')
context['hotspot_url'] = url
context['hotspot_url_qr'] = _qr_path(url)
context['hotspot_ip'] = ip
except FileNotFoundError:
pass |
def test_basic() -> None:
async def trivial(x: T) -> T:
return x
assert (_core.run(trivial, 8) == 8)
with pytest.raises(TypeError):
_core.run(trivial)
with pytest.raises(TypeError):
_core.run((lambda : None))
async def trivial2(x: T) -> T:
(await _core.checkpoint())
return x
assert (_core.run(trivial2, 1) == 1) |
class InteractionOperator(PolynomialTensor):
def __init__(self, constant, one_body_tensor, two_body_tensor):
super(InteractionOperator, self).__init__({(): constant, (1, 0): one_body_tensor, (1, 1, 0, 0): two_body_tensor})
@property
def one_body_tensor(self):
return self.n_body_tensors[(1, 0)]
@one_body_tensor.setter
def one_body_tensor(self, value):
self.n_body_tensors[(1, 0)] = value
@property
def two_body_tensor(self):
return self.n_body_tensors[(1, 1, 0, 0)]
@two_body_tensor.setter
def two_body_tensor(self, value):
self.n_body_tensors[(1, 1, 0, 0)] = value
def unique_iter(self, complex_valued=False):
if self.constant:
(yield ())
for p in range(self.n_qubits):
for q in range((p + 1)):
if self.one_body_tensor[(p, q)]:
(yield ((p, 1), (q, 0)))
seen = set()
for quad in itertools.product(range(self.n_qubits), repeat=4):
if (self.two_body_tensor[quad] and (quad not in seen)):
seen |= set(_symmetric_two_body_terms(quad, complex_valued))
(yield tuple(zip(quad, (1, 1, 0, 0))))
@classmethod
def zero(cls, n_qubits):
return cls(0, numpy.zeros(((n_qubits,) * 2), dtype=numpy.complex128), numpy.zeros(((n_qubits,) * 4), dtype=numpy.complex128))
def projected(self, indices, exact=False):
projected_n_body_tensors = self.projected_n_body_tensors(indices, exact)
return type(self)(*(projected_n_body_tensors[key] for key in [(), (1, 0), (1, 1, 0, 0)]))
def with_function_applied_elementwise(self, func):
return type(self)(*(func(tensor) for tensor in [self.constant, self.one_body_tensor, self.two_body_tensor])) |
def send_endpoints_to_pinpoint(endpoints: typing.Iterable[Endpoint]):
endpoint_chunks = chunks(list(endpoints), 100)
for endpoints_chunk in endpoint_chunks:
data = {'Item': [endpoint.to_item() for endpoint in endpoints_chunk]}
client = _get_client()
client.update_endpoints_batch(ApplicationId=settings.PINPOINT_APPLICATION_ID, EndpointBatchRequest=data) |
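# Hedged sketch of the chunks() helper referenced above (its real definition
# lives elsewhere in the original module): yield fixed-size runs so each
# update_endpoints_batch call stays within Pinpoint's 100-endpoint limit.
def chunks(items: list, size: int):
    for start in range(0, len(items), size):
        yield items[start:start + size] |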
def check_limitation(coded_version, msg):
coded_version_tuple = coded_version.split('.')
(coded_ma, coded_mi) = map(int, coded_version_tuple[0:2])
current_version_tuple = sys.version_info
(current_ma, current_mi) = current_version_tuple[0:2]
assert (not ((coded_ma < current_ma) or ((coded_ma == current_ma) and (coded_mi < current_mi)))), ('You are now on python %s.%s, code was written on %s: %s' % (current_ma, current_mi, coded_version, msg)) |
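# Hedged usage sketch: the guard raises AssertionError once the running
# interpreter is newer than the version the workaround was written against,
# prompting a re-check of the noted limitation.
check_limitation('3.11', 'revisit this workaround on newer Pythons') |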
class PassportElementErrorUnspecified(PassportElementError):
__slots__ = ('element_hash',)
def __init__(self, type: str, element_hash: str, message: str, *, api_kwargs: Optional[JSONDict]=None):
super().__init__('unspecified', type, message, api_kwargs=api_kwargs)
with self._unfrozen():
self.element_hash: str = element_hash
self._id_attrs = (self.source, self.type, self.element_hash, self.message) |
def check_encoder_output(encoder_output, batch_size=None):
if (not isinstance(encoder_output, dict)):
msg = ('FairseqEncoderModel.forward(...) must be a dict' + _current_postion_info())
return (False, msg)
if ('encoder_out' not in encoder_output):
msg = ('FairseqEncoderModel.forward(...) must contain encoder_out' + _current_postion_info())
return (False, msg)
if ('encoder_padding_mask' not in encoder_output):
msg = ('FairseqEncoderModel.forward(...) must contain encoder_padding_mask' + _current_postion_info())
return (False, msg)
if (not isinstance(encoder_output['encoder_out'], torch.Tensor)):
msg = ('encoder_out must be a torch.Tensor' + _current_postion_info())
return (False, msg)
if (encoder_output['encoder_out'].dtype != torch.float32):
msg = ('encoder_out must have float32 dtype' + _current_postion_info())
return (False, msg)
mask = encoder_output['encoder_padding_mask']
if (mask is not None):
if (not isinstance(mask, torch.Tensor)):
msg = ('encoder_padding_mask must be a torch.Tensor' + _current_postion_info())
return (False, msg)
if ((mask.dtype != torch.uint8) and ((not hasattr(torch, 'bool')) or (mask.dtype != torch.bool))):
msg = ('encoder_padding_mask must have dtype of uint8' + _current_postion_info())
return (False, msg)
if (mask.dim() != 2):
msg = ('we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)' + _current_postion_info())
return (False, msg)
if ((batch_size is not None) and (mask.size(1) != batch_size)):
msg = (('we expect encoder_padding_mask to be a 2-d tensor, with size(1)' + ' being the batch size') + _current_postion_info())
return (False, msg)
return (True, None) |
def make_grounding(qdmr, qdmr_name, dataset_break, verbose=True):
question = dataset_break.questions[qdmr_name]
if verbose:
print('Question:', question)
print(f'''QDMR:
{qdmr}''')
grounding = {}
for i_op in range(len(qdmr)):
op = qdmr.ops[i_op]
assert (op in op_grounder), f'Could not find function to ground op {op}'
op_grounder[op](i_op, qdmr, grounding)
message = []
args_grounded = 0
for (i_op, (qdmr_op, qdmr_args)) in enumerate(qdmr):
for i_arg in range(len(qdmr_args)):
if (GroundingIndex(i_op, i_arg, qdmr_args[i_arg]) in grounding):
args_grounded += 1
message += [f'{os.path.basename(__file__)}: OK: grounded {args_grounded} args: {grounding}']
for m in message:
print(m)
message = '\n'.join(message)
return (grounding, message) |
def main():
try:
myfile = rs.filesystem.File('srm://tbn18.nikhef.nl/dpm/nikhef.nl/home/vlemed/mark/radical.saga/input.txt')
print(myfile.get_size_self())
except rs.SagaException as ex:
print(('An error occured during file operation: %s' % str(ex)))
sys.exit((- 1)) |
class Class(Importable):
def check_and_return(self, value):
if inspect.isclass(value):
return value
value = super(Class, self).check_and_return(value)
if (not inspect.isclass(value)):
self._failure(('imported value should be a class, got %s' % value), value=value)
return value
def __repr__(self):
return '<Class>' |
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_description_in_params(mock_invoke_step, mock_get_module):
step = Step({'name': 'step1', 'description': 'test description', 'run': '{key5}', 'in': {'key5': True}})
context = Context({'key5': False})
with patch_logger('pypyr.dsl', logging.NOTIFY) as mock_logger_notify:
step.run_step(context)
mock_logger_notify.assert_called_once_with('test description')
mock_invoke_step.assert_called_once()
assert (len(context) == 0) |
class ComplexSliderWidget(widgets.AxesWidget):
def __init__(self, ax, angle, r, animated=False):
(line,) = ax.plot([angle, angle], [0.0, r], linewidth=2.0)
super().__init__(ax)
self._rotator = line
self._is_click = False
self.animated = animated
self.update = (lambda x, y: None)
self.connect_event('button_press_event', self._click)
self.connect_event('button_release_event', self._release)
self.connect_event('motion_notify_event', self._motion)
def get_artist(self):
return self._rotator
def _click(self, event):
self._is_click = True
self._update_plots(event)
def _release(self, event):
self._is_click = False
def on_changed(self, update):
self.update = update
def _motion(self, event):
self._update_plots(event)
def _update_plots(self, event):
if (self._is_click and (event.xdata != None) and (event.ydata != None) and (event.x >= self.ax.bbox.xmin) and (event.x < self.ax.bbox.xmax) and (event.y >= self.ax.bbox.ymin) and (event.y < self.ax.bbox.ymax)):
(phi, r) = (event.xdata, event.ydata)
if (r < 0.2):
r = 0.0
self.update(phi, r)
self._rotator.set_xdata([phi, phi])
self._rotator.set_ydata([0.0, r])
if (not self.animated):
event.canvas.draw() |
def node_options(caller, raw_string, **kwargs):
text = "|cOption menu|n\n('|wq|nuit' to return)"
room = caller.location
options = caller.attributes.get('options', category=room.tagcategory, default={})
things_style = options.get('things_style', 2)
session = kwargs['session']
screenreader = session.protocol_flags.get('SCREENREADER', False)
options = ({'desc': '{}No item markings (hard mode)'.format(('|g(*)|n ' if (things_style == 0) else '( ) ')), 'goto': (_set_thing_style, {'value': 0, 'session': session})}, {'desc': '{}Items marked as |yitem|n (with color)'.format(('|g(*)|n ' if (things_style == 1) else '( ) ')), 'goto': (_set_thing_style, {'value': 1, 'session': session})}, {'desc': '{}Items are marked as |y[item]|n (screenreader friendly)'.format(('|g(*)|n ' if (things_style == 2) else '( ) ')), 'goto': (_set_thing_style, {'value': 2, 'session': session})}, {'desc': '{}Screenreader mode'.format(('(*) ' if screenreader else '( ) ')), 'goto': (_toggle_screen_reader, kwargs)})
return (text, options) |
class PolyvoreModel(object):
def __init__(self, config, mode, train_inception=False):
assert (mode in ['train', 'eval', 'inference'])
self.config = config
self.mode = mode
self.train_inception = train_inception
self.reader = tf.TFRecordReader()
self.initializer = tf.random_uniform_initializer(minval=(- self.config.initializer_scale), maxval=self.config.initializer_scale)
self.images = None
self.input_mask = None
self.image_embeddings = None
self.total_loss = None
self.inception_variables = []
self.init_fn = None
self.global_step = None
def is_training(self):
return (self.mode == 'train')
def process_image(self, encoded_image, thread_id=0, image_idx=0):
return image_processing.process_image(encoded_image, is_training=self.is_training(), height=self.config.image_height, width=self.config.image_width, image_format=self.config.image_format, image_idx=image_idx)
def build_inputs(self):
if (self.mode == 'inference'):
image_feed = tf.placeholder(dtype=tf.string, shape=[], name='image_feed')
image_feed = self.process_image(image_feed)
image_seqs = tf.expand_dims(image_feed, 0)
input_mask = tf.placeholder(dtype=tf.int64, shape=[1, 8], name='input_mask')
else:
input_queue = input_ops.prefetch_input_data(self.reader, self.config.input_file_pattern, is_training=self.is_training(), batch_size=self.config.batch_size, values_per_shard=self.config.values_per_input_shard, input_queue_capacity_factor=self.config.input_queue_capacity_factor, num_reader_threads=self.config.num_input_reader_threads)
images_and_captions = []
for thread_id in range(self.config.num_preprocess_threads):
serialized_sequence_example = input_queue.dequeue()
(set_id, encoded_images, image_ids, captions, likes) = input_ops.parse_sequence_example(serialized_sequence_example, set_id=self.config.set_id_name, image_feature=self.config.image_feature_name, image_index=self.config.image_index_name, caption_feature=self.config.caption_feature_name, number_set_images=self.config.number_set_images)
images = []
for i in range(self.config.number_set_images):
images.append(self.process_image(encoded_images[i], image_idx=i))
images_and_captions.append([set_id, images, image_ids, captions, likes])
queue_capacity = ((5 * self.config.num_preprocess_threads) * self.config.batch_size)
(set_ids, image_seqs, image_ids, input_mask, loss_mask, cap_seqs, cap_mask, likes) = input_ops.batch_with_dynamic_pad(images_and_captions, batch_size=self.config.batch_size, queue_capacity=queue_capacity)
self.images = image_seqs
self.input_mask = input_mask
def build_image_embeddings(self):
images = tf.reshape(self.images, [(- 1), self.config.image_height, self.config.image_height, 3])
inception_output = image_embedding.inception_v3(images, trainable=self.train_inception, is_training=self.is_training())
self.inception_variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='InceptionV3')
with tf.variable_scope('image_embedding') as scope:
image_embeddings = tf.contrib.layers.fully_connected(inputs=inception_output, num_outputs=self.config.embedding_size, activation_fn=None, weights_initializer=self.initializer, biases_initializer=None, scope=scope)
tf.constant(self.config.embedding_size, name='embedding_size')
self.image_embeddings = tf.reshape(image_embeddings, [tf.shape(self.images)[0], (- 1), self.config.embedding_size])
def build_model(self):
norm_image_embeddings = tf.nn.l2_normalize(self.image_embeddings, 2, name='norm_image_embeddings')
if (self.mode == 'inference'):
pass
else:
emb_loss_mask = np.ones((self.config.number_set_images, self.config.number_set_images))
emb_loss_mask = block_diag(emb_loss_mask, emb_loss_mask, emb_loss_mask, emb_loss_mask, emb_loss_mask, emb_loss_mask, emb_loss_mask, emb_loss_mask, emb_loss_mask, emb_loss_mask)
norm_image_embeddings = tf.reshape(norm_image_embeddings, [(self.config.number_set_images * self.config.batch_size), self.config.embedding_size])
scores = tf.matmul(norm_image_embeddings, norm_image_embeddings, transpose_a=False, transpose_b=True, name='scores')
posi_scores = (tf.reduce_sum(tf.mul(scores, emb_loss_mask)) / np.sum(emb_loss_mask))
emb_loss_mask = (1.0 - emb_loss_mask)
m = 0.8
nega_scores = tf.maximum((tf.mul(scores, emb_loss_mask) - m), 0.0)
nega_scores = (tf.reduce_sum(nega_scores) / np.sum(emb_loss_mask))
emb_batch_loss = tf.sub(nega_scores, posi_scores, name='emb_batch_loss')
tf.contrib.losses.add_loss(emb_batch_loss)
total_loss = tf.contrib.losses.get_total_loss()
tf.scalar_summary('emb_batch_loss', emb_batch_loss)
tf.scalar_summary('total_loss', total_loss)
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
self.total_loss = total_loss
def setup_inception_initializer(self):
if (self.mode != 'inference'):
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info('Restoring Inception variables from checkpoint file %s', self.config.inception_checkpoint_file)
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn
def setup_global_step(self):
global_step = tf.Variable(initial_value=0, name='global_step', trainable=False, collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.VARIABLES])
self.global_step = global_step
def build(self):
self.build_inputs()
self.build_image_embeddings()
self.build_model()
self.setup_inception_initializer()
self.setup_global_step() |
def convert(module, flag_name):
mod = module
before_ch = None
for (name, child) in module.named_children():
if (hasattr(child, flag_name) and getattr(child, flag_name)):
if isinstance(child, BatchNorm2d):
before_ch = child.num_features
mod.add_module(name, FRN(num_features=child.num_features))
if isinstance(child, (ReLU, LeakyReLU)):
mod.add_module(name, TLU(num_features=before_ch))
else:
mod.add_module(name, convert(child, flag_name))
return mod |
class BLEUScorer(object):
def __init__(self):
pass
def score(self, parallel_corpus):
count = [0, 0, 0, 0]
clip_count = [0, 0, 0, 0]
r = 0
c = 0
weights = [0.25, 0.25, 0.25, 0.25]
for (hyps, refs) in parallel_corpus:
hyps = [hyp.split() for hyp in hyps]
refs = [ref.split() for ref in refs]
for hyp in hyps:
for i in range(4):
hypcnts = Counter(ngrams(hyp, (i + 1)))
cnt = sum(hypcnts.values())
count[i] += cnt
max_counts = {}
for ref in refs:
refcnts = Counter(ngrams(ref, (i + 1)))
for ng in hypcnts:
max_counts[ng] = max(max_counts.get(ng, 0), refcnts[ng])
clipcnt = dict(((ng, min(count, max_counts[ng])) for (ng, count) in hypcnts.items()))
clip_count[i] += sum(clipcnt.values())
bestmatch = [1000, 1000]
for ref in refs:
if (bestmatch[0] == 0):
break
diff = abs((len(ref) - len(hyp)))
if (diff < bestmatch[0]):
bestmatch[0] = diff
bestmatch[1] = len(ref)
r += bestmatch[1]
c += len(hyp)
p0 = 1e-07
bp = (1 if (c > r) else math.exp((1 - (float(r) / float(c)))))
p_ns = [((float(clip_count[i]) / float((count[i] + p0))) + p0) for i in range(4)]
s = math.fsum(((w * math.log(p_n)) for (w, p_n) in zip(weights, p_ns) if p_n))
bleu = (bp * math.exp(s))
return bleu |
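# Hedged usage sketch (assumptions: ngrams is nltk.util.ngrams and Counter is
# collections.Counter, per the calls above). The corpus is a list of
# (hypotheses, references) pairs of whitespace-tokenized strings.
scorer = BLEUScorer()
corpus = [(['the cat sat on the mat'], ['the cat is on the mat'])]
print(scorer.score(corpus)) |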
class PenaltyLbfgsOptimizer(Serializable):
def __init__(self, max_opt_itr=20, initial_penalty=1.0, min_penalty=0.01, max_penalty=1000000.0, increase_penalty_factor=2, decrease_penalty_factor=0.5, max_penalty_itr=10, adapt_penalty=True):
Serializable.quick_init(self, locals())
self._max_opt_itr = max_opt_itr
self._penalty = initial_penalty
self._initial_penalty = initial_penalty
self._min_penalty = min_penalty
self._max_penalty = max_penalty
self._increase_penalty_factor = increase_penalty_factor
self._decrease_penalty_factor = decrease_penalty_factor
self._max_penalty_itr = max_penalty_itr
self._adapt_penalty = adapt_penalty
self._opt_fun = None
self._target = None
self._max_constraint_val = None
self._constraint_name = None
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name='constraint', *args, **kwargs):
(constraint_term, constraint_value) = leq_constraint
penalty_var = TT.scalar('penalty')
penalized_loss = (loss + (penalty_var * constraint_term))
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
def get_opt_output():
flat_grad = flatten_tensor_variables(theano.grad(penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'))
return [penalized_loss.astype('float64'), flat_grad.astype('float64')]
self._opt_fun = lazydict(f_loss=(lambda : compile_function(inputs, loss, log_name='f_loss')), f_constraint=(lambda : compile_function(inputs, constraint_term, log_name='f_constraint')), f_penalized_loss=(lambda : compile_function(inputs=(inputs + [penalty_var]), outputs=[penalized_loss, loss, constraint_term], log_name='f_penalized_loss')), f_opt=(lambda : compile_function(inputs=(inputs + [penalty_var]), outputs=get_opt_output(), log_name='f_opt')))
def loss(self, inputs):
return self._opt_fun['f_loss'](*inputs)
def constraint_val(self, inputs):
return self._opt_fun['f_constraint'](*inputs)
def optimize(self, inputs):
inputs = tuple(inputs)
try_penalty = np.clip(self._penalty, self._min_penalty, self._max_penalty)
penalty_scale_factor = None
f_opt = self._opt_fun['f_opt']
f_penalized_loss = self._opt_fun['f_penalized_loss']
def gen_f_opt(penalty):
def f(flat_params):
self._target.set_param_values(flat_params, trainable=True)
return f_opt(*(inputs + (penalty,)))
return f
cur_params = self._target.get_param_values(trainable=True).astype('float64')
opt_params = cur_params
for penalty_itr in range(self._max_penalty_itr):
logger.log(('trying penalty=%.3f...' % try_penalty))
(itr_opt_params, _, _) = scipy.optimize.fmin_l_bfgs_b(func=gen_f_opt(try_penalty), x0=cur_params, maxiter=self._max_opt_itr)
(_, try_loss, try_constraint_val) = f_penalized_loss(*(inputs + (try_penalty,)))
logger.log(('penalty %f => loss %f, %s %f' % (try_penalty, try_loss, self._constraint_name, try_constraint_val)))
if ((try_constraint_val < self._max_constraint_val) or ((penalty_itr == (self._max_penalty_itr - 1)) and (opt_params is None))):
opt_params = itr_opt_params
if (not self._adapt_penalty):
break
if ((penalty_scale_factor is None) or np.isnan(try_constraint_val)):
if ((try_constraint_val > self._max_constraint_val) or np.isnan(try_constraint_val)):
penalty_scale_factor = self._increase_penalty_factor
else:
penalty_scale_factor = self._decrease_penalty_factor
opt_params = itr_opt_params
elif ((penalty_scale_factor > 1) and (try_constraint_val <= self._max_constraint_val)):
break
elif ((penalty_scale_factor < 1) and (try_constraint_val >= self._max_constraint_val)):
break
if ((try_penalty >= self._max_penalty) and (penalty_scale_factor > 1)):
logger.log('_max_penalty has already been tried!')
self._penalty = try_penalty
break
elif ((try_penalty <= self._min_penalty) and (penalty_scale_factor < 1)):
logger.log('_min_penalty has already been tried!')
self._penalty = try_penalty
break
else:
try_penalty *= penalty_scale_factor
try_penalty = np.clip(try_penalty, self._min_penalty, self._max_penalty)
self._penalty = try_penalty
self._target.set_param_values(opt_params, trainable=True) |
def gen_dest_dep_test():
return [gen_ld_dest_dep_test(5, 'lw', 8192, 66051), gen_ld_dest_dep_test(4, 'lw', 8196, ), gen_ld_dest_dep_test(3, 'lw', 8200, ), gen_ld_dest_dep_test(2, 'lw', 8204, ), gen_ld_dest_dep_test(1, 'lw', 8208, ), gen_ld_dest_dep_test(0, 'lw', 8212, ), gen_word_data([66051, , , , , ])] |
class Blosc(Codec):
codec_id = 'imagecodecs_blosc'
def __init__(self, level=None, compressor=None, typesize=None, blocksize=None, shuffle=None, numthreads=None):
self.level = level
self.compressor = compressor
self.typesize = typesize
self.blocksize = blocksize
self.shuffle = shuffle
self.numthreads = numthreads
def encode(self, buf):
buf = protective_squeeze(numpy.asarray(buf))
return imagecodecs.blosc_encode(buf, level=self.level, compressor=self.compressor, typesize=self.typesize, blocksize=self.blocksize, shuffle=self.shuffle, numthreads=self.numthreads)
def decode(self, buf, out=None):
return imagecodecs.blosc_decode(buf, numthreads=self.numthreads, out=_flat(out)) |
class SelfAttentionBlock2D(nn.Module):
def __init__(self, in_channels, key_channels, value_channels, out_channels=None, scale=1):
super().__init__()
self.scale = scale
self.in_channels = in_channels
self.out_channels = out_channels
self.key_channels = key_channels
self.value_channels = value_channels
if (out_channels is None):
self.out_channels = in_channels
self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
self.f_key = nn.Sequential(nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1), ActivatedBatchNorm(self.key_channels))
self.f_query = self.f_key
self.f_value = nn.Conv2d(in_channels=self.in_channels, out_channels=self.value_channels, kernel_size=1)
self.W = nn.Conv2d(in_channels=self.value_channels, out_channels=self.out_channels, kernel_size=1)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
def forward(self, x):
(batch_size, h, w) = (x.size(0), x.size(2), x.size(3))
if (self.scale > 1):
x = self.pool(x)
value = self.f_value(x).view(batch_size, self.value_channels, (- 1))
value = value.permute(0, 2, 1)
query = self.f_query(x).view(batch_size, self.key_channels, (- 1))
query = query.permute(0, 2, 1)
key = self.f_key(x).view(batch_size, self.key_channels, (- 1))
sim_map = torch.matmul(query, key)
sim_map = ((self.key_channels ** (- 0.5)) * sim_map)
sim_map = F.softmax(sim_map, dim=(- 1))
context = torch.matmul(sim_map, value)
context = context.permute(0, 2, 1).contiguous()
context = context.view(batch_size, self.value_channels, *x.size()[2:])
context = self.W(context)
if (self.scale > 1):
context = F.interpolate(context, size=(h, w), mode='bilinear', align_corners=True)
return context |
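# Hedged usage sketch (ActivatedBatchNorm is a project-local block assumed to
# be in scope). With scale > 1, attention runs on the pooled map and the
# context is upsampled back, so the output spatial size matches the input.
import torch

block = SelfAttentionBlock2D(in_channels=32, key_channels=16, value_channels=16, scale=2)
x = torch.randn(2, 32, 24, 24)
print(block(x).shape)  # torch.Size([2, 32, 24, 24]) |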
class UpdateableAPIResource(APIResource):
def save(self, idempotency_key=None):
updated_params = self.serialize(None)
headers = populate_headers(idempotency_key)
if updated_params:
self.refresh_from(self.request('post', self.instance_path(), updated_params, headers))
else:
util.logger.debug('Trying to save already saved object %r', self)
return self |
class DiscriminatorFromCloud():
def __init__(self, name, n_filters=[64, 128, 128, 256], filter_size=1, stride=1, activation_fn=tf.nn.leaky_relu, norm_mtd='instance_norm', latent_code_dim=128):
self.name = name
self.n_filters = n_filters.copy()
self.n_filters.append(latent_code_dim)
self.filter_size = filter_size
self.stride = stride
self.activation_fn = activation_fn
self.norm_mtd = norm_mtd
self.latent_code_dim = latent_code_dim
self.reuse = False
def __call__(self, input_cloud, is_training):
with tf.variable_scope(self.name, reuse=self.reuse):
layer = input_cloud
for (f_id, _) in enumerate(self.n_filters):
layer = tf.layers.conv1d(layer, self.n_filters[f_id], self.filter_size, self.stride, padding='same', activation=None, name=('conv1d_%d' % f_id))
if ((f_id != 0) and (self.norm_mtd is not None)):
if (self.norm_mtd == 'batch_norm'):
layer = tf.layers.batch_normalization(layer, momentum=0.99, training=is_training, name=('bn_%d' % f_id))
elif (self.norm_mtd == 'instance_norm'):
layer = tf.contrib.layers.instance_norm(layer, scope=('in_%d' % f_id))
if (self.activation_fn is not None):
layer = self.activation_fn(layer, name=('activation_fn_%d' % f_id))
layer = tf.reduce_max(layer, axis=1, name='max_pool')
layer = tf.layers.dense(layer, 1, activation=None, name='output')
self.reuse = True
self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
return layer
def __str__(self):
res = ''
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
for tv in train_vars:
res += (tv.name + '\n')
return res[:(len(res) - 2)] |
@pytest.mark.parametrize('proc_name,proc_pttrn,lines', [('s1', 'started', 21), ('s2', 'spam, bacon, eggs', 30), ('s3', 'finally started', 130)])
def test_startup_detection_max_read_lines(tcp_port, proc_name, proc_pttrn, lines, xprocess):
data = 'bacon\n'
class Starter(ProcessStarter):
pattern = proc_pttrn
max_read_lines = lines
args = [sys.executable, server_path, tcp_port, '--no-children']
xprocess.ensure(proc_name, Starter)
info = xprocess.getinfo(proc_name)
assert info.isrunning()
assert request_response_cycle(tcp_port, data)
info.terminate() |
class Plane(Shape):
def __init__(self, plane_fit, gridsize):
plane = numpy.array(plane_fit)
origin = ((- plane) / numpy.dot(plane, plane))
n = numpy.array([plane[1], plane[2], plane[0]])
u = numpy.cross(plane, n)
v = numpy.cross(plane, u)
u /= numpy.linalg.norm(u)
v /= numpy.linalg.norm(v)
def project_point(point):
return ((origin + (point[0] * u)) + (point[1] * v))
vertexes = []
for x in range(((- gridsize) + 1), gridsize):
for y in range(((- gridsize) + 1), gridsize):
vertexes += [project_point(((x - 1), (y - 1))), project_point((x, (y - 1))), project_point((x, y)), project_point(((x - 1), y))]
super(Plane, self).__init__(vertexes) |
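# Orientation note: plane_fit is a vector p defining the plane p.x = -1, and origin
# is that plane's closest point to the world origin. A tiny sketch (assumes the
# Shape base class from this module): p = (0, 0, -1) gives the plane z = 1.
floor = Plane([0.0, 0.0, -1.0], gridsize=4) |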
def forward(scan, cad, negative, separation_model, completion_model, triplet_model, criterion_separation, criterion_completion, criterion_triplet, device):
(scan_model, scan_mask, scan_name) = (scan['content'], scan['mask'], scan['name'])
scan_bg_mask = torch.where((scan_mask == 0), scan_model, torch.zeros(scan_mask.shape))
scan_model = scan_model.to(device, non_blocking=True)
scan_fg_mask = scan_mask.to(device, non_blocking=True)
scan_bg_mask = scan_bg_mask.to(device, non_blocking=True)
cad_model = cad['content']
cad_model = cad_model.to(device, non_blocking=True)
negative_model = negative['content']
negative_model = negative_model.to(device, non_blocking=True)
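# pipeline: separate the scan into foreground/background, complete the foreground,
# then embed (anchor, positive, negative) for the triplet loss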
(foreground, background) = separation_model(torch.sigmoid(scan_model))
loss_foreground = torch.mean(criterion_separation(foreground, scan_fg_mask), dim=[1, 2, 3, 4]).mean()
loss_background = torch.mean(criterion_separation(background, scan_bg_mask), dim=[1, 2, 3, 4]).mean()
completed = completion_model(torch.sigmoid(foreground))
loss_completion = torch.mean(criterion_completion(completed, cad_model), dim=[1, 2, 3, 4]).mean()
(anchor, positive, negative_embed) = triplet_model(torch.sigmoid(completed), cad_model, negative_model)
(a, p, n) = (anchor.view(anchor.shape[0], (- 1)), positive.view(anchor.shape[0], (- 1)), negative_embed.view(anchor.shape[0], (- 1)))
loss_triplet = criterion_triplet(a, p, n).mean()
return (loss_foreground, loss_background, loss_completion, loss_triplet) |
class Const():
triple_len = 3
home = ''
origin_train_folder = os.path.join(home, 'train')
origin_dev_folder = os.path.join(home, 'dev')
origin_all_train_filename = os.path.join(home, 'origin_all_train.xml')
origin_all_dev_filename = os.path.join(home, 'origin_all_dev.xml')
origin_tmp_filename = os.path.join(home, 'tmp.xml')
origin_example_filename = os.path.join(home, 'origin_example.xml')
if (triple_len == 3):
folder = 'entity_end_position'
relations2id_filename = os.path.join(home, folder, 'relations2id.json')
relation2count_filename = os.path.join(home, folder, 'relation2count.json')
words2id_filename = os.path.join(home, folder, 'words2id.json')
words_id2vector_filename = os.path.join(home, folder, 'words_id2vector.json')
relations_words_id_filename = os.path.join(home, folder, 'relations_words_id.json')
train_filename = os.path.join(home, folder, 'train.json')
valid_filename = os.path.join(home, folder, 'valid.json')
dev_filename = os.path.join(home, folder, 'dev.json')
example_filename = os.path.join(home, folder, 'example.json')
nyt_style_raw_train_filename = os.path.join(home, folder, 'raw_nyt_style_train.json')
nyt_style_raw_test_filename = os.path.join(home, folder, 'raw_nyt_style_test.json')
nyt_style_raw_valid_filename = os.path.join(home, folder, 'raw_nyt_style_valid.json') |
def geth_prepare_datadir(datadir: str, genesis_file: str) -> None:
node_genesis_path = os.path.join(datadir, 'custom_genesis.json')
ipc_path = (datadir + '/geth.ipc')
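# geth's IPC unix socket lives at this path; unix-domain socket paths are capped
# at 104 bytes on macOS/BSD (108 on Linux), hence the check below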
assert (len(ipc_path) < 104), f'geth data path "{ipc_path}" is too large'
os.makedirs(datadir, exist_ok=True)
shutil.copy(genesis_file, node_genesis_path)
geth_init_datadir(datadir, node_genesis_path) |
def test_ordered_enqueuer_processes():
enqueuer = OrderedEnqueuer(TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for i in range(100):
acc.append(next(gen_output)[(0, 0, 0, 0)])
assert (acc == list(range(100))), 'Order was not kept in OrderedEnqueuer with processes'
enqueuer.stop() |
class Erfc(UnaryScalarOp):
nfunc_spec = ('scipy.special.erfc', 1, 1)
def impl(self, x):
return scipy.special.erfc(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
(gz,) = grads
if (x.type in complex_types):
raise NotImplementedError()
if (outputs[0].type in discrete_types):
if (x.type in discrete_types):
return [x.zeros_like(dtype=config.floatX)]
else:
return [x.zeros_like()]
cst = np.asarray((2.0 / np.sqrt(np.pi)), dtype=upcast(x.type.dtype, gz.type.dtype))
return ((((- gz) * cst) * exp(((- x) * x))),)
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(z,) = out
if (node.inputs[0].type in complex_types):
raise NotImplementedError('type not supported', node.inputs[0].type)
cast = node.outputs[0].type.dtype_specs()[1]
return f'{z} = erfc(({cast}){x});' |
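# Standalone numerical sanity check (not part of the Op) of the gradient used in
# L_op above: d/dx erfc(x) = -(2 / sqrt(pi)) * exp(-x**2).
import numpy as np
from scipy.special import erfc

x, eps = 0.7, 1e-6
numeric = (erfc(x + eps) - erfc(x - eps)) / (2 * eps)  # central difference
analytic = -2 / np.sqrt(np.pi) * np.exp(-x * x)
assert abs(numeric - analytic) < 1e-8 |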
class IterativeContextReReadModel(MultipleContextModel):
def __init__(self, encoder: QuestionsAndParagraphsEncoder, word_embed: Optional[WordEmbedder], char_embed: Optional[CharWordEmbedder], embed_mapper: Optional[Union[(SequenceMapper, ElmoWrapper)]], sequence_encoder: SequenceEncoder, sentences_encoder: SentencesEncoder, sentence_mapper: Optional[SequenceMapper], merger: FixedMergeLayer, post_merger: Optional[Mapper], reread_mapper: Optional[Union[(SequenceMapper, ElmoWrapper)]], pre_attention_mapper: Optional[SequenceMapper], context_to_question_attention: Optional[AttentionWithPostMapper], question_to_context_attention: Optional[AttentionWithPostMapper], first_predictor: BinaryNullPredictor, second_predictor: BinaryNullPredictor, reformulate_by_context: bool, multiply_iteration_probs: bool, max_batch_size: Optional[int]=None, elmo_model: Optional[LanguageModel]=None):
super().__init__(encoder=encoder, word_embed=word_embed, char_embed=char_embed, max_batch_size=max_batch_size, elmo_model=elmo_model)
self.embed_mapper = embed_mapper
self.sequence_encoder = sequence_encoder
self.sentences_encoder = sentences_encoder
self.sentence_mapper = sentence_mapper
self.merger = merger
self.post_merger = post_merger
self.reread_mapper = reread_mapper
self.pre_attention_mapper = pre_attention_mapper
self.question_to_context_attention = question_to_context_attention
self.context_to_question_attention = context_to_question_attention
self.reformulate_by_context = reformulate_by_context
self.multiply_iteration_probs = multiply_iteration_probs
self.first_predictor = first_predictor
self.second_predictor = second_predictor
self.max_pool = MaxPool(map_layer=None, min_val=VERY_NEGATIVE_NUMBER, regular_reshape=True)
self.mean_pool = MeanPool()
if ((self.reformulate_by_context and (question_to_context_attention is None)) or ((not self.reformulate_by_context) and (context_to_question_attention is None))):
raise ValueError('The last attention must be defined')
def _get_predictions_for(self, is_train, question_embed, question_mask, context_embed, context_mask, answer, question_lm, context_lm, sentence_segments, sentence_mask):
(question_rep, context_rep) = (question_embed, context_embed)
(context1_rep, context2_rep) = tf.unstack(context_rep, axis=1, num=2)
(context1_mask, context2_mask) = tf.unstack(context_mask, axis=1, num=2)
(context1_sentence_segments, context2_sentence_segments) = tf.unstack(sentence_segments, axis=1, num=2)
(context1_sentence_mask, context2_sentence_mask) = tf.unstack(sentence_mask, axis=1, num=2)
(q_lm_in, c1_lm_in, c2_lm_in) = ([], [], [])
if self.use_elmo:
(context1_lm, context2_lm) = tf.unstack(context_lm, axis=1, num=2)
q_lm_in = [question_lm]
c1_lm_in = [context1_lm]
c2_lm_in = [context2_lm]
if (self.embed_mapper is not None):
with tf.variable_scope('map_embed'):
context1_rep = self.embed_mapper.apply(is_train, context1_rep, context1_mask, *c1_lm_in)
with tf.variable_scope('map_embed', reuse=True):
context2_rep = self.embed_mapper.apply(is_train, context2_rep, context2_mask, *c2_lm_in)
question_rep = self.embed_mapper.apply(is_train, question_rep, question_mask, *q_lm_in)
with tf.variable_scope('seq_enc'):
question_enc = self.sequence_encoder.apply(is_train, question_rep, question_mask)
question_enc = tf.identity(question_enc, name='encode_question')
tf.add_to_collection(INTERMEDIATE_LAYER_COLLECTION, question_enc)
def encode_sentences(context, sentence_segs, sentence_mask, rep_name):
context = self.sentences_encoder.apply(context, sentence_segs, sentence_mask)
if (self.sentence_mapper is not None):
with tf.variable_scope('sentence_mapper'):
context = self.sentence_mapper.apply(is_train, context, mask=sentence_mask)
context = tf.identity(context, name=rep_name)
tf.add_to_collection(INTERMEDIATE_LAYER_COLLECTION, context)
return context
with tf.variable_scope('sentences_enc'):
context1_sent_rep = encode_sentences(context1_rep, context1_sentence_segments, context1_sentence_mask, 'encode_context1')
with tf.variable_scope('sentences_enc', reuse=True):
context2_sent_rep = encode_sentences(context2_rep, context2_sentence_segments, context2_sentence_mask, 'encode_context2')
with tf.variable_scope('context1_relevance'):
(c1_q_merged_rep, context1_sentences_logits, context1_pred) = merge_weight_predict(is_train=is_train, context_rep=context1_sent_rep, question_rep=question_enc, context_mask=context1_sentence_mask, merger=self.merger, post_merger=self.post_merger, max_pool=self.max_pool, predictor=self.first_predictor, answer=([answer[0]] + answer[2:]))
with tf.variable_scope('reformulation'):
if (self.reread_mapper is not None):
(question_rep, context_rep) = (question_embed, context_embed)
(context1_rep, _) = tf.unstack(context_rep, axis=1, num=2)
(context1_mask, _) = tf.unstack(context_mask, axis=1, num=2)
if (not isinstance(self.reread_mapper, ElmoWrapper)):
(c1_lm_in, q_lm_in) = ([], [])
with tf.variable_scope('reread_map_embed'):
context1_rep = self.reread_mapper.apply(is_train, context1_rep, context1_mask, *c1_lm_in)
with tf.variable_scope('reread_map_embed', reuse=True):
question_rep = self.reread_mapper.apply(is_train, question_rep, question_mask, *q_lm_in)
if (self.pre_attention_mapper is not None):
with tf.variable_scope('pre_att'):
question_rep = self.pre_attention_mapper.apply(is_train, question_rep, question_mask)
with tf.variable_scope('pre_att', reuse=True):
context1_rep = self.pre_attention_mapper.apply(is_train, context1_rep, context1_mask)
if (not self.reformulate_by_context):
if (self.question_to_context_attention is not None):
with tf.variable_scope('q2c'):
context1_rep = self.question_to_context_attention.apply(is_train, x=context1_rep, keys=question_rep, memories=question_rep, x_mask=context1_mask, memory_mask=question_mask)
if (self.pre_attention_mapper is not None):
with tf.variable_scope('pre_att', reuse=True):
context1_rep = self.pre_attention_mapper.apply(is_train, context1_rep, context1_mask)
with tf.variable_scope('c2q'):
question_rep = self.context_to_question_attention.apply(is_train, x=question_rep, keys=context1_rep, memories=context1_rep, x_mask=question_mask, memory_mask=context1_mask)
reformulated_q = self.sequence_encoder.apply(is_train, question_rep, question_mask)
else:
if (self.context_to_question_attention is not None):
with tf.variable_scope('c2q'):
question_rep = self.context_to_question_attention.apply(is_train, x=question_rep, keys=context1_rep, memories=context1_rep, x_mask=question_mask, memory_mask=context1_mask)
if (self.pre_attention_mapper is not None):
with tf.variable_scope('pre_att', reuse=True):
question_rep = self.pre_attention_mapper.apply(is_train, question_rep, question_mask)
with tf.variable_scope('q2c'):
context1_rep = self.question_to_context_attention.apply(is_train, x=context1_rep, keys=question_rep, memories=question_rep, x_mask=context1_mask, memory_mask=question_mask)
reformulated_q = self.sequence_encoder.apply(is_train, context1_rep, context1_mask)
reformulated_q = tf.identity(reformulated_q, name='reformulated_question')
tf.add_to_collection(INTERMEDIATE_LAYER_COLLECTION, reformulated_q)
with tf.variable_scope('context2_relevance'):
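# optionally chain the two iterations by scaling the second prediction
# with the first iteration's probability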
first_iter_probs = None
if self.multiply_iteration_probs:
first_iter_probs = tf.expand_dims(context1_pred.get_probs(), axis=1)
(c2_q_merged_rep, context2_sentences_logits, context2_pred) = merge_weight_predict(is_train=is_train, context_rep=context2_sent_rep, question_rep=reformulated_q, context_mask=context2_sentence_mask, merger=self.merger, post_merger=self.post_merger, max_pool=self.max_pool, predictor=self.second_predictor, answer=([answer[1]] + answer[2:]), multiply_probs=first_iter_probs)
return MultipleBinaryPredictions([context1_pred, context2_pred])
def __setstate__(self, state):
if ('multiply_iteration_probs' not in state):
state['multiply_iteration_probs'] = False
if ('reformulate_by_context' not in state):
state['reformulate_by_context'] = False
if ('second_predictor' not in state):
state['second_predictor'] = BinaryNullPredictor()
state['first_predictor'] = BinaryNullPredictor()
if ('reread_mapper' not in state):
state['reread_mapper'] = None
if ('pre_attention_mapper' not in state):
state['pre_attention_mapper'] = None
super().__setstate__(state) |
def test_load_totp_vectors():
vector_data = textwrap.dedent('\n # TOTP Test Vectors\n # RFC 6238 Appendix B\n\n COUNT = 0\n TIME = 59\n TOTP = 94287082\n MODE = SHA1\n SECRET = 12345678901234567890\n\n COUNT = 1\n TIME = 59\n TOTP = 46119246\n MODE = SHA256\n SECRET = 12345678901234567890\n\n COUNT = 2\n TIME = 59\n TOTP = 90693936\n MODE = SHA512\n SECRET = 12345678901234567890\n ').splitlines()
assert (load_nist_vectors(vector_data) == [{'time': b'59', 'totp': b'94287082', 'mode': b'SHA1', 'secret': b'12345678901234567890'}, {'time': b'59', 'totp': b'46119246', 'mode': b'SHA256', 'secret': b'12345678901234567890'}, {'time': b'59', 'totp': b'90693936', 'mode': b'SHA512', 'secret': b'12345678901234567890'}]) |
class CodeStylePage(QWizardPage):
def __init__(self, parent=None):
super(CodeStylePage, self).__init__(parent)
self.setTitle('Code Style Options')
self.setSubTitle('Choose the formatting of the generated code.')
self.setPixmap(QWizard.LogoPixmap, QPixmap(':/images/logo2.png'))
commentCheckBox = QCheckBox('&Start generated files with a comment')
commentCheckBox.setChecked(True)
protectCheckBox = QCheckBox('&Protect header file against multiple inclusions')
protectCheckBox.setChecked(True)
macroNameLabel = QLabel('&Macro name:')
self.macroNameLineEdit = QLineEdit()
macroNameLabel.setBuddy(self.macroNameLineEdit)
self.includeBaseCheckBox = QCheckBox('&Include base class definition')
self.baseIncludeLabel = QLabel('Base class include:')
self.baseIncludeLineEdit = QLineEdit()
self.baseIncludeLabel.setBuddy(self.baseIncludeLineEdit)
protectCheckBox.toggled.connect(macroNameLabel.setEnabled)
protectCheckBox.toggled.connect(self.macroNameLineEdit.setEnabled)
self.includeBaseCheckBox.toggled.connect(self.baseIncludeLabel.setEnabled)
self.includeBaseCheckBox.toggled.connect(self.baseIncludeLineEdit.setEnabled)
self.registerField('comment', commentCheckBox)
self.registerField('protect', protectCheckBox)
self.registerField('macroName', self.macroNameLineEdit)
self.registerField('includeBase', self.includeBaseCheckBox)
self.registerField('baseInclude', self.baseIncludeLineEdit)
layout = QGridLayout()
layout.setColumnMinimumWidth(0, 20)
layout.addWidget(commentCheckBox, 0, 0, 1, 3)
layout.addWidget(protectCheckBox, 1, 0, 1, 3)
layout.addWidget(macroNameLabel, 2, 1)
layout.addWidget(self.macroNameLineEdit, 2, 2)
layout.addWidget(self.includeBaseCheckBox, 3, 0, 1, 3)
layout.addWidget(self.baseIncludeLabel, 4, 1)
layout.addWidget(self.baseIncludeLineEdit, 4, 2)
self.setLayout(layout)
def initializePage(self):
className = self.field('className')
self.macroNameLineEdit.setText((className.upper() + '_H'))
baseClass = self.field('baseClass')
is_baseClass = bool(baseClass)
self.includeBaseCheckBox.setChecked(is_baseClass)
self.includeBaseCheckBox.setEnabled(is_baseClass)
self.baseIncludeLabel.setEnabled(is_baseClass)
self.baseIncludeLineEdit.setEnabled(is_baseClass)
if (not is_baseClass):
self.baseIncludeLineEdit.clear()
elif QRegExp('Q[A-Z].*').exactMatch(baseClass):
self.baseIncludeLineEdit.setText((('<' + baseClass) + '>'))
else:
self.baseIncludeLineEdit.setText((('"' + baseClass.lower()) + '.h"')) |
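# Minimal hosting sketch, assuming the same Qt binding imports as above; in the
# original wizard example the className/baseClass fields are registered by an
# earlier page, so initializePage() can read them.
import sys

app = QApplication(sys.argv)
wizard = QWizard()
wizard.addPage(CodeStylePage())
wizard.show()
sys.exit(app.exec_()) |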
class PycodestyleChecker(BaseRawFileChecker):
name = 'pep8_errors'
msgs = {'E9989': ('Found pycodestyle (PEP8) style error at %s', 'pep8-errors', '')}
options = (('pycodestyle-ignore', {'default': (), 'type': 'csv', 'metavar': '<pycodestyle-ignore>', 'help': 'List of Pycodestyle errors to ignore'}),)
def process_module(self, node: nodes.NodeNG) -> None:
style_guide = pycodestyle.StyleGuide(paths=[node.stream().name], reporter=JSONReport, ignore=self.linter.config.pycodestyle_ignore)
report = style_guide.check_files()
for (line_num, msg) in report.get_file_results():
self.add_message('pep8-errors', line=line_num, args=msg) |
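# pylint discovers this checker through a plugin module's register() hook; a
# minimal sketch of that entry point:
def register(linter):
    linter.register_checker(PycodestyleChecker(linter)) |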
class PromptArea(QWidget):
def __init__(self, edit, get_text, highlighter):
super(PromptArea, self).__init__(edit)
self.setFixedWidth(0)
self.edit = edit
self.get_text = get_text
self.highlighter = highlighter
edit.updateRequest.connect(self.updateContents)
def paintEvent(self, event):
edit = self.edit
height = edit.fontMetrics().height()
block = edit.firstVisibleBlock()
count = block.blockNumber()
painter = QPainter(self)
painter.fillRect(event.rect(), edit.palette().base())
first = True
while block.isValid():
count += 1
block_top = edit.blockBoundingGeometry(block).translated(edit.contentOffset()).top()
if ((not block.isVisible()) or (block_top > event.rect().bottom())):
break
rect = QRect(0, int(block_top), self.width(), height)
self.draw_block(painter, rect, block, first)
first = False
block = block.next()
painter.end()
super(PromptArea, self).paintEvent(event)
def updateContents(self, rect, scroll):
if scroll:
self.scroll(0, scroll)
else:
self.update()
def adjust_width(self, new_text):
width = calc_text_width(self.edit, new_text)
if (width > self.width()):
self.setFixedWidth(width)
def draw_block(self, painter, rect, block, first):
pen = painter.pen()
text = self.get_text(block.blockNumber())
default = self.edit.currentCharFormat()
formats = ([default] * len(text))
painter.setFont(self.edit.font())
for (index, length, format) in self.highlighter.highlight(text):
formats[index:(index + length)] = ([format] * length)
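# draw one glyph at a time: each character is right-aligned padded with the
# spaces that would follow it, so the column positions line up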
for (idx, (char, format)) in enumerate(zip(text, formats)):
rpos = ((len(text) - idx) - 1)
pen.setColor(format.foreground().color())
painter.setPen(pen)
painter.drawText(rect, Qt.AlignRight, (text[idx] + (' ' * rpos))) |
class AutoUpdateLayerMenuButton(QtWidgets.QPushButton):
def __init__(self, *args, m=None, layers=None, exclude=None, auto_text=False, **kwargs):
super().__init__(*args, **kwargs)
self.m = m
self._layers = layers
self._exclude = exclude
self._auto_text = auto_text
self._last_layers = []
menu = QtWidgets.QMenu()
menu.setStyleSheet('QMenu { menu-scrollable: 1;}')
menu.aboutToShow.connect(self.update_layers)
self.setMenu(menu)
self.m.BM.on_layer(self.update_visible_layer, persistent=True)
self.m._on_show_companion_widget.append(self.update_visible_layer)
self.update_layers()
font = QtGui.QFont('sans serif', 8, QtGui.QFont.Bold, False)
self.setFont(font)
self.set_icons(str((iconpath / 'layers.png')), str((iconpath / 'layers_hover.png')))
self.setStyleSheet('\n QPushButton {border: 0px;}\n QPushButton::menu-indicator { width: 0; }\n ')
self.toggled.connect(self.swap_icon)
def set_icons(self, normal_icon=None, hoover_icon=None, checked_icon=None):
if normal_icon:
pm = QtGui.QPixmap(normal_icon)
self.normal_icon = QtGui.QIcon(pm.scaled(self.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
self.setIcon(self.normal_icon)
self.active_icon = self.normal_icon
if hoover_icon:
pm = QtGui.QPixmap(hoover_icon)
self.hoover_icon = QtGui.QIcon(pm.scaled(self.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
if checked_icon:
pm = QtGui.QPixmap(checked_icon)
self.checked_icon = QtGui.QIcon(pm.scaled(self.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
else:
self.checked_icon = self.hoover_icon
def swap_icon(self, *args, **kwargs):
if (self.normal_icon and self.hoover_icon):
if self.isChecked():
self.active_icon = self.checked_icon
else:
self.active_icon = self.normal_icon
self.setIcon(self.active_icon)
def leaveEvent(self, event):
if self.active_icon:
self.setIcon(self.active_icon)
return super().leaveEvent(event)
def enterEvent(self, e):
if (self.hoover_icon and (not self.isChecked())):
self.setIcon(self.hoover_icon)
else:
self.setIcon(self.normal_icon)
if (self.window().showhelp is True):
QtWidgets.QToolTip.showText(e.globalPos(), '<h3>Layer Dropdown Menu</h3>Get a dropdown-list of all currently available map-layers.<p><ul><li><b>click</b> to switch to the selected layer</li><li><b>control+click</b> to overlay multiple layers</li></ul>The number [n] in front of the layer-name indicates the stack-order of the layer.')
def get_uselayer(self):
active_layers = []
for a in self.menu().actions():
w = a.defaultWidget()
if (isinstance(w, QtWidgets.QCheckBox) and w.isChecked()):
active_layers.append(a.data())
uselayer = '???'
if (len(active_layers) > 1):
uselayer = '|'.join(active_layers)
elif (len(active_layers) == 1):
uselayer = active_layers[0]
return uselayer
def mousePressEvent(self, event):
if (event.button() == Qt.RightButton):
self.update_layers()
elif (event.button() == Qt.LeftButton):
self.update_layers()
super().mousePressEvent(event)
@property
def layers(self):
if (self._layers is not None):
return self._layers
else:
return [i for i in self.m._get_layers(exclude=self._exclude) if (not str(i).startswith('_'))]
def update_display_text(self, l):
if (not self._auto_text):
return
if (len(l) > 50):
l = f"{len([1 for i in l.split('|') if (len(i) > 0)])} layers visible"
if ('{' in l):
l = ('custom : ' + l)
self.setStyleSheet('QPushButton{color: rgb(200,50,50)}')
elif ('|' in l):
l = ('multi : ' + l)
self.setStyleSheet('QPushButton{color: rgb(200,50,50)}')
else:
self.setStyleSheet('QPushButton{color: rgb(50,200,50)}')
self.setText(l)
def update_visible_layer(self, *args, **kwargs):
if (not self.isVisible()):
return
self.update_layers()
self.update_display_text(self.m.BM._bg_layer)
def actionClicked(self):
action = self.sender()
if (not isinstance(action, QtWidgets.QWidgetAction)):
return
modifiers = QtWidgets.QApplication.keyboardModifiers()
actionwidget = action.defaultWidget()
checked_layers = [l for l in self.m.BM.bg_layer.split('|') if (l != '_')]
selected_layer = action.data()
selected_layers = [l for l in action.data().split('|') if (l != '_')]
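# a plain click switches directly to the clicked layer; Shift/Ctrl-click
# toggles it inside a multi-layer combination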
if (not ((modifiers == Qt.ShiftModifier) or (modifiers == Qt.ControlModifier))):
self.m.show_layer(selected_layer)
return
if ((selected_layer == 'all') or ('|' in selected_layer)):
if ((modifiers == Qt.ShiftModifier) or (modifiers == Qt.ControlModifier)):
self.update_checkstatus()
return
else:
self.m.show_layer(selected_layer)
return
if isinstance(actionwidget, QtWidgets.QCheckBox):
if actionwidget.isChecked():
for l in selected_layers:
if (l not in checked_layers):
checked_layers.append(l)
else:
for l in selected_layers:
if ((l in checked_layers) and (len(checked_layers) > 1)):
checked_layers.remove(l)
uselayer = '???'
if (len(checked_layers) > 1):
uselayer = '|'.join(checked_layers)
elif (len(checked_layers) == 1):
uselayer = checked_layers[0]
if (uselayer != '???'):
self.m.show_layer(uselayer)
else:
self.m.show_layer(selected_layer)
def update_checkstatus(self):
currlayer = str(self.m.BM.bg_layer)
(layers, alphas) = self.m.BM._get_layers_alphas(currlayer)
if ('|' in currlayer):
active_layers = [i for i in layers if (not i.startswith('_'))]
active_layers.append(currlayer)
else:
active_layers = [currlayer]
for action in self.menu().actions():
key = action.data()
w = action.defaultWidget()
if isinstance(w, QtWidgets.QCheckBox):
w.clicked.disconnect(action.trigger)
if (key in active_layers):
w.setChecked(True)
w.setText(f'[{active_layers.index(key)}] {key}')
else:
w.setChecked(False)
w.setText((key + ' '))
w.clicked.connect(action.trigger)
def update_layers(self):
layers = self.layers
if (layers == self._last_layers):
self.update_checkstatus()
return
self.menu().clear()
for key in layers:
checkBox = QtWidgets.QCheckBox(key, self.menu())
action = QtWidgets.QWidgetAction(self.menu())
action.setDefaultWidget(checkBox)
action.setText((key + ' '))
action.setData(key)
if (key == 'all'):
checkBox.setStyleSheet('QCheckBox::indicator {border: none;}QCheckBox::indicator::checked {background:rgb(255,50,50)}')
elif ('|' in key):
checkBox.setStyleSheet('QCheckBox::indicator {border: none;}QCheckBox::indicator::checked {background:rgb(50,100,50)}')
checkBox.clicked.connect(action.trigger)
action.triggered.connect(self.actionClicked)
self.menu().addAction(action)
action.triggered.connect(self.menu().show)
self.update_display_text(self.m.BM._bg_layer)
self._last_layers = layers
self.update_checkstatus() |
class TrainDataset(Dataset):
def __init__(self, args, raw_datasets, cache_root):
self.raw_datasets = raw_datasets
self.tab_processor = get_default_processor(max_cell_length=100, tokenizer=AutoTokenizer.from_pretrained(args.bert.location, use_fast=False), max_input_length=args.seq2seq.table_truncation_max_length)
cache_path = os.path.join(cache_root, 'kvret_train.cache')
if (os.path.exists(cache_path) and args.dataset.use_cache):
self.extended_data = torch.load(cache_path)
else:
self.extended_data = []
expansion = (args.seq2seq.expansion if args.seq2seq.expansion else 1)
for expand_id in range(expansion):
for raw_data in tqdm(self.raw_datasets):
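# expand each dialogue into one training example per prefix of driver/assistant turns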
for i in range(1, (len(raw_data['dialogue']['driver']) + 1)):
if ((i > min(len(raw_data['dialogue']['driver']), len(raw_data['dialogue']['assistant']))) and (not (len(raw_data['dialogue']['driver']) == len(raw_data['dialogue']['assistant'])))):
continue
extend_data = copy.deepcopy(raw_data)
extend_data['dialogue']['driver'] = extend_data['dialogue']['driver'][:i]
extend_data['dialogue']['assistant'] = extend_data['dialogue']['assistant'][:i]
(history, gold_response) = kvret_get_constructed_history_and_golden_response(usr_utterances=extend_data['dialogue']['driver'], sys_utterances=extend_data['dialogue']['assistant'])
table_context = {'header': extend_data['kb']['header'], 'rows': extend_data['kb']['rows']}
for truncate_func in self.tab_processor.table_truncate_funcs:
truncate_func.truncate_table(table_context, history, [])
linear_table = self.tab_processor.table_linearize_func.process_table(table_context)
extend_data.update({'struct_in': linear_table.lower(), 'text_in': history.lower(), 'seq_out': gold_response.lower()})
self.extended_data.append(extend_data)
if args.dataset.use_cache:
torch.save(self.extended_data, cache_path)
def __getitem__(self, index) -> T_co:
return self.extended_data[index]
def __len__(self):
return len(self.extended_data) |
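# Since the dataset implements __getitem__/__len__, it drops straight into a
# PyTorch DataLoader; args, raw_datasets, and cache_root are hypothetical objects here.
from torch.utils.data import DataLoader

train_set = TrainDataset(args, raw_datasets, cache_root='./cache')
loader = DataLoader(train_set, batch_size=8, shuffle=True) |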