from itertools import chain

def before_and_after(predicate, it):
    # Split an iterable into the leading run where `predicate` holds and the remainder.
    it = iter(it)
    transition = []

    def true_iterator():
        for elem in it:
            if predicate(elem):
                yield elem
            else:
                # First failing element: stash it so the remainder can replay it.
                transition.append(elem)
                return

    remainder_iterator = chain(transition, it)
    return true_iterator(), remainder_iterator
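A quick usage sketch of the recipe (with the caveat from the itertools docs this follows: the leading iterator must be fully consumed before the remainder is read):

digits, rest = before_and_after(str.isdigit, '123abc456')
assert ''.join(digits) == '123'
assert ''.join(rest) == 'abc456'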
import torch

def cka(k_x: torch.Tensor, k_y: torch.Tensor, centered: bool = False, unbiased: bool = True) -> torch.Tensor:
    # Centered Kernel Alignment: HSIC(X, Y) normalized by the geometric mean
    # of the self-similarities HSIC(X, X) and HSIC(Y, Y).
    hsic_xy = hsic(k_x, k_y, centered, unbiased)
    hsic_xx = hsic(k_x, k_x, centered, unbiased)
    hsic_yy = hsic(k_y, k_y, centered, unbiased)
    return hsic_xy / torch.sqrt(hsic_xx * hsic_yy)
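`cka` relies on an `hsic` helper defined elsewhere in the module; a minimal sketch of the biased estimator such a helper might compute (the signature, and ignoring the `centered`/`unbiased` flags, are assumptions for illustration only):

def hsic(k_x: torch.Tensor, k_y: torch.Tensor, centered: bool = False, unbiased: bool = True) -> torch.Tensor:
    # Biased HSIC estimate: trace(K H L H) / (n - 1)^2, with H the centering matrix.
    # (The centered/unbiased variants are omitted in this sketch.)
    n = k_x.shape[0]
    h = torch.eye(n) - torch.full((n, n), 1.0 / n)
    return torch.trace(k_x @ h @ k_y @ h) / (n - 1) ** 2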
def test_unique_name_generator():
    unique_names = unique_name_generator(['blah'], suffix_sep='_')

    x = vector('blah')
    x_name = unique_names(x)
    assert x_name == 'blah_1'

    y = vector('blah_1')
    y_name = unique_names(y)
    assert y_name == 'blah_1_1'

    # Repeated lookups of the same variables return the cached names.
    x_name = unique_names(x)
    assert x_name == 'blah_1'
    y_name = unique_names(y)
    assert y_name == 'blah_1_1'

    z = vector('blah')
    z_name = unique_names(z)
    assert z_name == 'blah_2'

    w = vector('blah_1')
    w_name = unique_names(w)
    assert w_name == 'blah_1_2'

    q = vector()
    q_name_1 = unique_names(q)
    q_name_2 = unique_names(q)
    assert q_name_1 == q_name_2 == 'tensor_variable'

    unique_names = unique_name_generator()

    r = vector()
    r_name_1 = unique_names(r)
    r_name_2 = unique_names(r, force_unique=True)
    assert r_name_1 != r_name_2

    r_name_3 = unique_names(r)
    assert r_name_2 == r_name_3
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftHistogram(nn.Module):

    def __init__(self, n_features, n_examples, num_bins, quantiles=False):
        super(SoftHistogram, self).__init__()
        self.in_channels = n_features
        self.num_bins = num_bins
        self.quantiles = quantiles
        # Grouped 1x1 convolutions implement per-feature bin centers and widths.
        self.bin_centers_conv = nn.Conv1d(self.in_channels, self.num_bins * self.in_channels,
                                          kernel_size=1, groups=self.in_channels, bias=True)
        self.bin_centers_conv.weight.data.fill_(1)
        self.bin_centers_conv.weight.requires_grad = False
        self.bin_widths_conv = nn.Conv1d(self.num_bins * self.in_channels,
                                         self.num_bins * self.in_channels,
                                         kernel_size=1, groups=self.num_bins * self.in_channels,
                                         bias=True)
        self.bin_widths_conv.bias.data.fill_(1)
        self.bin_widths_conv.bias.requires_grad = False
        self.centers = self.bin_centers_conv.bias
        self.widths = self.bin_widths_conv.weight
        bin_centers = (-1 / self.num_bins) * (torch.arange(self.num_bins).float() + 0.5)
        self.bin_centers_conv.bias = torch.nn.Parameter(torch.cat(self.in_channels * [bin_centers]),
                                                        requires_grad=True)
        bin_width = -self.num_bins * 2
        self.bin_widths_conv.weight.data.fill_(bin_width)

    def forward(self, input):
        input = self.bin_centers_conv(input.transpose(0, 1).unsqueeze(0))
        input = torch.abs(input)
        input = self.bin_widths_conv(input)
        input = F.relu(input)
        if self.quantiles:
            input = input.view(-1, self.num_bins).cumsum(dim=1)
        return input

    def constrain_bins(self, xx):
        # Normalize so the bins of each feature sum to one.
        # (Fixed: the original referenced self.numBins, but the attribute is self.num_bins.)
        (n, c, l) = xx.size()
        xx_sum = xx.reshape(n, c // self.num_bins, self.num_bins, l).sum(2) + torch.tensor(1e-05)
        xx_sum = torch.repeat_interleave(xx_sum, self.num_bins, dim=1)
        xx = xx / xx_sum
        return xx
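A hedged usage sketch: judging from the Conv1d grouping, `forward` expects an (n_examples, n_features) tensor and returns one soft activation per feature and bin; the shapes below follow from that reading rather than from the original source.

hist = SoftHistogram(n_features=4, n_examples=128, num_bins=16)
out = hist(torch.rand(128, 4))
assert out.shape == (1, 4 * 16, 128)  # (batch dim, features * bins, examples)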
class ValueHolder():
    # Transparent wrapper: comparisons, arithmetic, and container protocols all
    # delegate to the wrapped value. (The stripped '@' matmul operators are restored.)
    def __init__(self, value): self._value = value
    def value(self): return self._value
    def get(self): return self._value
    def set(self, new_value): self._value = new_value
    def __bool__(self): return bool(self._value)
    def __eq__(self, other): return self._value == other
    def __ne__(self, other): return self._value != other
    def __repr__(self): return repr(self._value)
    def __lt__(self, other): return self._value < other
    def __le__(self, other): return self._value <= other
    def __gt__(self, other): return self._value > other
    def __ge__(self, other): return self._value >= other
    def __add__(self, other): return self._value + other
    def __radd__(self, other): return other + self._value
    def __iadd__(self, other): self._value += other; return self
    def __sub__(self, other): return self._value - other
    def __rsub__(self, other): return other - self._value
    def __isub__(self, other): self._value -= other; return self
    def __mul__(self, other): return self._value * other
    def __rmul__(self, other): return other * self._value
    def __imul__(self, other): self._value *= other; return self
    def __matmul__(self, other): return self._value @ other
    def __rmatmul__(self, other): return other @ self._value
    def __imatmul__(self, other): self._value @= other; return self
    def __truediv__(self, other): return self._value / other
    def __rtruediv__(self, other): return other / self._value
    def __itruediv__(self, other): self._value /= other; return self
    def __floordiv__(self, other): return self._value // other
    def __rfloordiv__(self, other): return other // self._value
    def __ifloordiv__(self, other): self._value //= other; return self
    def __mod__(self, other): return self._value % other
    def __rmod__(self, other): return other % self._value
    def __imod__(self, other): self._value %= other; return self
    def __divmod__(self, other): return divmod(self._value, other)
    def __rdivmod__(self, other): return divmod(other, self._value)
    def __pow__(self, other): return self._value ** other
    def __rpow__(self, other): return other ** self._value
    def __ipow__(self, other): self._value **= other; return self
    def __lshift__(self, other): return self._value << other
    def __rlshift__(self, other): return other << self._value
    def __ilshift__(self, other): self._value <<= other; return self
    def __rshift__(self, other): return self._value >> other
    def __rrshift__(self, other): return other >> self._value
    def __irshift__(self, other): self._value >>= other; return self
    def __and__(self, other): return self._value & other
    def __rand__(self, other): return other & self._value
    def __iand__(self, other): self._value &= other; return self
    def __xor__(self, other): return self._value ^ other
    def __rxor__(self, other): return other ^ self._value
    def __ixor__(self, other): self._value ^= other; return self
    def __or__(self, other): return self._value | other
    def __ror__(self, other): return other | self._value
    def __ior__(self, other): self._value |= other; return self
    def __neg__(self): return -self._value
    def __pos__(self): return +self._value
    def __abs__(self): return abs(self._value)
    def __invert__(self): return ~self._value
    def __len__(self): return len(self._value)
    def __contains__(self, item): return item in self._value
    def __getitem__(self, item): return self._value[item]
    def __setitem__(self, key, value): self._value[key] = value
    def __getattr__(self, item): return getattr(self._value, item)
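A brief usage example of the pass-through behavior:

v = ValueHolder(10)
assert v + 5 == 15
assert 2 * v == 20
v += 1                 # in-place ops mutate the held value and return the holder
assert v.get() == 11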
from nose.tools import raises  # assumed: the stripped decorators below match nose's @raises

def test_vcc2016_dummy():
    data_source = vcc2016.WavFileDataSource('dummy', speakers=['SF1'])

    @raises(ValueError)
    def __test_invalid_speaker():
        vcc2016.WavFileDataSource('dummy', speakers=['test'])

    @raises(RuntimeError)
    def __test_nodir(data_source):
        data_source.collect_files()

    __test_invalid_speaker()
    __test_nodir(data_source)
class GetIfcFL(CallerIfcFL):

    def connect(s, other, parent):
        if isinstance(other, CallerIfcCL):
            m = RecvCL2GiveFL()
            # Give each adapter instance a unique attribute name on the parent.
            if hasattr(parent, 'RecvCL2GiveFL_count'):
                count = parent.RecvCL2GiveFL_count
                setattr(parent, 'RecvCL2GiveFL_' + str(count), m)
            else:
                parent.RecvCL2GiveFL_count = 0
                parent.RecvCL2GiveFL_0 = m
            connect_pairs(other, m.recv, m.give, s)
            parent.RecvCL2GiveFL_count += 1
            return True
        elif isinstance(other, RecvIfcRTL):
            m = RecvRTL2GiveFL(other.MsgType)
            if hasattr(parent, 'RecvRTL2GiveFL_count'):
                count = parent.RecvRTL2GiveFL_count
                setattr(parent, 'RecvRTL2GiveFL_' + str(count), m)
            else:
                parent.RecvRTL2GiveFL_count = 0
                parent.RecvRTL2GiveFL_0 = m
            connect_pairs(other, m.recv, m.give, s)
            parent.RecvRTL2GiveFL_count += 1
            return True
        return False
def _getcommand():
    # Table of (abbreviation, full name) pairs for Vim ex commands.
    var = (('a', 'a'), ('ab', 'ab'), ('abc', 'abclear'), ('abo', 'aboveleft'), ('al', 'all'), ('ar', 'ar'), ('ar', 'args'), ('arga', 'argadd'), ('argd', 'argdelete'), ('argdo', 'argdo'), ('arge', 'argedit'), ('argg', 'argglobal'), ('argl', 'arglocal'), ('argu', 'argument'), ('as', 'ascii'), ('au', 'au'), ('b', 'buffer'), ('bN', 'bNext'), ('ba', 'ball'), ('bad', 'badd'), ('bd', 'bdelete'), ('bel', 'belowright'), ('bf', 'bfirst'), ('bl', 'blast'), ('bm', 'bmodified'), ('bn', 'bnext'), ('bo', 'botright'), ('bp', 'bprevious'), ('br', 'br'), ('br', 'brewind'), ('brea', 'break'), ('breaka', 'breakadd'), ('breakd', 'breakdel'), ('breakl', 'breaklist'), ('bro', 'browse'), ('bu', 'bu'), ('buf', 'buf'), ('bufdo', 'bufdo'), ('buffers', 'buffers'), ('bun', 'bunload'), ('bw', 'bwipeout'), ('c', 'c'), ('c', 'change'), ('cN', 'cN'), ('cN', 'cNext'), ('cNf', 'cNf'), ('cNf', 'cNfile'), ('cabc', 'cabclear'), ('cad', 'cad'), ('cad', 'caddexpr'), ('caddb', 'caddbuffer'), ('caddf', 'caddfile'), ('cal', 'call'), ('cat', 'catch'), ('cb', 'cbuffer'), ('cc', 'cc'), ('ccl', 'cclose'), ('cd', 'cd'), ('ce', 'center'), ('cex', 'cexpr'), ('cf', 'cfile'), ('cfir', 'cfirst'), ('cg', 'cgetfile'), ('cgetb', 'cgetbuffer'), ('cgete', 'cgetexpr'), ('changes', 'changes'), ('chd', 'chdir'), ('che', 'checkpath'), ('checkt', 'checktime'), ('cl', 'cl'), ('cl', 'clist'), ('cla', 'clast'), ('clo', 'close'), ('cmapc', 'cmapclear'), ('cn', 'cn'), ('cn', 'cnext'), ('cnew', 'cnewer'), ('cnf', 'cnf'), ('cnf', 'cnfile'), ('co', 'copy'), ('col', 'colder'), ('colo', 'colorscheme'), ('com', 'com'), ('comc', 'comclear'), ('comp', 'compiler'), ('con', 'con'), ('con', 'continue'), ('conf', 'confirm'), ('cope', 'copen'), ('cp', 'cprevious'), ('cpf', 'cpfile'), ('cq', 'cquit'), ('cr', 'crewind'), ('cs', 'cs'), ('cscope', 'cscope'), ('cstag', 'cstag'), ('cuna', 'cunabbrev'), ('cw', 'cwindow'), ('d', 'd'), ('d', 'delete'), ('de', 'de'), ('debug', 'debug'), ('debugg', 'debuggreedy'), ('del', 'del'), ('delc', 'delcommand'), ('delel', 'delel'), ('delep', 'delep'), ('deletel', 'deletel'), ('deletep', 'deletep'), ('deletl', 'deletl'), ('deletp', 'deletp'), ('delf', 'delf'), ('delf', 'delfunction'), ('dell', 'dell'), ('delm', 'delmarks'), ('delp', 'delp'), ('dep', 'dep'), ('di', 'di'), ('di', 'display'), ('diffg', 'diffget'), ('diffo', 'diffoff'), ('diffp', 'diffpatch'), ('diffpu', 'diffput'), ('diffs', 'diffsplit'), ('difft', 'diffthis'), ('diffu', 'diffupdate'), ('dig', 'dig'), ('dig', 'digraphs'), ('dir', 'dir'), ('dj', 'djump'), ('dl', 'dl'), ('dli', 'dlist'), ('do', 'do'), ('doau', 'doau'), ('dp', 'dp'), ('dr', 'drop'), ('ds', 'dsearch'), ('dsp', 'dsplit'), ('e', 'e'), ('e', 'edit'), ('ea', 'ea'), ('earlier', 'earlier'), ('ec', 'ec'), ('echoe', 'echoerr'), ('echom', 'echomsg'), ('echon', 'echon'), ('el', 'else'), ('elsei', 'elseif'), ('em', 'emenu'), ('en', 'en'), ('en', 'endif'), ('endf', 'endf'), ('endf', 'endfunction'), ('endfo', 'endfor'), ('endfun', 'endfun'), ('endt', 'endtry'), ('endw', 'endwhile'), ('ene', 'enew'), ('ex', 'ex'), ('exi', 'exit'), ('exu', 'exusage'), ('f', 'f'), ('f', 'file'), ('files', 'files'), ('filet', 'filet'), ('filetype', 'filetype'), ('fin', 'fin'), ('fin', 'find'), ('fina', 'finally'), ('fini', 'finish'), ('fir', 'first'), ('fix', 'fixdel'), ('fo', 'fold'), ('foldc', 'foldclose'), ('foldd', 'folddoopen'), ('folddoc', 'folddoclosed'), ('foldo', 'foldopen'), ('for', 'for'), ('fu', 'fu'), ('fu', 'function'), ('fun', 'fun'), ('g', 'g'), ('go', 'goto'), ('gr', 'grep'), ('grepa', 'grepadd'), ('gui', 'gui'), 
('gvim', 'gvim'), ('h', 'h'), ('h', 'help'), ('ha', 'hardcopy'), ('helpf', 'helpfind'), ('helpg', 'helpgrep'), ('helpt', 'helptags'), ('hi', 'hi'), ('hid', 'hide'), ('his', 'history'), ('i', 'i'), ('ia', 'ia'), ('iabc', 'iabclear'), ('if', 'if'), ('ij', 'ijump'), ('il', 'ilist'), ('imapc', 'imapclear'), ('in', 'in'), ('intro', 'intro'), ('is', 'isearch'), ('isp', 'isplit'), ('iuna', 'iunabbrev'), ('j', 'join'), ('ju', 'jumps'), ('k', 'k'), ('kee', 'keepmarks'), ('keepa', 'keepa'), ('keepalt', 'keepalt'), ('keepj', 'keepjumps'), ('keepp', 'keeppatterns'), ('l', 'l'), ('l', 'list'), ('lN', 'lN'), ('lN', 'lNext'), ('lNf', 'lNf'), ('lNf', 'lNfile'), ('la', 'la'), ('la', 'last'), ('lad', 'lad'), ('lad', 'laddexpr'), ('laddb', 'laddbuffer'), ('laddf', 'laddfile'), ('lan', 'lan'), ('lan', 'language'), ('lat', 'lat'), ('later', 'later'), ('lb', 'lbuffer'), ('lc', 'lcd'), ('lch', 'lchdir'), ('lcl', 'lclose'), ('lcs', 'lcs'), ('lcscope', 'lcscope'), ('le', 'left'), ('lefta', 'leftabove'), ('lex', 'lexpr'), ('lf', 'lfile'), ('lfir', 'lfirst'), ('lg', 'lgetfile'), ('lgetb', 'lgetbuffer'), ('lgete', 'lgetexpr'), ('lgr', 'lgrep'), ('lgrepa', 'lgrepadd'), ('lh', 'lhelpgrep'), ('ll', 'll'), ('lla', 'llast'), ('lli', 'llist'), ('lmak', 'lmake'), ('lmapc', 'lmapclear'), ('lne', 'lne'), ('lne', 'lnext'), ('lnew', 'lnewer'), ('lnf', 'lnf'), ('lnf', 'lnfile'), ('lo', 'lo'), ('lo', 'loadview'), ('loadk', 'loadk'), ('loadkeymap', 'loadkeymap'), ('loc', 'lockmarks'), ('lockv', 'lockvar'), ('lol', 'lolder'), ('lop', 'lopen'), ('lp', 'lprevious'), ('lpf', 'lpfile'), ('lr', 'lrewind'), ('ls', 'ls'), ('lt', 'ltag'), ('lua', 'lua'), ('luado', 'luado'), ('luafile', 'luafile'), ('lv', 'lvimgrep'), ('lvimgrepa', 'lvimgrepadd'), ('lw', 'lwindow'), ('m', 'move'), ('ma', 'ma'), ('ma', 'mark'), ('mak', 'make'), ('marks', 'marks'), ('mat', 'match'), ('menut', 'menut'), ('menut', 'menutranslate'), ('mes', 'mes'), ('messages', 'messages'), ('mk', 'mk'), ('mk', 'mkexrc'), ('mks', 'mksession'), ('mksp', 'mkspell'), ('mkv', 'mkv'), ('mkv', 'mkvimrc'), ('mkvie', 'mkview'), ('mo', 'mo'), ('mod', 'mode'), ('mz', 'mz'), ('mz', 'mzscheme'), ('mzf', 'mzfile'), ('n', 'n'), ('n', 'next'), ('nb', 'nbkey'), ('nbc', 'nbclose'), ('nbs', 'nbstart'), ('ne', 'ne'), ('new', 'new'), ('nmapc', 'nmapclear'), ('noa', 'noa'), ('noautocmd', 'noautocmd'), ('noh', 'nohlsearch'), ('nu', 'number'), ('o', 'o'), ('o', 'open'), ('ol', 'oldfiles'), ('omapc', 'omapclear'), ('on', 'only'), ('opt', 'options'), ('ownsyntax', 'ownsyntax'), ('p', 'p'), ('p', 'print'), ('pc', 'pclose'), ('pe', 'pe'), ('pe', 'perl'), ('ped', 'pedit'), ('perld', 'perldo'), ('po', 'pop'), ('popu', 'popu'), ('popu', 'popup'), ('pp', 'ppop'), ('pr', 'pr'), ('pre', 'preserve'), ('prev', 'previous'), ('pro', 'pro'), ('prof', 'profile'), ('profd', 'profdel'), ('promptf', 'promptfind'), ('promptr', 'promptrepl'), ('ps', 'psearch'), ('ptN', 'ptN'), ('ptN', 'ptNext'), ('pta', 'ptag'), ('ptf', 'ptfirst'), ('ptj', 'ptjump'), ('ptl', 'ptlast'), ('ptn', 'ptn'), ('ptn', 'ptnext'), ('ptp', 'ptprevious'), ('ptr', 'ptrewind'), ('pts', 'ptselect'), ('pu', 'put'), ('pw', 'pwd'), ('py', 'py'), ('py', 'python'), ('py3', 'py3'), ('py3', 'py3'), ('py3do', 'py3do'), ('pydo', 'pydo'), ('pyf', 'pyfile'), ('python3', 'python3'), ('q', 'q'), ('q', 'quit'), ('qa', 'qall'), ('quita', 'quitall'), ('r', 'r'), ('r', 'read'), ('re', 're'), ('rec', 'recover'), ('red', 'red'), ('red', 'redo'), ('redi', 'redir'), ('redr', 'redraw'), ('redraws', 'redrawstatus'), ('reg', 'registers'), ('res', 'resize'), ('ret', 'retab'), 
('retu', 'return'), ('rew', 'rewind'), ('ri', 'right'), ('rightb', 'rightbelow'), ('ru', 'ru'), ('ru', 'runtime'), ('rub', 'ruby'), ('rubyd', 'rubydo'), ('rubyf', 'rubyfile'), ('rundo', 'rundo'), ('rv', 'rviminfo'), ('sN', 'sNext'), ('sa', 'sargument'), ('sal', 'sall'), ('san', 'sandbox'), ('sav', 'saveas'), ('sb', 'sbuffer'), ('sbN', 'sbNext'), ('sba', 'sball'), ('sbf', 'sbfirst'), ('sbl', 'sblast'), ('sbm', 'sbmodified'), ('sbn', 'sbnext'), ('sbp', 'sbprevious'), ('sbr', 'sbrewind'), ('scrip', 'scrip'), ('scrip', 'scriptnames'), ('scripte', 'scriptencoding'), ('scs', 'scs'), ('scscope', 'scscope'), ('se', 'set'), ('setf', 'setfiletype'), ('setg', 'setglobal'), ('setl', 'setlocal'), ('sf', 'sfind'), ('sfir', 'sfirst'), ('sh', 'shell'), ('si', 'si'), ('sig', 'sig'), ('sign', 'sign'), ('sil', 'silent'), ('sim', 'simalt'), ('sl', 'sl'), ('sl', 'sleep'), ('sla', 'slast'), ('sm', 'smagic'), ('sm', 'smap'), ('sme', 'sme'), ('smenu', 'smenu'), ('sn', 'snext'), ('sni', 'sniff'), ('sno', 'snomagic'), ('snoreme', 'snoreme'), ('snoremenu', 'snoremenu'), ('so', 'so'), ('so', 'source'), ('sor', 'sort'), ('sp', 'split'), ('spe', 'spe'), ('spe', 'spellgood'), ('spelld', 'spelldump'), ('spelli', 'spellinfo'), ('spellr', 'spellrepall'), ('spellu', 'spellundo'), ('spellw', 'spellwrong'), ('spr', 'sprevious'), ('sre', 'srewind'), ('st', 'st'), ('st', 'stop'), ('sta', 'stag'), ('star', 'star'), ('star', 'startinsert'), ('start', 'start'), ('startg', 'startgreplace'), ('startr', 'startreplace'), ('stj', 'stjump'), ('stopi', 'stopinsert'), ('sts', 'stselect'), ('sun', 'sunhide'), ('sunme', 'sunme'), ('sunmenu', 'sunmenu'), ('sus', 'suspend'), ('sv', 'sview'), ('sw', 'swapname'), ('sy', 'sy'), ('syn', 'syn'), ('sync', 'sync'), ('syncbind', 'syncbind'), ('syntime', 'syntime'), ('t', 't'), ('tN', 'tN'), ('tN', 'tNext'), ('ta', 'ta'), ('ta', 'tag'), ('tab', 'tab'), ('tabN', 'tabN'), ('tabN', 'tabNext'), ('tabc', 'tabclose'), ('tabd', 'tabdo'), ('tabe', 'tabedit'), ('tabf', 'tabfind'), ('tabfir', 'tabfirst'), ('tabl', 'tablast'), ('tabm', 'tabmove'), ('tabn', 'tabnext'), ('tabnew', 'tabnew'), ('tabo', 'tabonly'), ('tabp', 'tabprevious'), ('tabr', 'tabrewind'), ('tabs', 'tabs'), ('tags', 'tags'), ('tc', 'tcl'), ('tcld', 'tcldo'), ('tclf', 'tclfile'), ('te', 'tearoff'), ('tf', 'tfirst'), ('th', 'throw'), ('tj', 'tjump'), ('tl', 'tlast'), ('tm', 'tm'), ('tm', 'tmenu'), ('tn', 'tn'), ('tn', 'tnext'), ('to', 'topleft'), ('tp', 'tprevious'), ('tr', 'tr'), ('tr', 'trewind'), ('try', 'try'), ('ts', 'tselect'), ('tu', 'tu'), ('tu', 'tunmenu'), ('u', 'u'), ('u', 'undo'), ('un', 'un'), ('una', 'unabbreviate'), ('undoj', 'undojoin'), ('undol', 'undolist'), ('unh', 'unhide'), ('unl', 'unl'), ('unlo', 'unlockvar'), ('uns', 'unsilent'), ('up', 'update'), ('v', 'v'), ('ve', 've'), ('ve', 'version'), ('verb', 'verbose'), ('vert', 'vertical'), ('vi', 'vi'), ('vi', 'visual'), ('vie', 'view'), ('vim', 'vimgrep'), ('vimgrepa', 'vimgrepadd'), ('viu', 'viusage'), ('vmapc', 'vmapclear'), ('vne', 'vnew'), ('vs', 'vsplit'), ('w', 'w'), ('w', 'write'), ('wN', 'wNext'), ('wa', 'wall'), ('wh', 'while'), ('win', 'win'), ('win', 'winsize'), ('winc', 'wincmd'), ('windo', 'windo'), ('winp', 'winpos'), ('wn', 'wnext'), ('wp', 'wprevious'), ('wq', 'wq'), ('wqa', 'wqall'), ('ws', 'wsverb'), ('wundo', 'wundo'), ('wv', 'wviminfo'), ('x', 'x'), ('x', 'xit'), ('xa', 'xall'), ('xmapc', 'xmapclear'), ('xme', 'xme'), ('xmenu', 'xmenu'), ('xnoreme', 'xnoreme'), ('xnoremenu', 'xnoremenu'), ('xunme', 'xunme'), ('xunmenu', 'xunmenu'), ('xwininfo', 'xwininfo'), 
('y', 'yank'))
    return var
from functools import partial

from torch.optim.lr_scheduler import LambdaLR

def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps,
                                              lr_end=1e-07, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})')
    lr_lambda = partial(_get_polynomial_decay_schedule_with_warmup_lr_lambda,
                        num_warmup_steps=num_warmup_steps,
                        num_training_steps=num_training_steps,
                        lr_end=lr_end,
                        power=power,
                        lr_init=lr_init)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
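A usage sketch, assuming the referenced `_get_polynomial_decay_schedule_with_warmup_lr_lambda` helper is in scope; the model, optimizer, and step counts are placeholders:

import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
scheduler = get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps=100, num_training_steps=1000)
optimizer.step()
scheduler.step()  # call once per training step to advance the decay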
@pytest.mark.skipif(pyproj._datadir._USE_GLOBAL_CONTEXT, reason='Global Context not Threadsafe.')
def test_proj_multithread():
    trans = Proj('EPSG:3857')

    def transform(num):
        return trans(1, 2)

    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        for result in executor.map(transform, range(10)):
            pass
def set_wled_ip(request: WSGIRequest) -> HttpResponse:
    (value, response) = extract_value(request.POST)
    try:
        socket.inet_aton(value)
    except socket.error:
        return HttpResponseBadRequest('invalid ip')
    storage.put('wled_ip', value)
    _notify_settings_changed('wled')
    return response
def main():
    parser = argparse.ArgumentParser(description='Tool to create a commit list')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--create_new', nargs=2)
    group.add_argument('--update_to')
    group.add_argument('--stat', action='store_true')
    group.add_argument('--export_markdown', action='store_true')
    parser.add_argument('--path', default='results/commitlist.csv')
    args = parser.parse_args()

    if args.create_new:
        create_new(args.path, args.create_new[0], args.create_new[1])
        return
    if args.update_to:
        update_existing(args.path, args.update_to)
        return
    if args.stat:
        commits = CommitList.from_existing(args.path)
        stats = commits.stat()
        pprint.pprint(stats)
        return
    if args.export_markdown:
        commits = CommitList.from_existing(args.path)
        categories = list(commits.stat().keys())
        for category in categories:
            print(f'Exporting {category}...')
            lines = get_markdown_header(category)
            lines += to_markdown(commits, category)
            filename = f'results/export/result_{category}.md'
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename, 'w') as f:
                f.writelines(lines)
        return
from functools import reduce
from math import ceil, log2

class PackedArray(BaseRTLIRDataType):

    def __init__(s, dim_sizes, sub_dtype):
        assert isinstance(sub_dtype, BaseRTLIRDataType), f'non-RTLIR data type {sub_dtype} as sub type of array.'
        assert not isinstance(sub_dtype, PackedArray), 'nested PackedArray is not allowed!'
        assert len(dim_sizes) >= 1, 'PackedArray dimension count should be greater than 0!'
        assert sum(dim_sizes) > 0, 'PackedArray should have at least one element!'
        s.dim_sizes = dim_sizes
        s.sub_dtype = sub_dtype

    def __eq__(s, other):
        if not isinstance(other, PackedArray):
            return False
        if len(s.dim_sizes) != len(other.dim_sizes):
            return False
        if not all(a == b for (a, b) in zip(s.dim_sizes, other.dim_sizes)):
            return False
        return s.sub_dtype == other.sub_dtype

    def __hash__(s):
        return hash((type(s), tuple(s.dim_sizes), s.sub_dtype))

    def get_name(s):
        return s.get_full_name()

    def get_full_name(s):
        dimension_str = 'x'.join(str(d) for d in s.dim_sizes)
        return f'{s.sub_dtype.get_full_name()}x{dimension_str}'

    def get_length(s):
        return int(s.sub_dtype.get_length() * reduce(lambda p, x: p * x, s.dim_sizes, 1))

    def get_next_dim_type(s):
        # Peel off the outermost dimension.
        if len(s.dim_sizes) == 1:
            return s.sub_dtype
        return PackedArray(s.dim_sizes[1:], s.sub_dtype)

    def get_dim_sizes(s):
        return s.dim_sizes

    def get_index_width(s):
        assert s.dim_sizes, 'rdt.PackedArray is created without dimension!'
        n_elements = s.dim_sizes[0]
        if n_elements <= 1:
            return 1
        else:
            return ceil(log2(n_elements))

    def get_sub_dtype(s):
        return s.sub_dtype

    def __call__(s, obj):
        return s == obj

    def __str__(s):
        return f'PackedArray{s.dim_sizes} of {s.sub_dtype}'
def load_language(dataset, task, dataset_key, model_args, extractor, subgoal_idx=None, test_split=False):
    feat_numpy = dataset.load_features(task, subgoal_idx=subgoal_idx)
    if not test_split:
        frames_expert = dataset.load_frames(dataset_key)
        model_util.test_extractor(task['root'], extractor, frames_expert)
    if (not test_split) and ('frames' in dataset.ann_type):
        feat_numpy['frames'] = frames_expert
    (_, input_dict, _) = data_util.tensorize_and_pad([(task, feat_numpy)], model_args.device, dataset.pad)
    return input_dict
class Migration(migrations.Migration):

    dependencies = [('api', '0016_auto__1619')]

    operations = [
        migrations.AlterField(
            model_name='specialsnake',
            name='images',
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.URLField(),
                help_text='Images displaying this special snake.',
                size=None)),
    ]
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    global best_acc2
    args.gpu = gpu
    if args.gpu is not None:
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if (args.dist_url == 'env://') and (args.rank == -1):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    model = InceptionResNetV2(num_classes=args.num_class)
    model_ema = InceptionResNetV2(num_classes=args.num_class)
    model_ema.load_state_dict(model.state_dict())
    # Per-class curriculum labels passed through to train().
    c = [1, 1, 2, 1, 1, 1, 1, 2, 1, 2, 1, 2, 0, 2, 2, 0, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 0, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 2, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 1, 1, 2, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 1, 1, 1, 0, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 1, 1, 1, 0, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 2, 0, 1, 0, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 0, 2, 0, 2, 2, 1, 2, 1, 1, 1, 2, 2, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 1, 1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 0, 1, 0, 0, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 1, 1, 0, 2, 2, 0, 1, 1, 2, 1, 1, 1, 2, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 2, 0, 1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 0, 2, 1, 1, 0, 1, 1, 1, 0, 1, 2, 0, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 2, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 0, 2, 2, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 2, 1, 2, 1, 1, 1, 2, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 2, 1, 2, 1, 0, 2, 1, 1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 2, 0, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2]
    vnet = VNet(1, 100, 100, 1, 3)
    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args.distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            model_ema.cuda(args.gpu)
            vnet.cuda(args.gpu)
            vnet.load_state_dict(torch.load('./vnet_119.pth'))
            print('hellortyiuiopfgjhkjlkl;;rttyuiop')
            # Divide batch size and workers across the GPUs of this node.
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
            model_ema = torch.nn.parallel.DistributedDataParallel(model_ema, device_ids=[args.gpu])
            vnet = torch.nn.parallel.DistributedDataParallel(vnet, device_ids=[args.gpu])
        else:
            model.cuda()
            model_ema.cuda()
            vnet.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
            model_ema = torch.nn.parallel.DistributedDataParallel(model_ema)
            vnet = torch.nn.parallel.DistributedDataParallel(vnet)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        model_ema = model_ema.cuda(args.gpu)
        vnet = vnet.cuda(args.gpu)
    elif args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
        model_ema.features = torch.nn.DataParallel(model_ema.features)
        model_ema.cuda()
        vnet.features = torch.nn.DataParallel(vnet.features)
        vnet.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
        model_ema = torch.nn.DataParallel(model_ema).cuda()
        vnet = torch.nn.DataParallel(vnet).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    transform_train = transforms.Compose([transforms.Resize(320), transforms.RandomResizedCrop(299),
                                          transforms.RandomHorizontalFlip(), transforms.ToTensor(),
                                          transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    transform_test = transforms.Compose([transforms.Resize(320), transforms.CenterCrop(299),
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    transform_imagenet = transforms.Compose([transforms.Resize(320), transforms.CenterCrop(299),
                                             transforms.ToTensor(),
                                             transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    all_dataset = webvision_dataset(root_dir=args.root_dir, transform=transform_train, mode='all',
                                    num_class=args.num_class)
    test_dataset = webvision_dataset(root_dir=args.root_dir, transform=transform_test, mode='test',
                                     num_class=args.num_class)
    imagenet_val = imagenet_dataset(root_dir=args.root_dir, transform=transform_imagenet,
                                    num_class=args.num_class)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(all_dataset)
    else:
        train_sampler = None
    trainloader = torch.utils.data.DataLoader(dataset=all_dataset, batch_size=args.batch_size,
                                              shuffle=(train_sampler is None), num_workers=args.workers,
                                              pin_memory=True, sampler=train_sampler)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=args.batch_size,
                                              shuffle=False, num_workers=args.workers, pin_memory=True)
    imagenet_loader = torch.utils.data.DataLoader(dataset=imagenet_val, batch_size=args.batch_size,
                                                  shuffle=False, num_workers=args.workers, pin_memory=True)
    web_num = [value for (_, value) in trainloader.dataset.train_labels.items()]
    print('len:', len(web_num))
    get_label = get_loss(labels=np.asarray(web_num), num_classes=args.num_class, momentum=0.9)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # Warm up with plain cross-entropy for the first 10 epochs.
        if epoch < 10:
            train_ce(trainloader, model, criterion, optimizer, epoch, args, model_ema)
        else:
            train(trainloader, model, criterion, optimizer, epoch, args, vnet, model_ema, c, get_label)
        acc1 = validate(test_loader, model, criterion, args)
        acc2 = validate(imagenet_loader, model, criterion, args)
        acc3 = validate(test_loader, model_ema, criterion, args)
        acc4 = validate(imagenet_loader, model_ema, criterion, args)
        torch.save(model.state_dict(), './save/model_%d.pth' % epoch)
        torch.save(model_ema.state_dict(), './save/model_ema_%d.pth' % epoch)
        torch.save(vnet.state_dict(), './save/vnet_%d.pth' % epoch)
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        best_acc2 = max(acc2, best_acc2)
        best_acc1 = max(acc3, best_acc1)
        best_acc2 = max(acc4, best_acc2)
        if (not args.multiprocessing_distributed) or (args.multiprocessing_distributed
                                                      and (args.rank % ngpus_per_node == 0)):
            save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(),
                             'best_acc1': best_acc1, 'best_acc2': best_acc2,
                             'optimizer': optimizer.state_dict()}, is_best)
class DlrmSparseNetwork(BaseNetwork):

    def __init__(self, categorical_features, numerical_features, multivalue_features,
                 attention_features, loss_ctr, loss_cvr, ptype='mean',
                 hidden_sizes=[512, 512, 512], scope_name='dlrm2_network',
                 save_model_mode='placeholder', save_task_id=0, masks_dir=None):
        self._categorical_features = categorical_features
        self._numerical_features = numerical_features
        self._multivalue_features = multivalue_features
        self._attention_features = attention_features
        self._loss_ctr = loss_ctr
        self._loss_cvr = loss_cvr
        self._hidden_sizes = hidden_sizes
        self._scope_name = scope_name
        self._ptype = ptype
        self._mode = save_model_mode
        self._weights = {}
        self.masks = {}
        self.masks_dir = masks_dir
        self._save_task_id = save_task_id

    def _dense_layer(self, name, inputs, units, mask, activation=None, use_bias=True,
                     kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False)):
        # Dense layer whose weight matrix is elementwise-masked (for pruning).
        weights = tf.get_variable(name=name + '_w', shape=[inputs.shape[1], units],
                                  initializer=kernel_initializer, trainable=True)
        weights = tf.multiply(weights, mask)
        self._weights[name] = weights
        output = tf.matmul(inputs, weights)
        if use_bias:
            bias = tf.get_variable(name=name + '_biases', shape=[units],
                                   initializer=tf.zeros_initializer())
            output += bias
        if activation:
            return activation(output)
        else:
            return output

    def _train_fn(self, example):
        with tf.variable_scope(self._scope_name, reuse=tf.AUTO_REUSE):
            (logits_ctr, logits_cvr) = self._build_graph(example)
            loss_ctr = self._loss_ctr.loss_fn(logits_ctr, example)
            loss_cvr = self._loss_cvr.loss_fn(logits_cvr, example)
            return (loss_ctr, loss_cvr)

    def _eval_fn(self, example):
        with tf.variable_scope(self._scope_name, reuse=tf.AUTO_REUSE):
            (logits_ctr, logits_cvr) = self._build_graph(example)
            outputs_ctr = tf.sigmoid(logits_ctr)
            outputs_cvr = tf.sigmoid(logits_cvr)
            return (outputs_ctr, outputs_cvr)

    def _get_serve_inputs(self):
        inputs = self._categorical_features.copy()
        for feature in self._multivalue_features:
            inputs.append(feature)
        return inputs

    def _serve_fn_opt(self, example):
        batch_size = self._get_batch_size(example)
        with tf.variable_scope(self._scope_name, reuse=tf.AUTO_REUSE):
            categorical_part = self._tile_tensors_with_batch_size(self._build_categorical_part(example), batch_size)
            multivalue_part = self._tile_tensors_with_batch_size(self._build_multivalue_part(example), batch_size)
            hiddens = tf.stack(categorical_part + multivalue_part, axis=1)
            hiddens = self._build_cross_interaction(hiddens)
            logits = self._build_upper_part_graph_saved_pb(hiddens)
            outputs_ctr = tf.sigmoid(logits)
            outputs_cvr = tf.nn.relu(logits)
            outputs = [outputs_ctr, outputs_cvr]
            print('saved pb task_id {}'.format(self._save_task_id))
            return outputs[self._save_task_id]

    def _serve_fn(self, example):
        if self._mode == 'example':
            outputs = self._eval_fn(example)
        else:
            outputs = self._serve_fn_opt(example)
        return outputs

    def _build_graph(self, inputs):
        hiddens = self._build_lower_part_graph(inputs)
        hiddens = self._build_cross_interaction(hiddens)
        (outputs_ctr, outputs_cvr) = self._build_upper_part_graph(hiddens)
        return (outputs_ctr, outputs_cvr)

    def _build_lower_part_graph(self, inputs):
        categorical_part = self._build_categorical_part(inputs)
        multivalue_part = self._build_multivalue_part(inputs)
        return tf.stack(categorical_part + multivalue_part, axis=1)

    def _build_upper_part_graph(self, inputs):
        hidden = inputs
        for (i, size) in enumerate(self._hidden_sizes):
            mask_name = 'fc_' + str(i) + '_m'
            if mask_name not in self.masks:
                mask = tf.placeholder(tf.float32, shape=(hidden.shape[1], size), name=mask_name)
                self.masks[mask_name] = mask
            hidden = self._dense_layer(name='fc_' + str(i), inputs=hidden, units=size,
                                       mask=self.masks[mask_name], activation=tf.nn.relu)
        logit_ctr = slim.fully_connected(hidden, 1, activation_fn=None, scope='logit_ctr')
        logit_cvr = slim.fully_connected(hidden, 1, activation_fn=None, scope='logit_cvr')
        return (logit_ctr, logit_cvr)

    def _build_upper_part_graph_v2(self, inputs):
        hidden = inputs
        for (i, size) in enumerate(self._hidden_sizes):
            mask_name = 'fc_' + str(i) + '_m'
            if mask_name not in self.masks:
                mask = tf.placeholder(tf.float32, shape=(hidden.shape[1], size), name=mask_name)
                self.masks[mask_name] = mask
            hidden = self._dense_layer(name='fc_' + str(i), inputs=hidden, units=size,
                                       mask=self.masks[mask_name], activation=tf.nn.relu)
        hidden_ctr = slim.fully_connected(inputs, 512, scope='hidden_ctr_' + str(0))
        hidden_ctr = tf.concat([hidden, hidden_ctr], axis=1)
        hidden_ctr = slim.fully_connected(hidden_ctr, 256, scope='hidden_ctr_' + str(1))
        hidden_ctr = slim.fully_connected(hidden_ctr, 128, scope='hidden_ctr_' + str(2))
        hidden_cvr = slim.fully_connected(inputs, 512, scope='hidden_cvr_' + str(0))
        hidden_cvr = tf.concat([hidden, hidden_cvr], axis=1)
        hidden_cvr = slim.fully_connected(hidden_cvr, 256, scope='hidden_cvr_' + str(1))
        hidden_cvr = slim.fully_connected(hidden_cvr, 128, scope='hidden_cvr_' + str(2))
        logit_ctr = slim.fully_connected(hidden_ctr, 1, activation_fn=None, scope='logit_ctr')
        logit_cvr = slim.fully_connected(hidden_cvr, 1, activation_fn=None, scope='logit_cvr')
        return (logit_ctr, logit_cvr)

    def _build_upper_part_graph_saved_pb(self, inputs):
        # Same upper MLP, but with the pruning masks baked in as constants for export.
        hidden = inputs
        mask_dic = self._load_masks(self._save_task_id, self.masks_dir)
        for (i, size) in enumerate(self._hidden_sizes):
            mask_name = 'fc_' + str(i) + '_m'
            key = self._scope_name + '/fc_' + str(i) + '_w:0'
            print(key)
            m = mask_dic[key]
            mask = tf.constant(m, dtype=tf.float32, shape=(hidden.shape[1], size), name=mask_name)
            hidden = self._dense_layer(name='fc_' + str(i), inputs=hidden, units=size,
                                       mask=mask, activation=tf.nn.relu)
        return slim.fully_connected(hidden, 1, activation_fn=None, scope='logit')

    def _build_cross_interaction(self, inputs):
        # Pairwise dot products between embeddings, concatenated with the flattened embeddings.
        cross = tf.matmul(inputs, tf.transpose(inputs, perm=[0, 2, 1]))
        cross = tf.reshape(cross, [-1, cross.get_shape()[1] * cross.get_shape()[2]])
        flatten = tf.reshape(inputs, [-1, inputs.get_shape()[1] * inputs.get_shape()[2]])
        outputs = tf.concat([cross, flatten], axis=1)
        return outputs

    def _build_categorical_part(self, inputs):
        return [tf.squeeze(inputs[name], axis=1) for name in self._categorical_features]

    def _build_numerical_part(self, inputs):
        outputs = []
        if len(self._numerical_features) != 0:
            h0 = tf.concat([inputs[name] for name in self._numerical_features], axis=1)
            h1 = slim.fully_connected(h0, 128, scope='numerical_fc1')
            h2 = slim.fully_connected(h1, 64, scope='numerical_fc2')
            outputs = slim.fully_connected(h2, 32, scope='numerical_fc3')
        return [outputs]

    def _build_multivalue_part(self, inputs):
        def pooling(vals, ptype, fea_name):
            if ptype == 'mean':
                return tf.reduce_mean(vals, axis=1)
            elif ptype == 'sum':
                return tf.reduce_sum(vals, axis=1)
            elif ptype == 'fc':
                return tf.squeeze(slim.fully_connected(tf.transpose(vals, [0, 2, 1]), 1,
                                                       scope='multivalue_fc_%s' % fea_name))
        outputs = [pooling(inputs[name], self._ptype, name) for name in self._multivalue_features]
        return outputs

    def _build_attention_part(self, inputs):
        def attention(key, vals):
            weight = tf.reduce_sum(tf.multiply(key, vals), axis=2)
            sum_weight = tf.reduce_sum(weight, axis=1, keepdims=True)
            norm_weight = weight / sum_weight
            return tf.reduce_sum(tf.multiply(tf.expand_dims(norm_weight, axis=2), vals), axis=1)
        outputs = [attention(inputs[names[0]], inputs[names[1]]) for names in self._attention_features]
        return outputs

    def _load_masks(self, task_id, masks_dir):
        import os
        import pickle
        masks_path = [os.path.join(masks_dir, f) for f in os.listdir(masks_dir) if not f.startswith('init')]
        masks_path = list(sorted(filter(lambda f: os.path.isfile(f), masks_path),
                                 key=lambda s: int(os.path.basename(s).split('.pkl')[0])))
        masks = []
        print('loading masks')
        for path in masks_path:
            with open(path, 'rb') as f:
                dump = pickle.load(f)
            assert ('mask' in dump) and ('pruning_time' in dump)
            print('loading pruning_time {}'.format(dump['pruning_time']))
            masks.append(dump['mask'])
        assert len(masks) == len(masks_path)
        mask = masks[task_id]
        return mask
class FakeSongsMenuPlugin(SongsMenuPlugin):
    PLUGIN_NAME = 'Fake Songs Menu Plugin'
    PLUGIN_ID = 'SongsMunger'
    MAX_INVOCATIONS = 50

    def __init__(self, songs, library):
        super().__init__(songs, library)
        self.total = 0

    def plugin_song(self, song):
        self.total += 1
        if self.total > self.MAX_INVOCATIONS:
            raise ValueError("Shouldn't have called me on this many songs (%d > %d)"
                             % (self.total, self.MAX_INVOCATIONS))
@pytest.mark.parametrize('enabled', [True, False])
def test_auto_input_impedance_enabled(enabled):
    with expected_protocol(HP34401A, [
        ('INP:IMP:AUTO?', '1' if enabled else '0'),
        (f'INP:IMP:AUTO {1 if enabled else 0}', None),
    ]) as inst:
        assert enabled == inst.auto_input_impedance_enabled
        inst.auto_input_impedance_enabled = enabled
class PwnLookup(object):

    def __init__(self, spi1, spi2, dc=4, cs1=16, rst=17, cs2=5, rotation=270):
        self.display = Display(spi1, dc=Pin(dc), cs=Pin(cs1), rst=Pin(rst),
                               width=320, height=240, rotation=rotation)
        self.unispace = XglcdFont('fonts/Unispace12x24.c', 12, 24)
        self.keyboard = TouchKeyboard(self.display, self.unispace)
        self.xpt = Touch(spi2, cs=Pin(cs2), int_pin=Pin(0), int_handler=self.touchscreen_press)
        self.wlan = WLAN(STA_IF)

    def lookup(self, pwd):
        # k-anonymity lookup: only the first 5 hex chars of the SHA-1 leave the device.
        sha1pwd = sha1(pwd.encode('utf-8')).digest()
        sha1pwd = hexlify(sha1pwd).upper().decode('utf-8')
        (head, tail) = (sha1pwd[:5], sha1pwd[5:])
        if not self.wlan.isconnected():
            raise IOError('WiFi network error')
        hits = 0
        gc.collect()
        # The URL was garbled in the source; the Have I Been Pwned range API is assumed here.
        with get('https://api.pwnedpasswords.com/range/' + head) as response:
            for line in response.iter_lines():
                l = line.decode(response.encoding).split(':')
                if l[0] == tail:
                    hits = int(l[1])
                    break
        gc.collect()
        return hits

    def touchscreen_press(self, x, y):
        if self.keyboard.handle_keypress(x, y, debug=False) is True:
            self.keyboard.locked = True
            pwd = self.keyboard.kb_text
            self.keyboard.show_message('Searching...', color565(0, 0, 255))
            try:
                hits = self.lookup(pwd)
                if hits:
                    msg = 'PASSWORD HITS: {0}'.format(hits)
                    self.keyboard.show_message(msg, color565(255, 0, 0))
                else:
                    msg = 'PASSWORD NOT FOUND'
                    self.keyboard.show_message(msg, color565(0, 255, 0))
            except Exception as e:
                if hasattr(e, 'message'):
                    self.keyboard.show_message(e.message[:22], color565(255, 255, 255))
                else:
                    self.keyboard.show_message(str(e)[:22], color565(255, 255, 255))
            self.keyboard.waiting = True
            self.keyboard.locked = False
@pytest.mark.mosaiqdb
def test_get_patient_name(connection: pymedphys.mosaiq.Connection):
    mocks.create_mock_patients()
    result_all = pymedphys.mosaiq.execute(
        connection,
        """
        SELECT
            Pat_Id1,
            First_Name,
            Last_Name
        FROM Patient
        """,
    )
    for patient in result_all:
        (pat_id1, first_name, last_name) = patient
        print(f'Pat_ID1:{pat_id1} First Name:{first_name} Last Name:{last_name}')
    assert len(result_all) == 3

    moe_patient_name = helpers.get_patient_name(connection, 'MR8002')
    assert moe_patient_name == 'HOWARD, Moe'
class F19_Realm(KickstartCommand):
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.join_realm = None
        self.join_args = []
        self.discover_options = []

    def _parseArguments(self, string):
        if self.join_realm:
            raise KickstartParseError(_("The realm command 'join' should only be specified once"),
                                      lineno=self.lineno)
        args = shlex.split(string)
        if not args:
            raise KickstartParseError(_('Missing realm command arguments'), lineno=self.lineno)
        command = args.pop(0)
        if command == 'join':
            self._parseJoin(args)
        else:
            raise KickstartParseError(_("Unsupported realm '%s' command") % command, lineno=self.lineno)

    def _parseJoin(self, args):
        try:
            (opts, remaining) = getopt.getopt(args, '', ('client-software=', 'server-software=',
                                                         'membership-software=', 'one-time-password=',
                                                         'no-password', 'computer-ou='))
        except getopt.GetoptError as ex:
            raise KickstartParseError(_('Invalid realm arguments: %s') % ex, lineno=self.lineno)
        if len(remaining) != 1:
            raise KickstartParseError(_('Specify only one realm to join'), lineno=self.lineno)
        self.join_realm = remaining[0]
        self.join_args = args
        self.discover_options = []
        supported_discover_options = ('--client-software', '--server-software', '--membership-software')
        for (o, a) in opts:
            if o in supported_discover_options:
                self.discover_options.append('%s=%s' % (o, a))

    def _getCommandsAsStrings(self):
        commands = []
        if self.join_args:
            args = [shlex.quote(arg) for arg in self.join_args]
            commands.append('realm join ' + ' '.join(args))
        return commands

    def __str__(self):
        retval = KickstartCommand.__str__(self)
        commands = self._getCommandsAsStrings()
        if commands:
            retval += '# Realm or domain membership\n'
            retval += '\n'.join(commands)
            retval += '\n'
        return retval

    def parse(self, args):
        self._parseArguments(self.currentLine[len(self.currentCmd):].strip())
        return self

    def _getParser(self):
        return KSOptionParser(prog='realm', description='define an Active Directory realm to join',
                              version=F19)
def test_reuters():
    # Only exercise the (slow) dataset download on roughly 20% of runs.
    random.seed(time.time())
    if random.random() > 0.8:
        ((x_train, y_train), (x_test, y_test)) = reuters.load_data()
        assert len(x_train) == len(y_train)
        assert len(x_test) == len(y_test)
        assert len(x_train) + len(x_test) == 11228

        ((x_train, y_train), (x_test, y_test)) = reuters.load_data(maxlen=10)
        assert len(x_train) == len(y_train)
        assert len(x_test) == len(y_test)

        word_index = reuters.get_word_index()
        assert isinstance(word_index, dict)
def test_from_recap_with_cyclic_reference():
    converter = ProtobufConverter()
    linked_list_node_type = StructType(
        fields=[
            IntType(bits=32, name='value'),
            ProxyType(alias='build.recap.LinkedListNode', registry=converter.registry, name='next'),
        ],
        alias='build.recap.LinkedListNode',
    )
    result = converter.from_recap(linked_list_node_type)
    assert isinstance(result, ast.File)
    assert len(result.file_elements) == 2

    package = result.file_elements[0]
    assert isinstance(package, ast.Package)
    assert package.name == 'build.recap'

    message = result.file_elements[1]
    assert isinstance(message, ast.Message)
    assert message.name == 'LinkedListNode'
    assert len(message.elements) == 2

    value_field = message.elements[0]
    assert isinstance(value_field, ast.Field)
    assert value_field.name == 'value'
    assert value_field.type == 'int32'
    assert value_field.number == 1

    next_field = message.elements[1]
    assert isinstance(next_field, ast.Field)
    assert next_field.name == 'next'
    assert next_field.type == '.build.recap.LinkedListNode'
    assert next_field.number == 2
def process_kym_files(kym_phashes_file):
    kym_phashes_by_meme_dic = {}
    kym_images_dic = {}
    kym_images_dic_reverse = {}
    kym_meme_name = {}
    print('[i] process_kym_files', kym_phashes_file)
    with open(kym_phashes_file) as fd:
        for (idx, line) in enumerate(fd.readlines()):
            if idx == 0:  # skip the header row
                continue
            split = line.split()
            image = split[0]
            m_class = split[1]
            proba = split[2]
            phash = split[3]
            image_name = image[image.index('/'):]
            meme_name = image_name.split('_')[0]
            if not meme_name:
                continue
            if (m_class == 'relevant') and (float(proba) > 0.8):
                continue
            if meme_name not in kym_phashes_by_meme_dic:
                kym_phashes_by_meme_dic[meme_name] = []
            kym_phashes_by_meme_dic[meme_name].append(phash)
            kym_images_dic[phash] = image
            kym_images_dic_reverse[image] = phash
            kym_meme_name[phash] = meme_name
    return (kym_phashes_by_meme_dic, kym_images_dic, kym_images_dic_reverse, kym_meme_name)
class MasterIfcRTL(Interface):

    def construct(s, ReqType, RespType):
        s.ReqType = ReqType
        s.RespType = RespType
        s.req = RecvIfcRTL(Type=ReqType)
        s.resp = SendIfcRTL(Type=RespType)

    def __str__(s):
        return f'{s.req}|{s.resp}'

    def connect(s, other, parent):
        if isinstance(other, MinionIfcCL):
            m = ValRdyMasterMinionRTL2CLAdapter(other.ReqType, other.RespType)
            # Give each adapter a unique attribute name on the parent. The original
            # read the count from an unrelated XcelIfcFL2RTL_count attribute and
            # appended the index to the counter name itself; fixed here to follow
            # the same pattern as GetIfcFL.connect above.
            if hasattr(parent, 'ValRdyMasterMinionRTL2CLAdapter_count'):
                count = parent.ValRdyMasterMinionRTL2CLAdapter_count
                setattr(parent, 'ValRdyMasterMinionRTL2CLAdapter_' + str(count), m)
            else:
                parent.ValRdyMasterMinionRTL2CLAdapter_count = 0
                parent.ValRdyMasterMinionRTL2CLAdapter_0 = m
            s //= m.left
            m.right //= other
            parent.ValRdyMasterMinionRTL2CLAdapter_count += 1
            return True
        return False
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class ModelArguments:
    # The stripped @dataclass decorator is restored; field(...) defaults only
    # take effect under it.
    model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizers (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
    low_cpu_mem_usage: bool = field(default=False, metadata={'help': 'It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. Setting this to True will benefit LLM loading time and RAM consumption.'})

    def __post_init__(self):
        if (self.config_overrides is not None) and ((self.config_name is not None) or (self.model_name_or_path is not None)):
            raise ValueError("--config_overrides can't be used in combination with --config_name or --model_name_or_path")
class DbmsLob(DirectoryManagement):

    def __init__(self, args):
        logging.debug('DbmsLob object created')
        DirectoryManagement.__init__(self, args)
        self.__setDirectoryName__()

    def getFile(self, remotePath, remoteNameFile, localFile):
        data = ''
        logging.info('Copy the {0} remote file (stored in {1}) to {2}'.format(remoteNameFile, remotePath, localFile))
        # PL/SQL block: read the BFILE in 16383-byte chunks via DBMS_LOB and
        # echo each chunk through dbms_output.
        DBMS_LOB_GET_FILE = "\n\t\tDECLARE\t\n -- Pointer to the BFILE\n l_loc BFILE;\n -- Current position in the file (file begins at position 1)\n l_pos NUMBER := 1;\n -- Amount of characters to read\n l_sum BINARY_INTEGER;\n -- Read Buffer\n l_buf VARCHAR2(32767);\n\t\t\tl_stat\t\tBINARY_INTEGER := 16383;\n BEGIN\n l_loc := BFILENAME('{0}','{1}');\n DBMS_LOB.OPEN(l_loc,DBMS_LOB.LOB_READONLY);\n\t\t\tl_sum := dbms_lob.getlength(l_loc);\n\t\t\tLOOP\n\t\t\tIF (l_sum < 16383) THEN\n\t\t\t\tDBMS_LOB.READ(l_loc,l_sum,l_pos,l_buf);\n\t\t\t\tdbms_output.put_line(UTL_RAW.CAST_TO_VARCHAR2(l_buf)); \t\n\t\t\t\tEXIT;\n\t\t\tEND IF;\n\t\t\tl_sum := l_sum - 16383;\n\t\t\tDBMS_LOB.READ(l_loc,l_stat,l_pos,l_buf);\n\t\t\tl_pos := l_pos + 16383;\n\t\t\tdbms_output.put_line(UTL_RAW.CAST_TO_VARCHAR2(l_buf));\n END LOOP;\n DBMS_LOB.CLOSE(l_loc);\n END;\n\t\t"
        isFileExist = self.getFileExist(remotePath, remoteNameFile)
        if isFileExist == True:
            status = self.__createOrRemplaceDirectory__(remotePath)
            if isinstance(status, Exception):
                return status
            cursor = cx_Oracle.Cursor(self.args['dbcon'])
            cursor.callproc('dbms_output.enable')
            try:
                cursor.execute(DBMS_LOB_GET_FILE.format(self.directoryName, remoteNameFile))
            except Exception as e:
                logging.info('Impossible to execute the query `{0}`: {1}'.format(DBMS_LOB_GET_FILE, self.cleanError(e)))
                self.__dropDirectory__()
                return ErrorSQLRequest(e)
            else:
                statusVar = cursor.var(cx_Oracle.NUMBER)
                lineVar = cursor.var(cx_Oracle.STRING)
                while True:
                    cursor.callproc('dbms_output.get_line', (lineVar, statusVar))
                    if statusVar.getvalue() != 0:
                        break
                    line = lineVar.getvalue()
                    if line is None:
                        line = ''
                    data += line
                    logging.info(repr(line))
                cursor.close()
        elif isFileExist == False:
            data = False
        else:
            data = isFileExist
        self.__dropDirectory__()
        return data

    def getFileExist(self, remotePath, remoteNameFile):
        (exist, returnedValue) = (False, False)
        logging.info('Test if the {1}{0} file exists'.format(remoteNameFile, remotePath))
        self.__setDirectoryName__()
        status = self.__createOrRemplaceDirectory__(remotePath)
        if isinstance(status, Exception):
            return status
        DBMS_LOB_FILE_EXISTS = "DECLARE l_loc BFILE; l_ret BOOLEAN := FALSE; BEGIN l_loc := BFILENAME('{0}','{1}'); l_ret := DBMS_LOB.FILEEXISTS(l_loc) = 1; IF (l_ret) THEN dbms_output.put_line('True'); ELSE dbms_output.put_line('False'); END IF;END;"
        cursor = cx_Oracle.Cursor(self.args['dbcon'])
        try:
            cursor.callproc('dbms_output.enable')
            try:
                cursor.execute(DBMS_LOB_FILE_EXISTS.format(self.directoryName, remoteNameFile))
            except Exception as e:
                logging.info('Impossible to execute the query `{0}`: {1}'.format(DBMS_LOB_FILE_EXISTS, self.cleanError(e)))
                returnedValue = ErrorSQLRequest(e)
            else:
                statusVar = cursor.var(cx_Oracle.NUMBER)
                lineVar = cursor.var(cx_Oracle.STRING)
                cursor.callproc('dbms_output.get_line', (lineVar, statusVar))
                if statusVar.getvalue() != 0:
                    returnedValue = False
                line = lineVar.getvalue()
                if line is None:
                    line = ''
                if 'True' in line:
                    logging.debug('The file exist: good news')
                    returnedValue = True
                elif 'False' in line:
                    logging.debug("The file doesn't exist")
                    returnedValue = False
                else:
                    logging.warning("Can't know if the file exist. There is an error: {0}".format(line))
                    returnedValue = ErrorSQLRequest(line)
            cursor.close()
        except Exception as e:
            returnedValue = ErrorSQLRequest(e)
        self.__dropDirectory__()
        return returnedValue

    def testAll(self):
        folder = self.__generateRandomString__()
        self.args['print'].subtitle('DBMS_LOB to read files ?')
        logging.info('Simulate the file reading in the {0} folder thanks to DBMS_LOB'.format(folder))
        status = self.getFile(remotePath=folder, remoteNameFile='data.txt', localFile='test.txt')
        if (status == True) or (status == False):
            self.args['print'].goodNews('OK')
        else:
            self.args['print'].badNews('KO')
class MultiEncodingWeights(WeightLayer):

    def __init__(self, weight_mode: str, init='glorot_uniform'):
        self.weight_mode = weight_mode
        self.init = init

    def apply(self, is_train, x, mask=None):
        init = get_keras_initialization(self.init)
        keys_shape = x.shape.as_list()
        if self.weight_mode == 'per_encoding':
            with tf.variable_scope('weighting'):
                weights = tf.get_variable('weights', shape=[keys_shape[1], keys_shape[2]], initializer=init)
                biases = tf.get_variable('biases', shape=[keys_shape[1]], initializer=tf.zeros_initializer())
                unnormalized_alphas = tf.einsum('btd,td->bt', x, weights) + biases
                normalized_alphas = tf.nn.softmax(unnormalized_alphas, axis=-1)
        elif self.weight_mode == 'fully_connected':
            with tf.variable_scope('weighting'):
                flattened = tf.layers.flatten(x)
                unnormalized_alphas = fully_connected(flattened, units=keys_shape[1], kernel_initializer=init)
                normalized_alphas = tf.nn.softmax(unnormalized_alphas, axis=-1)
        elif self.weight_mode == 'mlp':
            with tf.variable_scope('weighting'):
                flattened = tf.layers.flatten(x)
                flattened = fully_connected(flattened, units=512, kernel_initializer=init, activation=tf.nn.relu)
                unnormalized_alphas = fully_connected(flattened, units=keys_shape[1], kernel_initializer=init)
                normalized_alphas = tf.nn.softmax(unnormalized_alphas, axis=-1)
        else:
            raise NotImplementedError()
        return normalized_alphas
import logging

def configureLogging2(args):
    logformatNoColor = '%(levelname)-3s -: %(message)s'
    datefmt = '%H:%M:%S'
    # Map the verbosity count to a log level, defaulting to WARNING.
    # (The original's duplicated `level = level = logging.WARNING` and the
    # separate `== 2` / `> 2` DEBUG branches are collapsed here.)
    level = logging.WARNING
    if 'verbose' in args:
        if args['verbose'] == 1:
            level = logging.INFO
        elif args['verbose'] >= 2:
            level = logging.DEBUG
    logging.basicConfig(level=level, format=logformatNoColor, datefmt=datefmt)
    root = logging.getLogger()
    root.setLevel(level)
    hdlr = root.handlers[0]
    formatter = logging.Formatter(logformatNoColor, datefmt=datefmt)
    hdlr.setFormatter(formatter)
class LoginRequiredMixin(DjangoLoginRequiredMixin):
    redirect_unauthenticated_users = True

    def handle_no_permission(self):
        response = redirect_to_login(self.request.get_full_path(),
                                     self.get_login_url(),
                                     self.get_redirect_field_name())
        if self.raise_exception:
            if self.redirect_unauthenticated_users and (not self.request.user.is_authenticated):
                return response
            raise PermissionDenied(self.get_permission_denied_message())
        return response
def get_local_addresses():
    global retries
    addresses = []
    if retries:
        try:
            from netifaces import interfaces, ifaddresses
            for interface in interfaces():
                # ifaddresses() maps address family -> list of address dicts;
                # the original iterated over the family keys directly.
                for family_addrs in ifaddresses(interface).values():
                    for i in family_addrs:
                        if 'addr' in i:
                            addresses.append(i['addr'])
            return addresses
        except Exception:
            retries -= 1
    # Fallback: query each interface via the SIOCGIFADDR ioctl (0x8915 == 35093).
    interfaces = os.listdir('/sys/class/net')
    for interface in interfaces:
        try:
            addresses.append(socket.inet_ntoa(fcntl.ioctl(
                zerosocket.fileno(), 35093,
                struct.pack('256s', bytes(interface[:15], 'utf-8')))[20:24]))
        except Exception:
            pass
    return addresses
def test(rtlsdrtcp, use_numpy): from utils import generic_test port = 1235 while True: try: server = rtlsdrtcp.RtlSdrTcpServer(port=port) server.run() except socket.error as e: if (e.errno != errno.EADDRINUSE): raise server = None port += 1 if (server is not None): print('server running on port {0}'.format(port)) break client = rtlsdrtcp.RtlSdrTcpClient(port=port) try: generic_test(client, test_async=False, test_exceptions=False, use_numpy=use_numpy) with pytest.raises(NotImplementedError): generic_test(client, test_async=True, test_exceptions=False, use_numpy=use_numpy) finally: server.close()
def my_status(task): if task['disabled']: return u'' if task['last_failed_count']: return (u'%d,...' % task['last_failed_count']) if ((task['last_failed'] or 0) > (task['last_success'] or 0)): return u'' if ((task['success_count'] == 0) and (task['failed_count'] == 0) and task['next'] and ((task['next'] - time.time()) < 60)): return u'' return u''
class HorizontalLineDecorator(ChartDecorator, SimpleLegendItem): def __init__(self, y: ScalarType, color: str='k', key: str=None, **plot_settings: Any): ChartDecorator.__init__(self, key) SimpleLegendItem.__init__(self) self._y = y self._color = color self.plot_settings = plot_settings def decorate(self, chart: 'Chart') -> None: self.legend_artist = chart.axes.axhline(y=self._y, color=self._color, **self.plot_settings) def decorate_html(self, chart: 'Chart', chart_id: str) -> str: template = Template('\n if (decorator_options.yAxis.plotLines === undefined) {\n decorator_options.yAxis.plotLines = [];\n }\n\n decorator_options.yAxis.plotLines.push({\n color: "{{ color }}",\n dashStyle: "{{ dash_style }}",\n value: {{ value }},\n width: 2,\n {% for key, value in plot_settings.items(): %}\n {% if key in ["label"]: %}\n {{ key }}: {{ value }},\n {% endif %}\n {% endfor %}\n });\n ') color = HIGHCHART_COLORS.get(self._color, self._color) dash_style = HIGHCHART_DASH_STYLES.get(self.plot_settings['linestyle'], '') return template.render(value=self._y, color=color, dash_style=dash_style, decorator_id=self.key)
def td_format(td_object): seconds = int(td_object.total_seconds()) periods = [('y', (((60 * 60) * 24) * 365)), ('mo', (((60 * 60) * 24) * 30)), ('d', ((60 * 60) * 24)), ('h', (60 * 60)), ('m', 60), ('s', 1)] ret = '' for (period_name, period_seconds) in periods: if (seconds >= period_seconds): (period_value, seconds) = divmod(seconds, period_seconds) ret += f'{period_value}{period_name}' return ret
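A hypothetical demonstration of td_format's divmod cascade over the period table (outputs assume the corrected 'mo'/'m' labels above):

from datetime import timedelta

print(td_format(timedelta(days=1, hours=2, minutes=3, seconds=4)))  # 1d2h3m4s
print(td_format(timedelta(days=400)))  # 1y1mo5d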
def ElemwiseOpTime(N, script=False, loops=1000): x = vector('x') np.random.seed(1235) v = np.random.random(N).astype(config.floatX) f = pytensor.function([x], ((2 * x) + (x * x))) f1 = pytensor.function([x], tanh(x)) f.trust_input = True f1.trust_input = True if (not script): if config.openmp: print('With openmp:') print('Fast op ', end=' ') cheapTime = evalTime(f, v, script=script, loops=loops) if (not script): print('Slow op ', end=' ') costlyTime = evalTime(f1, v, script=script, loops=loops) return (cheapTime, costlyTime)
class SSSResponseControl(ResponseControl): controlType = '1.2.840.113556.1.4.474' def __init__(self, criticality=False): ResponseControl.__init__(self, self.controlType, criticality) def decodeControlValue(self, encoded): (p, rest) = decoder.decode(encoded, asn1Spec=SortResultType()) assert (not rest), 'all data could not be decoded' sort_result = p.getComponentByName('sortResult') self.sortResult = int(sort_result) attribute_type = p.getComponentByName('attributeType') if attribute_type.hasValue(): self.attributeType = attribute_type else: self.attributeType = None self.result = self.sortResult self.attribute_type_error = self.attributeType
class SEModule(nn.Module): def __init__(self, channels, rd_ratio=(1.0 / 16), rd_channels=None, rd_divisor=8, add_maxpool=False, bias=True, act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): super(SEModule, self).__init__() self.add_maxpool = add_maxpool if (not rd_channels): rd_channels = make_divisible((channels * rd_ratio), rd_divisor, round_limit=0.0) self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias) self.bn = (norm_layer(rd_channels) if norm_layer else nn.Identity()) self.act = create_act_layer(act_layer, inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: x_se = ((0.5 * x_se) + (0.5 * x.amax((2, 3), keepdim=True))) x_se = self.fc1(x_se) x_se = self.act(self.bn(x_se)) x_se = self.fc2(x_se) return (x * self.gate(x_se))
class TableWidget(QtWidgets.QTableWidget): def __init__(self, *args, **kwds): QtWidgets.QTableWidget.__init__(self, *args) self.itemClass = TableWidgetItem self.setVerticalScrollMode(self.ScrollMode.ScrollPerPixel) self.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.ContiguousSelection) self.setSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Preferred) self.clear() kwds.setdefault('sortable', True) kwds.setdefault('editable', False) self.setEditable(kwds.pop('editable')) self.setSortingEnabled(kwds.pop('sortable')) if (len(kwds) > 0): raise TypeError(("Invalid keyword arguments '%s'" % list(kwds.keys()))) self._sorting = None self._formats = {None: None} self.sortModes = {} self.itemChanged.connect(self.handleItemChanged) self.contextMenu = QtWidgets.QMenu() self.contextMenu.addAction(translate('TableWidget', 'Copy Selection')).triggered.connect(self.copySel) self.contextMenu.addAction(translate('TableWidget', 'Copy All')).triggered.connect(self.copyAll) self.contextMenu.addAction(translate('TableWidget', 'Save Selection')).triggered.connect(self.saveSel) self.contextMenu.addAction(translate('TableWidget', 'Save All')).triggered.connect(self.saveAll) def clear(self): QtWidgets.QTableWidget.clear(self) self.verticalHeadersSet = False self.horizontalHeadersSet = False self.items = [] self.setRowCount(0) self.setColumnCount(0) self.sortModes = {} def setData(self, data): self.clear() self.appendData(data) self.resizeColumnsToContents() @_defersort def appendData(self, data): startRow = self.rowCount() (fn0, header0) = self.iteratorFn(data) if (fn0 is None): self.clear() return it0 = fn0(data) try: first = next(it0) except StopIteration: return (fn1, header1) = self.iteratorFn(first) if (fn1 is None): self.clear() return firstVals = [x for x in fn1(first)] self.setColumnCount(len(firstVals)) if ((not self.verticalHeadersSet) and (header0 is not None)): labels = [self.verticalHeaderItem(i).text() for i in range(self.rowCount())] self.setRowCount((startRow + len(header0))) self.setVerticalHeaderLabels((labels + header0)) self.verticalHeadersSet = True if ((not self.horizontalHeadersSet) and (header1 is not None)): self.setHorizontalHeaderLabels(header1) self.horizontalHeadersSet = True i = startRow self.setRow(i, firstVals) for row in it0: i += 1 self.setRow(i, [x for x in fn1(row)]) if (self._sorting and self.horizontalHeadersSet and (self.horizontalHeader().sortIndicatorSection() >= self.columnCount())): self.sortByColumn(0, QtCore.Qt.SortOrder.AscendingOrder) def setEditable(self, editable=True): self.editable = editable for item in self.items: item.setEditable(editable) def setFormat(self, format, column=None): if ((format is not None) and (not isinstance(format, str)) and (not callable(format))): raise ValueError(('Format argument must be a string, callable, or None. (got %s)' % format)) self._formats[column] = format if (column is None): for c in range(self.columnCount()): if (self._formats.get(c, None) is None): for r in range(self.rowCount()): item = self.item(r, c) if (item is None): continue item.setFormat(format) else: if (format is None): format = self._formats[None] for r in range(self.rowCount()): item = self.item(r, column) if (item is None): continue item.setFormat(format) def iteratorFn(self, data): if (isinstance(data, list) or isinstance(data, tuple)): return ((lambda d: d.__iter__()), None) elif isinstance(data, dict): return ((lambda d: iter(d.values())), list(map(str, data.keys()))) elif (hasattr(data, 'implements') and data.implements('MetaArray')): if data.axisHasColumns(0): header = [str(data.columnName(0, i)) for i in range(data.shape[0])] elif data.axisHasValues(0): header = list(map(str, data.xvals(0))) else: header = None return (self.iterFirstAxis, header) elif isinstance(data, np.ndarray): return (self.iterFirstAxis, None) elif isinstance(data, np.void): return (self.iterate, list(map(str, data.dtype.names))) elif (data is None): return (None, None) elif np.isscalar(data): return (self.iterateScalar, None) else: msg = "Don't know how to iterate over data type: {!s}".format(type(data)) raise TypeError(msg) def iterFirstAxis(self, data): for i in range(data.shape[0]): (yield data[i]) def iterate(self, data): for x in data: (yield x) def iterateScalar(self, data): (yield data) def appendRow(self, data): self.appendData([data]) @_defersort def addRow(self, vals): row = self.rowCount() self.setRowCount((row + 1)) self.setRow(row, vals) @_defersort def setRow(self, row, vals): if (row > (self.rowCount() - 1)): self.setRowCount((row + 1)) for col in range(len(vals)): val = vals[col] item = self.itemClass(val, row) item.setEditable(self.editable) sortMode = self.sortModes.get(col, None) if (sortMode is not None): item.setSortMode(sortMode) format = self._formats.get(col, self._formats[None]) item.setFormat(format) self.items.append(item) self.setItem(row, col, item) item.setValue(val) def setSortMode(self, column, mode): for r in range(self.rowCount()): item = self.item(r, column) if hasattr(item, 'setSortMode'): item.setSortMode(mode) self.sortModes[column] = mode def sizeHint(self): width = sum((self.columnWidth(i) for i in range(self.columnCount()))) width += self.verticalHeader().sizeHint().width() width += self.verticalScrollBar().sizeHint().width() width += (self.frameWidth() * 2) height = sum((self.rowHeight(i) for i in range(self.rowCount()))) height += self.verticalHeader().sizeHint().height() height += self.horizontalScrollBar().sizeHint().height() return QtCore.QSize(width, height) def serialize(self, useSelection=False): if useSelection: selection = self.selectedRanges()[0] rows = list(range(selection.topRow(), (selection.bottomRow() + 1))) columns = list(range(selection.leftColumn(), (selection.rightColumn() + 1))) else: rows = list(range(self.rowCount())) columns = list(range(self.columnCount())) data = [] if self.horizontalHeadersSet: row = [] if self.verticalHeadersSet: row.append('') for c in columns: row.append(self.horizontalHeaderItem(c).text()) data.append(row) for r in rows: row = [] if self.verticalHeadersSet: row.append(self.verticalHeaderItem(r).text()) for c in columns: item = self.item(r, c) if (item is not None): row.append(str(item.value)) else: row.append('') data.append(row) s = '' for row in data: s += ('\t'.join(row) + '\n') return s def copySel(self):
QtWidgets.QApplication.clipboard().setText(self.serialize(useSelection=True)) def copyAll(self): QtWidgets.QApplication.clipboard().setText(self.serialize(useSelection=False)) def saveSel(self): self.save(self.serialize(useSelection=True)) def saveAll(self): self.save(self.serialize(useSelection=False)) def save(self, data): (fileName, _) = QtWidgets.QFileDialog.getSaveFileName(self, f"{translate('TableWidget', 'Save As')}...", '', f"{translate('TableWidget', 'Tab-separated values')} (*.tsv)") if (not fileName): return with open(fileName, 'w') as fd: fd.write(data) def contextMenuEvent(self, ev): self.contextMenu.popup(ev.globalPos()) def keyPressEvent(self, ev): if ev.matches(QtGui.QKeySequence.StandardKey.Copy): ev.accept() self.copySel() else: super().keyPressEvent(ev) def handleItemChanged(self, item): item.itemChanged()
def evaluate_stack(s): (op, num_args) = (s.pop(), 0) if isinstance(op, tuple): (op, num_args) = op if (op == 'unary -'): return (- evaluate_stack(s)) if (op in '+-*/^'): op2 = evaluate_stack(s) op1 = evaluate_stack(s) return opn[op](op1, op2) elif (op == 'PI'): return math.pi elif (op == 'E'): return math.e elif (op in fn): args = reversed([evaluate_stack(s) for _ in range(num_args)]) return fn[op](*args) elif op[0].isalpha(): raise Exception(("invalid identifier '%s'" % op)) else: try: return int(op) except ValueError: return float(op)
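A minimal sketch of the module-level opn and fn tables that evaluate_stack assumes; these definitions are illustrative, in the style of the classic pyparsing fourFn example, and are not taken from the source:

import math
import operator

# Binary operators dispatched through the `op in '+-*/^'` branch.
opn = {'+': operator.add, '-': operator.sub, '*': operator.mul,
       '/': operator.truediv, '^': operator.pow}
# Named functions dispatched through the `op in fn` branch.
fn = {'sin': math.sin, 'cos': math.cos, 'abs': abs}

# The stack is postfix: operands first, operator last, popped from the end.
print(evaluate_stack(['2', '3', '+']))   # 5
print(evaluate_stack(['2', 'unary -']))  # -2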
class XLISCalendarTestCase(EuronextCalendarTestBase, TestCase): answer_key_filename = 'xlis' calendar_class = XLISExchangeCalendar MAX_SESSION_HOURS = 8.5 TIMEDELTA_TO_NORMAL_CLOSE = pd.Timedelta(hours=16, minutes=30) TIMEDELTA_TO_EARLY_CLOSE = pd.Timedelta(hours=13, minutes=5) TZ = 'Europe/Lisbon' def test_old_holidays(self): all_sessions = self.calendar.all_sessions expected_holidays = [pd.Timestamp('2002-02-12', tz=UTC), pd.Timestamp('2002-05-30', tz=UTC), pd.Timestamp('2002-04-25', tz=UTC), pd.Timestamp('2002-06-10', tz=UTC), pd.Timestamp('2001-06-13', tz=UTC), pd.Timestamp('2002-08-15', tz=UTC), pd.Timestamp('2001-10-05', tz=UTC), pd.Timestamp('2002-11-01', tz=UTC), pd.Timestamp('2000-12-01', tz=UTC), pd.Timestamp('2000-12-08', tz=UTC), pd.Timestamp('2002-12-24', tz=UTC)] for session_label in expected_holidays: self.assertNotIn(session_label, all_sessions) expected_sessions = [pd.Timestamp('2003-03-04', tz=UTC), pd.Timestamp('2003-06-16', tz=UTC), pd.Timestamp('2003-04-25', tz=UTC), pd.Timestamp('2003-06-10', tz=UTC), pd.Timestamp('2002-06-13', tz=UTC), pd.Timestamp('2003-08-15', tz=UTC), pd.Timestamp('2004-10-05', tz=UTC), pd.Timestamp('2004-11-01', tz=UTC), pd.Timestamp('2003-12-01', tz=UTC), pd.Timestamp('2003-12-08', tz=UTC), pd.Timestamp('2003-12-24', tz=UTC)] for session_label in expected_sessions: self.assertIn(session_label, all_sessions)
class Meter(object): def __init__(self, name, val, avg): self.name = name self.val = val self.avg = avg def __repr__(self): return '{name}: {val:.6f} ({avg:.6f})'.format(name=self.name, val=self.val, avg=self.avg) def __format__(self, *tuples, **kwargs): return self.__repr__()
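A quick illustrative check of Meter's repr/format behaviour:

m = Meter('loss', val=0.123456, avg=0.234567)
print(m)       # loss: 0.123456 (0.234567)
print(f'{m}')  # __format__ delegates to __repr__, so same output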
class AccountSessionHandler(object): def __init__(self, account): self.account = account def get(self, sessid=None): global _SESSIONS if (not _SESSIONS): from evennia.server.sessionhandler import SESSIONS as _SESSIONS if sessid: return make_iter(_SESSIONS.session_from_account(self.account, sessid)) else: return _SESSIONS.sessions_from_account(self.account) def all(self): return self.get() def count(self): return len(self.get())
@DATASETS.register_module class RepeatDataset(object): def __init__(self, dataset, times): self.dataset = dataset self.times = times self.CLASSES = dataset.CLASSES if hasattr(self.dataset, 'flag'): self.flag = np.tile(self.dataset.flag, times) self._ori_len = len(self.dataset) def __getitem__(self, idx): return self.dataset[(idx % self._ori_len)] def __len__(self): return (self.times * self._ori_len)
class DCSourceGenerator(SourceGenerator): depth_min = Float.T(default=0.0) depth_max = Float.T(default=(30 * km)) strike = Float.T(optional=True) dip = Float.T(optional=True) rake = Float.T(optional=True) perturbation_angle_std = Float.T(optional=True) def get_source(self, ievent): rstate = self.get_rstate(ievent) time = (self.time_min + rstate.uniform(0.0, float((self.time_max - self.time_min)))) (lat, lon, north_shift, east_shift, depth) = self.get_coordinates(ievent) depth = rstate.uniform(self.depth_min, self.depth_max) magnitude = self.draw_magnitude(rstate) if ((self.strike is None) and (self.dip is None) and (self.rake is None)): mt = moment_tensor.MomentTensor.random_dc(x=rstate.uniform(size=3)) else: if (None in (self.strike, self.dip, self.rake)): raise ScenarioError('DCSourceGenerator: strike, dip, and rake must be used in combination.') mt = moment_tensor.MomentTensor(strike=self.strike, dip=self.dip, rake=self.rake) if (self.perturbation_angle_std is not None): mt = mt.random_rotated(self.perturbation_angle_std, rstate=rstate) ((s, d, r), (_, _, _)) = mt.both_strike_dip_rake() source = gf.DCSource(name=('ev%04i' % ievent), time=util.to_time_float(time), lat=float(lat), lon=float(lon), north_shift=float(north_shift), east_shift=float(east_shift), depth=float(depth), magnitude=float(magnitude), strike=float(s), dip=float(d), rake=float(r)) return source def add_map_artists(self, automap): from pyrocko.plot import gmtpy for source in self.get_sources(): event = source.pyrocko_event() mt = event.moment_tensor.m_up_south_east() xx = (num.trace(mt) / 3.0) mc = num.array([[xx, 0.0, 0.0], [0.0, xx, 0.0], [0.0, 0.0, xx]]) mc = (mt - mc) mc = ((mc / event.moment_tensor.scalar_moment()) * moment_tensor.magnitude_to_moment(5.0)) m6 = tuple(moment_tensor.to6(mc)) symbol_size = 20.0 automap.gmt.psmeca(*automap.jxyr, S=('%s%g' % ('d', (symbol_size / gmtpy.cm))), in_rows=[(((source.effective_lon, source.effective_lat, 10) + m6) + (1, 0, 0))], M=True)
def linear_dequantize(input, scale, zero_point, inplace=False): if (len(input.shape) == 4): scale = scale.view((- 1), 1, 1, 1) zero_point = zero_point.view((- 1), 1, 1, 1) elif (len(input.shape) == 2): scale = scale.view((- 1), 1) zero_point = zero_point.view((- 1), 1) if inplace: input.add_(zero_point).div_(scale) return input return ((input + zero_point) / scale)
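An illustrative per-channel check for linear_dequantize; the shapes and values below are made up for demonstration:

import torch

q = torch.tensor([[10.0, 20.0], [30.0, 40.0]])  # fake quantized values
scale = torch.tensor([2.0, 4.0])                # one scale per row/channel
zero_point = torch.tensor([0.0, 2.0])           # one zero point per row
# 2-D input: scale/zero_point are reshaped to (-1, 1) and broadcast row-wise.
print(linear_dequantize(q, scale, zero_point))  # [[5.0, 10.0], [8.0, 10.5]]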
def prepare_CHB_MIT_dataloader(args): seed = 12345 torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) root = '/srv/local/data/physionet.org/files/chbmit/1.0.0/clean_segments' train_files = os.listdir(os.path.join(root, 'train')) val_files = os.listdir(os.path.join(root, 'val')) test_files = os.listdir(os.path.join(root, 'test')) print(len(train_files), len(val_files), len(test_files)) train_loader = torch.utils.data.DataLoader(CHBMITLoader(os.path.join(root, 'train'), train_files, args.sampling_rate), batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers, persistent_workers=True) test_loader = torch.utils.data.DataLoader(CHBMITLoader(os.path.join(root, 'test'), test_files, args.sampling_rate), batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, persistent_workers=True) val_loader = torch.utils.data.DataLoader(CHBMITLoader(os.path.join(root, 'val'), val_files, args.sampling_rate), batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, persistent_workers=True) print(len(train_loader), len(val_loader), len(test_loader)) return (train_loader, test_loader, val_loader)
def extract_qa_p(path='../data/msmarco-qa/train_v2.1.json', output='../data/msmarco-qa/train.txt'): data = json.load(open(path)) data_to_save = [] for (id_, answers) in data['answers'].items(): if (answers[0] != 'No Answer Present.'): passages = data['passages'][id_] query = data['query'][id_] relevant_p = [] for p in passages: if p['is_selected']: relevant_p.append(p['passage_text']) if (len(relevant_p) != 0): data_to_save.append({'q': query, 'answer': answers, 'para': ' '.join(relevant_p)}) with open(output, 'w') as g: for l in data_to_save: g.write((json.dumps(l) + '\n'))
def test_path_completion_no_text(cmd2_app): text = '' line = 'shell ls {}'.format(text) endidx = len(line) begidx = (endidx - len(text)) completions_no_text = cmd2_app.path_complete(text, line, begidx, endidx) text = (os.getcwd() + os.path.sep) line = 'shell ls {}'.format(text) endidx = len(line) begidx = (endidx - len(text)) completions_cwd = [match.replace(text, '', 1) for match in cmd2_app.path_complete(text, line, begidx, endidx)] assert (completions_no_text == completions_cwd) assert completions_cwd
def test_directed_tripartition_indices(): assert (directed_tripartition_indices(0) == []) assert (directed_tripartition_indices(2) == [((0, 1), (), ()), ((0,), (1,), ()), ((0,), (), (1,)), ((1,), (0,), ()), ((), (0, 1), ()), ((), (0,), (1,)), ((1,), (), (0,)), ((), (1,), (0,)), ((), (), (0, 1))])
class TFSegformerDropPath(tf.keras.layers.Layer): def __init__(self, drop_path, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path def call(self, x, training=None): if training: keep_prob = (1 - self.drop_path) shape = ((tf.shape(x)[0],) + ((1,) * (len(tf.shape(x)) - 1))) random_tensor = (keep_prob + tf.random.uniform(shape, 0, 1)) random_tensor = tf.floor(random_tensor) return ((x / keep_prob) * random_tensor) return x
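A hedged behavioural sketch for TFSegformerDropPath (stochastic depth): at inference it is the identity, while in training each sample's branch is zeroed with probability drop_path and rescaled by 1/keep_prob otherwise:

import tensorflow as tf

drop = TFSegformerDropPath(drop_path=0.5)
x = tf.ones((4, 3))
print(drop(x, training=False))  # identity at inference
print(drop(x, training=True))   # each row is either all zeros or scaled by 2.0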
def _read_from_init(initcontent, initname): metadata = [] i = 0 lines = initcontent.split('\n') while (i < len(lines)): if re.search('def\\s+([^\\(]+)', lines[i]): k = re.search('def\\s+([^\\(]+)', lines[i]).groups()[0] i += 1 while ((i < len(lines)) and (lines[i] != '')): if re.search('return\\s+["\']?([^"\']+)["\']?', lines[i]): metadata.append((k, re.search('return\\s+["\']?([^"\']+)["\']?', lines[i]).groups()[0])) break i += 1 i += 1 if (not len(metadata)): raise ValidationError((_('Cannot find valid metadata in %s') % initname)) return metadata
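An illustrative call to _read_from_init with a tiny __init__-style stub; the regexes only pick up simple one-line return bodies, and the stub below is a made-up example:

init_stub = '\n'.join([
    'def name():',
    "    return 'demo-module'",
    '',
    'def version():',
    "    return '1.0'",
])
print(_read_from_init(init_stub, '__init__.py'))
# [('name', 'demo-module'), ('version', '1.0')]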
class CalcChangeProjectedDroneAmountCommand(wx.Command): def __init__(self, fitID, itemID, amount): wx.Command.__init__(self, True, 'Change Projected Drone Amount') self.fitID = fitID self.itemID = itemID self.amount = amount self.savedDroneInfo = None def Do(self): pyfalog.debug('Doing change of projected drone {} amount to {} on fit {}'.format(self.itemID, self.amount, self.fitID)) fit = Fit.getInstance().getFit(self.fitID) drone = next((pd for pd in fit.projectedDrones if (pd.itemID == self.itemID)), None) if (drone is None): pyfalog.warning('Cannot find projected drone') return False self.savedDroneInfo = DroneInfo.fromDrone(drone) if (self.amount == self.savedDroneInfo.amount): return False drone.amount = self.amount if (drone.amountActive > 0): difference = (self.amount - self.savedDroneInfo.amount) drone.amount = self.amount drone.amountActive = max(min((drone.amountActive + difference), drone.amount), 0) return True def Undo(self): pyfalog.debug('Undoing change of projected drone {} amount to {} on fit {}'.format(self.itemID, self.amount, self.fitID)) if (self.savedDroneInfo is not None): fit = Fit.getInstance().getFit(self.fitID) drone = next((pd for pd in fit.projectedDrones if (pd.itemID == self.savedDroneInfo.itemID)), None) if (drone is None): pyfalog.warning('Cannot find projected drone') return False drone.amount = self.savedDroneInfo.amount drone.amountActive = self.savedDroneInfo.amountActive return True return False
@with_fixtures(FieldFixture) def test_min_length_constraint(fixture): min_required_length = 5 min_length_constraint = MinLengthConstraint(min_length=min_required_length) assert (min_length_constraint.parameters == ('%s' % min_required_length)) with expected(NoException): min_length_constraint.validate_input(('5' * min_required_length)) with expected(NoException): min_length_constraint.validate_input(('e' * (min_required_length + 1))) with expected(MinLengthConstraint): min_length_constraint.validate_input(('s' * (min_required_length - 1))) with expected(MinLengthConstraint): min_length_constraint.validate_input('')
@dataclass class Seq2SeqSequenceClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
def get_ts(url): data = m3u8.load(url) key_link = get_key(data) ts_content = b'' key = None for (i, segment) in enumerate(data.segments): decrypt_func = (lambda x: x) if (segment.key.method == 'AES-128'): if (not key): key_uri = segment.key.uri key = read_keys(key_uri) ind = (i + data.media_sequence) iv = binascii.a2b_hex(('%032x' % ind)) cipher = AES.new(key, AES.MODE_CBC, iv=iv) decrypt_func = cipher.decrypt ts_url = f'{(key_link if key_link else segment.key.base_uri)}/{segment.uri}' coded_data = read_keys(ts_url) ts_content += decrypt_func(coded_data) return ts_content
def transform_take(a, indices, axis): a = pytensor.tensor.as_tensor_variable(a) indices = pytensor.tensor.as_tensor_variable(indices) if (indices.ndim == 1): if (axis == 0): return advanced_subtensor1(a, indices) else: shuffle = list(range(a.ndim)) shuffle[0] = axis shuffle[axis] = 0 res = advanced_subtensor1(a.dimshuffle(shuffle), indices).dimshuffle(shuffle) return res indices_shape = shape_tuple(indices) a_shape = shape_tuple(a) shape_parts = [a_shape[:axis], indices_shape, a_shape[(axis + 1):]] shape_parts = [sp for sp in shape_parts if (len(sp) > 0)] assert (len(shape_parts) > 0) if (len(shape_parts) > 1): shape = pytensor.tensor.concatenate(shape_parts) else: shape = shape_parts[0] ndim = ((a.ndim + indices.ndim) - 1) return transform_take(a, indices.flatten(), axis).reshape(shape, ndim=ndim)
def _expand_paths_by_content_type(base_paths: Union[(str, List[str])], base_urls: List[S3Url], content_type_provider: Callable[([str], ContentType)], path_type: S3PathType, user_fs: Optional[Union[(S3FileSystem, s3fs.S3FileSystem)]], resolved_fs: S3FileSystem, **s3_client_kwargs) -> Tuple[(Dict[(ContentType, List[str])], CachedFileMetadataProvider)]: if (path_type == S3PathType.MANIFEST): (content_type_to_paths, meta_provider) = _expand_manifest_paths(base_paths, resolved_fs, content_type_provider) elif (path_type == S3PathType.PREFIX): (content_type_to_paths, meta_provider) = _expand_prefix_paths(base_urls, content_type_provider, **s3_client_kwargs) elif (path_type == S3PathType.FILES_AND_FOLDERS): (base_paths, file_infos) = DefaultFileMetadataProvider().expand_paths(base_paths, resolved_fs) file_sizes = [file_info.size for file_info in file_infos] meta_provider = CachedFileMetadataProvider({path: BlockMetadata(num_rows=None, size_bytes=file_sizes[i], schema=None, input_files=[], exec_stats=None) for (i, path) in enumerate(base_paths)}) content_type_to_paths = _infer_content_types_from_paths(base_paths, content_type_provider) else: raise NotImplementedError(f'Unsupported S3 path type: {path_type}') for (content_type, paths) in content_type_to_paths.items(): (paths, urls) = _normalize_s3_paths_for_filesystem(paths, user_fs) content_type_to_paths[content_type] = paths meta_provider = CachedFileMetadataProvider({_normalize_s3_paths_for_filesystem(path, user_fs)[0][0]: metadata for (path, metadata) in meta_provider.get_meta_cache().items()}) return (content_type_to_paths, meta_provider)
class Processor(Iface, TProcessor): def __init__(self, handler): self._handler = handler self._processMap = {} self._processMap['is_healthy'] = Processor.process_is_healthy self._on_message_begin = None def on_message_begin(self, func): self._on_message_begin = func def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() if self._on_message_begin: self._on_message_begin(name, type, seqid) if (name not in self._processMap): iprot.skip(TType.STRUCT) iprot.readMessageEnd() x = TApplicationException(TApplicationException.UNKNOWN_METHOD, ('Unknown function %s' % name)) oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) x.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() return else: self._processMap[name](self, seqid, iprot, oprot) return True def process_is_healthy(self, seqid, iprot, oprot): args = is_healthy_args() args.read(iprot) iprot.readMessageEnd() result = is_healthy_result() try: result.success = self._handler.is_healthy() msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin('is_healthy', msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush()
class Effect11426(BaseEffect): type = 'passive' @staticmethod def handler(fit, container, context, projectionRange, **kwargs): fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Drones')), 'damageMultiplier', container.getModifiedItemAttr('shipBonusAB'), skill='Amarr Battleship', **kwargs)
def html_parse(parse, viols=False, use_caps=False, use_html=True, between_words=' ', between_sylls='.', line_id='ID'): last_word = None output = '' for pos in parse.positions: violated = pos.violated if (viols and violated): viol_str = ' '.join([rename_constraint(c) for c in violated]) viol_title = ('Violated %s constraints: %s' % (len(violated), viol_str)) output += ('<span class="violation" title="%s" id="viol__line_%s">' % (viol_title, line_id)) for slot in pos.slots: slotstr = slot.token if use_caps: slotstr = (slotstr.upper() if (pos.meterVal == 's') else slotstr.lower()) if use_html: slotstr = ((('<span class="meter_strong">' + slotstr) + '</span>') if (pos.meterVal == 's') else (('<span class="meter_weak">' + slotstr) + '</span>')) if (last_word != slot.wordtoken): output += (between_words + slotstr) last_word = slot.wordtoken else: output += (between_sylls + slotstr) if (viols and violated): output += ('</span><script type="text/javascript">tippy("#viol__line_%s")</script>' % line_id) return output.strip()
def read_engine_configs() -> dict: all_configs = {} engines_config_dir = os.path.join(ROOT_DIR, 'experiments', 'configurations') config_files = glob.glob(os.path.join(engines_config_dir, '*.json')) for config_file in config_files: with open(config_file, 'r') as fd: configs = json.load(fd) for config in configs: all_configs[config['name']] = config return all_configs
def MenuBlockAsControls(menuItems, parentage=None): if (parentage is None): parentage = [] blocks = [] curBlock = [] for item in menuItems: itemAsCtrl = MenuItemAsControl(item) if parentage: itemPath = ('%s->%s' % ('->'.join(parentage), item['text'])) else: itemPath = item['text'] curBlock.append(itemAsCtrl) if ('menu_items' in item.keys()): parentage.append(item['text']) blocks.extend(MenuBlockAsControls(item['menu_items']['menu_items'], parentage)) del parentage[(- 1)] blocks.append(curBlock) return blocks
class MSMRDiffusion(BaseParticle): def __init__(self, param, domain, options, phase='primary', x_average=False): super().__init__(param, domain, options, phase) self.x_average = x_average pybamm.citations.register('Baker2018') pybamm.citations.register('Verbrugge2017') def get_fundamental_variables(self): (domain, Domain) = self.domain_Domain phase_name = self.phase_name variables = {} c_max = self.phase_param.c_max if (self.size_distribution is False): if (self.x_average is False): U = pybamm.Variable(f'{Domain} {phase_name}particle potential [V]', f'{domain} {phase_name}particle', auxiliary_domains={'secondary': f'{domain} electrode', 'tertiary': 'current collector'}) U.print_name = f'U_{domain[0]}' else: U_xav = pybamm.Variable(f'X-averaged {domain} {phase_name}particle potential [V]', f'{domain} {phase_name}particle', auxiliary_domains={'secondary': 'current collector'}) U_xav.print_name = f'U_{domain[0]}_xav' U = pybamm.SecondaryBroadcast(U_xav, f'{domain} electrode') else: if (self.x_average is False): U_distribution = pybamm.Variable(f'{Domain} {phase_name}particle potential distribution [V]', domain=f'{domain} {phase_name}particle', auxiliary_domains={'secondary': f'{domain} {phase_name}particle size', 'tertiary': f'{domain} electrode', 'quaternary': 'current collector'}) R = pybamm.SpatialVariable(f'R_{domain[0]}', domain=[f'{domain} {phase_name}particle size'], auxiliary_domains={'secondary': f'{domain} electrode', 'tertiary': 'current collector'}, coord_sys='cartesian') variables = self._get_distribution_variables(R) f_v_dist = variables[f'{Domain} volume-weighted {phase_name}particle-size distribution [m-1]'] else: U_distribution = pybamm.Variable(f'X-averaged {domain} {phase_name}particle potential distribution [V]', domain=f'{domain} {phase_name}particle', auxiliary_domains={'secondary': f'{domain} {phase_name}particle size', 'tertiary': 'current collector'}) R = pybamm.SpatialVariable(f'R_{domain[0]}', domain=[f'{domain} {phase_name}particle size'], auxiliary_domains={'secondary': 'current collector'}, coord_sys='cartesian') variables = self._get_distribution_variables(R) f_v_dist = variables[f'X-averaged {domain} volume-weighted {phase_name}particle-size distribution [m-1]'] variables.update(self._get_standard_potential_distribution_variables(U_distribution)) x_distribution = self.phase_param.x(U_distribution) dxdU_distribution = self.phase_param.dxdU(U_distribution) c_s_distribution = (x_distribution * c_max) variables.update(self._get_standard_concentration_distribution_variables(c_s_distribution)) variables.update(self._get_standard_differential_stoichiometry_distribution_variables(dxdU_distribution)) U = pybamm.Integral((f_v_dist * U_distribution), R) if (self.x_average is True): U = pybamm.SecondaryBroadcast(U, [f'{domain} electrode']) variables.update(self._get_standard_potential_variables(U)) variables.update(self._get_standard_fractional_occupancy_variables(U)) variables.update(self._get_standard_differential_fractional_occupancy_variables(U)) x = self.phase_param.x(U) dxdU = self.phase_param.dxdU(U) c_s = (x * c_max) variables.update(self._get_standard_concentration_variables(c_s)) variables.update(self._get_standard_differential_stoichiometry_variables(dxdU)) return variables def get_coupled_variables(self, variables): (domain, Domain) = self.domain_Domain phase_name = self.phase_name param = self.param if (self.size_distribution is False): if (self.x_average is False): x = variables[f'{Domain} {phase_name}particle stoichiometry'] dxdU = variables[f'{Domain} 
{phase_name}particle differential stoichiometry [V-1]'] U = variables[f'{Domain} {phase_name}particle potential [V]'] T = pybamm.PrimaryBroadcast(variables[f'{Domain} electrode temperature [K]'], [f'{domain} {phase_name}particle']) R_nondim = variables[f'{Domain} {phase_name}particle radius'] j = variables[f'{Domain} electrode {phase_name}interfacial current density [A.m-2]'] else: x = variables[f'X-averaged {domain} {phase_name}particle stoichiometry'] dxdU = variables[f'X-averaged {domain} {phase_name}particle differential stoichiometry [V-1]'] U = variables[f'X-averaged {domain} {phase_name}particle potential [V]'] T = pybamm.PrimaryBroadcast(variables[f'X-averaged {domain} electrode temperature [K]'], [f'{domain} {phase_name}particle']) R_nondim = 1 j = variables[f'X-averaged {domain} electrode {phase_name}interfacial current density [A.m-2]'] R_broad_nondim = R_nondim else: R_nondim = variables[f'{Domain} {phase_name}particle sizes'] R_broad_nondim = pybamm.PrimaryBroadcast(R_nondim, [f'{domain} {phase_name}particle']) if (self.x_average is False): x = variables[f'{Domain} {phase_name}particle stoichiometry distribution'] dxdU = variables[f'{Domain} {phase_name}particle differential stoichiometry distribution [V-1]'] U = variables[f'{Domain} {phase_name}particle potential distribution [V]'] T = pybamm.PrimaryBroadcast(variables[f'{Domain} electrode temperature [K]'], [f'{domain} {phase_name}particle size']) T = pybamm.PrimaryBroadcast(T, [f'{domain} {phase_name}particle']) j = variables[f'{Domain} electrode {phase_name}interfacial current density distribution [A.m-2]'] else: x = variables[f'X-averaged {domain} {phase_name}particle stoichiometry distribution'] dxdU = variables[f'X-averaged {domain} {phase_name}particle differential stoichiometry distribution [V-1]'] U = variables[f'X-averaged {domain} {phase_name}particle potential distribution [V]'] T = pybamm.PrimaryBroadcast(variables[f'X-averaged {domain} electrode temperature [K]'], [f'{domain} {phase_name}particle size']) T = pybamm.PrimaryBroadcast(T, [f'{domain} {phase_name}particle']) j = variables[f'X-averaged {domain} electrode {phase_name}interfacial current density distribution [A.m-2]'] c_max = self.phase_param.c_max current = variables['Total current density [A.m-2]'] D_eff = self._get_effective_diffusivity((x * c_max), T, current) f = (self.param.F / (self.param.R * T)) N_s = (((((c_max * x) * (1 - x)) * f) * D_eff) * pybamm.grad(U)) variables.update({f'{Domain} {phase_name}particle rhs [V.s-1]': ((((- (1 / (R_broad_nondim ** 2))) * pybamm.div(N_s)) / c_max) / dxdU), f'{Domain} {phase_name}particle bc [V.m-1]': (((j * R_nondim) / param.F) / pybamm.surf(((((c_max * x) * (1 - x)) * f) * D_eff)))}) if (self.size_distribution is True): variables.update(self._get_standard_diffusivity_distribution_variables(D_eff)) variables.update(self._get_standard_flux_distribution_variables(N_s)) R = variables[f'{Domain} {phase_name}particle sizes [m]'] f_a_dist = self.phase_param.f_a_dist(R) D_eff = pybamm.Integral((f_a_dist * D_eff), R) N_s = pybamm.Integral((f_a_dist * N_s), R) if (self.x_average is True): D_eff = pybamm.SecondaryBroadcast(D_eff, [f'{domain} electrode']) N_s = pybamm.SecondaryBroadcast(N_s, [f'{domain} electrode']) variables.update(self._get_standard_diffusivity_variables(D_eff)) variables.update(self._get_standard_flux_variables(N_s)) return variables def set_rhs(self, variables): (domain, Domain) = self.domain_Domain phase_name = self.phase_name if (self.size_distribution is False): if (self.x_average is False): U = 
variables[f'{Domain} {phase_name}particle potential [V]'] else: U = variables[f'X-averaged {domain} {phase_name}particle potential [V]'] elif (self.x_average is False): U = variables[f'{Domain} {phase_name}particle potential distribution [V]'] else: U = variables[f'X-averaged {domain} {phase_name}particle potential distribution [V]'] self.rhs = {U: variables[f'{Domain} {phase_name}particle rhs [V.s-1]']} def set_boundary_conditions(self, variables): (domain, Domain) = self.domain_Domain phase_name = self.phase_name if (self.size_distribution is False): if (self.x_average is False): U = variables[f'{Domain} {phase_name}particle potential [V]'] else: U = variables[f'X-averaged {domain} {phase_name}particle potential [V]'] elif (self.x_average is False): U = variables[f'{Domain} {phase_name}particle potential distribution [V]'] else: U = variables[f'X-averaged {domain} {phase_name}particle potential distribution [V]'] rbc = variables[f'{Domain} {phase_name}particle bc [V.m-1]'] self.boundary_conditions = {U: {'left': (pybamm.Scalar(0), 'Neumann'), 'right': (rbc, 'Neumann')}} def set_initial_conditions(self, variables): (domain, Domain) = self.domain_Domain phase_name = self.phase_name U_init = self.phase_param.U_init if (self.size_distribution is False): if (self.x_average is False): U = variables[f'{Domain} {phase_name}particle potential [V]'] else: U = variables[f'X-averaged {domain} {phase_name}particle potential [V]'] elif (self.x_average is False): U = variables[f'{Domain} {phase_name}particle potential distribution [V]'] else: U = variables[f'X-averaged {domain} {phase_name}particle potential distribution [V]'] self.initial_conditions = {U: U_init} def _get_standard_potential_variables(self, U): (domain, Domain) = self.domain_Domain phase_name = self.phase_name U_surf = pybamm.surf(U) U_surf_av = pybamm.x_average(U_surf) U_xav = pybamm.x_average(U) U_rav = pybamm.r_average(U) U_av = pybamm.r_average(U_xav) variables = {f'{Domain} {phase_name}particle potential [V]': U, f'X-averaged {domain} {phase_name}particle potential [V]': U_xav, f'R-averaged {domain} {phase_name}particle potential [V]': U_rav, f'Average {domain} {phase_name}particle potential [V]': U_av, f'{Domain} {phase_name}particle surface potential [V]': U_surf, f'X-averaged {domain} {phase_name}particle surface potential [V]': U_surf_av, f'Minimum {domain} {phase_name}particle potential [V]': pybamm.min(U), f'Maximum {domain} {phase_name}particle potential [V]': pybamm.max(U), f'Minimum {domain} {phase_name}particle surface potential [V]': pybamm.min(U_surf), f'Maximum {domain} {phase_name}particle surface potential [V]': pybamm.max(U_surf)} return variables def _get_standard_potential_distribution_variables(self, U): (domain, Domain) = self.domain_Domain phase_name = self.phase_name if ((U.domain == [f'{domain} {phase_name}particle size']) and (U.domains['secondary'] != [f'{domain} electrode'])): U_xav_distribution = pybamm.PrimaryBroadcast(U, [f'{domain} {phase_name}particle']) U_surf_xav_distribution = U U_surf_distribution = pybamm.SecondaryBroadcast(U_surf_xav_distribution, [f'{domain} electrode']) U_distribution = pybamm.PrimaryBroadcast(U_surf_distribution, [f'{domain} {phase_name}particle']) elif ((U.domain == [f'{domain} {phase_name}particle']) and (U.domains['tertiary'] != [f'{domain} electrode'])): U_xav_distribution = U U_surf_xav_distribution = pybamm.surf(U_xav_distribution) U_surf_distribution = pybamm.SecondaryBroadcast(U_surf_xav_distribution, [f'{domain} electrode']) U_distribution = 
pybamm.TertiaryBroadcast(U_xav_distribution, [f'{domain} electrode']) elif ((U.domain == [f'{domain} {phase_name}particle size']) and (U.domains['secondary'] == [f'{domain} electrode'])): U_surf_distribution = U U_surf_xav_distribution = pybamm.x_average(U) U_xav_distribution = pybamm.PrimaryBroadcast(U_surf_xav_distribution, [f'{domain} {phase_name}particle']) U_distribution = pybamm.PrimaryBroadcast(U_surf_distribution, [f'{domain} {phase_name}particle']) else: U_distribution = U U_xav_distribution = pybamm.FullBroadcast(0.5, [f'{domain} {phase_name}particle'], {'secondary': f'{domain} {phase_name}particle size', 'tertiary': 'current collector'}) U_surf_distribution = pybamm.surf(U) U_surf_xav_distribution = pybamm.x_average(U_surf_distribution) U_rav_distribution = pybamm.r_average(U_distribution) U_av_distribution = pybamm.x_average(U_rav_distribution) variables = {f'{Domain} {phase_name}particle potential distribution [V]': U_distribution, f'X-averaged {domain} {phase_name}particle potential distribution [V]': U_xav_distribution, f'R-averaged {domain} {phase_name}particle potential distribution [V]': U_rav_distribution, f'Average {domain} {phase_name}particle potential distribution [V]': U_av_distribution, f'{Domain} {phase_name}particle surface potential distribution [V]': U_surf_distribution, f'X-averaged {domain} {phase_name}particle surface potential distribution [V]': U_surf_xav_distribution} return variables def _get_standard_fractional_occupancy_variables(self, U): options = self.options domain = self.domain d = domain[0] variables = {} N = int(getattr(options, domain)['number of MSMR reactions']) for i in range(N): x = self.phase_param.x_j(U, i) x_surf = pybamm.surf(x) x_surf_av = pybamm.x_average(x_surf) x_xav = pybamm.x_average(x) x_rav = pybamm.r_average(x) x_av = pybamm.r_average(x_xav) variables.update({f'x_{d}_{i}': x, f'X-averaged x_{d}_{i}': x_xav, f'R-averaged x_{d}_{i}': x_rav, f'Average x_{d}_{i}': x_av, f'Surface x_{d}_{i}': x_surf, f'X-averaged surface x_{d}_{i}': x_surf_av}) return variables def _get_standard_differential_fractional_occupancy_variables(self, U): options = self.options domain = self.domain d = domain[0] variables = {} N = int(getattr(options, domain)['number of MSMR reactions']) for i in range(N): dxdU = self.phase_param.dxdU_j(U, i) dxdU_surf = pybamm.surf(dxdU) dxdU_surf_av = pybamm.x_average(dxdU_surf) dxdU_xav = pybamm.x_average(dxdU) dxdU_rav = pybamm.r_average(dxdU) dxdU_av = pybamm.r_average(dxdU_xav) variables.update({f'dxdU_{d}_{i}': dxdU, f'X-averaged dxdU_{d}_{i}': dxdU_xav, f'R-averaged dxdU_{d}_{i}': dxdU_rav, f'Average dxdU_{d}_{i}': dxdU_av, f'Surface dxdU_{d}_{i}': dxdU_surf, f'X-averaged surface dxdU_{d}_{i}': dxdU_surf_av}) return variables def _get_standard_differential_stoichiometry_variables(self, dxdU): (domain, Domain) = self.domain_Domain phase_name = self.phase_name dxdU_surf = pybamm.surf(dxdU) dxdU_surf_av = pybamm.x_average(dxdU_surf) dxdU_xav = pybamm.x_average(dxdU) dxdU_rav = pybamm.r_average(dxdU) dxdU_av = pybamm.r_average(dxdU_xav) variables = {f'{Domain} {phase_name}particle differential stoichiometry [V-1]': dxdU, f'X-averaged {domain} {phase_name}particle differential stoichiometry [V-1]': dxdU_xav, f'R-averaged {domain} {phase_name}particle differential stoichiometry [V-1]': dxdU_rav, f'Average {domain} {phase_name}particle differential stoichiometry [V-1]': dxdU_av, f'{Domain} {phase_name}particle surface differential stoichiometry [V-1]': dxdU_surf, f'X-averaged {domain} {phase_name}particle surface 
differential stoichiometry [V-1]': dxdU_surf_av} return variables def _get_standard_differential_stoichiometry_distribution_variables(self, dxdU): (domain, Domain) = self.domain_Domain phase_name = self.phase_name if ((dxdU.domain == [f'{domain} {phase_name}particle size']) and (dxdU.domains['secondary'] != [f'{domain} electrode'])): dxdU_xav_distribution = pybamm.PrimaryBroadcast(dxdU, [f'{domain} {phase_name}particle']) dxdU_surf_xav_distribution = dxdU dxdU_surf_distribution = pybamm.SecondaryBroadcast(dxdU_surf_xav_distribution, [f'{domain} electrode']) dxdU_distribution = pybamm.PrimaryBroadcast(dxdU_surf_distribution, [f'{domain} {phase_name}particle']) elif ((dxdU.domain == [f'{domain} {phase_name}particle']) and (dxdU.domains['tertiary'] != [f'{domain} electrode'])): dxdU_xav_distribution = dxdU dxdU_surf_xav_distribution = pybamm.surf(dxdU_xav_distribution) dxdU_surf_distribution = pybamm.SecondaryBroadcast(dxdU_surf_xav_distribution, [f'{domain} electrode']) dxdU_distribution = pybamm.TertiaryBroadcast(dxdU_xav_distribution, [f'{domain} electrode']) elif ((dxdU.domain == [f'{domain} {phase_name}particle size']) and (dxdU.domains['secondary'] == [f'{domain} electrode'])): dxdU_surf_distribution = dxdU dxdU_surf_xav_distribution = pybamm.x_average(dxdU) dxdU_xav_distribution = pybamm.PrimaryBroadcast(dxdU_surf_xav_distribution, [f'{domain} {phase_name}particle']) dxdU_distribution = pybamm.PrimaryBroadcast(dxdU_surf_distribution, [f'{domain} {phase_name}particle']) else: dxdU_distribution = dxdU dxdU_xav_distribution = pybamm.FullBroadcast(0.5, [f'{domain} {phase_name}particle'], {'secondary': f'{domain} {phase_name}particle size', 'tertiary': 'current collector'}) dxdU_surf_distribution = pybamm.surf(dxdU) dxdU_surf_xav_distribution = pybamm.x_average(dxdU_surf_distribution) dxdU_rav_distribution = pybamm.r_average(dxdU_distribution) dxdU_av_distribution = pybamm.x_average(dxdU_rav_distribution) variables = {f'{Domain} {phase_name}particle differential stoichiometry distribution [V-1]': dxdU_distribution, f'X-averaged {domain} {phase_name}particle differential stoichiometry distribution [V-1]': dxdU_xav_distribution, f'R-averaged {domain} {phase_name}particle differential stoichiometry distribution [V-1]': dxdU_rav_distribution, f'Average {domain} {phase_name}particle differential stoichiometry distribution [V-1]': dxdU_av_distribution, f'{Domain} {phase_name}particle surface differential stoichiometry distribution [V-1]': dxdU_surf_distribution, f'X-averaged {domain} {phase_name}particle surface differential stoichiometry distribution [V-1]': dxdU_surf_xav_distribution} return variables
def smart_text(s, encoding='utf-8', errors='strict'): if isinstance(s, six.text_type): return s if (not isinstance(s, six.string_types)): if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) elif hasattr(s, '__unicode__'): s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: s = six.text_type(s) return s
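Illustrative checks for smart_text (assumes six is importable, as the function itself requires):

print(smart_text(b'caf\xc3\xa9'))  # bytes decoded as UTF-8 -> 'café'
print(smart_text(42))              # non-strings coerced via six.text_type -> '42'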
def Hamming(term, *others): if isinstance(term, types.GeneratorType): term = [l for l in term] elif (len(others) > 0): term = list(((term,) + others)) lists = [flatten(l) for l in term] assert (all((checkType(l, [Variable]) for l in lists)) and (len(lists) == 2) and (len(lists[0]) == len(lists[1]))) return Sum(((lists[0][j] != lists[1][j]) for j in range(len(lists[0]))))
class CmdLink(COMMAND_DEFAULT_CLASS): key = 'link' locks = 'cmd:perm(link) or perm(Builder)' help_category = 'Building' def func(self): caller = self.caller if (not self.args): caller.msg('Usage: link[/twoway] <object> = <target>') return object_name = self.lhs results = caller.search(object_name, quiet=True) if (len(results) > 1): _AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1)) return _AT_SEARCH_RESULT(results, caller, query=object_name) elif (len(results) == 1): obj = results[0] else: obj = caller.search(object_name, global_search=True) if (not obj): return if self.rhs: target = caller.search(self.rhs, global_search=True) if (not target): return if (target == obj): self.caller.msg('Cannot link an object to itself.') return string = '' note = 'Note: %s(%s) did not have a destination set before. Make sure you linked the right thing.' if (not obj.destination): string = (note % (obj.name, obj.dbref)) if ('twoway' in self.switches): if (not (target.location and obj.location)): string = ('To create a two-way link, %s and %s must both have a location' % (obj, target)) string += ' (i.e. they cannot be rooms, but should be exits).' self.caller.msg(string) return if (not target.destination): string += (note % (target.name, target.dbref)) obj.destination = target.location target.destination = obj.location string += ('\nLink created %s (in %s) <-> %s (in %s) (two-way).' % (obj.name, obj.location, target.name, target.location)) else: obj.destination = target string += ('\nLink created %s -> %s (one way).' % (obj.name, target)) elif (self.rhs is None): dest = obj.destination if dest: string = ('%s is an exit to %s.' % (obj.name, dest.name)) else: string = ('%s is not an exit. Its home location is %s.' % (obj.name, obj.home)) elif obj.destination: obj.destination = None string = ('Former exit %s no longer links anywhere.' % obj.name) else: string = ('%s had no destination to unlink.' % obj.name) caller.msg(string.strip())
class outconv(nn.Module): def __init__(self, in_ch, out_ch): super(outconv, self).__init__() self.conv = nn.Conv2d(in_ch, out_ch, 1) self.conv2 = nn.Conv2d(in_ch, out_ch, 3, padding=1) def forward(self, x): x1 = self.conv(x) x2 = self.conv2(x) return (x1 + x2)
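A shape sanity check for outconv: the 1x1 branch and the padded 3x3 branch both preserve spatial size, so their outputs can be summed elementwise (sizes below are arbitrary):

import torch

head = outconv(in_ch=64, out_ch=2)
x = torch.randn(1, 64, 32, 32)
print(head(x).shape)  # torch.Size([1, 2, 32, 32])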
class _AEADCipherContext(AEADCipherContext): _ctx: (_BackendCipherContext | None) _tag: (bytes | None) def __init__(self, ctx: _BackendCipherContext) -> None: self._ctx = ctx self._bytes_processed = 0 self._aad_bytes_processed = 0 self._tag = None self._updated = False def _check_limit(self, data_size: int) -> None: if (self._ctx is None): raise AlreadyFinalized('Context was already finalized.') self._updated = True self._bytes_processed += data_size if (self._bytes_processed > self._ctx._mode._MAX_ENCRYPTED_BYTES): raise ValueError('{} has a maximum encrypted byte limit of {}'.format(self._ctx._mode.name, self._ctx._mode._MAX_ENCRYPTED_BYTES)) def update(self, data: bytes) -> bytes: self._check_limit(len(data)) assert (self._ctx is not None) return self._ctx.update(data) def update_into(self, data: bytes, buf: bytes) -> int: self._check_limit(len(data)) assert (self._ctx is not None) return self._ctx.update_into(data, buf) def finalize(self) -> bytes: if (self._ctx is None): raise AlreadyFinalized('Context was already finalized.') data = self._ctx.finalize() self._tag = self._ctx.tag self._ctx = None return data def authenticate_additional_data(self, data: bytes) -> None: if (self._ctx is None): raise AlreadyFinalized('Context was already finalized.') if self._updated: raise AlreadyUpdated('Update has been called on this context.') self._aad_bytes_processed += len(data) if (self._aad_bytes_processed > self._ctx._mode._MAX_AAD_BYTES): raise ValueError('{} has a maximum AAD byte limit of {}'.format(self._ctx._mode.name, self._ctx._mode._MAX_AAD_BYTES)) self._ctx.authenticate_additional_data(data)
def getMeshObject(analysis_object): isPresent = False meshObj = [] if analysis_object: members = analysis_object.Group else: members = FreeCAD.activeDocument().Objects for i in members: if (hasattr(i, 'Proxy') and hasattr(i.Proxy, 'Type') and ((i.Proxy.Type == 'FemMeshGmsh') or (i.Proxy.Type == 'CfdMeshCart'))): if isPresent: FreeCAD.Console.PrintError('Analysis contains more than one mesh object.') else: meshObj.append(i) isPresent = True if (not isPresent): meshObj = [None] return (meshObj[0], isPresent)
class Conv2dSamePadding(nn.Conv2d): def __init__(self, image_size, in_channels, out_channels, kernel_size, **kernel_wargs): super().__init__(in_channels, out_channels, kernel_size, **kernel_wargs) (image_h, image_w) = image_size (kernel_h, kernel_w) = self.weight.size()[(- 2):] (stride_h, stride_w) = self.stride (dilation_h, dilation_w) = self.dilation (out_h, out_w) = (math.ceil((image_h / stride_h)), math.ceil((image_w / stride_w))) pad_h = max((((((out_h - 1) * self.stride[0]) + ((kernel_h - 1) * dilation_h)) + 1) - image_h), 0) pad_w = max((((((out_w - 1) * self.stride[1]) + ((kernel_w - 1) * dilation_w)) + 1) - image_w), 0) self.out_h = out_h self.out_w = out_w self.same_padding = None if ((pad_h > 0) or (pad_w > 0)): self.same_padding = nn.ZeroPad2d(((pad_w // 2), (pad_w - (pad_w // 2)), (pad_h // 2), (pad_h - (pad_h // 2)))) self.image_size = image_size def forward(self, x): input_image_size = x.shape[(- 2):] assert (input_image_size == self.image_size), f'Input shape mismatch, got: {input_image_size}, expected: {self.image_size}' if (self.same_padding is not None): x = self.same_padding(x) x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x def flops(self, x): batchsize_per_replica = x.size()[0] return (((((((batchsize_per_replica * self.in_channels) * self.out_channels) * self.kernel_size[0]) * self.kernel_size[1]) * self.out_h) * self.out_w) / self.groups) def activations(self, x, out): return out.numel()
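An illustrative use of Conv2dSamePadding: with stride 2 the output spatial size is ceil(H/2) x ceil(W/2), matching TensorFlow-style 'SAME' padding even for odd inputs:

import torch

conv = Conv2dSamePadding(image_size=(7, 7), in_channels=3, out_channels=8,
                         kernel_size=3, stride=2)
x = torch.randn(1, 3, 7, 7)
print(conv(x).shape)  # torch.Size([1, 8, 4, 4])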
class GRU_Head(nn.Module): def __init__(self, input_dim, hidden_dim, n_class=8): super(GRU_Head, self).__init__() self._name = 'Head' self.GRU_layer = nn.GRU(input_dim, hidden_dim, batch_first=True, bidirectional=True) self.fc_1 = nn.Linear((hidden_dim * 2), n_class) def forward(self, x): (B, N, C) = x.size() self.GRU_layer.flatten_parameters() f0 = F.relu(self.GRU_layer(x)[0]) output = self.fc_1(f0) return {'output': output, 'feature': f0}
class ExportStaticFiles(ProductionCommand): keyword = 'exportstatics' def assemble(self): super().assemble() self.parser.add_argument('destination_directory', type=str, help='the destination directory to export to') def execute(self, args): super().execute(args) if os.path.exists(args.destination_directory): raise DomainException(message=('The path %s already exists. Please move it out of the way first.' % args.destination_directory)) try: os.mkdir(args.destination_directory) except Exception as ex: raise DomainException(message=('Could not create %s: %s' % (args.destination_directory, str(ex)))) for packaged_file in self.config.web.frontend_libraries.packaged_files(): print(('extracting %s' % packaged_file.full_path)) shutil.copy(packaged_file.full_path, args.destination_directory)
class ObjectModel(): def __init__(self): self._instance = None def __repr__(self): result = [] cname = type(self).__name__ string = '%(cname)s(%(fields)s)' alignment = (len(cname) + 1) for field in sorted(self.attributes()): width = ((80 - len(field)) - alignment) lines = pprint.pformat(field, indent=2, width=width).splitlines(True) inner = [lines[0]] for line in lines[1:]: inner.append(((' ' * alignment) + line)) result.append(field) return (string % {'cname': cname, 'fields': (',\n' + (' ' * alignment)).join(result)}) def __call__(self, instance): self._instance = instance return self def __get__(self, instance, cls=None): return self(instance) def __contains__(self, name) -> bool: return (name in self.attributes()) @lru_cache def attributes(self) -> list[str]: return [o[LEN_OF_IMPL_PREFIX:] for o in dir(self) if o.startswith(IMPL_PREFIX)] def lookup(self, name): if (name in self.attributes()): return getattr(self, (IMPL_PREFIX + name)) raise AttributeInferenceError(target=self._instance, attribute=name) @property def attr___new__(self) -> bases.BoundMethod: from astroid import builder node: nodes.FunctionDef = builder.extract_node('def __new__(self, cls): return cls()') node.parent = AstroidManager().builtins_module['object'] return bases.BoundMethod(proxy=node, bound=_get_bound_node(self)) @property def attr___init__(self) -> bases.BoundMethod: from astroid import builder node: nodes.FunctionDef = builder.extract_node('def __init__(self, *args, **kwargs): return None') node.parent = AstroidManager().builtins_module['object'] return bases.BoundMethod(proxy=node, bound=_get_bound_node(self))
@pytest.mark.parametrize('molecule, n_rotatables', [pytest.param('bace0.pdb', 2, id='bace0pdb'), pytest.param('butane.pdb', 1, id='butanepdb'), pytest.param('biphenyl.pdb', 1, id='biphenylpdb')]) def test_find_rotatable_bonds_n_rotatables(molecule, n_rotatables): mol = Ligand.from_file(get_data(molecule)) assert (len(mol.find_rotatable_bonds(['[*:1]-[CH3:2]', '[*:1]-[NH2:2]'])) == n_rotatables)
class BiSeNetOutput(nn.Module): def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs): super(BiSeNetOutput, self).__init__() self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False) self.init_weight() def forward(self, x): x = self.conv(x) x = self.conv_out(x) return x def init_weight(self): for ly in self.children(): if isinstance(ly, nn.Conv2d): nn.init.kaiming_normal_(ly.weight, a=1) if (not (ly.bias is None)): nn.init.constant_(ly.bias, 0) def get_params(self): (wd_params, nowd_params) = ([], []) for (name, module) in self.named_modules(): if (isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d)): wd_params.append(module.weight) if (not (module.bias is None)): nowd_params.append(module.bias) elif isinstance(module, nn.BatchNorm2d): nowd_params += list(module.parameters()) return (wd_params, nowd_params)
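A shape check for BiSeNetOutput, assuming the repository's ConvBNReLU helper (which preserves spatial size with ks=3, padding=1) is in scope; channel counts are arbitrary examples:

import torch

head = BiSeNetOutput(in_chan=128, mid_chan=64, n_classes=19)
x = torch.randn(2, 128, 32, 32)
print(head(x).shape)  # torch.Size([2, 19, 32, 32])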
class Light:
    location: str = ''
    level: int = 0

    def __init__(self, location: str):
        self.location = location

    def on(self) -> None:
        self.level = 100
        print(f'{self.location} light is on')

    def off(self) -> None:
        self.level = 0
        print(f'{self.location} light is off')

    def dim(self, level: int) -> None:
        self.level = level
        if level == 0:
            self.off()
        else:
            print(f'Light is dimmed to {self.level}%')

    def getLevel(self) -> int:
        return self.level
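# Minimal usage sketch for the Light class above; output strings follow
# the prints defined in the class:
hall = Light('Hall')
hall.on()                       # 'Hall light is on'
hall.dim(40)                    # 'Light is dimmed to 40%'
assert hall.getLevel() == 40
hall.dim(0)                     # level 0 delegates to off()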
class ZeroShotCoTMethod(PromptMethod):
    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)

    def run(self, x: Union[str, Dict], in_context_examples: List[Dict] = None,
            prompt_file_path: Optional[str] = None, **kwargs: Any) -> Union[str, List[str]]:
        if isinstance(x, str):
            raise NotImplementedError
        # Step 1: elicit the chain of thought.
        step_1_prompt = PromptBuilder.build_prompt(
            x=x,
            in_context_examples=in_context_examples if in_context_examples else self.kwargs.get('in_context_examples', None),
            prompt_file_path=prompt_file_path if prompt_file_path else self.kwargs.get('prompt_file_path', None),
            transform=kwargs['transform'] if 'transform' in kwargs else self.kwargs.get('transform', None),
            extraction_words=kwargs['extraction_words'] if 'extraction_words' in kwargs else self.kwargs.get('extraction_words', None))
        chain_of_thought = self.run_lm(step_1_prompt, **kwargs)
        assert isinstance(chain_of_thought, str), 'Assume no over sampling in this implementation'
        x['chain_of_thought'] = chain_of_thought
        # Step 2: answer conditioned on the generated chain of thought.
        step_2_prompt = PromptBuilder.build_prompt(
            x=x,
            in_context_examples=in_context_examples if in_context_examples else self.kwargs.get('in_context_examples', None),
            prompt_file_path=prompt_file_path if prompt_file_path else self.kwargs.get('prompt_file_path', None),
            transform=kwargs['transform'] if 'transform' in kwargs else self.kwargs.get('transform', None),
            extraction_words=kwargs['extraction_words'] if 'extraction_words' in kwargs else self.kwargs.get('extraction_words', None))
        y = self.run_lm(step_2_prompt, **kwargs)
        return y
def main(args):
    with open(args.parsed_ace_roles) as f:
        parsed_ace_roles = json.load(f)
    name_to_ontology = {parsed['name']: parsed for parsed in parsed_ace_roles}
    print('Ontology loaded.')
    with open('.'.join(args.parsed_ace_roles.split('.')[:-1]) + '.py') as f:
        ace_roles_full_context = f.read()
    subset = args.input_filepath.split('/')[-1].split('.')[0]
    pathlib.Path(args.output_filedir).mkdir(parents=True, exist_ok=True)
    output_filepath = os.path.join(args.output_filedir, subset + '.jsonl')
    output_code_dir = os.path.join(args.output_filedir, subset)
    pathlib.Path(output_code_dir).mkdir(parents=True, exist_ok=True)
    print(f'Writing mapping (.json) to {output_filepath}, code (.py) to {output_code_dir}')
    with open(args.input_filepath, 'r') as fread, open(output_filepath, 'w') as fwrite:
        for line_idx, line in tqdm(enumerate(fread)):
            ex = json.loads(line)
            if args.predict_event_type:
                process_sentence_events(ex, ace_roles_full_context, name_to_ontology,
                                        line_idx, output_code_dir, fwrite, args)
            else:
                process_single_event(ex, name_to_ontology, line_idx, output_code_dir,
                                     fwrite, ace_roles_full_context, args)
def getItemAttrs(typeid):
    attrs = {}
    cursor.execute(QUERY_TYPEID_ATTRIBS, (typeid,))
    for row in cursor:
        attrs[row[0]] = row[1]
    cursor.execute(QUERY_TYPEID_BASEATTRIBS, (typeid,))
    for row in cursor:
        if row[0] is not None:
            attrs['volume'] = row[0]
        if row[1] is not None:
            attrs['mass'] = row[1]
        if row[2] is not None:
            attrs['capacity'] = row[2]
    return attrs
def get_contractreceivechannelsettled_data_from_event(
        chain_state: ChainState, event: DecodedEvent) -> Optional[ChannelSettleState]:
    args = event.event_data['args']
    token_network_address = TokenNetworkAddress(event.originating_contract)
    channel_identifier = args['channel_identifier']
    participant1 = args['participant1']
    participant2 = args['participant2']
    locksroot_participant1 = args['participant1_locksroot']
    amount_participant1 = args['participant1_amount']
    locksroot_participant2 = args['participant2_locksroot']
    amount_participant2 = args['participant2_amount']
    canonical_identifier = CanonicalIdentifier(
        chain_identifier=chain_state.chain_id,
        token_network_address=token_network_address,
        channel_identifier=channel_identifier)
    token_network_state = views.get_token_network_by_address(chain_state, token_network_address)
    msg = f'Could not find token network for address {to_checksum_address(token_network_address)}'
    assert token_network_state is not None, msg
    channel_state = views.get_channelstate_by_canonical_identifier(
        chain_state=chain_state, canonical_identifier=canonical_identifier)
    if not channel_state:
        return None
    if participant1 == to_checksum_address(chain_state.our_address):
        our_locksroot = locksroot_participant1
        our_amount = amount_participant1
        partner_locksroot = locksroot_participant2
        partner_amount = amount_participant2
    elif participant2 == to_checksum_address(chain_state.our_address):
        our_locksroot = locksroot_participant2
        our_amount = amount_participant2
        partner_locksroot = locksroot_participant1
        partner_amount = amount_participant1
    else:
        raise RaidenUnrecoverableError(
            f"Received settle event that we're not a part of. "
            f"Settlement was between {participant1} and {participant2}")
    return ChannelSettleState(canonical_identifier, our_amount, our_locksroot,
                              partner_amount, partner_locksroot)
def _train(args, device):
    print('==> Loading data generator... ')
    train_gen_list, elmo, char_vocab = get_all_datasets(args)
    if args.model_type == 'ETModel':
        print('==> ETModel')
        model = models.ETModel(args, constant.ANSWER_NUM_DICT[args.goal])
    else:
        print('ERROR: Invalid model type: -model_type ' + args.model_type)
        raise NotImplementedError
    model.to(device)
    total_loss = 0
    batch_num = 0
    best_macro_f1 = 0.0
    start_time = time.time()
    init_time = time.time()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    if args.load:
        load_model(args.reload_model_name, constant.EXP_ROOT, args.model_id, model, optimizer)
    for idx, m in enumerate(model.modules()):
        logging.info(str(idx) + '->' + str(m))
    while True:
        batch_num += 1
        for data_gen in train_gen_list:
            try:
                batch = next(data_gen)
                batch, _ = to_torch(batch, device)
            except StopIteration:
                logging.info('finished at ' + str(batch_num))
                print('Done!')
                torch.save({'state_dict': model.state_dict(),
                            'optimizer': optimizer.state_dict()},
                           '{0:s}/{1:s}.pt'.format(constant.EXP_ROOT, args.model_id))
                return
            optimizer.zero_grad()
            loss, output_logits, _ = model(batch)
            loss.backward()
            total_loss += loss.item()
            optimizer.step()
            if (batch_num % args.log_period == 0) and (batch_num > 0):
                gc.collect()
                cur_loss = float(1.0 * loss.clone().item())
                elapsed = time.time() - start_time
                train_loss_str = '|loss {0:3f} | at {1:d}step | {2:.2f} ms/batch'.format(
                    cur_loss, batch_num, elapsed * 1000 / args.log_period)
                start_time = time.time()
                print(train_loss_str)
                logging.info(train_loss_str)
            if (batch_num % args.eval_period == 0) and (batch_num > 0):
                output_index = get_output_index(output_logits, threshold=args.threshold)
                gold_pred_train = get_gold_pred_str(output_index, batch['y'].data.cpu().clone(), args.goal)
                accuracy = sum([set(y) == set(yp) for y, yp in gold_pred_train]) * 1.0 / len(gold_pred_train)
                train_acc_str = '==> Train accuracy: {0:.1f}%'.format(accuracy * 100)
                print(train_acc_str)
                logging.info(train_acc_str)
            if (batch_num % args.eval_period == 0) and (batch_num > args.eval_after):
                print('---- eval at step {0:d} ---'.format(batch_num))
                _, macro_f1 = evaluate_data(batch_num, args.dev_data, model, args, elmo,
                                            device, char_vocab, dev_type='original')
                if best_macro_f1 < macro_f1:
                    best_macro_f1 = macro_f1
                    save_fname = '{0:s}/{1:s}_best.pt'.format(constant.EXP_ROOT, args.model_id)
                    torch.save({'state_dict': model.state_dict(),
                                'optimizer': optimizer.state_dict()}, save_fname)
                    print('Total {0:.2f} minutes have passed, saving at {1:s} '.format(
                        (time.time() - init_time) / 60, save_fname))
            if (batch_num % args.save_period == 0) and (batch_num > args.save_after):
                save_fname = '{0:s}/{1:s}_{2:d}.pt'.format(constant.EXP_ROOT, args.model_id, batch_num)
                torch.save({'state_dict': model.state_dict(),
                            'optimizer': optimizer.state_dict()}, save_fname)
                print('Total {0:.2f} minutes have passed, saving at {1:s} '.format(
                    (time.time() - init_time) / 60, save_fname))
class ResNetEncoder(nn.Module):
    def __init__(self, num_input_channels: int = 1, num_features: List = None,
                 verbose: bool = False) -> None:
        super().__init__()
        if num_features is None:
            num_features = [8, 16, 32, 64, 256]
        self.verbose = verbose
        self.num_features = [num_input_channels] + num_features
        self.network = nn.Sequential(
            nn.Conv3d(self.num_features[0], self.num_features[1], kernel_size=7, stride=1, padding=3, bias=False),
            nn.ReLU(),
            nn.BatchNorm3d(self.num_features[1]),
            nn.Conv3d(self.num_features[1], self.num_features[1], kernel_size=4, stride=2, padding=1, bias=False),
            ResNetBlock(self.num_features[1]),
            nn.ReLU(),
            nn.BatchNorm3d(self.num_features[1]),
            nn.Conv3d(self.num_features[1], self.num_features[2], kernel_size=4, stride=2, padding=1, bias=False),
            ResNetBlock(self.num_features[2]),
            nn.ReLU(),
            nn.BatchNorm3d(self.num_features[2]),
            nn.Conv3d(self.num_features[2], self.num_features[3], kernel_size=4, stride=2, padding=1, bias=False),
            ResNetBlock(self.num_features[3]),
            nn.ReLU(),
            nn.BatchNorm3d(self.num_features[3]),
            nn.Conv3d(self.num_features[3], self.num_features[4], kernel_size=4, stride=2, padding=1, bias=False),
            ResNetBlock(self.num_features[4]),
            nn.ReLU(),
            nn.BatchNorm3d(self.num_features[4]),
            nn.Conv3d(self.num_features[4], self.num_features[5], kernel_size=2, stride=1, padding=0, bias=False),
            nn.BatchNorm3d(self.num_features[5]))
        self.init_weights()

    def init_weights(self) -> None:
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        layers = list(self.network.children())
        for depth, layer in enumerate(layers):
            shape_before = x.data[0].size()
            x = layer(x)
            shape_after = x.data[0].size()
            if self.verbose is True:
                print(f'Layer {depth}: {shape_before} --> {shape_after}')
        self.verbose = False
        return x
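# Rough shape walk-through (illustrative, assumes ResNetBlock is defined
# elsewhere in the module): the four stride-2 convolutions halve each
# spatial dimension, so a 32^3 input reaches 2^3 before the final kernel-2
# valid convolution collapses it to 1^3:
#
#   enc = ResNetEncoder(num_input_channels=1)
#   z = enc(torch.randn(4, 1, 32, 32, 32))   # -> (4, 256, 1, 1, 1)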
class ImageNet_hdf5(data.Dataset):
    def __init__(self, data_dir, dataidxs=None, train=True, transform=None,
                 target_transform=None, download=False):
        self.dataidxs = dataidxs
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.download = download
        self.hdf5fn = os.path.join(data_dir)
        self.all_data_hdf5 = DatasetHDF5(self.hdf5fn, 'train' if self.train else 'val',
                                         transform=self.transform,
                                         target_transform=self.target_transform)
        self.data_local_num_dict, self.net_dataidx_map = self._get_net_dataidx_map()
        self.all_data_idx = range(len(self.all_data_hdf5))
        if dataidxs is None:
            self.local_data_idx = self.all_data_idx
        elif isinstance(dataidxs, int):
            self.local_data_idx = self.net_dataidx_map[dataidxs]
        else:
            self.local_data_idx = []
            for idxs in dataidxs:
                self.local_data_idx += self.net_dataidx_map[idxs]

    def _get_net_dataidx_map(self):
        data_local_num_dict = dict()
        net_dataidx_map = dict()
        for i, label in enumerate(self.all_data_hdf5.dlabel):
            label_int = np.int64(label)
            if label_int in net_dataidx_map:
                net_dataidx_map[label_int].append(i)
            else:
                net_dataidx_map[label_int] = [i]
        for key, value in net_dataidx_map.items():
            data_local_num_dict[key] = len(value)
        return data_local_num_dict, net_dataidx_map

    def get_net_dataidx_map(self):
        return self.net_dataidx_map

    def get_data_local_num_dict(self):
        return self.data_local_num_dict

    def __getitem__(self, index):
        img, target = self.all_data_hdf5[self.local_data_idx[index]]
        img = transforms.ToPILImage()(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        return len(self.local_data_idx)
def test_archs_default(platform, intercepted_build_args):
    main()
    options = intercepted_build_args.args[0]
    if platform == 'linux':
        assert options.globals.architectures == {Architecture.x86_64, Architecture.i686}
    elif platform == 'windows':
        assert options.globals.architectures == {Architecture.AMD64, Architecture.x86}
    else:
        assert options.globals.architectures == {Architecture.x86_64}
class VeloxFunctional(types.ModuleType):
    def __init__(self):
        super().__init__('torcharrow.velox_rt.functional')
        self._populate_udfs()

    @staticmethod  # must be static: __getattr__ calls it with op_name as the only argument
    def create_dispatch_wrapper(op_name: str):
        def dispatch(*args):
            wrapped_args = []
            first_col = next((arg for arg in args if isinstance(arg, ColumnCpuMixin)), None)
            if first_col is None:
                raise AssertionError('None of the argument is Column')
            length = len(first_col)
            for arg in args:
                if isinstance(arg, ColumnCpuMixin):
                    wrapped_args.append(arg._data)
                else:
                    wrapped_args.append(ta.ConstantColumn(arg, length))
            result_col = ta.generic_udf_dispatch(op_name, wrapped_args)
            result_dtype = result_col.dtype().with_null(True)
            return ColumnCpuMixin._from_velox(first_col.device, result_dtype, result_col, True)

        def factory_dispatch(*args, size=None, device='cpu'):
            if size is None:
                raise AssertionError(f'Factory method call {op_name} requires explicit size parameter')
            wrapped_args = []
            for arg in args:
                wrapped_args.append(ta.ConstantColumn(arg, size))
            wrapped_args.append(size)
            result_col = ta.factory_udf_dispatch(op_name, *wrapped_args)
            result_dtype = result_col.dtype().with_null(True)
            return ColumnCpuMixin._from_velox(device, result_dtype, result_col, True)

        if op_name in global_functional._factory_methods:
            return factory_dispatch
        else:
            return dispatch

    def __getattr__(self, op_name: str):
        dispatch_wrapper = self.create_dispatch_wrapper(op_name)
        setattr(self, op_name, dispatch_wrapper)
        return dispatch_wrapper

    def _populate_udfs(self):
        pass
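# Hypothetical dispatch example (names illustrative, not verified against
# the torcharrow API): attribute access on this module object resolves to
# a generic UDF dispatcher, so a Velox function could be called directly:
#
#   from torcharrow.velox_rt import functional
#   upper_col = functional.upper(some_string_column)
#
# Scalar arguments are broadcast via ta.ConstantColumn to the length of
# the first Column argument found in *args.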
class MindDataset(Dataset):
    def __init__(self, root: str, tokenizer: AutoTokenizer, mode: str = 'train',
                 split: str = 'small', news_max_len: int = 20, hist_max_len: int = 20,
                 seq_max_len: int = 300) -> None:
        super(MindDataset, self).__init__()
        self.data_path = os.path.join(root, split)
        self._mode = mode
        self._split = split
        self._tokenizer = tokenizer
        self._news_max_len = news_max_len
        self._hist_max_len = hist_max_len
        self._seq_max_len = seq_max_len
        self._examples = self.get_examples(negative_sampling=4)
        self._news = self.process_news()

    def get_examples(self, negative_sampling: Optional[int] = None) -> Any:
        # negative_sampling is the sampling ratio (the source annotated it as bool).
        behavior_file = os.path.join(self.data_path, self._mode, 'behaviors.tsv')
        if self._split == 'small':
            df = pd.read_csv(behavior_file, sep='\t', header=None,
                             names=['user_id', 'time', 'news_history', 'impressions'])
            df['impression_id'] = list(range(len(df)))
        else:
            df = pd.read_csv(behavior_file, sep='\t', header=None,
                             names=['impression_id', 'user_id', 'time', 'news_history', 'impressions'])
        if self._mode == 'train':
            df = df.dropna(subset=['news_history'])
        df['news_history'] = df['news_history'].fillna('')
        if (self._mode == 'train') and (negative_sampling is not None):
            df['impressions'] = df['impressions'].apply(lambda x: sampling(x, ratio=negative_sampling))
        df = df.drop('impressions', axis=1).join(
            df['impressions'].str.split(' ', expand=True).stack()
            .reset_index(level=1, drop=True).rename('impression'))
        if self._mode == 'test':
            df['news_id'] = df['impression']
            df['click'] = [-1] * len(df)
        else:
            df[['news_id', 'click']] = df['impression'].str.split('-', expand=True)
            df['click'] = df['click'].astype(int)
        return df

    def process_news(self) -> Dict[str, Any]:
        filepath = os.path.join(self.data_path, 'news_dict.pkl')
        if os.path.exists(filepath):
            print('Loading news info from', filepath)
            with open(filepath, 'rb') as fin:
                news = pickle.load(fin)
            return news
        news = dict()
        news = self.read_news(news, os.path.join(self.data_path, 'train'))
        news = self.read_news(news, os.path.join(self.data_path, 'dev'))
        if self._split == 'large':
            news = self.read_news(news, os.path.join(self.data_path, 'test'))
        print('Saving news info from', filepath)
        with open(filepath, 'wb') as fout:
            pickle.dump(news, fout)
        return news

    def read_news(self, news: Dict[str, Any], filepath: str,
                  drop_stopword: bool = True) -> Dict[str, Any]:
        with open(os.path.join(filepath, 'news.tsv'), encoding='utf-8') as f:
            lines = f.readlines()
        for line in lines:
            splitted = line.strip('\n').split('\t')
            news_id = splitted[0]
            if news_id in news:
                continue
            title = splitted[3].lower()
            abstract = splitted[4].lower()
            if drop_stopword:
                title = remove_stopword(title)
                abstract = remove_stopword(abstract)
            news[news_id] = dict()
            title_words = self._tokenizer.tokenize(title)
            news[news_id]['title'] = self._tokenizer.convert_tokens_to_ids(title_words)
            abstract_words = self._tokenizer.tokenize(abstract)
            news[news_id]['abstract'] = self._tokenizer.convert_tokens_to_ids(abstract_words)
        return news

    def collate(self, batch: Dict[str, Any]):
        input_ids = torch.tensor([item['input_ids'] for item in batch])
        segment_ids = torch.tensor([item['segment_ids'] for item in batch])
        input_mask = torch.tensor([item['input_mask'] for item in batch])
        news_segment_ids = torch.tensor([item['news_segment_ids'] for item in batch])
        sentence_ids = torch.tensor([item['sentence_ids'] for item in batch])
        sentence_mask = torch.tensor([item['sentence_mask'] for item in batch])
        sentence_segment_ids = torch.tensor([item['sentence_segment_ids'] for item in batch])
        inputs = {'input_ids': input_ids, 'segment_ids': segment_ids,
                  'input_mask': input_mask, 'news_segment_ids': news_segment_ids,
                  'sentence_ids': sentence_ids, 'sentence_mask': sentence_mask,
                  'sentence_segment_ids': sentence_segment_ids}
        if self._mode == 'train':
            inputs['label'] = torch.tensor([item['label'] for item in batch])
            return inputs
        elif self._mode == 'dev':
            inputs['impression_id'] = [item['impression_id'] for item in batch]
            inputs['label'] = torch.tensor([item['label'] for item in batch])
            return inputs
        elif self._mode == 'test':
            inputs['impression_id'] = [item['impression_id'] for item in batch]
            return inputs  # the source fell through here and implicitly returned None
        else:
            raise ValueError('Mode must be `train`, `dev` or `test`.')

    def pack_bert_features(self, example: Any):
        curr_news = self._news[example['news_id']]['title'][:self._news_max_len]
        news_segment_ids = []
        hist_news = []
        sentence_ids = [0, 1, 2]
        for i, ns in enumerate(example['news_history'].split()[:self._hist_max_len]):
            ids = self._news[ns]['title'][:self._news_max_len]
            hist_news += ids
            news_segment_ids += [i + 2] * len(ids)
            sentence_ids.append(sentence_ids[-1] + 1)
        tmp_hist_len = self._seq_max_len - len(curr_news) - 3
        hist_news = hist_news[:tmp_hist_len]
        input_ids = ([self._tokenizer.cls_token_id] + curr_news + [self._tokenizer.sep_token_id]
                     + hist_news + [self._tokenizer.sep_token_id])
        news_segment_ids = [0] + [1] * len(curr_news) + [0] + news_segment_ids[:tmp_hist_len] + [0]
        segment_ids = [0] * (len(curr_news) + 2) + [1] * (len(hist_news) + 1)
        input_mask = [1] * len(input_ids)
        padding_len = self._seq_max_len - len(input_ids)
        input_ids = input_ids + [self._tokenizer.pad_token_id] * padding_len
        input_mask = input_mask + [0] * padding_len
        segment_ids = segment_ids + [0] * padding_len
        news_segment_ids = news_segment_ids + [0] * padding_len
        sentence_segment_ids = [0] * 3 + [1] * (len(sentence_ids) - 3)
        sentence_max_len = 3 + self._hist_max_len
        sentence_mask = [1] * len(sentence_ids)
        padding_len = sentence_max_len - len(sentence_ids)
        sentence_ids = sentence_ids + [0] * padding_len
        sentence_mask = sentence_mask + [0] * padding_len
        sentence_segment_ids = sentence_segment_ids + [0] * padding_len
        assert len(input_ids) == self._seq_max_len
        assert len(input_mask) == self._seq_max_len
        assert len(segment_ids) == self._seq_max_len
        assert len(news_segment_ids) == self._seq_max_len
        assert len(sentence_ids) == sentence_max_len
        assert len(sentence_mask) == sentence_max_len
        assert len(sentence_segment_ids) == sentence_max_len
        return (input_ids, input_mask, segment_ids, news_segment_ids,
                sentence_ids, sentence_mask, sentence_segment_ids)

    def __getitem__(self, index: int) -> Dict[str, Any]:
        example = self._examples.iloc[index]
        (input_ids, input_mask, segment_ids, news_segment_ids,
         sentence_ids, sentence_mask, sentence_segment_ids) = self.pack_bert_features(example)
        inputs = {'input_ids': input_ids, 'segment_ids': segment_ids,
                  'input_mask': input_mask, 'news_segment_ids': news_segment_ids,
                  'sentence_ids': sentence_ids, 'sentence_mask': sentence_mask,
                  'sentence_segment_ids': sentence_segment_ids}
        if self._mode == 'train':
            inputs['label'] = example['click']
            return inputs
        elif self._mode == 'dev':
            inputs['impression_id'] = example['impression_id']
            inputs['label'] = example['click']
            return inputs
        elif self._mode == 'test':
            inputs['impression_id'] = example['impression_id']
            return inputs
        else:
            raise ValueError('Mode must be `train`, `dev` or `test`.')

    def __len__(self) -> int:
        return len(self._examples)
def _uda_concat_dataset(cfg, default_args=None):
    from .uda_concat import UDAConcatDataset
    img_dir = cfg['img_dir']
    ann_dir = cfg.get('ann_dir', None)
    split = cfg.get('split', None)
    separate_eval = cfg.pop('separate_eval', True)
    num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1
    if ann_dir is not None:
        num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
    else:
        num_ann_dir = 0
    if split is not None:
        num_split = len(split) if isinstance(split, (list, tuple)) else 1
    else:
        num_split = 0
    if num_img_dir > 1:
        assert (num_img_dir == num_ann_dir) or (num_ann_dir == 0)
        assert (num_img_dir == num_split) or (num_split == 0)
    else:
        assert (num_split == num_ann_dir) or (num_ann_dir <= 1)
    num_dset = max(num_split, num_img_dir)
    datasets = []
    for i in range(num_dset):
        data_cfg = copy.deepcopy(cfg)
        if isinstance(img_dir, (list, tuple)):
            data_cfg['img_dir'] = img_dir[i]
        if isinstance(ann_dir, (list, tuple)):
            data_cfg['ann_dir'] = ann_dir[i]
        if isinstance(split, (list, tuple)):
            data_cfg['split'] = split[i]
        datasets.append(build_uda_dataset(data_cfg, default_args))
    return UDAConcatDataset(datasets, separate_eval)
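# Example of a config this builder splits, with parallel per-source lists
# (keys and paths illustrative; the exact schema is an assumption):
#
#   cfg = dict(type='UDADataset',
#              img_dir=['data/gta/images', 'data/synthia/images'],
#              ann_dir=['data/gta/labels', 'data/synthia/labels'],
#              pipeline=[...])
#   dataset = _uda_concat_dataset(cfg)   # UDAConcatDataset over 2 datasets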
class ClockItem(pg.ItemGroup):
    def __init__(self, clock):
        pg.ItemGroup.__init__(self)
        self.size = clock.size
        self.item = QtWidgets.QGraphicsEllipseItem(QtCore.QRectF(0, 0, self.size, self.size))
        tr = QtGui.QTransform.fromTranslate(-self.size * 0.5, -self.size * 0.5)
        self.item.setTransform(tr)
        self.item.setPen(pg.mkPen(100, 100, 100))
        self.item.setBrush(clock.brush)
        self.hand = QtWidgets.QGraphicsLineItem(0, 0, 0, self.size * 0.5)
        self.hand.setPen(pg.mkPen('w'))
        self.hand.setZValue(10)
        self.flare = QtWidgets.QGraphicsPolygonItem(QtGui.QPolygonF([
            QtCore.QPointF(0, -self.size * 0.25),
            QtCore.QPointF(0, self.size * 0.25),
            QtCore.QPointF(self.size * 1.5, 0),
            QtCore.QPointF(0, -self.size * 0.25)]))
        self.flare.setPen(pg.mkPen('y'))
        self.flare.setBrush(pg.mkBrush(255, 150, 0))
        self.flare.setZValue(-10)
        self.addItem(self.hand)
        self.addItem(self.item)
        self.addItem(self.flare)
        self.clock = clock
        self.i = 1
        self._spaceline = None

    def spaceline(self):
        if self._spaceline is None:
            self._spaceline = pg.InfiniteLine()
            self._spaceline.setPen(self.clock.pen)
        return self._spaceline

    def stepTo(self, t):
        data = self.clock.refData
        while (self.i < len(data) - 1) and (data['t'][self.i] < t):
            self.i += 1
        while (self.i > 1) and (data['t'][self.i - 1] >= t):
            self.i -= 1
        self.setPos(data['x'][self.i], self.clock.y0)
        t = data['pt'][self.i]
        self.hand.setRotation(-0.25 * t * 360.0)
        v = data['v'][self.i]
        gam = (1.0 - v ** 2) ** 0.5
        self.setTransform(QtGui.QTransform.fromScale(gam, 1.0))
        f = data['f'][self.i]
        tr = QtGui.QTransform()
        if f < 0:
            tr.translate(self.size * 0.4, 0)
        else:
            tr.translate(-self.size * 0.4, 0)
        tr.scale(-f * (0.5 + np.random.random() * 0.1), 1.0)
        self.flare.setTransform(tr)
        if self._spaceline is not None:
            self._spaceline.setPos(pg.Point(data['x'][self.i], data['t'][self.i]))
            self._spaceline.setAngle(data['v'][self.i] * 45.0)

    def reset(self):
        self.i = 1
def clean_up(molecule, delete_input=True, delete_output=False):
    input_file = molecule.filename + '.inp'
    output_file = molecule.filename + '.out'
    run_directory = os.getcwd()
    for local_file in os.listdir(run_directory):
        if local_file.endswith('.clean'):
            os.remove(os.path.join(run_directory, local_file))
    try:
        os.remove('timer.dat')
    except OSError:  # narrowed from a bare except; the file may simply not exist
        pass
    if delete_input:
        os.remove(input_file)
    if delete_output:
        os.remove(output_file)
def add_common_options(parser):
    parser.add_option('--show_defaults', '-d', action='store_false',
                      help='Show the default values of command options. Must be typed before help option.')
    parser.add_option('--plot_velocity', '-v', action='store_true',
                      help='Plot the velocity traces also.', default=False)
    parser.add_option('--output-format', help='The format of the report: pdf or html',
                      choices=['pdf', 'html'], default='pdf')
    parser.add_option('--pdf_dir', '-p',
                      help='The directory where the pdf/html will be saved to. Default is the HOME directory.',
                      default=None)
    parser.add_option('--output', '-o',
                      help='The full path and name to save the resulting configuration file.',
                      default=None)
    parser.add_option('--report_only', '-r', dest='plot_everything', action='store_false',
                      default=True, help='Do not plot the trace graphs')
def collate_fn_all(batch):
    obj_point_list, obj_label_list = [], []
    rel_point_list, rel_label_list = [], []
    edge_indices = []
    for i in batch:
        obj_point_list.append(i[0])
        obj_label_list.append(i[3])
        rel_point_list.append(i[2])
        rel_label_list.append(i[4])
        edge_indices.append(i[5])
    return (torch.cat(obj_point_list, dim=0), torch.cat(obj_label_list, dim=0),
            torch.cat(rel_point_list, dim=0), torch.cat(rel_label_list, dim=0),
            torch.cat(edge_indices, dim=0))
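# Sketch exercising collate_fn_all with dummy tensors (torch assumed); the
# per-item layout (index 1 unused, edge indices at index 5) is inferred
# from the code above, so treat these shapes as illustrative:
import torch
item = (torch.rand(3, 128, 3), None, torch.rand(2, 256, 4),
        torch.zeros(3, dtype=torch.long), torch.zeros(2, dtype=torch.long),
        torch.zeros(2, 2, dtype=torch.long))
obj_pts, obj_lbl, rel_pts, rel_lbl, edges = collate_fn_all([item, item])
assert obj_pts.shape == (6, 128, 3) and rel_pts.shape == (4, 256, 4)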
def add_flops_counting_methods(net_main_module):
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
    net_main_module.reset_flops_count()
    net_main_module.apply(add_flops_mask_variable_or_reset)
    return net_main_module
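# Typical lifecycle, assuming the start/stop/reset/compute helpers bound
# above are defined in the same module (MyModel and dummy_input are
# placeholders):
#
#   net = add_flops_counting_methods(MyModel())
#   net.start_flops_count()
#   _ = net(dummy_input)
#   flops = net.compute_average_flops_cost()
#   net.stop_flops_count()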
def test_decorator_of_context_manager():
    data = []

    class Context:
        """Dummy context"""  # docstring restored to satisfy the assertion below

        def __init__(self, key):
            self.key = key

        def __enter__(self):
            data.append('enter %s' % self.key)

        def __exit__(self, *args):
            data.append('exit %s' % self.key)

    decorator = decorator_of_context_manager(Context)

    @decorator('maras')  # the '@decorator' application was garbled in the source
    def decorated():
        data.append('inside maras')

    assert_eq('Dummy context', decorator.__doc__)
    decorated()
    assert_eq(['enter maras', 'inside maras', 'exit maras'], data)

    class NoDocString:
        def __enter__(self):
            pass

        def __exit__(self, *args):
            pass

    assert_eq('Decorator that runs the inner function in the context of {}'.format(NoDocString),
              decorator_of_context_manager(NoDocString).__doc__)
class LeafPattern(BasePattern):
    def __init__(self, type=None, content=None, name=None):
        if type is not None:
            assert 0 <= type < 256, type
        if content is not None:
            assert isinstance(content, str), repr(content)
        self.type = type
        self.content = content
        self.name = name

    def match(self, node, results=None):
        if not isinstance(node, Leaf):
            return False
        return BasePattern.match(self, node, results)

    def _submatch(self, node, results=None):
        return self.content == node.value
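# Illustrative match, assuming lib2to3-style Leaf/BasePattern and token
# types are in scope:
#
#   from lib2to3.pgen2 import token
#   from lib2to3.pytree import Leaf
#   pat = LeafPattern(type=token.NAME, content='self', name='obj')
#   results = {}
#   pat.match(Leaf(token.NAME, 'self'), results)   # True; results['obj'] set
#   pat.match(Leaf(token.NAME, 'cls'))             # False: content differs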
def valid_pypi_name(package_spec: str) -> Optional[str]:
    try:
        package_req = Requirement(package_spec)
    except InvalidRequirement:
        return None
    if package_req.url or package_req.name.endswith(ARCHIVE_EXTENSIONS):
        return None
    return canonicalize_name(package_req.name)
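# Quick behavioral check (assumes the packaging library is installed and
# ARCHIVE_EXTENSIONS holds suffixes such as '.zip' and '.tar.gz'):
assert valid_pypi_name('Django>=4.2') == 'django'              # canonicalized
assert valid_pypi_name('pkg @ https://x.org/p.whl') is None    # URL specs rejected
assert valid_pypi_name('not a valid req!') is None             # unparsable spec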
def test_sink(input_dataframe, feature_set):
    client = SparkClient()
    client.conn.conf.set('spark.sql.sources.partitionOverwriteMode', 'dynamic')
    feature_set_df = feature_set.construct(input_dataframe, client)
    target_latest_df = OnlineFeatureStoreWriter.filter_latest(
        feature_set_df, id_columns=[key.name for key in feature_set.keys])
    columns_sort = feature_set_df.schema.fieldNames()
    s3config = Mock()
    s3config.mode = 'overwrite'
    s3config.format_ = 'parquet'
    s3config.get_options = Mock(return_value={'path': 'test_folder/historical/entity/feature_set'})
    s3config.get_path_with_partitions = Mock(return_value='test_folder/historical/entity/feature_set')
    historical_writer = HistoricalFeatureStoreWriter(db_config=s3config, interval_mode=True)
    online_config = Mock()
    online_config.mode = 'overwrite'
    online_config.format_ = 'parquet'
    online_config.get_options = Mock(return_value={'path': 'test_folder/online/entity/feature_set'})
    online_writer = OnlineFeatureStoreWriter(db_config=online_config)
    writers = [historical_writer, online_writer]
    sink = Sink(writers)
    client.sql('CREATE DATABASE IF NOT EXISTS {}'.format(historical_writer.database))
    sink.flush(feature_set, feature_set_df, client)
    historical_result_df = client.read(
        s3config.format_,
        path=s3config.get_path_with_partitions(feature_set.name, feature_set_df))
    online_result_df = client.read(online_config.format_,
                                   **online_config.get_options(feature_set.name))
    assert (sorted(feature_set_df.select(*columns_sort).collect())
            == sorted(historical_result_df.select(*columns_sort).collect()))
    assert (sorted(target_latest_df.select(*columns_sort).collect())
            == sorted(online_result_df.select(*columns_sort).collect()))
    shutil.rmtree('test_folder')
class PointnetSAModuleCenters(nn.Module):
    def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None,
                 nsample: int = None, bn: bool = True, use_xyz: bool = True,
                 pooling: str = 'max', sigma: float = None, normalize_xyz: bool = False,
                 sample_uniformly: bool = False, ret_unique_cnt: bool = False):
        super().__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.pooling = pooling
        self.mlp_module = None
        self.use_xyz = use_xyz
        self.sigma = sigma
        if self.sigma is None:
            self.sigma = self.radius / 2
        self.normalize_xyz = normalize_xyz
        self.ret_unique_cnt = ret_unique_cnt
        if npoint is not None:
            self.grouper = pointnet2_utils.QueryAndGroup(
                radius, nsample, use_xyz=use_xyz, ret_grouped_xyz=True,
                normalize_xyz=normalize_xyz, sample_uniformly=sample_uniformly,
                ret_unique_cnt=ret_unique_cnt)
        else:
            self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
        mlp_spec = mlp
        if use_xyz and len(mlp_spec) > 0:
            mlp_spec[0] += 3
        self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)

    def forward(self, xyz: torch.Tensor, features: torch.Tensor,
                centers: torch.Tensor) -> torch.Tensor:
        new_xyz = centers
        if not self.ret_unique_cnt:
            grouped_features, grouped_xyz = self.grouper(xyz, new_xyz, features)
        else:
            # The source only handled ret_unique_cnt=False and returned None
            # otherwise; this branch assumes the grouper's 3-tuple return.
            grouped_features, grouped_xyz, unique_cnt = self.grouper(xyz, new_xyz, features)
        new_features = self.mlp_module(grouped_features)
        if self.pooling == 'max':
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
        elif self.pooling == 'avg':
            new_features = F.avg_pool2d(new_features, kernel_size=[1, new_features.size(3)])
        elif self.pooling == 'rbf':
            rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1, keepdim=False) / (self.sigma ** 2) / 2)
            new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample)
        new_features = new_features.squeeze(-1)
        if not self.ret_unique_cnt:
            return new_features
        return new_features, unique_cnt
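# Shape sketch (illustrative): features are grouped around the provided
# centers, run through the shared MLP, then pooled over nsample neighbors:
#
#   sa = PointnetSAModuleCenters(mlp=[128, 128, 256], npoint=256,
#                                radius=0.3, nsample=16)
#   # xyz: (B, N, 3), features: (B, 128, N), centers: (B, 256, 3)
#   out = sa(xyz, features, centers)   # -> (B, 256, 256) after pool + squeeze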
class QlColoredFormatter(QlBaseFormatter):
    __level_color = {'WARNING': COLOR.YELLOW,
                     'INFO': COLOR.BLUE,
                     'DEBUG': COLOR.MAGENTA,
                     'CRITICAL': COLOR.CRIMSON,
                     'ERROR': COLOR.RED}

    def get_level_tag(self, level: str) -> str:
        s = super().get_level_tag(level)
        return f'{self.__level_color[level]}{s}{COLOR.DEFAULT}'

    def get_thread_tag(self, tid: str) -> str:
        s = super().get_thread_tag(tid)
        return f'{COLOR.GREEN}{s}{COLOR.DEFAULT}'