function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def _namedtuple(cls):
    """Build a ``namedtuple`` type for a repo class.

    The type is named ``<Label>Rev`` (e.g. ``HgRev``) and its fields are the
    first element of each entry in ``cls.fields``.
    """
    type_name = cls.label.capitalize() + "Rev"
    field_names = [f[0] for f in cls.fields]
    return namedtuple(type_name, field_names)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def status(self):
    """Return the repository's working-tree status.

    Abstract stub: concrete VCS backends override this.

    :returns: str
    """
    pass
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def diff(self):
    """Return a diff of uncommitted changes.

    Abstract stub: concrete VCS backends override this.

    :returns: str
    """
    pass
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def branch(self):
    """Return the current branch name.

    Abstract stub: concrete VCS backends override this.

    :returns: str
    """
    pass
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def last_commit(self):
    """Return the most recent log entry.

    :returns: the first record yielded by :meth:`log_iter`
    """
    # BUG FIX: ``.next()`` is Python-2-only; ``next(...)`` works on 2.6+ and 3.
    return next(self.log_iter(maxentries=1))
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def itersplit_to_fields(self, _str):
    """Split a raw record on ``self.fsep`` and build a ``self._tuple`` from it.

    :param _str: raw log record string; run through ``self.preparse``
        first when a preparse callable is configured
    :returns: an instance of ``self._tuple``
    """
    if self.preparse:
        _str = self.preparse(_str)
    _fields = itersplit(_str, self.fsep)
    try:
        # Pad/truncate the split values against the tuple's field names.
        values = (
            t[1] for t in izip_longest(self._tuple._fields, _fields))
        return self._tuple(*values)
    # BUG FIX: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception (still logs and re-raises).
    except Exception:
        log.error(self._tuple)
        log.error(_fields)
        raise
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def log_iter(self, maxentries=None, template=None, **kwargs):
    """Yield parsed log records from the VCS log command.

    :param maxentries: maximum number of log entries (None for all)
    :param template: log template; defaults to ``self.template``
    :param kwargs: passed through to ``self.log``
    :yields: records produced by ``self._parselog`` for each non-empty
        chunk of the log output (split on ``self.lsep``)
    """
    template = repr(template or self.template)
    op = self.log(n=maxentries, template=template, **kwargs)
    if not op:
        return
    # NOTE(review): looks like leftover debugging output — confirm intended.
    print(op)
    for l in itersplit(op, self.lsep):
        l = l.strip()
        if not l:
            continue
        try:
            yield self._parselog(l,)
        except Exception:
            log.error("%s %r" % (str(self), l))
            raise
    return
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def full_report(self):
    """Yield a multi-line report: origin line, last commit, status, full log."""
    yield ''
    # BUG FIX: ``.next()`` is Python-2-only; use the ``next()`` builtin.
    yield "# %s" % next(self.origin_report())
    yield "%s [%s]" % (self.last_commit, self)
    if self.status:
        for l in self.status.split('\n'):
            yield l
    yield ''
    if hasattr(self, 'log_iter'):
        for r in self.log_iter():
            yield r
    return
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def eggname(self):
    """Return the file name (basename) of ``self.fpath``."""
    _, tail = os.path.split(self.fpath)
    return tail
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def to_normal_url(cls, url):
    """Identity transform: this backend already uses plain URLs."""
    return url
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def sh_report(self):
    """Yield a one-line shell command that would re-clone this repository.

    The line is commented out with ``#`` when no remote URL is known.
    """
    parts = []
    if not self.remote_url:
        parts.append('#')
    parts.append(self.label)
    parts.append(self.clone_cmd)
    parts.append(repr(self.remote_url))  # TODO: shell quote?
    parts.append(repr(self.relpath))
    yield ' '.join(parts)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def origin_report(self):
    """Yield a single ``label://fpath = remote_url`` line for this repo."""
    line = "%s://%s = %s" % (
        self.label,
        self.fpath,
        self.remote_url,
        # revid
    )
    yield line
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def hgsub_report(self):
    """Yield an ``.hgsub`` entry for this repo; nothing for the root ('.')."""
    if self.relpath == '.':
        return
    entry = "%s = [%s]%s" % (
        self.fpath.lstrip('./'), self.label, self.remote_url)
    yield entry
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def __unicode__(self):
    """Return the ``label://fpath`` form of this repository."""
    return '{0}://{1}'.format(self.label, self.fpath)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def mtime(self, fpath=None):
    """Return the formatted UTC modification time of a path.

    :param fpath: path to stat; defaults to ``self.fpath``
    """
    path = fpath or self.fpath
    stamp = os.path.getmtime(path)
    return dtformat(datetime.datetime.utcfromtimestamp(stamp))
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def ctime(self, fpath=None):
    """Return the formatted UTC creation/metadata-change time of a path.

    :param fpath: path to stat; defaults to ``self.fpath``
    """
    path = fpath or self.fpath
    stamp = os.path.getctime(path)
    return dtformat(datetime.datetime.utcfromtimestamp(stamp))
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def find_symlinks(self):
    """List symlinks under the repo as ``path -> target`` lines (via ``find``)."""
    find_cmd = "find . -type l -printf '%p -> %l\n'"
    return self.sh(find_cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def sh(self, cmd, ignore_error=False, cwd=None, *args, **kwargs):
    """Run ``cmd`` in a shell rooted at this repo and return its output.

    Delegates to the module-level ``sh`` helper (the method name shadows
    it, but the global resolves inside the body); stderr is merged into
    stdout.

    :param cmd: shell command string
    :param ignore_error: when True, a nonzero exit status is tolerated
    :param cwd: working directory; defaults to ``self.fpath``
    """
    kwargs.update({
        'shell': True,
        'cwd': cwd or self.fpath,
        'stderr': subprocess.STDOUT,
        'stdout': subprocess.PIPE})
    log.debug('cmd: %s %s' % (cmd, kwargs))
    return sh(cmd, ignore_error=ignore_error, **kwargs)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def unique_id(self):
    """Unique key for this repository; the filesystem path serves as the id."""
    return self.fpath
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def status(self):
    """Return ``hg status`` output with trailing whitespace removed."""
    out = self.sh('hg status')
    return out.rstrip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def remote_url(self):
    """Return the configured default pull/push path (``paths.default``)."""
    out = self.sh('hg showconfig paths.default', ignore_error=True)
    return out.strip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def remote_urls(self):
    """Return all configured remote paths (``hg showconfig paths``)."""
    cmd = 'hg showconfig paths'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def diff(self):
    """Return a git-style diff of the working directory (``hg diff -g``)."""
    cmd = 'hg diff -g'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def current_id(self):
    """Return the short changeset id, without the dirty-working-dir ``+``."""
    out = self.sh('hg id -i').rstrip()
    return out.rstrip('+')  # TODO
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def branch(self):
    """Return the current branch name (``hg branch``)."""
    cmd = 'hg branch'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def loggraph(self):
    """Return an ASCII revision graph (``hg log --graph``)."""
    cmd = 'hg log --graph'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def serve(self):
    """Start mercurial's built-in web server (``hg serve``)."""
    cmd = 'hg serve'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def _get_url_scheme_regexes():
    """Collect mercurial URL-scheme expansions (``schemes.*``) as regexes.

    :returns: list of ``(scheme_key, pattern, compiled_regex)`` tuples,
        sorted longest-key-first so more specific schemes match before
        shorter ones.
    """
    # NOTE(review): shells out to ``hg``/``grep``; assumes one
    # "schemes.<key>=<pattern>" entry per line — confirm on this platform.
    output = sh("hg showconfig | grep '^schemes.'").split('\n')
    log.debug(output)
    schemes = (
        l.split('.', 1)[1].split('=')
        for l in output if '=' in l)
    # '{1}' in a pattern becomes a capture group; a trailing '(.*)' always
    # captures the remainder of the URL.
    regexes = sorted(
        ((k, v, re.compile(v.replace('{1}', '(.*)')+'(.*)'))
         for k, v in schemes),
        key=lambda x: (len(x[0]), x),
        reverse=True)
    return regexes
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def to_hg_scheme_url(cls, url):
    """Convert a plain URL to its local mercurial URL-scheme shorthand.

    example::

        # schemes.gh = git://github.com/
        >> remote_url = 'git://github.com/westurner/dotfiles'
        >> to_hg_scheme_url(remote_url)
        << gh://westurner/dotfiles

    :param url: a "normal" URL (e.g. ``git://github.com/user/repo``)
    :returns: the ``scheme://...`` shorthand, or None when no scheme matches
    """
    regexes = cls._get_url_scheme_regexes()
    for scheme_key, pattern, regex in regexes:
        match = regex.match(url)
        if match is None:
            continue
        groups = match.groups()
        # BUG FIX: ``str.join`` takes a single iterable; the original passed
        # several positional arguments, raising TypeError on every match.
        # Rebuild the shorthand from the matched remainder(s) so the result
        # matches the documented example.
        if len(groups) == 2:
            return u''.join((scheme_key, '://', groups[0], '/', groups[1]))
        elif len(groups) == 1:
            return u''.join((scheme_key, '://', groups[0]))
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def to_normal_url(cls, url):
    """Convert a local mercurial URL-scheme URL back to a normal URL.

    example::

        # schemes.gh = git://github.com/
        # remote_url = "gh://westurner/dotfiles"
        >> to_normal_url(remote_url)
        << 'git://github.com/westurner/dotfiles'

    :param url: a possibly scheme-shortened URL
    :returns: the expanded URL, or ``url`` unchanged when no scheme matches
    """
    regexes = cls._get_url_scheme_regexes()
    _url = url[:]
    for scheme_key, pattern, regex in regexes:
        # BUG FIX: the original used ``str.lstrip(scheme_key)``, which strips
        # *characters* (not a prefix) — e.g. scheme "gh" would also eat the
        # leading "h" of a remainder like "hg-repo". Slice the prefix instead.
        prefix = scheme_key + '://'
        if not _url.startswith(prefix):
            continue
        rest = _url[len(prefix):]
        if '{1}' in pattern:
            _url = pattern.replace('{1}', rest)
        else:
            _url = pattern + rest
    return _url
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def unique_id(self):
    """Unique key for this git repository; the path serves as the id."""
    return self.fpath
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def status(self):
    """Return short-format git status output (``git status -s``)."""
    cmd = 'git status -s'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def remote_url(self):
    """Return the ``origin`` remote URL (empty string when unset)."""
    out = self.sh('git config remote.origin.url', ignore_error=True)
    return out.strip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def remote_urls(self):
    """Return every ``url`` entry from the repo's git config."""
    out = self.sh('git config -l | grep "url"', ignore_error=True)
    return out.strip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def current_id(self):
    """Return the short hash of HEAD."""
    out = self.sh('git rev-parse --short HEAD')
    return out.rstrip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def branch(self):
    """Return ``git branch`` output for this repository."""
    cmd = 'git branch'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def loggraph(self):
    """Return an ASCII revision graph (``git log --graph``)."""
    cmd = 'git log --graph'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def last_commit(self):
    """Return the most recent log entry.

    :returns: the first record yielded by :meth:`log_iter`
    """
    # BUG FIX: ``.next()`` is Python-2-only; ``next(...)`` works on 2.6+ and 3.
    return next(self.log_iter(maxentries=1))
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def unpushed(self):
    """List commits on master that exist on no remote's master branch."""
    cmd = "git log master --not --remotes='*/master'"
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def unique_id(self):
    """Unique key for this bzr repository; the path serves as the id."""
    return self.fpath
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def status(self):
    """Return ``bzr status`` output for this repository."""
    cmd = 'bzr status'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def remote_url(self):
    """Return the bzr parent branch URL (empty when none is configured)."""
    cmd = """bzr info | egrep '^ parent branch:' | awk '{ print $3 }'"""
    return self.sh(cmd, ignore_error=True)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def current_id(self):
    """Return the current bzr revision id (via ``bzr version-info``)."""
    cmd = "bzr version-info --custom --template='{revision_id}'"
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def branch(self):
    """Return the branch nickname (``bzr nick``)."""
    cmd = 'bzr nick'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def _logmessage_transform(cls, s, by=2):
    """Drop ``by`` leading indent characters from a log line.

    Surrounding newlines are stripped either way; lines shorter than
    ``by`` are returned unindented.
    """
    if len(s) >= by:
        s = s[by:]
    return s.strip('\n')
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def _parselog(self, r):
    """
    Parse bazaar log file format

    ::

        $ bzr log -l1
        ------------------------------------------------------------
        revno: 1
        committer: ubuntu <ubuntu@ubuntu-desktop>
        branch nick: ubuntu-desktop /etc repository
        timestamp: Wed 2011-10-12 01:16:55 -0500
        message:
          Initial commit

    :param r: one raw log entry (text between separators)
    :yields: a dict of parsed fields (keys mapped via ``self.field_trans``);
        nothing when the entry cannot be parsed
    """
    def __parselog(entry):
        # Accumulates "attr: value" pairs; free-form message lines are
        # buffered and emitted at the end under the ``desc`` key.
        bufname = None
        buf = deque()
        # NOTE(review): looks like leftover debugging output — confirm.
        print(entry)
        if entry == ['']:
            return
        for l in itersplit(entry, '\n'):
            if not l:
                continue
            mobj = self.logrgx.match(l)
            if not mobj:
                # "  - Log message"
                buf.append(self._logmessage_transform(l))
            if mobj:
                mobjlen = len(mobj.groups())
                if mobjlen == 2:
                    # "attr: value"
                    attr, value = mobj.groups()
                    if attr == 'message':
                        bufname = 'desc'
                    else:
                        attr = self.field_trans.get(attr, attr)
                        yield (self.field_trans.get(attr, attr), value)
                else:
                    raise Exception()
        if bufname is not None:
            if len(buf):
                buf.pop()
            # drop the leading buffered line when more than one remains
            len(buf) > 1 and buf.popleft()
            yield (bufname, '\n'.join(buf))
        return
    kwargs = dict(__parselog(r))  # FIXME
    if kwargs:
        if 'tags' not in kwargs:
            kwargs['tags'] = tuple()
        else:
            kwargs['tags'].split(' ')  # TODO
        if 'branchnick' not in kwargs:
            kwargs['branchnick'] = None
        try:
            yield kwargs
            # TODO
            # return self._tuple(**kwargs)
        except:
            log.error(r)
            log.error(kwargs)
            raise
    else:
        log.error("failed to parse: %r" % r)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def unique_id(self):
    """Return the repository UUID from ``svn info``, or None when unavailable."""
    cmdo = self.sh('svn info | grep "^Repository UUID"', ignore_error=True)
    if not cmdo:
        return None
    return cmdo.split(': ', 1)[1].rstrip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def status(self):
    """Return ``svn status`` output for this working copy."""
    cmd = 'svn status'
    return self.sh(cmd)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def remote_url(self):
    """Return the repository root URL parsed from ``svn info``."""
    info = self.sh('svn info | grep "^Repository Root:"')
    return info.split(': ', 1)[1].strip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def current_id(self):
    """Return the working-copy revision number parsed from ``svn info``."""
    out = self.sh('svn info | grep "^Revision: "')
    return out.split(': ', 1)[1].strip()
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def _last_commit(self): """ ::
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def __last_commit(self):
    """Parse author/revision/date of the last change from ``svn info``.

    Example ``svn info`` output (lines 7-9 carry the fields)::

        Path: .
        URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
        Repository Root: http://python-dlp.googlecode.com/svn
        Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
        Revision: 378
        Node Kind: directory
        Schedule: normal
        Last Changed Author: chimezie
        Last Changed Rev: 378
        Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)

    :returns: ``(datestr, (rev, author, None, None))``, or None when
        ``svn info`` produced no output.
    """
    op = self.sh("svn info")
    if not op:
        return None
    author_line, rev_line, date_line = op.split('\n')[7:10]
    author = author_line.split(': ', 1)[1].strip()
    rev = rev_line.split(': ', 1)[1].strip()
    datestr = date_line.split(': ', 1)[1].split('(', 1)[0].strip()
    return datestr, (rev, author, None, None)
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def last_commit(self):
    """Return the most recent log entry.

    :returns: the first record yielded by :meth:`log_iter`
    """
    # BUG FIX: ``.next()`` is Python-2-only; ``next(...)`` works on 2.6+ and 3.
    return next(self.log_iter())
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def search_upwards(self, fpath=None, repodirname='.svn', upwards={}):
    """
    Traverse filesystem upwards, searching for .svn directories
    with matching UUIDs

    repo/.svn
    repo/dir1/.svn
    repo/dir1/dir2/.svn

    >> search_upwards('repo/')
    << 'repo/'
    >> search_upwards('repo/dir1')
    << 'repo/'
    >> search_upwards('repo/dir1/dir2')
    << 'repo/'

    repo/.svn
    repo/dirA/
    repo/dirA/dirB/.svn

    >> search_upwards('repo/dirA')
    << 'repo/'
    >> search_upwards('repo/dirA/dirB')
    >> 'repo/dirB')

    :param fpath: starting path (defaults to ``self.fpath``)
    :param repodirname: VCS control-directory name to look for
    :param upwards: memo of repodir -> UUID lookups shared across calls
        # NOTE(review): mutable default used as a cross-call cache when
        # the caller does not supply one — confirm this sharing is intended.
    :returns: the ``SvnRepository`` rooted at the highest ancestor with a
        matching UUID (or ``self`` when none is found)
    """
    fpath = fpath or self.fpath
    uuid = self.unique_id
    last_path = self
    path_comp = fpath.split(os.path.sep)
    # Walk ancestor paths: [0:-1], [0:-2], ... [0:-1*len(path_comp)]
    for n in xrange(1, len(path_comp)-1):
        checkpath = os.path.join(*path_comp[0:-1 * n])
        repodir = os.path.join(checkpath, repodirname)
        upw_uuid = upwards.get(repodir)
        if upw_uuid:
            if upw_uuid == uuid:
                last_path = SvnRepository(checkpath)
                continue
            else:
                break
        elif os.path.exists(repodir):
            repo = SvnRepository(checkpath)
            upw_uuid = repo.unique_id
            upwards[repodir] = upw_uuid
            # TODO: match on REVISION too
            if upw_uuid == uuid:
                last_path = repo
                continue
            else:
                break
    return last_path
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def listdir_find_repos(where):
    """Walk ``where`` depth-first with ``os.listdir`` and yield repositories.

    A directory whose name is a key of ``REPO_PREFIXES`` (e.g. ``.hg``,
    ``.git``) marks its parent as a repository of that type.

    :param where: root directory to scan
    :yields: repository objects constructed via ``REPO_PREFIXES``
    """
    stack = deque([(convert_path(where), '')])
    while stack:
        where, prefix = stack.pop()
        try:
            for name in sorted(os.listdir(where), reverse=True):
                fn = os.path.join(where, name)
                if os.path.isdir(fn):
                    if name in REPO_PREFIXES:
                        # BUG FIX: ``fn.rstrip(name)[:-1]`` strips a *character
                        # set*, not the suffix — a parent path ending in any of
                        # the control-dir's letters got truncated. The repo
                        # root is simply the containing directory.
                        repo = REPO_PREFIXES[name](os.path.dirname(fn))
                        yield repo
                    stack.append((fn, prefix + name + '/'))
        except OSError as e:
            if e.errno == errno.EACCES:
                log.error("Skipping: %s", e)
            else:
                raise
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def find_unique_repos(where):
    """Yield one repository object per unique working copy under ``where``.

    Repos that support ``search_upwards`` (svn) are resolved to their true
    root first, so nested checkout directories are deduplicated; each root
    path is yielded at most once.

    :param where: directory to scan
    :yields: repository objects
    """
    repos = Dict()
    # repodir -> UUID memo shared by all search_upwards() calls in this scan
    path_uuids = Dict()
    log.debug("find_unique_repos(%r)" % where)
    # NOTE(review): ``find_find_repos`` is presumably defined elsewhere in
    # this module — confirm the name.
    for repo in find_find_repos(where):
        # log.debug(repo)
        repo2 = (hasattr(repo, 'search_upwards')
                 and repo.search_upwards(upwards=path_uuids))
        if repo2:
            if repo2 == repo:
                continue
            else:
                repo = repo2
        if (repo.fpath not in repos):
            log.debug("%s | %s | %s"
                      % (repo.prefix, repo.fpath, repo.unique_id))
            repos[repo.fpath] = repo
            yield repo
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def do_repo_report(repos, report='full', output=sys.stdout, *args, **kwargs):
    """Print the requested report for each repo to ``output``.

    :param repos: iterable of repository objects
    :param report: key into the module-level ``REPORT_TYPES`` mapping
    :param output: writable stream for the report lines
    :raises Exception: when ``report`` is not a recognized report type
    :yields: each repo after its report has been printed
    """
    for i, repo in enumerate(repos):
        # BUG FIX: ``.next()`` is Python-2-only; use the ``next()`` builtin.
        log.debug(str((i, next(repo.origin_report()))))
        try:
            if repo is not None:
                reportfunc = REPORT_TYPES.get(report)
                if reportfunc is None:
                    raise Exception("Unrecognized report type: %r (%s)" %
                                    (report, ', '.join(REPORT_TYPES.keys())))
                for l in reportfunc(repo, *args, **kwargs):
                    print(l, file=output)
        except Exception as e:
            log.error(repo)
            log.error(report)
            log.error(e)
            raise
        yield repo
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def fullname_to_shortname(fullname):
    """Abbreviate a path: $HOME becomes '~' and leading './' chars are dropped."""
    home = os.environ['HOME']
    short = fullname.replace(home, '~')
    return short.lstrip('./')
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def main():
    """
    mainfunc: parse command-line options, scan paths for repositories,
    and emit the requested report(s).

    Options: -s/--scan (paths), -r/--report (pip|full|status|hgsub|thg),
    --thg (thg-reporegistry.xml), --template, -v/--verbose, -q/--quiet.
    With no reports requested, a 'sh' (re-clone script) report of '.' is
    printed.
    """
    import optparse
    import logging

    prs = optparse.OptionParser(usage="./")

    prs.add_option('-s', '--scan',
                   dest='scan',
                   action='append',
                   default=[],
                   help='Path(s) to scan for repositories')
    prs.add_option('-r', '--report',
                   dest='reports',
                   action='append',
                   default=[],
                   help='pip || full || status || hgsub || thg')
    prs.add_option('--thg',
                   dest='thg_report',
                   action='store_true',
                   help='Write a thg-reporegistry.xml file to stdout')
    prs.add_option('--template',
                   dest='report_template',
                   action='store',
                   help='Report template')
    prs.add_option('-v', '--verbose',
                   dest='verbose',
                   action='store_true',)
    prs.add_option('-q', '--quiet',
                   dest='quiet',
                   action='store_true',)
    (opts, args) = prs.parse_args()

    if not opts.quiet:
        _format = None
        _format = "%(levelname)s\t%(message)s"
        # _format = "%(message)s"
        logging.basicConfig(format=_format)
    # NOTE(review): this local ``log`` shadows the module-level logger.
    log = logging.getLogger('repos')
    if opts.verbose:
        log.setLevel(logging.DEBUG)
    elif opts.quiet:
        log.setLevel(logging.ERROR)
    else:
        log.setLevel(logging.INFO)

    if not opts.scan:
        opts.scan = ['.']

    if opts.scan:
        # if not opts.reports:
        #     opts.reports = ['pip']
        if opts.reports or opts.thg_report:
            opts.reports = [s.strip().lower() for s in opts.reports]
            if 'thg' in opts.reports:
                opts.thg_report = True
                opts.reports.remove('thg')
            # repos = []
            # for _path in opts.scan:
            #     repos.extend(find_unique_repos(_path))
            log.debug("SCANNING PATHS: %s" % opts.scan)
            repos = chain(*imap(find_unique_repos, opts.scan))
            if opts.reports and opts.thg_report:
                repos = list(repos)  # TODO: tee
            if opts.reports:
                for report in opts.reports:
                    list(do_repo_report(repos, report=report))
            if opts.thg_report:
                import sys
                do_tortoisehg_report(repos, output=sys.stdout)
        else:
            opts.scan = '.'
            list(do_repo_report(
                find_unique_repos(opts.scan),
                report='sh'))
westurner/pkgsetcomp
[ 1, 1, 1, 1, 1400665453 ]
def __init__( self, resource_type: str, resource_names: List[str], producer: Callable, dependencies: List[str],
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def type(self) -> str:
    """The type of resource produced by this resource group's producer.

    Must be one of `RESOURCE_TYPES`.
    """
    return self._resource_type
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def names(self) -> List[str]:
    """The long names (including type) of all resources in this group."""
    prefix = self._resource_type
    return [f"{prefix}.{resource}" for resource in self._resource_names]
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def producer(self) -> Any:
    """The method or object that produces this group of resources."""
    return self._producer
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def dependencies(self) -> List[str]:
    """The long names (including type) of dependencies for this group."""
    return self._dependencies
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def __repr__(self) -> str:
    """Debug representation listing the resources this group produces."""
    return "ResourceProducer({})".format(", ".join(self))
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def __init__(self):
    """Initialize empty resource-manager state."""
    # Maps a resource's long name to the resource group that provides it;
    # one-to-many, since a group may contain several resources.
    self._resource_group_map = {}
    # Counter handing out unique ids to "null" producers: producers that
    # consume resources but produce none externally (population
    # initializers as of 9/26/2019).
    self._null_producer_count = 0
    # Lazily-built (and cached) dependency graph and its topological sort.
    self._graph = None
    self._sorted_nodes = None
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def name(self) -> str:
    """The name of this manager."""
    return "resource_manager"
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def graph(self) -> nx.DiGraph:
    """Lazily build, cache, and return the networkx resource-pool graph."""
    if self._graph is None:
        self._graph = self._to_graph()
    return self._graph
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def sorted_nodes(self):
    """Topological sort of the resource graph (computed once, then cached).

    Notes
    -----
    Topological sorts are not stable. Be wary of depending on order
    where you shouldn't.

    :raises ResourceError: when the resource graph contains a cycle.
    """
    if self._sorted_nodes is None:
        try:
            order = nx.algorithms.topological_sort(self.graph)
            self._sorted_nodes = list(order)
        except nx.NetworkXUnfeasible:
            raise ResourceError(
                f"The resource pool contains at least one cycle: "
                f"{nx.find_cycle(self.graph)}."
            )
    return self._sorted_nodes
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def _get_resource_group( self, resource_type: str, resource_names: List[str], producer: MethodType, dependencies: List[str],
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def _to_graph(self) -> nx.DiGraph:
    """Constructs the full resource graph from information in the groups.

    Components specify local dependency information during setup time.
    When the resources are required at population creation time, the
    graph is generated as all resources must be registered at that
    point.

    Notes
    -----
    We are taking advantage of lazy initialization to sneak this in
    between post setup time when the
    :class:`values manager <vivarium.framework.values.ValuesManager>`
    finalizes pipeline dependencies and population creation time.

    :returns: a directed graph with one node per resource group and an
        edge from each dependency's group to the dependent group
    """
    resource_graph = nx.DiGraph()
    # networkx ignores duplicates
    resource_graph.add_nodes_from(self._resource_group_map.values())
    for resource_group in resource_graph.nodes:
        for dependency in resource_group.dependencies:
            if dependency not in self._resource_group_map:
                # Warn here because this sometimes happens naturally
                # if observer components are missing from a simulation.
                logger.warning(
                    f"Resource {dependency} is not provided by any component but is needed to "
                    f"compute {resource_group}."
                )
                continue
            dependency_group = self._resource_group_map[dependency]
            resource_graph.add_edge(dependency_group, resource_group)
    return resource_graph
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def __repr__(self):
    """One line per resource group: "produced resources : dependencies"."""
    out = {}
    for resource_group in set(self._resource_group_map.values()):
        out[", ".join(resource_group)] = ", ".join(resource_group.dependencies)
    return "\n".join(f"{produced} : {depends}"
                     for produced, depends in out.items())
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def __init__(self, manager: ResourceManager):
    """Store the backing ResourceManager this interface delegates to."""
    self._manager = manager
ihmeuw/vivarium
[ 40, 10, 40, 6, 1499718083 ]
def to_caffe(tfW, name=None, shape=None, color_layer='', conv_fc_transitionals=None, info=DummyDict()):
    """Convert a TensorFlow weight array to Caffe layout.

    :param tfW: weight ndarray. 4-D arrays are treated as conv kernels
        # NOTE(review): assumed (H, W, in, out) from the transpose below —
        # confirm.
    :param name: layer name; drives channel flipping and fc->conv handling
    :param shape: unused here — TODO confirm it is needed by callers
    :param color_layer: layer name whose 3-channel input should be
        color-flipped (in addition to 'conv1_1'/'conv1')
    :param conv_fc_transitionals: dict of layer name -> caffe shape for
        fc layers that sit on a conv->fc boundary
    :param info: mapping that records what was done per layer
        # NOTE(review): mutable default shared across calls — presumably
        # DummyDict discards writes; confirm.
    :returns: the weight array rearranged for Caffe
    """
    assert conv_fc_transitionals is None or name is not None
    if tfW.ndim == 4:
        # First conv layer with RGB input: flip channel order (RGB<->BGR).
        if (name == 'conv1_1' or name == 'conv1' or name == color_layer) and tfW.shape[2] == 3:
            tfW = tfW[:, :, ::-1]
            info[name] = 'flipped'
        # TF (H, W, in, out) -> Caffe (out, in, H, W)
        cfW = tfW.transpose(3, 2, 0, 1)
        return cfW
    else:
        if conv_fc_transitionals is not None and name in conv_fc_transitionals:
            # fc layer that follows a conv layer: undo TF's flattening
            # order before flattening the Caffe way.
            cf_shape = conv_fc_transitionals[name]
            tf_shape = (cf_shape[2], cf_shape[3], cf_shape[1], cf_shape[0])
            cfW = tfW.reshape(tf_shape).transpose(3, 2, 0, 1).reshape(cf_shape[0], -1)
            info[name] = 'fc->c transitioned with caffe shape {}'.format(cf_shape)
            return cfW
        else:
            # Plain fc layer: transpose is sufficient.
            return tfW.T
gustavla/self-supervision
[ 29, 5, 29, 7, 1492017767 ]
def load_caffemodel(path, session, prefix='', ignore=set(), conv_fc_transitionals=None, renamed_layers=DummyDict(), color_layer='', verbose=False, pre_adjust_batch_norm=False):
    """Load caffemodel weights (stored as HDF5) into TF variables in ``session``.

    :param path: HDF5 file with a ``/data`` group of layer blobs ('0' =
        weights, '1' = biases, 'batch_<layer>' = batch-norm stats)
    :param session: TF session used to run the assign ops
    :param prefix: prepended to each (possibly renamed) layer name
    :param ignore: layer names to skip
    :param conv_fc_transitionals: passed through to ``from_caffe``
    :param renamed_layers: caffe-name -> tf-name remapping
    :param color_layer: passed through to ``from_caffe``
    :param verbose: print a summary of what was loaded
    :param pre_adjust_batch_norm: fold batch-norm statistics into the
        loaded weights/biases
    :returns: list of human-readable strings describing each assignment
    """
    import tensorflow as tf
    def find_weights(name, which='weights'):
        # Locate the trainable variable named "<name>/<which>".
        for tw in tf.trainable_variables():
            if tw.name.split(':')[0] == name + '/' + which:
                return tw
        return None
    data = dd.io.load(path, '/data')
    assigns = []
    loaded = []
    info = {}
    for key in data:
        local_key = prefix + renamed_layers.get(key, key)
        if key not in ignore:
            bn_name = 'batch_' + key
            if '0' in data[key]:
                weights = find_weights(local_key, 'weights')
                if weights is not None:
                    W = from_caffe(data[key]['0'], name=key, info=info,
                                   conv_fc_transitionals=conv_fc_transitionals,
                                   color_layer=color_layer)
                    # BUG FIX: the original compared ``W.ndim`` (an int) to a
                    # shape *list*, which is always unequal, so the reshape ran
                    # unconditionally. Compare the actual shapes instead.
                    if list(W.shape) != weights.get_shape().as_list():
                        W = W.reshape(weights.get_shape().as_list())
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold batch norm into the weights: divide by sigma.
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        W /= sigma
                        init_str += ' batch-adjusted'
                    assigns.append(weights.assign(W))
                    loaded.append('{}:0 -> {}:weights{} {}'.format(
                        key, local_key, init_str, info.get(key, '')))
            if '1' in data[key]:
                biases = find_weights(local_key, 'biases')
                if biases is not None:
                    bias = data[key]['1']
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold batch norm into the bias: (b - mu) / sigma.
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        mu = bn_data['0'] / bn_data['2']
                        bias = (bias - mu) / sigma
                        init_str += ' batch-adjusted'
                    assigns.append(biases.assign(bias))
                    loaded.append('{}:1 -> {}:biases{}'.format(
                        key, local_key, init_str))
    session.run(assigns)
    if verbose:
        tprint('Loaded model from', path)
        for l in loaded:
            tprint('-', l)
    return loaded
gustavla/self-supervision
[ 29, 5, 29, 7, 1492017767 ]
def find_weights(name, which='weights'):
    """Return the trainable tf variable named ``<name>/<which>``, or None.

    When ``lax_naming`` (closure/module variable) is set, a suffix match
    is accepted instead of an exact name match.
    """
    target = name + '/' + which
    for tw in tf.trainable_variables():
        base = tw.name.split(':')[0]
        ok = base.endswith(target) if lax_naming else (base == target)
        if ok:
            return tw
    return None
gustavla/self-supervision
[ 29, 5, 29, 7, 1492017767 ]
def setUp(self):
    """Create the Report, its filters, and an inserted Prepared Report doc
    used by the tests."""
    self.report = frappe.get_doc({
        "doctype": "Report",
        "name": "Permitted Documents For User"
    })
    self.filters = {
        "user": "Administrator",
        "doctype": "Role"
    }
    self.prepared_report_doc = frappe.get_doc({
        "doctype": "Prepared Report",
        "report_name": self.report.name,
        "filters": json.dumps(self.filters),
        "ref_report_doctype": self.report.name
    }).insert()
frappe/frappe
[ 4495, 2418, 4495, 1493, 1307520856 ]
def data_path(*args):
    """
    Returns a path to dev data (DEV_DATA_PATH joined with *args).
    """
    parts = (DEV_DATA_PATH,) + args
    return os.path.join(*parts)
kmike/DAWG-Python
[ 49, 12, 49, 4, 1348177105 ]
def __init__(self):
    """Configure a single-node, clean-chain test with electrum server args."""
    super().__init__()
    self.setup_clean_chain = True
    self.num_nodes = 1
    self.extra_args = [bitcoind_electrum_args()]
BitcoinUnlimited/BitcoinUnlimited
[ 451, 230, 451, 63, 1397077380 ]
def setup_network(self, dummy = None):
    """Start the nodes without connecting them (single-node test)."""
    self.nodes = self.setup_nodes()
BitcoinUnlimited/BitcoinUnlimited
[ 451, 230, 451, 63, 1397077380 ]
def Wait():
    """Block until the debuggee stops (delegates to ``x64dbg.Wait``)."""
    x64dbg.Wait()
x64dbg/x64dbgpy
[ 1423, 71, 1423, 28, 1436823637 ]
def Stop():
    """Stop the debuggee (delegates to ``x64dbg.Stop``)."""
    x64dbg.Stop()
x64dbg/x64dbgpy
[ 1423, 71, 1423, 28, 1436823637 ]
def StepOver():
    """Step over the current instruction (delegates to ``x64dbg.StepOver``)."""
    x64dbg.StepOver()
x64dbg/x64dbgpy
[ 1423, 71, 1423, 28, 1436823637 ]
def SetBreakpoint(address):
    """Set a software breakpoint at ``address``.

    :returns: result of ``x64dbg.SetBreakpoint``
    """
    return x64dbg.SetBreakpoint(address)
x64dbg/x64dbgpy
[ 1423, 71, 1423, 28, 1436823637 ]
def SetHardwareBreakpoint(address, type = HardwareType.HardwareExecute):
    """Set a hardware breakpoint at ``address``.

    :param type: hardware breakpoint kind (defaults to execute); the name
        shadows the ``type`` builtin but is kept for API compatibility.
    :returns: result of ``x64dbg.SetHardwareBreakpoint``
    """
    return x64dbg.SetHardwareBreakpoint(address, type)
x64dbg/x64dbgpy
[ 1423, 71, 1423, 28, 1436823637 ]
def test_effects_layer_empty_wr(kls):
    """Round-trip (write then read) a default-constructed instance of ``kls``."""
    check_write_read(kls())
kmike/psd-tools
[ 888, 176, 888, 46, 1350188801 ]
def __init__(self):
    """Track how many times each block hash has been received."""
    super().__init__()
    # block sha256 -> number of times that block was received
    self.block_receive_map = defaultdict(int)
nlgcoin/guldencoin-official
[ 136, 50, 136, 34, 1439462804 ]
def on_block(self, message):
    """P2P callback: count each received block by its sha256 hash."""
    message.block.calc_sha256()
    self.block_receive_map[message.block.sha256] += 1
nlgcoin/guldencoin-official
[ 136, 50, 136, 34, 1439462804 ]
def set_test_params(self):
    """Single node, fresh chain, 800MB upload target."""
    self.num_nodes = 1
    self.setup_clean_chain = True
    self.extra_args = [["-maxuploadtarget=800"]]
    # Cache for utxos, as the listunspent may take a long time later in the test
    self.utxo_cache = []
nlgcoin/guldencoin-official
[ 136, 50, 136, 34, 1439462804 ]
def run_test(self):
    """Exercise -maxuploadtarget: old-block serving is capped (peers get
    disconnected), new blocks are always served, counters reset daily,
    and whitelisted peers bypass the limit."""
    # Before we connect anything, we first set the time on the node
    # to be in the past, otherwise things break because the CNode
    # time counters can't be reset backward after initialization
    old_time = int(time.time() - 2*60*60*24*7)
    self.nodes[0].setmocktime(old_time)
    # Generate some old blocks
    self.nodes[0].generate(130)
    # p2p_conns[0] will only request old blocks
    # p2p_conns[1] will only request new blocks
    # p2p_conns[2] will test resetting the counters
    p2p_conns = []
    for _ in range(3):
        p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
    # Now mine a big block
    mine_large_block(self.nodes[0], self.utxo_cache)
    # Store the hash; we'll request this later
    big_old_block = self.nodes[0].getbestblockhash()
    old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
    big_old_block = int(big_old_block, 16)
    # Advance to two days ago
    self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
    # Mine one more block, so that the prior block looks old
    mine_large_block(self.nodes[0], self.utxo_cache)
    # We'll be requesting this new block too
    big_new_block = self.nodes[0].getbestblockhash()
    big_new_block = int(big_new_block, 16)
    # p2p_conns[0] will test what happens if we just keep requesting the
    # the same big old block too many times (expect: disconnect)
    getdata_request = msg_getdata()
    getdata_request.inv.append(CInv(2, big_old_block))
    max_bytes_per_day = 800*1024*1024
    daily_buffer = 144 * 4000000
    max_bytes_available = max_bytes_per_day - daily_buffer
    success_count = max_bytes_available // old_block_size
    # 576MB will be reserved for relaying new blocks, so expect this to
    # succeed for ~235 tries.
    for i in range(success_count):
        p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].sync_with_ping()
        assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
    assert_equal(len(self.nodes[0].getpeerinfo()), 3)
    # At most a couple more tries should succeed (depending on how long
    # the test has been running so far).
    for i in range(3):
        p2p_conns[0].send_message(getdata_request)
    p2p_conns[0].wait_for_disconnect()
    assert_equal(len(self.nodes[0].getpeerinfo()), 2)
    self.log.info("Peer 0 disconnected after downloading old block too many times")
    # Requesting the current block on p2p_conns[1] should succeed indefinitely,
    # even when over the max upload target.
    # We'll try 800 times
    getdata_request.inv = [CInv(2, big_new_block)]
    for i in range(800):
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].sync_with_ping()
        assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
    self.log.info("Peer 1 able to repeatedly download new block")
    # But if p2p_conns[1] tries for an old block, it gets disconnected too.
    getdata_request.inv = [CInv(2, big_old_block)]
    p2p_conns[1].send_message(getdata_request)
    p2p_conns[1].wait_for_disconnect()
    assert_equal(len(self.nodes[0].getpeerinfo()), 1)
    self.log.info("Peer 1 disconnected after trying to download old block")
    self.log.info("Advancing system time on node to clear counters...")
    # If we advance the time by 24 hours, then the counters should reset,
    # and p2p_conns[2] should be able to retrieve the old block.
    self.nodes[0].setmocktime(int(time.time()))
    p2p_conns[2].sync_with_ping()
    p2p_conns[2].send_message(getdata_request)
    p2p_conns[2].sync_with_ping()
    assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
    self.log.info("Peer 2 able to download old block")
    self.nodes[0].disconnect_p2ps()
    #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
    self.log.info("Restarting nodes with -whitelist=127.0.0.1")
    self.stop_node(0)
    self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
    # Reconnect to self.nodes[0]
    self.nodes[0].add_p2p_connection(TestP2PConn())
    #retrieve 20 blocks which should be enough to break the 1MB limit
    getdata_request.inv = [CInv(2, big_new_block)]
    for i in range(20):
        self.nodes[0].p2p.send_message(getdata_request)
        self.nodes[0].p2p.sync_with_ping()
        assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
    getdata_request.inv = [CInv(2, big_old_block)]
    self.nodes[0].p2p.send_and_ping(getdata_request)
    assert_equal(len(self.nodes[0].getpeerinfo()), 1)  #node is still connected because of the whitelist
    self.log.info("Peer still connected after trying to download old block (whitelisted)")
nlgcoin/guldencoin-official
[ 136, 50, 136, 34, 1439462804 ]
def make_client(self, name, num_blocks, num_bits):
    """Build a riak-backed simhash Client with the given table geometry."""
    backend = 'riak'
    return Client(backend, name, num_blocks, num_bits)
seomoz/simhash-db-py
[ 62, 22, 62, 4, 1350068054 ]
def create(kernel):
    """Build the quick-shot upgrade mk4 ship-weapon tangible template.

    :returns: the configured Tangible object
    """
    result = Tangible()
    result.template = "object/tangible/ship/crafted/weapon/shared_quick_shot_upgrade_mk4.iff"
    result.attribute_template_id = 8
    result.stfName("space_crafting_n","quick_shot_upgrade_mk4")
    # BUG FIX: the object was built but never returned, so callers got None.
    return result
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def _pool_for_credentials(user, password, repo_base, create_if_missing=True): pool_key = PoolKey(user, password, repo_base) # Create a new pool if one doesn't exist or if the existing one has been # closed. Normally a pool should only be closed during testing, to force # all hanging connections to a database to be closed. if pool_key not in connection_pools or connection_pools[pool_key].closed: if create_if_missing is False: return None # Maintains at least 1 connection. # Raises "PoolError: connection pool exausted" if a thread tries # holding onto than 10 connections to a single database. connection_pools[pool_key] = ThreadedConnectionPool( 0, 10, user=user, password=password, host=HOST, port=PORT, database=repo_base) return connection_pools[pool_key]
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]
def _convert_pg_exception(e):
    """Translate a psycopg2 error into a Django-meaningful exception.

    Unrecognized error codes re-raise the original exception.
    """
    code = e.pgcode
    if code == errorcodes.INSUFFICIENT_PRIVILEGE:
        raise PermissionDenied()
    if code in (errorcodes.INVALID_PARAMETER_VALUE,
                errorcodes.UNDEFINED_OBJECT):
        raise ValueError("Invalid parameter in query.")
    if code == errorcodes.INVALID_SCHEMA_NAME:
        raise LookupError('Repo not found. '
                          'You must specify a repo in your query. '
                          'i.e. select * from REPO_NAME.TABLE_NAME. ')
    if code == errorcodes.UNDEFINED_TABLE:
        raise LookupError("Table or view not found.")
    if code == errorcodes.DUPLICATE_SCHEMA:
        raise ValueError("A repo with that name already exists.")
    if code == errorcodes.DUPLICATE_TABLE:
        raise ValueError("A table with that name already exists.")
    raise e
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]
def __init__(self, user, password, host=HOST, port=PORT, repo_base=None):
    """Open a database connection for ``user`` against ``repo_base``.

    Row-level security (and the SQL query rewriter that enforces it) is
    enabled for every account except the configured superuser.
    """
    self.user = user
    self.password = password
    self.host = host
    self.port = port
    self.repo_base = repo_base
    self.connection = None
    # row level security is enabled unless the user is a superuser
    self.row_level_security = bool(
        user != settings.DATABASES['default']['USER'])
    # We only need a query rewriter if RLS is enabled
    if self.row_level_security:
        self.query_rewriter = core.db.query_rewriter.SQLQueryRewriter(
            self.repo_base, self.user)
    self.__open_connection__()
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]
def __open_connection__(self):
    """Borrow an autocommit connection from the credential-keyed pool."""
    pool = _pool_for_credentials(self.user, self.password, self.repo_base)
    conn = pool.getconn()
    conn.set_isolation_level(
        psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    self.connection = conn
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]
def close_connection(self):
    """Return this connection to its pool (closing it) and forget it."""
    pool = _pool_for_credentials(
        self.user, self.password, self.repo_base, create_if_missing=False)
    if self.connection and pool and not pool.closed:
        pool.putconn(self.connection, close=True)
    self.connection = None
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]
def _validate_table_name(self, noun):
    """
    Raises ValueError if the proposed table name is invalid.

    Valid table names contain only alphanumeric characters and
    underscores, must begin with a letter, and must not begin or end
    with an underscore.
    """
    invalid_noun_msg = (
        "Table names may only contain "
        "alphanumeric characters and underscores, must begin with a "
        "letter, and must not begin or end with an underscore."
    )
    # BUG FIX: the old pattern (?![\d]) only rejected a leading digit, so
    # names like "_foo" slipped through even though the error message (and
    # docstring) forbid a leading underscore.
    regex = r'^(?![\d_])\w+(?<!_)$'
    valid_pattern = re.compile(regex)
    if valid_pattern.match(noun) is None:
        raise ValueError(invalid_noun_msg)
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]
def list_repos(self):
    """Return the names of all schemata not owned by the superuser account."""
    query = ('SELECT schema_name AS repo_name '
             'FROM information_schema.schemata '
             'WHERE schema_owner != %s')
    params = (settings.DATABASES['default']['USER'],)
    result = self.execute_sql(query, params)
    return [row[0] for row in result['tuples']]
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]
def delete_repo(self, repo, force=False):
    """Drop the schema for ``repo`` (CASCADE when ``force`` is True).

    :returns: the status of the executed DROP SCHEMA statement
    """
    self._check_for_injections(repo)
    # drop the schema
    query = 'DROP SCHEMA %s %s'
    cascade = AsIs('CASCADE') if force is True else AsIs('')
    res = self.execute_sql(query, (AsIs(repo), cascade))
    return res['status']
datahuborg/datahub
[ 210, 60, 210, 43, 1380229308 ]