repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_sync_zonefile_inventory | def atlas_peer_sync_zonefile_inventory( my_hostport, peer_hostport, maxlen, timeout=None, peer_table=None ):
"""
Synchronize our knowledge of a peer's zonefiles up to a given byte length
NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
maxlen is the maximum length in bits of the expected zonefile.
Return the new inv vector if we synced it (updating the peer table in the process)
Return None if not
"""
if timeout is None:
timeout = atlas_inv_timeout()
peer_inv = ""
bit_offset = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
bit_offset = (len(peer_inv) - 1) * 8 # i.e. re-obtain the last byte
if bit_offset < 0:
bit_offset = 0
else:
peer_inv = peer_inv[:-1]
peer_inv = atlas_peer_download_zonefile_inventory( my_hostport, peer_hostport, maxlen, bit_offset=bit_offset, timeout=timeout, peer_table=peer_table )
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
log.debug("%s no longer a peer" % peer_hostport)
return None
inv_str = atlas_inventory_to_string(peer_inv)
if len(inv_str) > 40:
inv_str = inv_str[:40] + "..."
log.debug("Set zonefile inventory %s: %s" % (peer_hostport, inv_str))
atlas_peer_set_zonefile_inventory( peer_hostport, peer_inv, peer_table=ptbl ) # NOTE: may have trailing 0's for padding
return peer_inv | python | def atlas_peer_sync_zonefile_inventory( my_hostport, peer_hostport, maxlen, timeout=None, peer_table=None ):
"""
Synchronize our knowledge of a peer's zonefiles up to a given byte length
NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
maxlen is the maximum length in bits of the expected zonefile.
Return the new inv vector if we synced it (updating the peer table in the process)
Return None if not
"""
if timeout is None:
timeout = atlas_inv_timeout()
peer_inv = ""
bit_offset = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
bit_offset = (len(peer_inv) - 1) * 8 # i.e. re-obtain the last byte
if bit_offset < 0:
bit_offset = 0
else:
peer_inv = peer_inv[:-1]
peer_inv = atlas_peer_download_zonefile_inventory( my_hostport, peer_hostport, maxlen, bit_offset=bit_offset, timeout=timeout, peer_table=peer_table )
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
log.debug("%s no longer a peer" % peer_hostport)
return None
inv_str = atlas_inventory_to_string(peer_inv)
if len(inv_str) > 40:
inv_str = inv_str[:40] + "..."
log.debug("Set zonefile inventory %s: %s" % (peer_hostport, inv_str))
atlas_peer_set_zonefile_inventory( peer_hostport, peer_inv, peer_table=ptbl ) # NOTE: may have trailing 0's for padding
return peer_inv | [
"def",
"atlas_peer_sync_zonefile_inventory",
"(",
"my_hostport",
",",
"peer_hostport",
",",
"maxlen",
",",
"timeout",
"=",
"None",
",",
"peer_table",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"timeout",
"=",
"atlas_inv_timeout",
"(",
")",
"pee... | Synchronize our knowledge of a peer's zonefiles up to a given byte length
NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
maxlen is the maximum length in bits of the expected zonefile.
Return the new inv vector if we synced it (updating the peer table in the process)
Return None if not | [
"Synchronize",
"our",
"knowledge",
"of",
"a",
"peer",
"s",
"zonefiles",
"up",
"to",
"a",
"given",
"byte",
"length",
"NOT",
"THREAD",
"SAFE",
";",
"CALL",
"FROM",
"ONLY",
"ONE",
"THREAD",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2031-L2074 | train | 225,300 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_refresh_zonefile_inventory | def atlas_peer_refresh_zonefile_inventory( my_hostport, peer_hostport, byte_offset, timeout=None, peer_table=None, con=None, path=None, local_inv=None ):
"""
Refresh a peer's zonefile recent inventory vector entries,
by removing every bit after byte_offset and re-synchronizing them.
The intuition here is that recent zonefiles are much rarer than older
zonefiles (which will have been near-100% replicated), meaning the tail
of the peer's zonefile inventory is a lot less stable than the head (since
peers will be actively distributing recent zonefiles).
NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
Return True if we synced all the way up to the expected inventory length, and update the refresh time in the peer table.
Return False if not.
"""
if timeout is None:
timeout = atlas_inv_timeout()
if local_inv is None:
# get local zonefile inv
inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
local_inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
maxlen = len(local_inv)
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# reset the peer's zonefile inventory, back to offset
cur_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
atlas_peer_set_zonefile_inventory( peer_hostport, cur_inv[:byte_offset], peer_table=ptbl )
inv = atlas_peer_sync_zonefile_inventory( my_hostport, peer_hostport, maxlen, timeout=timeout, peer_table=peer_table )
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# Update refresh time (even if we fail)
ptbl[peer_hostport]['zonefile_inventory_last_refresh'] = time_now()
if inv is not None:
inv_str = atlas_inventory_to_string(inv)
if len(inv_str) > 40:
inv_str = inv_str[:40] + "..."
log.debug("%s: inventory of %s is now '%s'" % (my_hostport, peer_hostport, inv_str))
if inv is None:
return False
else:
return True | python | def atlas_peer_refresh_zonefile_inventory( my_hostport, peer_hostport, byte_offset, timeout=None, peer_table=None, con=None, path=None, local_inv=None ):
"""
Refresh a peer's zonefile recent inventory vector entries,
by removing every bit after byte_offset and re-synchronizing them.
The intuition here is that recent zonefiles are much rarer than older
zonefiles (which will have been near-100% replicated), meaning the tail
of the peer's zonefile inventory is a lot less stable than the head (since
peers will be actively distributing recent zonefiles).
NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
Return True if we synced all the way up to the expected inventory length, and update the refresh time in the peer table.
Return False if not.
"""
if timeout is None:
timeout = atlas_inv_timeout()
if local_inv is None:
# get local zonefile inv
inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
local_inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
maxlen = len(local_inv)
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# reset the peer's zonefile inventory, back to offset
cur_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
atlas_peer_set_zonefile_inventory( peer_hostport, cur_inv[:byte_offset], peer_table=ptbl )
inv = atlas_peer_sync_zonefile_inventory( my_hostport, peer_hostport, maxlen, timeout=timeout, peer_table=peer_table )
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# Update refresh time (even if we fail)
ptbl[peer_hostport]['zonefile_inventory_last_refresh'] = time_now()
if inv is not None:
inv_str = atlas_inventory_to_string(inv)
if len(inv_str) > 40:
inv_str = inv_str[:40] + "..."
log.debug("%s: inventory of %s is now '%s'" % (my_hostport, peer_hostport, inv_str))
if inv is None:
return False
else:
return True | [
"def",
"atlas_peer_refresh_zonefile_inventory",
"(",
"my_hostport",
",",
"peer_hostport",
",",
"byte_offset",
",",
"timeout",
"=",
"None",
",",
"peer_table",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
",",
"local_inv",
"=",
"None",
")",
... | Refresh a peer's zonefile recent inventory vector entries,
by removing every bit after byte_offset and re-synchronizing them.
The intuition here is that recent zonefiles are much rarer than older
zonefiles (which will have been near-100% replicated), meaning the tail
of the peer's zonefile inventory is a lot less stable than the head (since
peers will be actively distributing recent zonefiles).
NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
Return True if we synced all the way up to the expected inventory length, and update the refresh time in the peer table.
Return False if not. | [
"Refresh",
"a",
"peer",
"s",
"zonefile",
"recent",
"inventory",
"vector",
"entries",
"by",
"removing",
"every",
"bit",
"after",
"byte_offset",
"and",
"re",
"-",
"synchronizing",
"them",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2077-L2131 | train | 225,301 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_has_fresh_zonefile_inventory | def atlas_peer_has_fresh_zonefile_inventory( peer_hostport, peer_table=None ):
"""
Does the given atlas node have a fresh zonefile inventory?
"""
fresh = False
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
now = time_now()
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
# NOTE: zero-length or None peer inventory means the peer is simply dead, but we've pinged it
if ptbl[peer_hostport].has_key('zonefile_inventory_last_refresh') and \
ptbl[peer_hostport]['zonefile_inventory_last_refresh'] + atlas_peer_ping_interval() > now:
fresh = True
return fresh | python | def atlas_peer_has_fresh_zonefile_inventory( peer_hostport, peer_table=None ):
"""
Does the given atlas node have a fresh zonefile inventory?
"""
fresh = False
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
now = time_now()
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
# NOTE: zero-length or None peer inventory means the peer is simply dead, but we've pinged it
if ptbl[peer_hostport].has_key('zonefile_inventory_last_refresh') and \
ptbl[peer_hostport]['zonefile_inventory_last_refresh'] + atlas_peer_ping_interval() > now:
fresh = True
return fresh | [
"def",
"atlas_peer_has_fresh_zonefile_inventory",
"(",
"peer_hostport",
",",
"peer_table",
"=",
"None",
")",
":",
"fresh",
"=",
"False",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"if",
"peer_hostport",
"not",
"in",
"ptbl",
".",
"... | Does the given atlas node have a fresh zonefile inventory? | [
"Does",
"the",
"given",
"atlas",
"node",
"have",
"a",
"fresh",
"zonefile",
"inventory?"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2134-L2153 | train | 225,302 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_find_missing_zonefile_availability | def atlas_find_missing_zonefile_availability( peer_table=None, con=None, path=None, missing_zonefile_info=None ):
"""
Find the set of missing zonefiles, as well as their popularity amongst
our neighbors.
Only consider zonefiles that are known by at least
one peer; otherwise they're missing from
our clique (and we'll re-sync our neighborss' inventories
every so often to make sure we detect when zonefiles
become available).
Return a dict, structured as:
{
'zonefile hash': {
'names': [names],
'txid': first txid that set it,
'indexes': [...],
'popularity': ...,
'peers': [...],
'tried_storage': True|False
}
}
"""
# which zonefiles do we have?
bit_offset = 0
bit_count = 10000
missing = []
ret = {}
if missing_zonefile_info is None:
while True:
zfinfo = atlasdb_zonefile_find_missing( bit_offset, bit_count, con=con, path=path )
if len(zfinfo) == 0:
break
missing += zfinfo
bit_offset += len(zfinfo)
if len(missing) > 0:
log.debug("Missing %s zonefiles" % len(missing))
else:
missing = missing_zonefile_info
if len(missing) == 0:
# none!
return ret
with AtlasPeerTableLocked(peer_table) as ptbl:
# do any other peers have this zonefile?
for zfinfo in missing:
popularity = 0
byte_index = (zfinfo['inv_index'] - 1) / 8
bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
peers = []
if not ret.has_key(zfinfo['zonefile_hash']):
ret[zfinfo['zonefile_hash']] = {
'names': [],
'txid': zfinfo['txid'],
'indexes': [],
'block_heights': [],
'popularity': 0,
'peers': [],
'tried_storage': False
}
for peer_hostport in ptbl.keys():
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
if len(peer_inv) <= byte_index:
# too new for this peer
continue
if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
# this peer doesn't have it
continue
if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
popularity += 1
peers.append( peer_hostport )
ret[zfinfo['zonefile_hash']]['names'].append( zfinfo['name'] )
ret[zfinfo['zonefile_hash']]['indexes'].append( zfinfo['inv_index']-1 )
ret[zfinfo['zonefile_hash']]['block_heights'].append( zfinfo['block_height'] )
ret[zfinfo['zonefile_hash']]['popularity'] += popularity
ret[zfinfo['zonefile_hash']]['peers'] += peers
ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']
return ret | python | def atlas_find_missing_zonefile_availability( peer_table=None, con=None, path=None, missing_zonefile_info=None ):
"""
Find the set of missing zonefiles, as well as their popularity amongst
our neighbors.
Only consider zonefiles that are known by at least
one peer; otherwise they're missing from
our clique (and we'll re-sync our neighborss' inventories
every so often to make sure we detect when zonefiles
become available).
Return a dict, structured as:
{
'zonefile hash': {
'names': [names],
'txid': first txid that set it,
'indexes': [...],
'popularity': ...,
'peers': [...],
'tried_storage': True|False
}
}
"""
# which zonefiles do we have?
bit_offset = 0
bit_count = 10000
missing = []
ret = {}
if missing_zonefile_info is None:
while True:
zfinfo = atlasdb_zonefile_find_missing( bit_offset, bit_count, con=con, path=path )
if len(zfinfo) == 0:
break
missing += zfinfo
bit_offset += len(zfinfo)
if len(missing) > 0:
log.debug("Missing %s zonefiles" % len(missing))
else:
missing = missing_zonefile_info
if len(missing) == 0:
# none!
return ret
with AtlasPeerTableLocked(peer_table) as ptbl:
# do any other peers have this zonefile?
for zfinfo in missing:
popularity = 0
byte_index = (zfinfo['inv_index'] - 1) / 8
bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
peers = []
if not ret.has_key(zfinfo['zonefile_hash']):
ret[zfinfo['zonefile_hash']] = {
'names': [],
'txid': zfinfo['txid'],
'indexes': [],
'block_heights': [],
'popularity': 0,
'peers': [],
'tried_storage': False
}
for peer_hostport in ptbl.keys():
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
if len(peer_inv) <= byte_index:
# too new for this peer
continue
if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
# this peer doesn't have it
continue
if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
popularity += 1
peers.append( peer_hostport )
ret[zfinfo['zonefile_hash']]['names'].append( zfinfo['name'] )
ret[zfinfo['zonefile_hash']]['indexes'].append( zfinfo['inv_index']-1 )
ret[zfinfo['zonefile_hash']]['block_heights'].append( zfinfo['block_height'] )
ret[zfinfo['zonefile_hash']]['popularity'] += popularity
ret[zfinfo['zonefile_hash']]['peers'] += peers
ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']
return ret | [
"def",
"atlas_find_missing_zonefile_availability",
"(",
"peer_table",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
",",
"missing_zonefile_info",
"=",
"None",
")",
":",
"# which zonefiles do we have?",
"bit_offset",
"=",
"0",
"bit_count",
"=",
"1... | Find the set of missing zonefiles, as well as their popularity amongst
our neighbors.
Only consider zonefiles that are known by at least
one peer; otherwise they're missing from
our clique (and we'll re-sync our neighborss' inventories
every so often to make sure we detect when zonefiles
become available).
Return a dict, structured as:
{
'zonefile hash': {
'names': [names],
'txid': first txid that set it,
'indexes': [...],
'popularity': ...,
'peers': [...],
'tried_storage': True|False
}
} | [
"Find",
"the",
"set",
"of",
"missing",
"zonefiles",
"as",
"well",
"as",
"their",
"popularity",
"amongst",
"our",
"neighbors",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2177-L2266 | train | 225,303 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_has_zonefile | def atlas_peer_has_zonefile( peer_hostport, zonefile_hash, zonefile_bits=None, con=None, path=None, peer_table=None ):
"""
Does the given peer have the given zonefile defined?
Check its inventory vector
Return True if present
Return False if not present
Return None if we don't know about the zonefile ourselves, or if we don't know about the peer
"""
bits = None
if zonefile_bits is None:
bits = atlasdb_get_zonefile_bits( zonefile_hash, con=con, path=path )
if len(bits) == 0:
return None
else:
bits = zonefile_bits
zonefile_inv = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
zonefile_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
res = atlas_inventory_test_zonefile_bits( zonefile_inv, bits )
return res | python | def atlas_peer_has_zonefile( peer_hostport, zonefile_hash, zonefile_bits=None, con=None, path=None, peer_table=None ):
"""
Does the given peer have the given zonefile defined?
Check its inventory vector
Return True if present
Return False if not present
Return None if we don't know about the zonefile ourselves, or if we don't know about the peer
"""
bits = None
if zonefile_bits is None:
bits = atlasdb_get_zonefile_bits( zonefile_hash, con=con, path=path )
if len(bits) == 0:
return None
else:
bits = zonefile_bits
zonefile_inv = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
zonefile_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
res = atlas_inventory_test_zonefile_bits( zonefile_inv, bits )
return res | [
"def",
"atlas_peer_has_zonefile",
"(",
"peer_hostport",
",",
"zonefile_hash",
",",
"zonefile_bits",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
",",
"peer_table",
"=",
"None",
")",
":",
"bits",
"=",
"None",
"if",
"zonefile_bits",
"is",
... | Does the given peer have the given zonefile defined?
Check its inventory vector
Return True if present
Return False if not present
Return None if we don't know about the zonefile ourselves, or if we don't know about the peer | [
"Does",
"the",
"given",
"peer",
"have",
"the",
"given",
"zonefile",
"defined?",
"Check",
"its",
"inventory",
"vector"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2269-L2297 | train | 225,304 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_get_neighbors | def atlas_peer_get_neighbors( my_hostport, peer_hostport, timeout=None, peer_table=None, con=None, path=None ):
"""
Ask the peer server at the given URL for its neighbors.
Update the health info in peer_table
(if not given, the global peer table will be used instead)
Return the list on success
Return None on failure to contact
Raise on invalid URL
"""
if timeout is None:
timeout = atlas_neighbors_timeout()
peer_list = None
host, port = url_to_host_port( peer_hostport )
RPC = get_rpc_client_class()
rpc = RPC( host, port, timeout=timeout, src=my_hostport )
# sane limits
max_neighbors = atlas_max_neighbors()
assert not atlas_peer_table_is_locked_by_me()
try:
peer_list = blockstack_atlas_peer_exchange( peer_hostport, my_hostport, timeout=timeout, proxy=rpc )
if json_is_exception(peer_list):
# fall back to legacy method
peer_list = blockstack_get_atlas_peers(peer_hostport, timeout=timeout, proxy=rpc)
except (socket.timeout, socket.gaierror, socket.herror, socket.error), se:
atlas_log_socket_error( "atlas_peer_exchange(%s)" % peer_hostport, peer_hostport, se)
log.error("Socket error in response from '%s'" % peer_hostport)
except Exception, e:
if os.environ.get("BLOCKSTACK_DEBUG") == "1":
log.exception(e)
log.error("Failed to talk to '%s'" % peer_hostport)
if peer_list is None:
log.error("Failed to query remote peer %s" % peer_hostport)
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
return None
if 'error' in peer_list:
log.debug("Remote peer error: %s" % peer_list['error'])
log.error("Remote peer error on %s" % peer_hostport)
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
return None
ret = peer_list['peers']
atlas_peer_update_health( peer_hostport, True, peer_table=peer_table )
return ret | python | def atlas_peer_get_neighbors( my_hostport, peer_hostport, timeout=None, peer_table=None, con=None, path=None ):
"""
Ask the peer server at the given URL for its neighbors.
Update the health info in peer_table
(if not given, the global peer table will be used instead)
Return the list on success
Return None on failure to contact
Raise on invalid URL
"""
if timeout is None:
timeout = atlas_neighbors_timeout()
peer_list = None
host, port = url_to_host_port( peer_hostport )
RPC = get_rpc_client_class()
rpc = RPC( host, port, timeout=timeout, src=my_hostport )
# sane limits
max_neighbors = atlas_max_neighbors()
assert not atlas_peer_table_is_locked_by_me()
try:
peer_list = blockstack_atlas_peer_exchange( peer_hostport, my_hostport, timeout=timeout, proxy=rpc )
if json_is_exception(peer_list):
# fall back to legacy method
peer_list = blockstack_get_atlas_peers(peer_hostport, timeout=timeout, proxy=rpc)
except (socket.timeout, socket.gaierror, socket.herror, socket.error), se:
atlas_log_socket_error( "atlas_peer_exchange(%s)" % peer_hostport, peer_hostport, se)
log.error("Socket error in response from '%s'" % peer_hostport)
except Exception, e:
if os.environ.get("BLOCKSTACK_DEBUG") == "1":
log.exception(e)
log.error("Failed to talk to '%s'" % peer_hostport)
if peer_list is None:
log.error("Failed to query remote peer %s" % peer_hostport)
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
return None
if 'error' in peer_list:
log.debug("Remote peer error: %s" % peer_list['error'])
log.error("Remote peer error on %s" % peer_hostport)
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
return None
ret = peer_list['peers']
atlas_peer_update_health( peer_hostport, True, peer_table=peer_table )
return ret | [
"def",
"atlas_peer_get_neighbors",
"(",
"my_hostport",
",",
"peer_hostport",
",",
"timeout",
"=",
"None",
",",
"peer_table",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"timeout",
"=",
"at... | Ask the peer server at the given URL for its neighbors.
Update the health info in peer_table
(if not given, the global peer table will be used instead)
Return the list on success
Return None on failure to contact
Raise on invalid URL | [
"Ask",
"the",
"peer",
"server",
"at",
"the",
"given",
"URL",
"for",
"its",
"neighbors",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2300-L2354 | train | 225,305 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_get_zonefiles | def atlas_get_zonefiles( my_hostport, peer_hostport, zonefile_hashes, timeout=None, peer_table=None ):
"""
Given a list of zonefile hashes.
go and get them from the given host.
Update node health
Return the newly-fetched zonefiles on success (as a dict mapping hashes to zonefile data)
Return None on error.
"""
if timeout is None:
timeout = atlas_zonefiles_timeout()
zf_payload = None
zonefile_datas = {}
host, port = url_to_host_port( peer_hostport )
RPC = get_rpc_client_class()
rpc = RPC( host, port, timeout=timeout, src=my_hostport )
assert not atlas_peer_table_is_locked_by_me()
# get in batches of 100 or less
zf_batches = []
for i in xrange(0, len(zonefile_hashes), 100):
zf_batches.append(zonefile_hashes[i:i+100])
for zf_batch in zf_batches:
zf_payload = None
try:
zf_payload = blockstack_get_zonefiles( peer_hostport, zf_batch, timeout=timeout, my_hostport=my_hostport, proxy=rpc )
except (socket.timeout, socket.gaierror, socket.herror, socket.error), se:
atlas_log_socket_error( "get_zonefiles(%s)" % peer_hostport, peer_hostport, se)
except Exception, e:
if os.environ.get("BLOCKSTACK_DEBUG") is not None:
log.exception(e)
log.error("Invalid zonefile data from %s" % peer_hostport)
if zf_payload is None:
log.error("Failed to fetch zonefile data from %s" % peer_hostport)
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
zonefile_datas = None
break
if 'error' in zf_payload.keys():
log.error("Failed to fetch zonefile data from %s: %s" % (peer_hostport, zf_payload['error']))
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
zonefile_datas = None
break
# success!
zonefile_datas.update( zf_payload['zonefiles'] )
atlas_peer_update_health( peer_hostport, True, peer_table=peer_table )
return zonefile_datas | python | def atlas_get_zonefiles( my_hostport, peer_hostport, zonefile_hashes, timeout=None, peer_table=None ):
"""
Given a list of zonefile hashes.
go and get them from the given host.
Update node health
Return the newly-fetched zonefiles on success (as a dict mapping hashes to zonefile data)
Return None on error.
"""
if timeout is None:
timeout = atlas_zonefiles_timeout()
zf_payload = None
zonefile_datas = {}
host, port = url_to_host_port( peer_hostport )
RPC = get_rpc_client_class()
rpc = RPC( host, port, timeout=timeout, src=my_hostport )
assert not atlas_peer_table_is_locked_by_me()
# get in batches of 100 or less
zf_batches = []
for i in xrange(0, len(zonefile_hashes), 100):
zf_batches.append(zonefile_hashes[i:i+100])
for zf_batch in zf_batches:
zf_payload = None
try:
zf_payload = blockstack_get_zonefiles( peer_hostport, zf_batch, timeout=timeout, my_hostport=my_hostport, proxy=rpc )
except (socket.timeout, socket.gaierror, socket.herror, socket.error), se:
atlas_log_socket_error( "get_zonefiles(%s)" % peer_hostport, peer_hostport, se)
except Exception, e:
if os.environ.get("BLOCKSTACK_DEBUG") is not None:
log.exception(e)
log.error("Invalid zonefile data from %s" % peer_hostport)
if zf_payload is None:
log.error("Failed to fetch zonefile data from %s" % peer_hostport)
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
zonefile_datas = None
break
if 'error' in zf_payload.keys():
log.error("Failed to fetch zonefile data from %s: %s" % (peer_hostport, zf_payload['error']))
atlas_peer_update_health( peer_hostport, False, peer_table=peer_table )
zonefile_datas = None
break
# success!
zonefile_datas.update( zf_payload['zonefiles'] )
atlas_peer_update_health( peer_hostport, True, peer_table=peer_table )
return zonefile_datas | [
"def",
"atlas_get_zonefiles",
"(",
"my_hostport",
",",
"peer_hostport",
",",
"zonefile_hashes",
",",
"timeout",
"=",
"None",
",",
"peer_table",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"timeout",
"=",
"atlas_zonefiles_timeout",
"(",
")",
"zf_... | Given a list of zonefile hashes.
go and get them from the given host.
Update node health
Return the newly-fetched zonefiles on success (as a dict mapping hashes to zonefile data)
Return None on error. | [
"Given",
"a",
"list",
"of",
"zonefile",
"hashes",
".",
"go",
"and",
"get",
"them",
"from",
"the",
"given",
"host",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2357-L2417 | train | 225,306 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_rank_peers_by_data_availability | def atlas_rank_peers_by_data_availability( peer_list=None, peer_table=None, local_inv=None, con=None, path=None ):
"""
Get a ranking of peers to contact for a zonefile.
Peers are ranked by the number of zonefiles they have
which we don't have.
This is used to select neighbors.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_list is None:
peer_list = ptbl.keys()[:]
if local_inv is None:
# what's my inventory?
inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
local_inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
peer_availability_ranking = [] # (health score, peer hostport)
for peer_hostport in peer_list:
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
# ignore peers that we don't have an inventory for
if len(peer_inv) == 0:
continue
availability_score = atlas_inventory_count_missing( local_inv, peer_inv )
peer_availability_ranking.append( (availability_score, peer_hostport) )
# sort on availability
peer_availability_ranking.sort()
peer_availability_ranking.reverse()
return [peer_hp for _, peer_hp in peer_availability_ranking] | python | def atlas_rank_peers_by_data_availability( peer_list=None, peer_table=None, local_inv=None, con=None, path=None ):
"""
Get a ranking of peers to contact for a zonefile.
Peers are ranked by the number of zonefiles they have
which we don't have.
This is used to select neighbors.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_list is None:
peer_list = ptbl.keys()[:]
if local_inv is None:
# what's my inventory?
inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
local_inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
peer_availability_ranking = [] # (health score, peer hostport)
for peer_hostport in peer_list:
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
# ignore peers that we don't have an inventory for
if len(peer_inv) == 0:
continue
availability_score = atlas_inventory_count_missing( local_inv, peer_inv )
peer_availability_ranking.append( (availability_score, peer_hostport) )
# sort on availability
peer_availability_ranking.sort()
peer_availability_ranking.reverse()
return [peer_hp for _, peer_hp in peer_availability_ranking] | [
"def",
"atlas_rank_peers_by_data_availability",
"(",
"peer_list",
"=",
"None",
",",
"peer_table",
"=",
"None",
",",
"local_inv",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
... | Get a ranking of peers to contact for a zonefile.
Peers are ranked by the number of zonefiles they have
which we don't have.
This is used to select neighbors. | [
"Get",
"a",
"ranking",
"of",
"peers",
"to",
"contact",
"for",
"a",
"zonefile",
".",
"Peers",
"are",
"ranked",
"by",
"the",
"number",
"of",
"zonefiles",
"they",
"have",
"which",
"we",
"don",
"t",
"have",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2453-L2488 | train | 225,307 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_dequeue_all | def atlas_peer_dequeue_all( peer_queue=None ):
"""
Get all queued peers
"""
peers = []
with AtlasPeerQueueLocked(peer_queue) as pq:
while len(pq) > 0:
peers.append( pq.pop(0) )
return peers | python | def atlas_peer_dequeue_all( peer_queue=None ):
"""
Get all queued peers
"""
peers = []
with AtlasPeerQueueLocked(peer_queue) as pq:
while len(pq) > 0:
peers.append( pq.pop(0) )
return peers | [
"def",
"atlas_peer_dequeue_all",
"(",
"peer_queue",
"=",
"None",
")",
":",
"peers",
"=",
"[",
"]",
"with",
"AtlasPeerQueueLocked",
"(",
"peer_queue",
")",
"as",
"pq",
":",
"while",
"len",
"(",
"pq",
")",
">",
"0",
":",
"peers",
".",
"append",
"(",
"pq"... | Get all queued peers | [
"Get",
"all",
"queued",
"peers"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2524-L2534 | train | 225,308 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_zonefile_push_enqueue | def atlas_zonefile_push_enqueue( zonefile_hash, name, txid, zonefile_data, zonefile_queue=None, con=None, path=None ):
"""
Enqueue the given zonefile into our "push" queue,
from which it will be replicated to storage and sent
out to other peers who don't have it.
Return True if we enqueued it
Return False if not
"""
res = False
bits = atlasdb_get_zonefile_bits( zonefile_hash, path=path, con=con )
if len(bits) == 0:
# invalid hash
return
with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
if len(zfq) < MAX_QUEUED_ZONEFILES:
zfdata = {
'zonefile_hash': zonefile_hash,
'zonefile': zonefile_data,
'name': name,
'txid': txid
}
zfq.append( zfdata )
res = True
return res | python | def atlas_zonefile_push_enqueue( zonefile_hash, name, txid, zonefile_data, zonefile_queue=None, con=None, path=None ):
"""
Enqueue the given zonefile into our "push" queue,
from which it will be replicated to storage and sent
out to other peers who don't have it.
Return True if we enqueued it
Return False if not
"""
res = False
bits = atlasdb_get_zonefile_bits( zonefile_hash, path=path, con=con )
if len(bits) == 0:
# invalid hash
return
with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
if len(zfq) < MAX_QUEUED_ZONEFILES:
zfdata = {
'zonefile_hash': zonefile_hash,
'zonefile': zonefile_data,
'name': name,
'txid': txid
}
zfq.append( zfdata )
res = True
return res | [
"def",
"atlas_zonefile_push_enqueue",
"(",
"zonefile_hash",
",",
"name",
",",
"txid",
",",
"zonefile_data",
",",
"zonefile_queue",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"res",
"=",
"False",
"bits",
"=",
"atlasdb_get_zonefi... | Enqueue the given zonefile into our "push" queue,
from which it will be replicated to storage and sent
out to other peers who don't have it.
Return True if we enqueued it
Return False if not | [
"Enqueue",
"the",
"given",
"zonefile",
"into",
"our",
"push",
"queue",
"from",
"which",
"it",
"will",
"be",
"replicated",
"to",
"storage",
"and",
"sent",
"out",
"to",
"other",
"peers",
"who",
"don",
"t",
"have",
"it",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2560-L2589 | train | 225,309 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_zonefile_push_dequeue | def atlas_zonefile_push_dequeue( zonefile_queue=None ):
"""
Dequeue a zonefile's information to replicate
Return None if there are none queued
"""
ret = None
with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
if len(zfq) > 0:
ret = zfq.pop(0)
return ret | python | def atlas_zonefile_push_dequeue( zonefile_queue=None ):
"""
Dequeue a zonefile's information to replicate
Return None if there are none queued
"""
ret = None
with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
if len(zfq) > 0:
ret = zfq.pop(0)
return ret | [
"def",
"atlas_zonefile_push_dequeue",
"(",
"zonefile_queue",
"=",
"None",
")",
":",
"ret",
"=",
"None",
"with",
"AtlasZonefileQueueLocked",
"(",
"zonefile_queue",
")",
"as",
"zfq",
":",
"if",
"len",
"(",
"zfq",
")",
">",
"0",
":",
"ret",
"=",
"zfq",
".",
... | Dequeue a zonefile's information to replicate
Return None if there are none queued | [
"Dequeue",
"a",
"zonefile",
"s",
"information",
"to",
"replicate",
"Return",
"None",
"if",
"there",
"are",
"none",
"queued"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2592-L2602 | train | 225,310 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_zonefile_push | def atlas_zonefile_push( my_hostport, peer_hostport, zonefile_data, timeout=None, peer_table=None ):
"""
Push the given zonefile to the given peer
Return True on success
Return False on failure
"""
if timeout is None:
timeout = atlas_push_zonefiles_timeout()
zonefile_hash = get_zonefile_data_hash(zonefile_data)
zonefile_data_b64 = base64.b64encode( zonefile_data )
host, port = url_to_host_port( peer_hostport )
RPC = get_rpc_client_class()
rpc = RPC( host, port, timeout=timeout, src=my_hostport )
status = False
assert not atlas_peer_table_is_locked_by_me()
try:
push_info = blockstack_put_zonefiles( peer_hostport, [zonefile_data_b64], timeout=timeout, my_hostport=my_hostport, proxy=rpc )
if 'error' not in push_info:
if push_info['saved'] == 1:
# woo!
saved = True
except (socket.timeout, socket.gaierror, socket.herror, socket.error), se:
atlas_log_socket_error( "put_zonefiles(%s)" % peer_hostport, peer_hostport, se)
except AssertionError, ae:
log.exception(ae)
log.error("Invalid server response from %s" % peer_hostport )
except Exception, e:
log.exception(e)
log.error("Failed to push zonefile %s to %s" % (zonefile_hash, peer_hostport))
with AtlasPeerTableLocked(peer_table) as ptbl:
atlas_peer_update_health( peer_hostport, status, peer_table=ptbl )
return status | python | def atlas_zonefile_push( my_hostport, peer_hostport, zonefile_data, timeout=None, peer_table=None ):
"""
Push the given zonefile to the given peer
Return True on success
Return False on failure
"""
if timeout is None:
timeout = atlas_push_zonefiles_timeout()
zonefile_hash = get_zonefile_data_hash(zonefile_data)
zonefile_data_b64 = base64.b64encode( zonefile_data )
host, port = url_to_host_port( peer_hostport )
RPC = get_rpc_client_class()
rpc = RPC( host, port, timeout=timeout, src=my_hostport )
status = False
assert not atlas_peer_table_is_locked_by_me()
try:
push_info = blockstack_put_zonefiles( peer_hostport, [zonefile_data_b64], timeout=timeout, my_hostport=my_hostport, proxy=rpc )
if 'error' not in push_info:
if push_info['saved'] == 1:
# woo!
saved = True
except (socket.timeout, socket.gaierror, socket.herror, socket.error), se:
atlas_log_socket_error( "put_zonefiles(%s)" % peer_hostport, peer_hostport, se)
except AssertionError, ae:
log.exception(ae)
log.error("Invalid server response from %s" % peer_hostport )
except Exception, e:
log.exception(e)
log.error("Failed to push zonefile %s to %s" % (zonefile_hash, peer_hostport))
with AtlasPeerTableLocked(peer_table) as ptbl:
atlas_peer_update_health( peer_hostport, status, peer_table=ptbl )
return status | [
"def",
"atlas_zonefile_push",
"(",
"my_hostport",
",",
"peer_hostport",
",",
"zonefile_data",
",",
"timeout",
"=",
"None",
",",
"peer_table",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"timeout",
"=",
"atlas_push_zonefiles_timeout",
"(",
")",
"... | Push the given zonefile to the given peer
Return True on success
Return False on failure | [
"Push",
"the",
"given",
"zonefile",
"to",
"the",
"given",
"peer",
"Return",
"True",
"on",
"success",
"Return",
"False",
"on",
"failure"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2605-L2646 | train | 225,311 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_node_init | def atlas_node_init(my_hostname, my_portnum, atlasdb_path, zonefile_dir, working_dir):
"""
Start up the atlas node.
Return a bundle of atlas state
"""
atlas_state = {}
atlas_state['peer_crawler'] = AtlasPeerCrawler(my_hostname, my_portnum, atlasdb_path, working_dir)
atlas_state['health_checker'] = AtlasHealthChecker(my_hostname, my_portnum, atlasdb_path)
atlas_state['zonefile_crawler'] = AtlasZonefileCrawler(my_hostname, my_portnum, atlasdb_path, zonefile_dir)
# atlas_state['zonefile_pusher'] = AtlasZonefilePusher(my_hostname, my_portnum, atlasdb_path, zonefile_dir)
return atlas_state | python | def atlas_node_init(my_hostname, my_portnum, atlasdb_path, zonefile_dir, working_dir):
"""
Start up the atlas node.
Return a bundle of atlas state
"""
atlas_state = {}
atlas_state['peer_crawler'] = AtlasPeerCrawler(my_hostname, my_portnum, atlasdb_path, working_dir)
atlas_state['health_checker'] = AtlasHealthChecker(my_hostname, my_portnum, atlasdb_path)
atlas_state['zonefile_crawler'] = AtlasZonefileCrawler(my_hostname, my_portnum, atlasdb_path, zonefile_dir)
# atlas_state['zonefile_pusher'] = AtlasZonefilePusher(my_hostname, my_portnum, atlasdb_path, zonefile_dir)
return atlas_state | [
"def",
"atlas_node_init",
"(",
"my_hostname",
",",
"my_portnum",
",",
"atlasdb_path",
",",
"zonefile_dir",
",",
"working_dir",
")",
":",
"atlas_state",
"=",
"{",
"}",
"atlas_state",
"[",
"'peer_crawler'",
"]",
"=",
"AtlasPeerCrawler",
"(",
"my_hostname",
",",
"m... | Start up the atlas node.
Return a bundle of atlas state | [
"Start",
"up",
"the",
"atlas",
"node",
".",
"Return",
"a",
"bundle",
"of",
"atlas",
"state"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3584-L3595 | train | 225,312 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_node_start | def atlas_node_start(atlas_state):
"""
Start up atlas threads
"""
for component in atlas_state.keys():
log.debug("Starting Atlas component '%s'" % component)
atlas_state[component].start() | python | def atlas_node_start(atlas_state):
"""
Start up atlas threads
"""
for component in atlas_state.keys():
log.debug("Starting Atlas component '%s'" % component)
atlas_state[component].start() | [
"def",
"atlas_node_start",
"(",
"atlas_state",
")",
":",
"for",
"component",
"in",
"atlas_state",
".",
"keys",
"(",
")",
":",
"log",
".",
"debug",
"(",
"\"Starting Atlas component '%s'\"",
"%",
"component",
")",
"atlas_state",
"[",
"component",
"]",
".",
"star... | Start up atlas threads | [
"Start",
"up",
"atlas",
"threads"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3597-L3603 | train | 225,313 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_node_add_callback | def atlas_node_add_callback(atlas_state, callback_name, callback):
"""
Add a callback to the initialized atlas state
"""
if callback_name == 'store_zonefile':
atlas_state['zonefile_crawler'].set_store_zonefile_callback(callback)
else:
raise ValueError("Unrecognized callback {}".format(callback_name)) | python | def atlas_node_add_callback(atlas_state, callback_name, callback):
"""
Add a callback to the initialized atlas state
"""
if callback_name == 'store_zonefile':
atlas_state['zonefile_crawler'].set_store_zonefile_callback(callback)
else:
raise ValueError("Unrecognized callback {}".format(callback_name)) | [
"def",
"atlas_node_add_callback",
"(",
"atlas_state",
",",
"callback_name",
",",
"callback",
")",
":",
"if",
"callback_name",
"==",
"'store_zonefile'",
":",
"atlas_state",
"[",
"'zonefile_crawler'",
"]",
".",
"set_store_zonefile_callback",
"(",
"callback",
")",
"else"... | Add a callback to the initialized atlas state | [
"Add",
"a",
"callback",
"to",
"the",
"initialized",
"atlas",
"state"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3606-L3614 | train | 225,314 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_node_stop | def atlas_node_stop( atlas_state ):
"""
Stop the atlas node threads
"""
for component in atlas_state.keys():
log.debug("Stopping Atlas component '%s'" % component)
atlas_state[component].ask_join()
atlas_state[component].join()
return True | python | def atlas_node_stop( atlas_state ):
"""
Stop the atlas node threads
"""
for component in atlas_state.keys():
log.debug("Stopping Atlas component '%s'" % component)
atlas_state[component].ask_join()
atlas_state[component].join()
return True | [
"def",
"atlas_node_stop",
"(",
"atlas_state",
")",
":",
"for",
"component",
"in",
"atlas_state",
".",
"keys",
"(",
")",
":",
"log",
".",
"debug",
"(",
"\"Stopping Atlas component '%s'\"",
"%",
"component",
")",
"atlas_state",
"[",
"component",
"]",
".",
"ask_j... | Stop the atlas node threads | [
"Stop",
"the",
"atlas",
"node",
"threads"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3617-L3626 | train | 225,315 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasPeerCrawler.canonical_peer | def canonical_peer( self, peer ):
"""
Get the canonical peer name
"""
their_host, their_port = url_to_host_port( peer )
if their_host in ['127.0.0.1', '::1']:
their_host = 'localhost'
return "%s:%s" % (their_host, their_port) | python | def canonical_peer( self, peer ):
"""
Get the canonical peer name
"""
their_host, their_port = url_to_host_port( peer )
if their_host in ['127.0.0.1', '::1']:
their_host = 'localhost'
return "%s:%s" % (their_host, their_port) | [
"def",
"canonical_peer",
"(",
"self",
",",
"peer",
")",
":",
"their_host",
",",
"their_port",
"=",
"url_to_host_port",
"(",
"peer",
")",
"if",
"their_host",
"in",
"[",
"'127.0.0.1'",
",",
"'::1'",
"]",
":",
"their_host",
"=",
"'localhost'",
"return",
"\"%s:%... | Get the canonical peer name | [
"Get",
"the",
"canonical",
"peer",
"name"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2713-L2722 | train | 225,316 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasPeerCrawler.remove_unhealthy_peers | def remove_unhealthy_peers( self, count, con=None, path=None, peer_table=None, min_request_count=10, min_health=MIN_PEER_HEALTH ):
"""
Remove up to @count unhealthy peers
Return the list of peers we removed
"""
if path is None:
path = self.atlasdb_path
removed = []
rank_peer_list = atlas_rank_peers_by_health( peer_table=peer_table, with_rank=True )
for rank, peer in rank_peer_list:
reqcount = atlas_peer_get_request_count( peer, peer_table=peer_table )
if reqcount >= min_request_count and rank < min_health and not atlas_peer_is_whitelisted( peer, peer_table=peer_table ) and not atlas_peer_is_blacklisted( peer, peer_table=peer_table ):
removed.append( peer )
random.shuffle(removed)
if len(removed) > count:
removed = removed[:count]
for peer in removed:
log.debug("Remove unhealthy peer %s" % (peer))
atlasdb_remove_peer( peer, con=con, path=path, peer_table=peer_table )
return removed | python | def remove_unhealthy_peers( self, count, con=None, path=None, peer_table=None, min_request_count=10, min_health=MIN_PEER_HEALTH ):
"""
Remove up to @count unhealthy peers
Return the list of peers we removed
"""
if path is None:
path = self.atlasdb_path
removed = []
rank_peer_list = atlas_rank_peers_by_health( peer_table=peer_table, with_rank=True )
for rank, peer in rank_peer_list:
reqcount = atlas_peer_get_request_count( peer, peer_table=peer_table )
if reqcount >= min_request_count and rank < min_health and not atlas_peer_is_whitelisted( peer, peer_table=peer_table ) and not atlas_peer_is_blacklisted( peer, peer_table=peer_table ):
removed.append( peer )
random.shuffle(removed)
if len(removed) > count:
removed = removed[:count]
for peer in removed:
log.debug("Remove unhealthy peer %s" % (peer))
atlasdb_remove_peer( peer, con=con, path=path, peer_table=peer_table )
return removed | [
"def",
"remove_unhealthy_peers",
"(",
"self",
",",
"count",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
",",
"peer_table",
"=",
"None",
",",
"min_request_count",
"=",
"10",
",",
"min_health",
"=",
"MIN_PEER_HEALTH",
")",
":",
"if",
"path",
"is",
"... | Remove up to @count unhealthy peers
Return the list of peers we removed | [
"Remove",
"up",
"to"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2836-L2860 | train | 225,317 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasPeerCrawler.get_current_peers | def get_current_peers( self, peer_table=None ):
"""
Get the current set of peers
"""
# get current peers
current_peers = None
with AtlasPeerTableLocked(peer_table) as ptbl:
current_peers = ptbl.keys()[:]
return current_peers | python | def get_current_peers( self, peer_table=None ):
"""
Get the current set of peers
"""
# get current peers
current_peers = None
with AtlasPeerTableLocked(peer_table) as ptbl:
current_peers = ptbl.keys()[:]
return current_peers | [
"def",
"get_current_peers",
"(",
"self",
",",
"peer_table",
"=",
"None",
")",
":",
"# get current peers",
"current_peers",
"=",
"None",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"current_peers",
"=",
"ptbl",
".",
"keys",
"(",
"... | Get the current set of peers | [
"Get",
"the",
"current",
"set",
"of",
"peers"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2945-L2955 | train | 225,318 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasPeerCrawler.canonical_new_peer_list | def canonical_new_peer_list( self, peers_to_add ):
"""
Make a list of canonical new peers, using the
self.new_peers and the given peers to add
Return a shuffled list of canonicalized host:port
strings.
"""
new_peers = list(set(self.new_peers + peers_to_add))
random.shuffle( new_peers )
# canonicalize
tmp = []
for peer in new_peers:
tmp.append( self.canonical_peer(peer) )
new_peers = tmp
# don't talk to myself
if self.my_hostport in new_peers:
new_peers.remove(self.my_hostport)
return new_peers | python | def canonical_new_peer_list( self, peers_to_add ):
"""
Make a list of canonical new peers, using the
self.new_peers and the given peers to add
Return a shuffled list of canonicalized host:port
strings.
"""
new_peers = list(set(self.new_peers + peers_to_add))
random.shuffle( new_peers )
# canonicalize
tmp = []
for peer in new_peers:
tmp.append( self.canonical_peer(peer) )
new_peers = tmp
# don't talk to myself
if self.my_hostport in new_peers:
new_peers.remove(self.my_hostport)
return new_peers | [
"def",
"canonical_new_peer_list",
"(",
"self",
",",
"peers_to_add",
")",
":",
"new_peers",
"=",
"list",
"(",
"set",
"(",
"self",
".",
"new_peers",
"+",
"peers_to_add",
")",
")",
"random",
".",
"shuffle",
"(",
"new_peers",
")",
"# canonicalize",
"tmp",
"=",
... | Make a list of canonical new peers, using the
self.new_peers and the given peers to add
Return a shuffled list of canonicalized host:port
strings. | [
"Make",
"a",
"list",
"of",
"canonical",
"new",
"peers",
"using",
"the",
"self",
".",
"new_peers",
"and",
"the",
"given",
"peers",
"to",
"add"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2958-L2980 | train | 225,319 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasHealthChecker.step | def step(self, con=None, path=None, peer_table=None, local_inv=None):
"""
Find peers with stale zonefile inventory data,
and refresh them.
Return True on success
Return False on error
"""
if path is None:
path = self.atlasdb_path
peer_hostports = []
stale_peers = []
num_peers = None
peer_hostports = None
with AtlasPeerTableLocked(peer_table) as ptbl:
num_peers = len(ptbl.keys())
peer_hostports = ptbl.keys()[:]
# who are we going to ping?
# someone we haven't pinged in a while, chosen at random
for peer in peer_hostports:
if not atlas_peer_has_fresh_zonefile_inventory( peer, peer_table=ptbl ):
# haven't talked to this peer in a while
stale_peers.append(peer)
log.debug("Peer %s has a stale zonefile inventory" % peer)
if len(stale_peers) > 0:
log.debug("Refresh zonefile inventories for %s peers" % len(stale_peers))
for peer_hostport in stale_peers:
# refresh everyone
log.debug("%s: Refresh zonefile inventory for %s" % (self.hostport, peer_hostport))
res = atlas_peer_refresh_zonefile_inventory( self.hostport, peer_hostport, 0, con=con, path=path, peer_table=peer_table, local_inv=local_inv )
if res is None:
log.warning("Failed to refresh zonefile inventory for %s" % peer_hostport)
return | python | def step(self, con=None, path=None, peer_table=None, local_inv=None):
"""
Find peers with stale zonefile inventory data,
and refresh them.
Return True on success
Return False on error
"""
if path is None:
path = self.atlasdb_path
peer_hostports = []
stale_peers = []
num_peers = None
peer_hostports = None
with AtlasPeerTableLocked(peer_table) as ptbl:
num_peers = len(ptbl.keys())
peer_hostports = ptbl.keys()[:]
# who are we going to ping?
# someone we haven't pinged in a while, chosen at random
for peer in peer_hostports:
if not atlas_peer_has_fresh_zonefile_inventory( peer, peer_table=ptbl ):
# haven't talked to this peer in a while
stale_peers.append(peer)
log.debug("Peer %s has a stale zonefile inventory" % peer)
if len(stale_peers) > 0:
log.debug("Refresh zonefile inventories for %s peers" % len(stale_peers))
for peer_hostport in stale_peers:
# refresh everyone
log.debug("%s: Refresh zonefile inventory for %s" % (self.hostport, peer_hostport))
res = atlas_peer_refresh_zonefile_inventory( self.hostport, peer_hostport, 0, con=con, path=path, peer_table=peer_table, local_inv=local_inv )
if res is None:
log.warning("Failed to refresh zonefile inventory for %s" % peer_hostport)
return | [
"def",
"step",
"(",
"self",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
",",
"peer_table",
"=",
"None",
",",
"local_inv",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"self",
".",
"atlasdb_path",
"peer_hostports",
"=",
... | Find peers with stale zonefile inventory data,
and refresh them.
Return True on success
Return False on error | [
"Find",
"peers",
"with",
"stale",
"zonefile",
"inventory",
"data",
"and",
"refresh",
"them",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3171-L3210 | train | 225,320 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasHealthChecker.run | def run(self, peer_table=None):
"""
Loop forever, pinging someone every pass.
"""
self.running = True
while self.running:
local_inv = atlas_get_zonefile_inventory()
t1 = time_now()
self.step( peer_table=peer_table, local_inv=local_inv, path=self.atlasdb_path )
t2 = time_now()
# don't go too fast
if t2 - t1 < PEER_HEALTH_NEIGHBOR_WORK_INTERVAL:
deadline = time_now() + PEER_HEALTH_NEIGHBOR_WORK_INTERVAL - (t2 - t1)
while time_now() < deadline and self.running:
time_sleep( self.hostport, self.__class__.__name__, 1.0 )
if not self.running:
break | python | def run(self, peer_table=None):
"""
Loop forever, pinging someone every pass.
"""
self.running = True
while self.running:
local_inv = atlas_get_zonefile_inventory()
t1 = time_now()
self.step( peer_table=peer_table, local_inv=local_inv, path=self.atlasdb_path )
t2 = time_now()
# don't go too fast
if t2 - t1 < PEER_HEALTH_NEIGHBOR_WORK_INTERVAL:
deadline = time_now() + PEER_HEALTH_NEIGHBOR_WORK_INTERVAL - (t2 - t1)
while time_now() < deadline and self.running:
time_sleep( self.hostport, self.__class__.__name__, 1.0 )
if not self.running:
break | [
"def",
"run",
"(",
"self",
",",
"peer_table",
"=",
"None",
")",
":",
"self",
".",
"running",
"=",
"True",
"while",
"self",
".",
"running",
":",
"local_inv",
"=",
"atlas_get_zonefile_inventory",
"(",
")",
"t1",
"=",
"time_now",
"(",
")",
"self",
".",
"s... | Loop forever, pinging someone every pass. | [
"Loop",
"forever",
"pinging",
"someone",
"every",
"pass",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3213-L3231 | train | 225,321 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasZonefileCrawler.set_zonefile_present | def set_zonefile_present(self, zfhash, block_height, con=None, path=None):
"""
Set a zonefile as present, and if it was previously absent, inform the storage listener
"""
was_present = atlasdb_set_zonefile_present( zfhash, True, con=con, path=path )
# tell anyone who cares that we got this zone file, if it was new
if not was_present and self.store_zonefile_cb:
log.debug('{} was new, so passing it along to zonefile storage watchers...'.format(zfhash))
self.store_zonefile_cb(zfhash, block_height)
else:
log.debug('{} was seen before, so not passing it along to zonefile storage watchers'.format(zfhash)) | python | def set_zonefile_present(self, zfhash, block_height, con=None, path=None):
"""
Set a zonefile as present, and if it was previously absent, inform the storage listener
"""
was_present = atlasdb_set_zonefile_present( zfhash, True, con=con, path=path )
# tell anyone who cares that we got this zone file, if it was new
if not was_present and self.store_zonefile_cb:
log.debug('{} was new, so passing it along to zonefile storage watchers...'.format(zfhash))
self.store_zonefile_cb(zfhash, block_height)
else:
log.debug('{} was seen before, so not passing it along to zonefile storage watchers'.format(zfhash)) | [
"def",
"set_zonefile_present",
"(",
"self",
",",
"zfhash",
",",
"block_height",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"was_present",
"=",
"atlasdb_set_zonefile_present",
"(",
"zfhash",
",",
"True",
",",
"con",
"=",
"con",
",",
"path"... | Set a zonefile as present, and if it was previously absent, inform the storage listener | [
"Set",
"a",
"zonefile",
"as",
"present",
"and",
"if",
"it",
"was",
"previously",
"absent",
"inform",
"the",
"storage",
"listener"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3257-L3268 | train | 225,322 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasZonefileCrawler.find_zonefile_origins | def find_zonefile_origins( self, missing_zfinfo, peer_hostports ):
"""
Find out which peers can serve which zonefiles
"""
zonefile_origins = {} # map peer hostport to list of zonefile hashes
# which peers can serve each zonefile?
for zfhash in missing_zfinfo.keys():
for peer_hostport in peer_hostports:
if not zonefile_origins.has_key(peer_hostport):
zonefile_origins[peer_hostport] = []
if peer_hostport in missing_zfinfo[zfhash]['peers']:
zonefile_origins[peer_hostport].append( zfhash )
return zonefile_origins | python | def find_zonefile_origins( self, missing_zfinfo, peer_hostports ):
"""
Find out which peers can serve which zonefiles
"""
zonefile_origins = {} # map peer hostport to list of zonefile hashes
# which peers can serve each zonefile?
for zfhash in missing_zfinfo.keys():
for peer_hostport in peer_hostports:
if not zonefile_origins.has_key(peer_hostport):
zonefile_origins[peer_hostport] = []
if peer_hostport in missing_zfinfo[zfhash]['peers']:
zonefile_origins[peer_hostport].append( zfhash )
return zonefile_origins | [
"def",
"find_zonefile_origins",
"(",
"self",
",",
"missing_zfinfo",
",",
"peer_hostports",
")",
":",
"zonefile_origins",
"=",
"{",
"}",
"# map peer hostport to list of zonefile hashes",
"# which peers can serve each zonefile?",
"for",
"zfhash",
"in",
"missing_zfinfo",
".",
... | Find out which peers can serve which zonefiles | [
"Find",
"out",
"which",
"peers",
"can",
"serve",
"which",
"zonefiles"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3316-L3331 | train | 225,323 |
blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasZonefilePusher.step | def step( self, peer_table=None, zonefile_queue=None, path=None ):
"""
Run one step of this algorithm.
Push the zonefile to all the peers that need it.
Return the number of peers we sent to
"""
if path is None:
path = self.atlasdb_path
if BLOCKSTACK_TEST:
log.debug("%s: %s step" % (self.hostport, self.__class__.__name__))
if self.push_timeout is None:
self.push_timeout = atlas_push_zonefiles_timeout()
zfinfo = atlas_zonefile_push_dequeue( zonefile_queue=zonefile_queue )
if zfinfo is None:
return 0
zfhash = zfinfo['zonefile_hash']
zfdata_txt = zfinfo['zonefile']
name = zfinfo['name']
txid = zfinfo['txid']
zfbits = atlasdb_get_zonefile_bits( zfhash, path=path )
if len(zfbits) == 0:
# not recognized
return 0
# it's a valid zonefile. store it.
rc = add_atlas_zonefile_data( str(zfdata_txt), self.zonefile_dir )
if not rc:
log.error("Failed to replicate zonefile %s to external storage" % zfhash)
peers = None
# see if we can send this somewhere
with AtlasPeerTableLocked(peer_table) as ptbl:
peers = atlas_zonefile_find_push_peers( zfhash, peer_table=ptbl, zonefile_bits=zfbits )
if len(peers) == 0:
# everyone has it
log.debug("%s: All peers have zonefile %s" % (self.hostport, zfhash))
return 0
# push it off
ret = 0
for peer in peers:
log.debug("%s: Push to %s" % (self.hostport, peer))
atlas_zonefile_push( self.hostport, peer, zfdata_txt, timeout=self.push_timeout )
ret += 1
return ret | python | def step( self, peer_table=None, zonefile_queue=None, path=None ):
"""
Run one step of this algorithm.
Push the zonefile to all the peers that need it.
Return the number of peers we sent to
"""
if path is None:
path = self.atlasdb_path
if BLOCKSTACK_TEST:
log.debug("%s: %s step" % (self.hostport, self.__class__.__name__))
if self.push_timeout is None:
self.push_timeout = atlas_push_zonefiles_timeout()
zfinfo = atlas_zonefile_push_dequeue( zonefile_queue=zonefile_queue )
if zfinfo is None:
return 0
zfhash = zfinfo['zonefile_hash']
zfdata_txt = zfinfo['zonefile']
name = zfinfo['name']
txid = zfinfo['txid']
zfbits = atlasdb_get_zonefile_bits( zfhash, path=path )
if len(zfbits) == 0:
# not recognized
return 0
# it's a valid zonefile. store it.
rc = add_atlas_zonefile_data( str(zfdata_txt), self.zonefile_dir )
if not rc:
log.error("Failed to replicate zonefile %s to external storage" % zfhash)
peers = None
# see if we can send this somewhere
with AtlasPeerTableLocked(peer_table) as ptbl:
peers = atlas_zonefile_find_push_peers( zfhash, peer_table=ptbl, zonefile_bits=zfbits )
if len(peers) == 0:
# everyone has it
log.debug("%s: All peers have zonefile %s" % (self.hostport, zfhash))
return 0
# push it off
ret = 0
for peer in peers:
log.debug("%s: Push to %s" % (self.hostport, peer))
atlas_zonefile_push( self.hostport, peer, zfdata_txt, timeout=self.push_timeout )
ret += 1
return ret | [
"def",
"step",
"(",
"self",
",",
"peer_table",
"=",
"None",
",",
"zonefile_queue",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"self",
".",
"atlasdb_path",
"if",
"BLOCKSTACK_TEST",
":",
"log",
".",
"... | Run one step of this algorithm.
Push the zonefile to all the peers that need it.
Return the number of peers we sent to | [
"Run",
"one",
"step",
"of",
"this",
"algorithm",
".",
"Push",
"the",
"zonefile",
"to",
"all",
"the",
"peers",
"that",
"need",
"it",
".",
"Return",
"the",
"number",
"of",
"peers",
"we",
"sent",
"to"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3509-L3561 | train | 225,324 |
blockstack/blockstack-core | blockstack/lib/queue.py | queuedb_create | def queuedb_create(path):
"""
Create a sqlite3 db at the given path.
Create all the tables and indexes we need.
Raises if the table already exists
"""
global QUEUE_SQL, ERROR_SQL
lines = [l + ";" for l in QUEUE_SQL.split(";")]
con = sqlite3.connect( path, isolation_level=None )
db_query_execute(con, 'pragma mmap_size=536870912', ())
for line in lines:
db_query_execute(con, line, ())
con.commit()
con.row_factory = queuedb_row_factory
return con | python | def queuedb_create(path):
"""
Create a sqlite3 db at the given path.
Create all the tables and indexes we need.
Raises if the table already exists
"""
global QUEUE_SQL, ERROR_SQL
lines = [l + ";" for l in QUEUE_SQL.split(";")]
con = sqlite3.connect( path, isolation_level=None )
db_query_execute(con, 'pragma mmap_size=536870912', ())
for line in lines:
db_query_execute(con, line, ())
con.commit()
con.row_factory = queuedb_row_factory
return con | [
"def",
"queuedb_create",
"(",
"path",
")",
":",
"global",
"QUEUE_SQL",
",",
"ERROR_SQL",
"lines",
"=",
"[",
"l",
"+",
"\";\"",
"for",
"l",
"in",
"QUEUE_SQL",
".",
"split",
"(",
"\";\"",
")",
"]",
"con",
"=",
"sqlite3",
".",
"connect",
"(",
"path",
",... | Create a sqlite3 db at the given path.
Create all the tables and indexes we need.
Raises if the table already exists | [
"Create",
"a",
"sqlite3",
"db",
"at",
"the",
"given",
"path",
".",
"Create",
"all",
"the",
"tables",
"and",
"indexes",
"we",
"need",
".",
"Raises",
"if",
"the",
"table",
"already",
"exists"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/queue.py#L46-L63 | train | 225,325 |
blockstack/blockstack-core | blockstack/lib/queue.py | queuedb_row_factory | def queuedb_row_factory(cursor, row):
"""
Dict row factory
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d | python | def queuedb_row_factory(cursor, row):
"""
Dict row factory
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d | [
"def",
"queuedb_row_factory",
"(",
"cursor",
",",
"row",
")",
":",
"d",
"=",
"{",
"}",
"for",
"idx",
",",
"col",
"in",
"enumerate",
"(",
"cursor",
".",
"description",
")",
":",
"d",
"[",
"col",
"[",
"0",
"]",
"]",
"=",
"row",
"[",
"idx",
"]",
"... | Dict row factory | [
"Dict",
"row",
"factory"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/queue.py#L75-L83 | train | 225,326 |
blockstack/blockstack-core | blockstack/lib/queue.py | queuedb_findall | def queuedb_findall(path, queue_id, name=None, offset=None, limit=None):
"""
Get all queued entries for a queue and a name.
If name is None, then find all queue entries
Return the rows on success (empty list if not found)
Raise on error
"""
sql = "SELECT * FROM queue WHERE queue_id = ? ORDER BY rowid ASC"
args = (queue_id,)
if name:
sql += ' AND name = ?'
args += (name,)
if limit:
sql += ' LIMIT ?'
args += (limit,)
if offset:
sql += ' OFFSET ?'
args += (offset,)
sql += ';'
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cur = db.cursor()
rows = queuedb_query_execute(cur, sql, args)
count = 0
ret = []
for row in rows:
dat = {}
dat.update(row)
ret.append(dat)
db.close()
return ret | python | def queuedb_findall(path, queue_id, name=None, offset=None, limit=None):
"""
Get all queued entries for a queue and a name.
If name is None, then find all queue entries
Return the rows on success (empty list if not found)
Raise on error
"""
sql = "SELECT * FROM queue WHERE queue_id = ? ORDER BY rowid ASC"
args = (queue_id,)
if name:
sql += ' AND name = ?'
args += (name,)
if limit:
sql += ' LIMIT ?'
args += (limit,)
if offset:
sql += ' OFFSET ?'
args += (offset,)
sql += ';'
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cur = db.cursor()
rows = queuedb_query_execute(cur, sql, args)
count = 0
ret = []
for row in rows:
dat = {}
dat.update(row)
ret.append(dat)
db.close()
return ret | [
"def",
"queuedb_findall",
"(",
"path",
",",
"queue_id",
",",
"name",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"sql",
"=",
"\"SELECT * FROM queue WHERE queue_id = ? ORDER BY rowid ASC\"",
"args",
"=",
"(",
"queue_id",
",",
"... | Get all queued entries for a queue and a name.
If name is None, then find all queue entries
Return the rows on success (empty list if not found)
Raise on error | [
"Get",
"all",
"queued",
"entries",
"for",
"a",
"queue",
"and",
"a",
"name",
".",
"If",
"name",
"is",
"None",
"then",
"find",
"all",
"queue",
"entries"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/queue.py#L103-L143 | train | 225,327 |
blockstack/blockstack-core | blockstack/lib/queue.py | queuedb_append | def queuedb_append(path, queue_id, name, data):
"""
Append an element to the back of the queue.
Return True on success
Raise on error
"""
sql = "INSERT INTO queue VALUES (?,?,?);"
args = (name, queue_id, data)
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cur = db.cursor()
res = queuedb_query_execute(cur, sql, args)
db.commit()
db.close()
return True | python | def queuedb_append(path, queue_id, name, data):
"""
Append an element to the back of the queue.
Return True on success
Raise on error
"""
sql = "INSERT INTO queue VALUES (?,?,?);"
args = (name, queue_id, data)
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cur = db.cursor()
res = queuedb_query_execute(cur, sql, args)
db.commit()
db.close()
return True | [
"def",
"queuedb_append",
"(",
"path",
",",
"queue_id",
",",
"name",
",",
"data",
")",
":",
"sql",
"=",
"\"INSERT INTO queue VALUES (?,?,?);\"",
"args",
"=",
"(",
"name",
",",
"queue_id",
",",
"data",
")",
"db",
"=",
"queuedb_open",
"(",
"path",
")",
"if",
... | Append an element to the back of the queue.
Return True on success
Raise on error | [
"Append",
"an",
"element",
"to",
"the",
"back",
"of",
"the",
"queue",
".",
"Return",
"True",
"on",
"success",
"Raise",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/queue.py#L146-L164 | train | 225,328 |
blockstack/blockstack-core | blockstack/lib/queue.py | queuedb_remove | def queuedb_remove(path, entry, cur=None):
"""
Remove an element from a queue.
Return True on success
Raise on error
"""
sql = "DELETE FROM queue WHERE queue_id = ? AND name = ?;"
args = (entry['queue_id'], entry['name'])
cursor = None
if cur:
cursor = cur
else:
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cursor = db.cursor()
res = queuedb_query_execute(cursor, sql, args)
if cur is None:
db.commit()
db.close()
return True | python | def queuedb_remove(path, entry, cur=None):
"""
Remove an element from a queue.
Return True on success
Raise on error
"""
sql = "DELETE FROM queue WHERE queue_id = ? AND name = ?;"
args = (entry['queue_id'], entry['name'])
cursor = None
if cur:
cursor = cur
else:
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cursor = db.cursor()
res = queuedb_query_execute(cursor, sql, args)
if cur is None:
db.commit()
db.close()
return True | [
"def",
"queuedb_remove",
"(",
"path",
",",
"entry",
",",
"cur",
"=",
"None",
")",
":",
"sql",
"=",
"\"DELETE FROM queue WHERE queue_id = ? AND name = ?;\"",
"args",
"=",
"(",
"entry",
"[",
"'queue_id'",
"]",
",",
"entry",
"[",
"'name'",
"]",
")",
"cursor",
"... | Remove an element from a queue.
Return True on success
Raise on error | [
"Remove",
"an",
"element",
"from",
"a",
"queue",
".",
"Return",
"True",
"on",
"success",
"Raise",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/queue.py#L167-L192 | train | 225,329 |
blockstack/blockstack-core | blockstack/lib/queue.py | queuedb_removeall | def queuedb_removeall(path, entries):
"""
Remove all entries from a queue
"""
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cursor = db.cursor()
queuedb_query_execute(cursor, 'BEGIN', ())
for entry in entries:
queuedb_remove(path, entry, cur=cursor)
queuedb_query_execute(cursor, 'END', ())
db.commit()
db.close()
return True | python | def queuedb_removeall(path, entries):
"""
Remove all entries from a queue
"""
db = queuedb_open(path)
if db is None:
raise Exception("Failed to open %s" % path)
cursor = db.cursor()
queuedb_query_execute(cursor, 'BEGIN', ())
for entry in entries:
queuedb_remove(path, entry, cur=cursor)
queuedb_query_execute(cursor, 'END', ())
db.commit()
db.close()
return True | [
"def",
"queuedb_removeall",
"(",
"path",
",",
"entries",
")",
":",
"db",
"=",
"queuedb_open",
"(",
"path",
")",
"if",
"db",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Failed to open %s\"",
"%",
"path",
")",
"cursor",
"=",
"db",
".",
"cursor",
"(",
... | Remove all entries from a queue | [
"Remove",
"all",
"entries",
"from",
"a",
"queue"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/queue.py#L195-L213 | train | 225,330 |
blockstack/blockstack-core | blockstack/lib/operations/register.py | check_payment_in_stacks | def check_payment_in_stacks(state_engine, nameop, state_op_type, fee_block_id):
"""
Verify that if tokens were paid for a name priced in BTC, that enough were paid.
Does not check account balances or namespace types; it only inspects the transaction data.
Returns {'status': True, 'tokens_paid': ..., 'token_units': ...} on success
Returns {'status': False} on error
"""
name = nameop['name']
namespace_id = get_namespace_from_name(name)
name_without_namespace = get_name_from_fq_name(name)
namespace = state_engine.get_namespace( namespace_id )
stacks_payment_info = get_stacks_payment(state_engine, nameop, state_op_type)
if stacks_payment_info['status']:
# got a stacks payment! check price and make sure we paid the right amount
tokens_paid = stacks_payment_info['tokens_paid']
token_units = stacks_payment_info['token_units']
log.debug('Transaction pays {} units of {} for {}, even though its namespace was priced in BTC'.format(tokens_paid, token_units, name))
stacks_price = price_name_stacks(name_without_namespace, namespace, fee_block_id) # price in Stacks, but following the BTC-given price curve
res = check_token_payment(name, stacks_price, stacks_payment_info)
if res['status']:
# success
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}
return {'status': False} | python | def check_payment_in_stacks(state_engine, nameop, state_op_type, fee_block_id):
"""
Verify that if tokens were paid for a name priced in BTC, that enough were paid.
Does not check account balances or namespace types; it only inspects the transaction data.
Returns {'status': True, 'tokens_paid': ..., 'token_units': ...} on success
Returns {'status': False} on error
"""
name = nameop['name']
namespace_id = get_namespace_from_name(name)
name_without_namespace = get_name_from_fq_name(name)
namespace = state_engine.get_namespace( namespace_id )
stacks_payment_info = get_stacks_payment(state_engine, nameop, state_op_type)
if stacks_payment_info['status']:
# got a stacks payment! check price and make sure we paid the right amount
tokens_paid = stacks_payment_info['tokens_paid']
token_units = stacks_payment_info['token_units']
log.debug('Transaction pays {} units of {} for {}, even though its namespace was priced in BTC'.format(tokens_paid, token_units, name))
stacks_price = price_name_stacks(name_without_namespace, namespace, fee_block_id) # price in Stacks, but following the BTC-given price curve
res = check_token_payment(name, stacks_price, stacks_payment_info)
if res['status']:
# success
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}
return {'status': False} | [
"def",
"check_payment_in_stacks",
"(",
"state_engine",
",",
"nameop",
",",
"state_op_type",
",",
"fee_block_id",
")",
":",
"name",
"=",
"nameop",
"[",
"'name'",
"]",
"namespace_id",
"=",
"get_namespace_from_name",
"(",
"name",
")",
"name_without_namespace",
"=",
"... | Verify that if tokens were paid for a name priced in BTC, that enough were paid.
Does not check account balances or namespace types; it only inspects the transaction data.
Returns {'status': True, 'tokens_paid': ..., 'token_units': ...} on success
Returns {'status': False} on error | [
"Verify",
"that",
"if",
"tokens",
"were",
"paid",
"for",
"a",
"name",
"priced",
"in",
"BTC",
"that",
"enough",
"were",
"paid",
".",
"Does",
"not",
"check",
"account",
"balances",
"or",
"namespace",
"types",
";",
"it",
"only",
"inspects",
"the",
"transactio... | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/register.py#L201-L228 | train | 225,331 |
blockstack/blockstack-core | blockstack/lib/operations/register.py | check_payment | def check_payment(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):
"""
Verify that the right payment was made, in the right cryptocurrency units.
Does not check any accounts or modify the nameop in any way; it only checks that the name was paid for by the transaction.
NOTE: if state_op_type is NAME_REGISTRATION, you will need to have called state_create_put_preorder() before calling this method!
Returns {'status': True, 'tokens_paid': tokens_paid, 'token_units': ...} if the payment information is correct.
Returns {'status': False} if not
"""
assert state_op_type in ['NAME_REGISTRATION', 'NAME_RENEWAL'], 'Invalid op type {}'.format(state_op_type)
assert name_fee is not None
assert isinstance(name_fee, (int,long))
name = nameop['name']
namespace_id = get_namespace_from_name(name)
namespace = state_engine.get_namespace( namespace_id )
res = None
log.debug('{} is a version-0x{} namespace'.format(namespace['namespace_id'], namespace['version']))
# check name fee, depending on which version.
if namespace['version'] == NAMESPACE_VERSION_PAY_TO_BURN:
res = check_payment_v1(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)
elif namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR:
res = check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)
elif namespace['version'] == NAMESPACE_VERSION_PAY_WITH_STACKS:
res = check_payment_v3(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)
else:
# unrecognized namespace rules
log.warning("Namespace {} has version bits 0x{:x}, which has unknown registration rules".format(namespace['namespace_id'], namespace['version']))
return {'status': False}
if not res['status']:
return res
tokens_paid = res['tokens_paid']
token_units = res['token_units']
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units} | python | def check_payment(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):
"""
Verify that the right payment was made, in the right cryptocurrency units.
Does not check any accounts or modify the nameop in any way; it only checks that the name was paid for by the transaction.
NOTE: if state_op_type is NAME_REGISTRATION, you will need to have called state_create_put_preorder() before calling this method!
Returns {'status': True, 'tokens_paid': tokens_paid, 'token_units': ...} if the payment information is correct.
Returns {'status': False} if not
"""
assert state_op_type in ['NAME_REGISTRATION', 'NAME_RENEWAL'], 'Invalid op type {}'.format(state_op_type)
assert name_fee is not None
assert isinstance(name_fee, (int,long))
name = nameop['name']
namespace_id = get_namespace_from_name(name)
namespace = state_engine.get_namespace( namespace_id )
res = None
log.debug('{} is a version-0x{} namespace'.format(namespace['namespace_id'], namespace['version']))
# check name fee, depending on which version.
if namespace['version'] == NAMESPACE_VERSION_PAY_TO_BURN:
res = check_payment_v1(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)
elif namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR:
res = check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)
elif namespace['version'] == NAMESPACE_VERSION_PAY_WITH_STACKS:
res = check_payment_v3(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)
else:
# unrecognized namespace rules
log.warning("Namespace {} has version bits 0x{:x}, which has unknown registration rules".format(namespace['namespace_id'], namespace['version']))
return {'status': False}
if not res['status']:
return res
tokens_paid = res['tokens_paid']
token_units = res['token_units']
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units} | [
"def",
"check_payment",
"(",
"state_engine",
",",
"state_op_type",
",",
"nameop",
",",
"fee_block_id",
",",
"token_address",
",",
"burn_address",
",",
"name_fee",
",",
"block_id",
")",
":",
"assert",
"state_op_type",
"in",
"[",
"'NAME_REGISTRATION'",
",",
"'NAME_R... | Verify that the right payment was made, in the right cryptocurrency units.
Does not check any accounts or modify the nameop in any way; it only checks that the name was paid for by the transaction.
NOTE: if state_op_type is NAME_REGISTRATION, you will need to have called state_create_put_preorder() before calling this method!
Returns {'status': True, 'tokens_paid': tokens_paid, 'token_units': ...} if the payment information is correct.
Returns {'status': False} if not | [
"Verify",
"that",
"the",
"right",
"payment",
"was",
"made",
"in",
"the",
"right",
"cryptocurrency",
"units",
".",
"Does",
"not",
"check",
"any",
"accounts",
"or",
"modify",
"the",
"nameop",
"in",
"any",
"way",
";",
"it",
"only",
"checks",
"that",
"the",
... | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/register.py#L405-L448 | train | 225,332 |
blockstack/blockstack-core | blockstack/lib/operations/namespacepreorder.py | check | def check( state_engine, nameop, block_id, checked_ops ):
"""
Given a NAMESPACE_PREORDER nameop, see if we can preorder it.
It must be unqiue.
Return True if accepted.
Return False if not.
"""
namespace_id_hash = nameop['preorder_hash']
consensus_hash = nameop['consensus_hash']
token_fee = nameop['token_fee']
# cannot be preordered already
if not state_engine.is_new_namespace_preorder( namespace_id_hash ):
log.warning("Namespace preorder '%s' already in use" % namespace_id_hash)
return False
# has to have a reasonable consensus hash
if not state_engine.is_consensus_hash_valid( block_id, consensus_hash ):
valid_consensus_hashes = state_engine.get_valid_consensus_hashes( block_id )
log.warning("Invalid consensus hash '%s': expected any of %s" % (consensus_hash, ",".join( valid_consensus_hashes )) )
return False
# has to have paid a fee
if not 'op_fee' in nameop:
log.warning("Missing namespace preorder fee")
return False
# paid to the right burn address
if nameop['burn_address'] != BLOCKSTACK_BURN_ADDRESS:
log.warning("Invalid burn address: expected {}, got {}".format(BLOCKSTACK_BURN_ADDRESS, nameop['burn_address']))
return False
# token burn fee must be present, if we're in the right epoch for it
epoch_features = get_epoch_features(block_id)
if EPOCH_FEATURE_STACKS_BUY_NAMESPACES in epoch_features:
# must pay in STACKs
if 'token_fee' not in nameop:
log.warning("Missing token fee")
return False
token_fee = nameop['token_fee']
token_address = nameop['address']
token_type = TOKEN_TYPE_STACKS
# was a token fee paid?
if token_fee is None:
log.warning("No tokens paid by this NAMESPACE_PREORDER")
return False
# does this account have enough balance?
account_info = state_engine.get_account(token_address, token_type)
if account_info is None:
log.warning("No account for {} ({})".format(token_address, token_type))
return False
account_balance = state_engine.get_account_balance(account_info)
assert isinstance(account_balance, (int,long)), 'BUG: account_balance of {} is {} (type {})'.format(token_address, account_balance, type(account_balance))
assert isinstance(token_fee, (int,long)), 'BUG: token_fee is {} (type {})'.format(token_fee, type(token_fee))
if account_balance < token_fee:
# can't afford
log.warning("Account {} has balance {} {}, but needs to pay {} {}".format(token_address, account_balance, token_type, token_fee, token_type))
return False
# debit this account when we commit
state_preorder_put_account_payment_info(nameop, token_address, token_type, token_fee)
# NOTE: must be a string, to avoid overflow
nameop['token_fee'] = '{}'.format(token_fee)
nameop['token_units'] = TOKEN_TYPE_STACKS
else:
# must pay in BTC
# not paying in tokens, but say so!
state_preorder_put_account_payment_info(nameop, None, None, None)
nameop['token_fee'] = '0'
nameop['token_units'] = 'BTC'
return True | python | def check( state_engine, nameop, block_id, checked_ops ):
"""
Given a NAMESPACE_PREORDER nameop, see if we can preorder it.
It must be unqiue.
Return True if accepted.
Return False if not.
"""
namespace_id_hash = nameop['preorder_hash']
consensus_hash = nameop['consensus_hash']
token_fee = nameop['token_fee']
# cannot be preordered already
if not state_engine.is_new_namespace_preorder( namespace_id_hash ):
log.warning("Namespace preorder '%s' already in use" % namespace_id_hash)
return False
# has to have a reasonable consensus hash
if not state_engine.is_consensus_hash_valid( block_id, consensus_hash ):
valid_consensus_hashes = state_engine.get_valid_consensus_hashes( block_id )
log.warning("Invalid consensus hash '%s': expected any of %s" % (consensus_hash, ",".join( valid_consensus_hashes )) )
return False
# has to have paid a fee
if not 'op_fee' in nameop:
log.warning("Missing namespace preorder fee")
return False
# paid to the right burn address
if nameop['burn_address'] != BLOCKSTACK_BURN_ADDRESS:
log.warning("Invalid burn address: expected {}, got {}".format(BLOCKSTACK_BURN_ADDRESS, nameop['burn_address']))
return False
# token burn fee must be present, if we're in the right epoch for it
epoch_features = get_epoch_features(block_id)
if EPOCH_FEATURE_STACKS_BUY_NAMESPACES in epoch_features:
# must pay in STACKs
if 'token_fee' not in nameop:
log.warning("Missing token fee")
return False
token_fee = nameop['token_fee']
token_address = nameop['address']
token_type = TOKEN_TYPE_STACKS
# was a token fee paid?
if token_fee is None:
log.warning("No tokens paid by this NAMESPACE_PREORDER")
return False
# does this account have enough balance?
account_info = state_engine.get_account(token_address, token_type)
if account_info is None:
log.warning("No account for {} ({})".format(token_address, token_type))
return False
account_balance = state_engine.get_account_balance(account_info)
assert isinstance(account_balance, (int,long)), 'BUG: account_balance of {} is {} (type {})'.format(token_address, account_balance, type(account_balance))
assert isinstance(token_fee, (int,long)), 'BUG: token_fee is {} (type {})'.format(token_fee, type(token_fee))
if account_balance < token_fee:
# can't afford
log.warning("Account {} has balance {} {}, but needs to pay {} {}".format(token_address, account_balance, token_type, token_fee, token_type))
return False
# debit this account when we commit
state_preorder_put_account_payment_info(nameop, token_address, token_type, token_fee)
# NOTE: must be a string, to avoid overflow
nameop['token_fee'] = '{}'.format(token_fee)
nameop['token_units'] = TOKEN_TYPE_STACKS
else:
# must pay in BTC
# not paying in tokens, but say so!
state_preorder_put_account_payment_info(nameop, None, None, None)
nameop['token_fee'] = '0'
nameop['token_units'] = 'BTC'
return True | [
"def",
"check",
"(",
"state_engine",
",",
"nameop",
",",
"block_id",
",",
"checked_ops",
")",
":",
"namespace_id_hash",
"=",
"nameop",
"[",
"'preorder_hash'",
"]",
"consensus_hash",
"=",
"nameop",
"[",
"'consensus_hash'",
"]",
"token_fee",
"=",
"nameop",
"[",
... | Given a NAMESPACE_PREORDER nameop, see if we can preorder it.
It must be unqiue.
Return True if accepted.
Return False if not. | [
"Given",
"a",
"NAMESPACE_PREORDER",
"nameop",
"see",
"if",
"we",
"can",
"preorder",
"it",
".",
"It",
"must",
"be",
"unqiue",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/namespacepreorder.py#L53-L134 | train | 225,333 |
blockstack/blockstack-core | blockstack/lib/fast_sync.py | snapshot_peek_number | def snapshot_peek_number( fd, off ):
"""
Read the last 8 bytes of fd
and interpret it as an int.
"""
# read number of 8 bytes
fd.seek( off - 8, os.SEEK_SET )
value_hex = fd.read(8)
if len(value_hex) != 8:
return None
try:
value = int(value_hex, 16)
except ValueError:
return None
return value | python | def snapshot_peek_number( fd, off ):
"""
Read the last 8 bytes of fd
and interpret it as an int.
"""
# read number of 8 bytes
fd.seek( off - 8, os.SEEK_SET )
value_hex = fd.read(8)
if len(value_hex) != 8:
return None
try:
value = int(value_hex, 16)
except ValueError:
return None
return value | [
"def",
"snapshot_peek_number",
"(",
"fd",
",",
"off",
")",
":",
"# read number of 8 bytes ",
"fd",
".",
"seek",
"(",
"off",
"-",
"8",
",",
"os",
".",
"SEEK_SET",
")",
"value_hex",
"=",
"fd",
".",
"read",
"(",
"8",
")",
"if",
"len",
"(",
"value_hex",
... | Read the last 8 bytes of fd
and interpret it as an int. | [
"Read",
"the",
"last",
"8",
"bytes",
"of",
"fd",
"and",
"interpret",
"it",
"as",
"an",
"int",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L52-L67 | train | 225,334 |
blockstack/blockstack-core | blockstack/lib/fast_sync.py | get_file_hash | def get_file_hash( fd, hashfunc, fd_len=None ):
"""
Get the hex-encoded hash of the fd's data
"""
h = hashfunc()
fd.seek(0, os.SEEK_SET)
count = 0
while True:
buf = fd.read(65536)
if len(buf) == 0:
break
if fd_len is not None:
if count + len(buf) > fd_len:
buf = buf[:fd_len - count]
h.update(buf)
count += len(buf)
hashed = h.hexdigest()
return hashed | python | def get_file_hash( fd, hashfunc, fd_len=None ):
"""
Get the hex-encoded hash of the fd's data
"""
h = hashfunc()
fd.seek(0, os.SEEK_SET)
count = 0
while True:
buf = fd.read(65536)
if len(buf) == 0:
break
if fd_len is not None:
if count + len(buf) > fd_len:
buf = buf[:fd_len - count]
h.update(buf)
count += len(buf)
hashed = h.hexdigest()
return hashed | [
"def",
"get_file_hash",
"(",
"fd",
",",
"hashfunc",
",",
"fd_len",
"=",
"None",
")",
":",
"h",
"=",
"hashfunc",
"(",
")",
"fd",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"count",
"=",
"0",
"while",
"True",
":",
"buf",
"=",
"fd",
"... | Get the hex-encoded hash of the fd's data | [
"Get",
"the",
"hex",
"-",
"encoded",
"hash",
"of",
"the",
"fd",
"s",
"data"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L89-L111 | train | 225,335 |
blockstack/blockstack-core | blockstack/lib/fast_sync.py | fast_sync_sign_snapshot | def fast_sync_sign_snapshot( snapshot_path, private_key, first=False ):
"""
Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error
"""
if not os.path.exists(snapshot_path):
log.error("No such file or directory: {}".format(snapshot_path))
return False
file_size = 0
payload_size = 0
write_offset = 0
try:
sb = os.stat(snapshot_path)
file_size = sb.st_size
assert file_size > 8
except Exception as e:
log.exception(e)
return False
num_sigs = 0
snapshot_hash = None
with open(snapshot_path, 'r+') as f:
if not first:
info = fast_sync_inspect(f)
if 'error' in info:
log.error("Failed to inspect {}: {}".format(snapshot_path, info['error']))
return False
num_sigs = len(info['signatures'])
write_offset = info['sig_append_offset']
payload_size = info['payload_size']
else:
# no one has signed yet.
write_offset = file_size
num_sigs = 0
payload_size = file_size
# hash the file and sign the (bin-encoded) hash
privkey_hex = keylib.ECPrivateKey(private_key).to_hex()
hash_hex = get_file_hash( f, hashlib.sha256, fd_len=payload_size )
sigb64 = sign_digest( hash_hex, privkey_hex, hashfunc=hashlib.sha256 )
if BLOCKSTACK_TEST:
log.debug("Signed {} with {} to make {}".format(hash_hex, keylib.ECPrivateKey(private_key).public_key().to_hex(), sigb64))
# append
f.seek(write_offset, os.SEEK_SET)
f.write(sigb64)
f.write('{:08x}'.format(len(sigb64)))
# append number of signatures
num_sigs += 1
f.write('{:08x}'.format(num_sigs))
f.flush()
os.fsync(f.fileno())
return True | python | def fast_sync_sign_snapshot( snapshot_path, private_key, first=False ):
"""
Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error
"""
if not os.path.exists(snapshot_path):
log.error("No such file or directory: {}".format(snapshot_path))
return False
file_size = 0
payload_size = 0
write_offset = 0
try:
sb = os.stat(snapshot_path)
file_size = sb.st_size
assert file_size > 8
except Exception as e:
log.exception(e)
return False
num_sigs = 0
snapshot_hash = None
with open(snapshot_path, 'r+') as f:
if not first:
info = fast_sync_inspect(f)
if 'error' in info:
log.error("Failed to inspect {}: {}".format(snapshot_path, info['error']))
return False
num_sigs = len(info['signatures'])
write_offset = info['sig_append_offset']
payload_size = info['payload_size']
else:
# no one has signed yet.
write_offset = file_size
num_sigs = 0
payload_size = file_size
# hash the file and sign the (bin-encoded) hash
privkey_hex = keylib.ECPrivateKey(private_key).to_hex()
hash_hex = get_file_hash( f, hashlib.sha256, fd_len=payload_size )
sigb64 = sign_digest( hash_hex, privkey_hex, hashfunc=hashlib.sha256 )
if BLOCKSTACK_TEST:
log.debug("Signed {} with {} to make {}".format(hash_hex, keylib.ECPrivateKey(private_key).public_key().to_hex(), sigb64))
# append
f.seek(write_offset, os.SEEK_SET)
f.write(sigb64)
f.write('{:08x}'.format(len(sigb64)))
# append number of signatures
num_sigs += 1
f.write('{:08x}'.format(num_sigs))
f.flush()
os.fsync(f.fileno())
return True | [
"def",
"fast_sync_sign_snapshot",
"(",
"snapshot_path",
",",
"private_key",
",",
"first",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"snapshot_path",
")",
":",
"log",
".",
"error",
"(",
"\"No such file or directory: {}\"",
".",... | Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error | [
"Append",
"a",
"signature",
"to",
"the",
"end",
"of",
"a",
"snapshot",
"path",
"with",
"the",
"given",
"private",
"key",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L114-L180 | train | 225,336 |
blockstack/blockstack-core | blockstack/lib/fast_sync.py | fast_sync_snapshot_compress | def fast_sync_snapshot_compress( snapshot_dir, export_path ):
"""
Given the path to a directory, compress it and export it to the
given path.
Return {'status': True} on success
Return {'error': ...} on failure
"""
snapshot_dir = os.path.abspath(snapshot_dir)
export_path = os.path.abspath(export_path)
if os.path.exists(export_path):
return {'error': 'Snapshot path exists: {}'.format(export_path)}
old_dir = os.getcwd()
count_ref = [0]
def print_progress(tarinfo):
count_ref[0] += 1
if count_ref[0] % 100 == 0:
log.debug("{} files compressed...".format(count_ref[0]))
return tarinfo
try:
os.chdir(snapshot_dir)
with tarfile.TarFile.bz2open(export_path, "w") as f:
f.add(".", filter=print_progress)
except:
os.chdir(old_dir)
raise
finally:
os.chdir(old_dir)
return {'status': True} | python | def fast_sync_snapshot_compress( snapshot_dir, export_path ):
"""
Given the path to a directory, compress it and export it to the
given path.
Return {'status': True} on success
Return {'error': ...} on failure
"""
snapshot_dir = os.path.abspath(snapshot_dir)
export_path = os.path.abspath(export_path)
if os.path.exists(export_path):
return {'error': 'Snapshot path exists: {}'.format(export_path)}
old_dir = os.getcwd()
count_ref = [0]
def print_progress(tarinfo):
count_ref[0] += 1
if count_ref[0] % 100 == 0:
log.debug("{} files compressed...".format(count_ref[0]))
return tarinfo
try:
os.chdir(snapshot_dir)
with tarfile.TarFile.bz2open(export_path, "w") as f:
f.add(".", filter=print_progress)
except:
os.chdir(old_dir)
raise
finally:
os.chdir(old_dir)
return {'status': True} | [
"def",
"fast_sync_snapshot_compress",
"(",
"snapshot_dir",
",",
"export_path",
")",
":",
"snapshot_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"snapshot_dir",
")",
"export_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"export_path",
")",
"if",
"... | Given the path to a directory, compress it and export it to the
given path.
Return {'status': True} on success
Return {'error': ...} on failure | [
"Given",
"the",
"path",
"to",
"a",
"directory",
"compress",
"it",
"and",
"export",
"it",
"to",
"the",
"given",
"path",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L183-L220 | train | 225,337 |
blockstack/blockstack-core | blockstack/lib/fast_sync.py | fast_sync_snapshot_decompress | def fast_sync_snapshot_decompress( snapshot_path, output_dir ):
"""
Given the path to a snapshot file, decompress it and
write its contents to the given output directory
Return {'status': True} on success
Return {'error': ...} on failure
"""
if not tarfile.is_tarfile(snapshot_path):
return {'error': 'Not a tarfile-compatible archive: {}'.format(snapshot_path)}
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with tarfile.TarFile.bz2open(snapshot_path, 'r') as f:
tarfile.TarFile.extractall(f, path=output_dir)
return {'status': True} | python | def fast_sync_snapshot_decompress( snapshot_path, output_dir ):
"""
Given the path to a snapshot file, decompress it and
write its contents to the given output directory
Return {'status': True} on success
Return {'error': ...} on failure
"""
if not tarfile.is_tarfile(snapshot_path):
return {'error': 'Not a tarfile-compatible archive: {}'.format(snapshot_path)}
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with tarfile.TarFile.bz2open(snapshot_path, 'r') as f:
tarfile.TarFile.extractall(f, path=output_dir)
return {'status': True} | [
"def",
"fast_sync_snapshot_decompress",
"(",
"snapshot_path",
",",
"output_dir",
")",
":",
"if",
"not",
"tarfile",
".",
"is_tarfile",
"(",
"snapshot_path",
")",
":",
"return",
"{",
"'error'",
":",
"'Not a tarfile-compatible archive: {}'",
".",
"format",
"(",
"snapsh... | Given the path to a snapshot file, decompress it and
write its contents to the given output directory
Return {'status': True} on success
Return {'error': ...} on failure | [
"Given",
"the",
"path",
"to",
"a",
"snapshot",
"file",
"decompress",
"it",
"and",
"write",
"its",
"contents",
"to",
"the",
"given",
"output",
"directory"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L223-L240 | train | 225,338 |
blockstack/blockstack-core | blockstack/lib/fast_sync.py | fast_sync_fetch | def fast_sync_fetch(working_dir, import_url):
"""
Get the data for an import snapshot.
Store it to a temporary path
Return the path on success
Return None on error
"""
try:
fd, tmppath = tempfile.mkstemp(prefix='.blockstack-fast-sync-', dir=working_dir)
except Exception, e:
log.exception(e)
return None
log.debug("Fetch {} to {}...".format(import_url, tmppath))
try:
path, headers = urllib.urlretrieve(import_url, tmppath)
except Exception, e:
os.close(fd)
log.exception(e)
return None
os.close(fd)
return tmppath | python | def fast_sync_fetch(working_dir, import_url):
"""
Get the data for an import snapshot.
Store it to a temporary path
Return the path on success
Return None on error
"""
try:
fd, tmppath = tempfile.mkstemp(prefix='.blockstack-fast-sync-', dir=working_dir)
except Exception, e:
log.exception(e)
return None
log.debug("Fetch {} to {}...".format(import_url, tmppath))
try:
path, headers = urllib.urlretrieve(import_url, tmppath)
except Exception, e:
os.close(fd)
log.exception(e)
return None
os.close(fd)
return tmppath | [
"def",
"fast_sync_fetch",
"(",
"working_dir",
",",
"import_url",
")",
":",
"try",
":",
"fd",
",",
"tmppath",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'.blockstack-fast-sync-'",
",",
"dir",
"=",
"working_dir",
")",
"except",
"Exception",
",",
"e"... | Get the data for an import snapshot.
Store it to a temporary path
Return the path on success
Return None on error | [
"Get",
"the",
"data",
"for",
"an",
"import",
"snapshot",
".",
"Store",
"it",
"to",
"a",
"temporary",
"path",
"Return",
"the",
"path",
"on",
"success",
"Return",
"None",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L410-L433 | train | 225,339 |
blockstack/blockstack-core | blockstack/lib/nameset/__init__.py | state_check_collisions | def state_check_collisions( state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker ):
"""
See that there are no state-creating or state-preordering collisions at this block, for this history ID.
Return True if collided; False if not
"""
# verify no collisions against already-accepted names
collision_check = getattr( state_engine, collision_checker, None )
try:
assert collision_check is not None, "Collision-checker '%s' not defined" % collision_checker
assert hasattr( collision_check, "__call__" ), "Collision-checker '%s' is not callable" % collision_checker
assert history_id_key in nameop.keys(), "History ID key '%s' not in name operation" % (history_id_key)
assert 'op' in nameop.keys(), "BUG: no op in nameop"
except Exception, e:
log.exception(e)
log.error("FATAL: incorrect state_create() decorator")
sys.exit(1)
rc = collision_check( nameop[history_id_key], block_id, checked_ops )
return rc | python | def state_check_collisions( state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker ):
"""
See that there are no state-creating or state-preordering collisions at this block, for this history ID.
Return True if collided; False if not
"""
# verify no collisions against already-accepted names
collision_check = getattr( state_engine, collision_checker, None )
try:
assert collision_check is not None, "Collision-checker '%s' not defined" % collision_checker
assert hasattr( collision_check, "__call__" ), "Collision-checker '%s' is not callable" % collision_checker
assert history_id_key in nameop.keys(), "History ID key '%s' not in name operation" % (history_id_key)
assert 'op' in nameop.keys(), "BUG: no op in nameop"
except Exception, e:
log.exception(e)
log.error("FATAL: incorrect state_create() decorator")
sys.exit(1)
rc = collision_check( nameop[history_id_key], block_id, checked_ops )
return rc | [
"def",
"state_check_collisions",
"(",
"state_engine",
",",
"nameop",
",",
"history_id_key",
",",
"block_id",
",",
"checked_ops",
",",
"collision_checker",
")",
":",
"# verify no collisions against already-accepted names",
"collision_check",
"=",
"getattr",
"(",
"state_engin... | See that there are no state-creating or state-preordering collisions at this block, for this history ID.
Return True if collided; False if not | [
"See",
"that",
"there",
"are",
"no",
"state",
"-",
"creating",
"or",
"state",
"-",
"preordering",
"collisions",
"at",
"this",
"block",
"for",
"this",
"history",
"ID",
".",
"Return",
"True",
"if",
"collided",
";",
"False",
"if",
"not"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/__init__.py#L100-L119 | train | 225,340 |
blockstack/blockstack-core | blockstack/lib/nameset/__init__.py | state_create_is_valid | def state_create_is_valid( nameop ):
"""
Is a nameop a valid state-preorder operation?
"""
assert '__state_create__' in nameop, "Not tagged with @state_create"
assert nameop['__state_create__'], "BUG: tagged False by @state_create"
assert '__preorder__' in nameop, "No preorder"
assert '__table__' in nameop, "No table given"
assert '__history_id_key__' in nameop, "No history ID key given"
assert nameop['__history_id_key__'] in nameop, "No history ID given"
assert '__always_set__' in nameop, "No always-set fields given"
return True | python | def state_create_is_valid( nameop ):
"""
Is a nameop a valid state-preorder operation?
"""
assert '__state_create__' in nameop, "Not tagged with @state_create"
assert nameop['__state_create__'], "BUG: tagged False by @state_create"
assert '__preorder__' in nameop, "No preorder"
assert '__table__' in nameop, "No table given"
assert '__history_id_key__' in nameop, "No history ID key given"
assert nameop['__history_id_key__'] in nameop, "No history ID given"
assert '__always_set__' in nameop, "No always-set fields given"
return True | [
"def",
"state_create_is_valid",
"(",
"nameop",
")",
":",
"assert",
"'__state_create__'",
"in",
"nameop",
",",
"\"Not tagged with @state_create\"",
"assert",
"nameop",
"[",
"'__state_create__'",
"]",
",",
"\"BUG: tagged False by @state_create\"",
"assert",
"'__preorder__'",
... | Is a nameop a valid state-preorder operation? | [
"Is",
"a",
"nameop",
"a",
"valid",
"state",
"-",
"preorder",
"operation?"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/__init__.py#L348-L360 | train | 225,341 |
blockstack/blockstack-core | blockstack/lib/nameset/__init__.py | state_transition_is_valid | def state_transition_is_valid( nameop ):
"""
Is this a valid state transition?
"""
assert '__state_transition__' in nameop, "Not tagged with @state_transition"
assert nameop['__state_transition__'], "BUG: @state_transition tagged False"
assert '__history_id_key__' in nameop, "Missing __history_id_key__"
history_id_key = nameop['__history_id_key__']
assert history_id_key in ["name", "namespace_id"], "Invalid history ID key '%s'" % history_id_key
assert '__table__' in nameop, "Missing __table__"
assert '__always_set__' in nameop, "No always-set fields given"
assert '__account_payment_info__' in nameop, 'No account payment information present'
return True | python | def state_transition_is_valid( nameop ):
"""
Is this a valid state transition?
"""
assert '__state_transition__' in nameop, "Not tagged with @state_transition"
assert nameop['__state_transition__'], "BUG: @state_transition tagged False"
assert '__history_id_key__' in nameop, "Missing __history_id_key__"
history_id_key = nameop['__history_id_key__']
assert history_id_key in ["name", "namespace_id"], "Invalid history ID key '%s'" % history_id_key
assert '__table__' in nameop, "Missing __table__"
assert '__always_set__' in nameop, "No always-set fields given"
assert '__account_payment_info__' in nameop, 'No account payment information present'
return True | [
"def",
"state_transition_is_valid",
"(",
"nameop",
")",
":",
"assert",
"'__state_transition__'",
"in",
"nameop",
",",
"\"Not tagged with @state_transition\"",
"assert",
"nameop",
"[",
"'__state_transition__'",
"]",
",",
"\"BUG: @state_transition tagged False\"",
"assert",
"'_... | Is this a valid state transition? | [
"Is",
"this",
"a",
"valid",
"state",
"transition?"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/__init__.py#L391-L404 | train | 225,342 |
blockstack/blockstack-core | blockstack/lib/storage/crawl.py | _read_atlas_zonefile | def _read_atlas_zonefile( zonefile_path, zonefile_hash ):
"""
Read and verify an atlas zone file
"""
with open(zonefile_path, "rb") as f:
data = f.read()
# sanity check
if zonefile_hash is not None:
if not verify_zonefile( data, zonefile_hash ):
log.debug("Corrupt zonefile '%s'" % zonefile_hash)
return None
return data | python | def _read_atlas_zonefile( zonefile_path, zonefile_hash ):
"""
Read and verify an atlas zone file
"""
with open(zonefile_path, "rb") as f:
data = f.read()
# sanity check
if zonefile_hash is not None:
if not verify_zonefile( data, zonefile_hash ):
log.debug("Corrupt zonefile '%s'" % zonefile_hash)
return None
return data | [
"def",
"_read_atlas_zonefile",
"(",
"zonefile_path",
",",
"zonefile_hash",
")",
":",
"with",
"open",
"(",
"zonefile_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"# sanity check ",
"if",
"zonefile_hash",
"is",
"not",
"... | Read and verify an atlas zone file | [
"Read",
"and",
"verify",
"an",
"atlas",
"zone",
"file"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/storage/crawl.py#L37-L51 | train | 225,343 |
blockstack/blockstack-core | blockstack/lib/storage/crawl.py | get_atlas_zonefile_data | def get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=True ):
"""
Get a serialized cached zonefile from local disk
Return None if not found
"""
zonefile_path = atlas_zonefile_path(zonefile_dir, zonefile_hash)
zonefile_path_legacy = atlas_zonefile_path_legacy(zonefile_dir, zonefile_hash)
for zfp in [zonefile_path, zonefile_path_legacy]:
if not os.path.exists( zfp ):
continue
if check:
res = _read_atlas_zonefile(zfp, zonefile_hash)
else:
res = _read_atlas_zonefile(zfp, None)
if res:
return res
return None | python | def get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=True ):
"""
Get a serialized cached zonefile from local disk
Return None if not found
"""
zonefile_path = atlas_zonefile_path(zonefile_dir, zonefile_hash)
zonefile_path_legacy = atlas_zonefile_path_legacy(zonefile_dir, zonefile_hash)
for zfp in [zonefile_path, zonefile_path_legacy]:
if not os.path.exists( zfp ):
continue
if check:
res = _read_atlas_zonefile(zfp, zonefile_hash)
else:
res = _read_atlas_zonefile(zfp, None)
if res:
return res
return None | [
"def",
"get_atlas_zonefile_data",
"(",
"zonefile_hash",
",",
"zonefile_dir",
",",
"check",
"=",
"True",
")",
":",
"zonefile_path",
"=",
"atlas_zonefile_path",
"(",
"zonefile_dir",
",",
"zonefile_hash",
")",
"zonefile_path_legacy",
"=",
"atlas_zonefile_path_legacy",
"(",... | Get a serialized cached zonefile from local disk
Return None if not found | [
"Get",
"a",
"serialized",
"cached",
"zonefile",
"from",
"local",
"disk",
"Return",
"None",
"if",
"not",
"found"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/storage/crawl.py#L54-L75 | train | 225,344 |
blockstack/blockstack-core | blockstack/lib/storage/crawl.py | store_atlas_zonefile_data | def store_atlas_zonefile_data(zonefile_data, zonefile_dir, fsync=True):
"""
Store a validated zonefile.
zonefile_data should be a dict.
The caller should first authenticate the zonefile.
Return True on success
Return False on error
"""
if not os.path.exists(zonefile_dir):
os.makedirs(zonefile_dir, 0700 )
zonefile_hash = get_zonefile_data_hash( zonefile_data )
# only store to the latest supported directory
zonefile_path = atlas_zonefile_path( zonefile_dir, zonefile_hash )
zonefile_dir_path = os.path.dirname(zonefile_path)
if os.path.exists(zonefile_path):
# already exists
return True
if not os.path.exists(zonefile_dir_path):
os.makedirs(zonefile_dir_path)
try:
with open( zonefile_path, "wb" ) as f:
f.write(zonefile_data)
f.flush()
if fsync:
os.fsync(f.fileno())
except Exception, e:
log.exception(e)
return False
return True | python | def store_atlas_zonefile_data(zonefile_data, zonefile_dir, fsync=True):
"""
Store a validated zonefile.
zonefile_data should be a dict.
The caller should first authenticate the zonefile.
Return True on success
Return False on error
"""
if not os.path.exists(zonefile_dir):
os.makedirs(zonefile_dir, 0700 )
zonefile_hash = get_zonefile_data_hash( zonefile_data )
# only store to the latest supported directory
zonefile_path = atlas_zonefile_path( zonefile_dir, zonefile_hash )
zonefile_dir_path = os.path.dirname(zonefile_path)
if os.path.exists(zonefile_path):
# already exists
return True
if not os.path.exists(zonefile_dir_path):
os.makedirs(zonefile_dir_path)
try:
with open( zonefile_path, "wb" ) as f:
f.write(zonefile_data)
f.flush()
if fsync:
os.fsync(f.fileno())
except Exception, e:
log.exception(e)
return False
return True | [
"def",
"store_atlas_zonefile_data",
"(",
"zonefile_data",
",",
"zonefile_dir",
",",
"fsync",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"zonefile_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"zonefile_dir",
",",
"0700",
")",
... | Store a validated zonefile.
zonefile_data should be a dict.
The caller should first authenticate the zonefile.
Return True on success
Return False on error | [
"Store",
"a",
"validated",
"zonefile",
".",
"zonefile_data",
"should",
"be",
"a",
"dict",
".",
"The",
"caller",
"should",
"first",
"authenticate",
"the",
"zonefile",
".",
"Return",
"True",
"on",
"success",
"Return",
"False",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/storage/crawl.py#L146-L181 | train | 225,345 |
blockstack/blockstack-core | blockstack/lib/storage/crawl.py | remove_atlas_zonefile_data | def remove_atlas_zonefile_data( zonefile_hash, zonefile_dir ):
"""
Remove a cached zonefile.
Idempotent; returns True if deleted or it didn't exist.
Returns False on error
"""
if not os.path.exists(zonefile_dir):
return True
zonefile_path = atlas_zonefile_path( zonefile_dir, zonefile_hash )
zonefile_path_legacy = atlas_zonefile_path_legacy( zonefile_dir, zonefile_hash )
for zfp in [zonefile_path, zonefile_path_legacy]:
if not os.path.exists(zonefile_path):
continue
try:
os.unlink(zonefile_path)
except:
log.error("Failed to unlink zonefile %s (%s)" % (zonefile_hash, zonefile_path))
return True | python | def remove_atlas_zonefile_data( zonefile_hash, zonefile_dir ):
"""
Remove a cached zonefile.
Idempotent; returns True if deleted or it didn't exist.
Returns False on error
"""
if not os.path.exists(zonefile_dir):
return True
zonefile_path = atlas_zonefile_path( zonefile_dir, zonefile_hash )
zonefile_path_legacy = atlas_zonefile_path_legacy( zonefile_dir, zonefile_hash )
for zfp in [zonefile_path, zonefile_path_legacy]:
if not os.path.exists(zonefile_path):
continue
try:
os.unlink(zonefile_path)
except:
log.error("Failed to unlink zonefile %s (%s)" % (zonefile_hash, zonefile_path))
return True | [
"def",
"remove_atlas_zonefile_data",
"(",
"zonefile_hash",
",",
"zonefile_dir",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"zonefile_dir",
")",
":",
"return",
"True",
"zonefile_path",
"=",
"atlas_zonefile_path",
"(",
"zonefile_dir",
",",
"zone... | Remove a cached zonefile.
Idempotent; returns True if deleted or it didn't exist.
Returns False on error | [
"Remove",
"a",
"cached",
"zonefile",
".",
"Idempotent",
";",
"returns",
"True",
"if",
"deleted",
"or",
"it",
"didn",
"t",
"exist",
".",
"Returns",
"False",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/storage/crawl.py#L184-L205 | train | 225,346 |
blockstack/blockstack-core | blockstack/lib/storage/crawl.py | add_atlas_zonefile_data | def add_atlas_zonefile_data(zonefile_text, zonefile_dir, fsync=True):
"""
Add a zone file to the atlas zonefiles
Return True on success
Return False on error
"""
rc = store_atlas_zonefile_data(zonefile_text, zonefile_dir, fsync=fsync)
if not rc:
zonefile_hash = get_zonefile_data_hash( zonefile_text )
log.error("Failed to save zonefile {}".format(zonefile_hash))
rc = False
return rc | python | def add_atlas_zonefile_data(zonefile_text, zonefile_dir, fsync=True):
"""
Add a zone file to the atlas zonefiles
Return True on success
Return False on error
"""
rc = store_atlas_zonefile_data(zonefile_text, zonefile_dir, fsync=fsync)
if not rc:
zonefile_hash = get_zonefile_data_hash( zonefile_text )
log.error("Failed to save zonefile {}".format(zonefile_hash))
rc = False
return rc | [
"def",
"add_atlas_zonefile_data",
"(",
"zonefile_text",
",",
"zonefile_dir",
",",
"fsync",
"=",
"True",
")",
":",
"rc",
"=",
"store_atlas_zonefile_data",
"(",
"zonefile_text",
",",
"zonefile_dir",
",",
"fsync",
"=",
"fsync",
")",
"if",
"not",
"rc",
":",
"zonef... | Add a zone file to the atlas zonefiles
Return True on success
Return False on error | [
"Add",
"a",
"zone",
"file",
"to",
"the",
"atlas",
"zonefiles",
"Return",
"True",
"on",
"success",
"Return",
"False",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/storage/crawl.py#L208-L221 | train | 225,347 |
blockstack/blockstack-core | blockstack/lib/operations/transfer.py | transfer_sanity_check | def transfer_sanity_check( name, consensus_hash ):
"""
Verify that data for a transfer is valid.
Return True on success
Raise Exception on error
"""
if name is not None and (not is_b40( name ) or "+" in name or name.count(".") > 1):
raise Exception("Name '%s' has non-base-38 characters" % name)
# without the scheme, name must be 37 bytes
if name is not None and (len(name) > LENGTHS['blockchain_id_name']):
raise Exception("Name '%s' is too long; expected %s bytes" % (name, LENGTHS['blockchain_id_name']))
return True | python | def transfer_sanity_check( name, consensus_hash ):
"""
Verify that data for a transfer is valid.
Return True on success
Raise Exception on error
"""
if name is not None and (not is_b40( name ) or "+" in name or name.count(".") > 1):
raise Exception("Name '%s' has non-base-38 characters" % name)
# without the scheme, name must be 37 bytes
if name is not None and (len(name) > LENGTHS['blockchain_id_name']):
raise Exception("Name '%s' is too long; expected %s bytes" % (name, LENGTHS['blockchain_id_name']))
return True | [
"def",
"transfer_sanity_check",
"(",
"name",
",",
"consensus_hash",
")",
":",
"if",
"name",
"is",
"not",
"None",
"and",
"(",
"not",
"is_b40",
"(",
"name",
")",
"or",
"\"+\"",
"in",
"name",
"or",
"name",
".",
"count",
"(",
"\".\"",
")",
">",
"1",
")",... | Verify that data for a transfer is valid.
Return True on success
Raise Exception on error | [
"Verify",
"that",
"data",
"for",
"a",
"transfer",
"is",
"valid",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/transfer.py#L70-L84 | train | 225,348 |
blockstack/blockstack-core | blockstack/lib/operations/transfer.py | find_transfer_consensus_hash | def find_transfer_consensus_hash( name_rec, block_id, vtxindex, nameop_consensus_hash ):
"""
Given a name record, find the last consensus hash set by a non-NAME_TRANSFER operation.
@name_rec is the current name record, before this NAME_TRANSFER.
@block_id is the current block height.
@vtxindex is the relative index of this transaction in this block.
@nameop_consensus_hash is the consensus hash given in the NAME_TRANSFER.
This preserves compatibility from a bug prior to 0.14.x where the consensus hash from a NAME_TRANSFER
is ignored in favor of the last consensus hash (if any) supplied by an operation to the affected name.
This method finds that consensus hash (if present).
The behavior emulated comes from the fact that in the original release of this software, the fields from
a name operation fed into the block's consensus hash included the consensus hashes given in each of the
a name operations' transactions. However, a quirk in the behavior of the NAME_TRANSFER-handling code
prevented this from happening consistently for NAME_TRANSFERs. Specifically, the only time a NAME_TRANSFER's
consensus hash was used to calculate the block's new consensus hash was if the name it affected had never
been affected by a prior state transition other than a NAME_TRANSFER. If the name was affected by
a prior state transition that set a consensus hash, then that prior state transition's consensus hash
(not the NAME_TRANSFER's) would be used in the block consensus hash calculation. If the name was NOT
affected by a prior state transition that set a consensus hash (back to the point of its last NAME_REGISTRATION),
then the consensus hash fed into the block would be that from the NAME_TRANSFER itself.
In practice, the only name operation that consistently sets a consensus hash is NAME_UPDATE. As for the others:
* NAME_REGISTRATION sets it to None
* NAME_IMPORT sets it to None
* NAME_RENEWAL doesn't set it at all; it just takes what was already there
* NAME_TRANSFER only sets it if there were no prior NAME_UPDATEs between now and the last NAME_REGISTRATION or NAME_IMPORT.
Here are some example name histories, and the consensus hash that should be used to calculate this block's consensus hash:
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_RENEWAL, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_UPDATE, NAME_TRANSFER whatever it was from the last NAME_UPDATE
NAME_IMPORT, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_TRANSFER, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
"""
# work backwards from the last block
for historic_block_number in reversed(sorted(name_rec['history'].keys())):
for historic_state in reversed(name_rec['history'][historic_block_number]):
if historic_state['block_number'] > block_id or (historic_state['block_number'] == block_id and historic_state['vtxindex'] > vtxindex):
# from the future
continue
if historic_state['op'] in [NAME_REGISTRATION, NAME_IMPORT]:
# out of history without finding a NAME_UPDATE
return nameop_consensus_hash
if historic_state['op'] == NAME_UPDATE:
# reuse this consensus hash
assert historic_state['consensus_hash'] is not None, 'BUG: NAME_UPDATE did not set "consensus_hash": {}'.format(historic_state)
return historic_state['consensus_hash']
return nameop_consensus_hash | python | def find_transfer_consensus_hash( name_rec, block_id, vtxindex, nameop_consensus_hash ):
"""
Given a name record, find the last consensus hash set by a non-NAME_TRANSFER operation.
@name_rec is the current name record, before this NAME_TRANSFER.
@block_id is the current block height.
@vtxindex is the relative index of this transaction in this block.
@nameop_consensus_hash is the consensus hash given in the NAME_TRANSFER.
This preserves compatibility from a bug prior to 0.14.x where the consensus hash from a NAME_TRANSFER
is ignored in favor of the last consensus hash (if any) supplied by an operation to the affected name.
This method finds that consensus hash (if present).
The behavior emulated comes from the fact that in the original release of this software, the fields from
a name operation fed into the block's consensus hash included the consensus hashes given in each of the
a name operations' transactions. However, a quirk in the behavior of the NAME_TRANSFER-handling code
prevented this from happening consistently for NAME_TRANSFERs. Specifically, the only time a NAME_TRANSFER's
consensus hash was used to calculate the block's new consensus hash was if the name it affected had never
been affected by a prior state transition other than a NAME_TRANSFER. If the name was affected by
a prior state transition that set a consensus hash, then that prior state transition's consensus hash
(not the NAME_TRANSFER's) would be used in the block consensus hash calculation. If the name was NOT
affected by a prior state transition that set a consensus hash (back to the point of its last NAME_REGISTRATION),
then the consensus hash fed into the block would be that from the NAME_TRANSFER itself.
In practice, the only name operation that consistently sets a consensus hash is NAME_UPDATE. As for the others:
* NAME_REGISTRATION sets it to None
* NAME_IMPORT sets it to None
* NAME_RENEWAL doesn't set it at all; it just takes what was already there
* NAME_TRANSFER only sets it if there were no prior NAME_UPDATEs between now and the last NAME_REGISTRATION or NAME_IMPORT.
Here are some example name histories, and the consensus hash that should be used to calculate this block's consensus hash:
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_RENEWAL, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_UPDATE, NAME_TRANSFER whatever it was from the last NAME_UPDATE
NAME_IMPORT, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_TRANSFER, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
"""
# work backwards from the last block
for historic_block_number in reversed(sorted(name_rec['history'].keys())):
for historic_state in reversed(name_rec['history'][historic_block_number]):
if historic_state['block_number'] > block_id or (historic_state['block_number'] == block_id and historic_state['vtxindex'] > vtxindex):
# from the future
continue
if historic_state['op'] in [NAME_REGISTRATION, NAME_IMPORT]:
# out of history without finding a NAME_UPDATE
return nameop_consensus_hash
if historic_state['op'] == NAME_UPDATE:
# reuse this consensus hash
assert historic_state['consensus_hash'] is not None, 'BUG: NAME_UPDATE did not set "consensus_hash": {}'.format(historic_state)
return historic_state['consensus_hash']
return nameop_consensus_hash | [
"def",
"find_transfer_consensus_hash",
"(",
"name_rec",
",",
"block_id",
",",
"vtxindex",
",",
"nameop_consensus_hash",
")",
":",
"# work backwards from the last block",
"for",
"historic_block_number",
"in",
"reversed",
"(",
"sorted",
"(",
"name_rec",
"[",
"'history'",
... | Given a name record, find the last consensus hash set by a non-NAME_TRANSFER operation.
@name_rec is the current name record, before this NAME_TRANSFER.
@block_id is the current block height.
@vtxindex is the relative index of this transaction in this block.
@nameop_consensus_hash is the consensus hash given in the NAME_TRANSFER.
This preserves compatibility from a bug prior to 0.14.x where the consensus hash from a NAME_TRANSFER
is ignored in favor of the last consensus hash (if any) supplied by an operation to the affected name.
This method finds that consensus hash (if present).
The behavior emulated comes from the fact that in the original release of this software, the fields from
a name operation fed into the block's consensus hash included the consensus hashes given in each of the
name operations' transactions. However, a quirk in the behavior of the NAME_TRANSFER-handling code
prevented this from happening consistently for NAME_TRANSFERs. Specifically, the only time a NAME_TRANSFER's
consensus hash was used to calculate the block's new consensus hash was if the name it affected had never
been affected by a prior state transition other than a NAME_TRANSFER. If the name was affected by
a prior state transition that set a consensus hash, then that prior state transition's consensus hash
(not the NAME_TRANSFER's) would be used in the block consensus hash calculation. If the name was NOT
affected by a prior state transition that set a consensus hash (back to the point of its last NAME_REGISTRATION),
then the consensus hash fed into the block would be that from the NAME_TRANSFER itself.
In practice, the only name operation that consistently sets a consensus hash is NAME_UPDATE. As for the others:
* NAME_REGISTRATION sets it to None
* NAME_IMPORT sets it to None
* NAME_RENEWAL doesn't set it at all; it just takes what was already there
* NAME_TRANSFER only sets it if there were no prior NAME_UPDATEs between now and the last NAME_REGISTRATION or NAME_IMPORT.
Here are some example name histories, and the consensus hash that should be used to calculate this block's consensus hash:
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_RENEWAL, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_PREORDER, NAME_REGISTRATION, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash
NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE
NAME_IMPORT, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
NAME_IMPORT, NAME_TRANSFER, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash | [
"Given",
"a",
"name",
"record",
"find",
"the",
"last",
"consensus",
"hash",
"set",
"by",
"a",
"non",
"-",
"NAME_TRANSFER",
"operation",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/transfer.py#L87-L146 | train | 225,349 |
blockstack/blockstack-core | blockstack/lib/operations/transfer.py | canonicalize | def canonicalize(parsed_op):
"""
Get the "canonical form" of this operation, putting it into a form where it can be serialized
to form a consensus hash. This method is meant to preserve compatibility across blockstackd releases.
For NAME_TRANSFER, this means:
* add 'keep_data' flag
"""
assert 'op' in parsed_op
assert len(parsed_op['op']) == 2
if parsed_op['op'][1] == TRANSFER_KEEP_DATA:
parsed_op['keep_data'] = True
elif parsed_op['op'][1] == TRANSFER_REMOVE_DATA:
parsed_op['keep_data'] = False
else:
raise ValueError("Invalid op '{}'".format(parsed_op['op']))
return parsed_op | python | def canonicalize(parsed_op):
"""
Get the "canonical form" of this operation, putting it into a form where it can be serialized
to form a consensus hash. This method is meant to preserve compatibility across blockstackd releases.
For NAME_TRANSFER, this means:
* add 'keep_data' flag
"""
assert 'op' in parsed_op
assert len(parsed_op['op']) == 2
if parsed_op['op'][1] == TRANSFER_KEEP_DATA:
parsed_op['keep_data'] = True
elif parsed_op['op'][1] == TRANSFER_REMOVE_DATA:
parsed_op['keep_data'] = False
else:
raise ValueError("Invalid op '{}'".format(parsed_op['op']))
return parsed_op | [
"def",
"canonicalize",
"(",
"parsed_op",
")",
":",
"assert",
"'op'",
"in",
"parsed_op",
"assert",
"len",
"(",
"parsed_op",
"[",
"'op'",
"]",
")",
"==",
"2",
"if",
"parsed_op",
"[",
"'op'",
"]",
"[",
"1",
"]",
"==",
"TRANSFER_KEEP_DATA",
":",
"parsed_op",... | Get the "canonical form" of this operation, putting it into a form where it can be serialized
to form a consensus hash. This method is meant to preserve compatibility across blockstackd releases.
For NAME_TRANSFER, this means:
* add 'keep_data' flag | [
"Get",
"the",
"canonical",
"form",
"of",
"this",
"operation",
"putting",
"it",
"into",
"a",
"form",
"where",
"it",
"can",
"be",
"serialized",
"to",
"form",
"a",
"consensus",
"hash",
".",
"This",
"method",
"is",
"meant",
"to",
"preserve",
"compatibility",
"... | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/transfer.py#L405-L423 | train | 225,350 |
blockstack/blockstack-core | blockstack/blockstackd.py | get_bitcoind | def get_bitcoind( new_bitcoind_opts=None, reset=False, new=False ):
"""
Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options.
"""
global bitcoind
if reset:
bitcoind = None
elif not new and bitcoind is not None:
return bitcoind
if new or bitcoind is None:
if new_bitcoind_opts is not None:
set_bitcoin_opts( new_bitcoind_opts )
bitcoin_opts = get_bitcoin_opts()
new_bitcoind = None
try:
try:
new_bitcoind = virtualchain.connect_bitcoind( bitcoin_opts )
except KeyError, ke:
log.exception(ke)
log.error("Invalid configuration: %s" % bitcoin_opts)
return None
if new:
return new_bitcoind
else:
# save for subsequent reuse
bitcoind = new_bitcoind
return bitcoind
except Exception, e:
log.exception( e )
return None | python | def get_bitcoind( new_bitcoind_opts=None, reset=False, new=False ):
"""
Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options.
"""
global bitcoind
if reset:
bitcoind = None
elif not new and bitcoind is not None:
return bitcoind
if new or bitcoind is None:
if new_bitcoind_opts is not None:
set_bitcoin_opts( new_bitcoind_opts )
bitcoin_opts = get_bitcoin_opts()
new_bitcoind = None
try:
try:
new_bitcoind = virtualchain.connect_bitcoind( bitcoin_opts )
except KeyError, ke:
log.exception(ke)
log.error("Invalid configuration: %s" % bitcoin_opts)
return None
if new:
return new_bitcoind
else:
# save for subsequent reuse
bitcoind = new_bitcoind
return bitcoind
except Exception, e:
log.exception( e )
return None | [
"def",
"get_bitcoind",
"(",
"new_bitcoind_opts",
"=",
"None",
",",
"reset",
"=",
"False",
",",
"new",
"=",
"False",
")",
":",
"global",
"bitcoind",
"if",
"reset",
":",
"bitcoind",
"=",
"None",
"elif",
"not",
"new",
"and",
"bitcoind",
"is",
"not",
"None",... | Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options. | [
"Get",
"or",
"instantiate",
"our",
"bitcoind",
"client",
".",
"Optionally",
"re",
"-",
"set",
"the",
"bitcoind",
"options",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L95-L133 | train | 225,351 |
blockstack/blockstack-core | blockstack/blockstackd.py | get_pidfile_path | def get_pidfile_path(working_dir):
"""
Get the PID file path.
"""
pid_filename = virtualchain_hooks.get_virtual_chain_name() + ".pid"
return os.path.join( working_dir, pid_filename ) | python | def get_pidfile_path(working_dir):
"""
Get the PID file path.
"""
pid_filename = virtualchain_hooks.get_virtual_chain_name() + ".pid"
return os.path.join( working_dir, pid_filename ) | [
"def",
"get_pidfile_path",
"(",
"working_dir",
")",
":",
"pid_filename",
"=",
"virtualchain_hooks",
".",
"get_virtual_chain_name",
"(",
")",
"+",
"\".pid\"",
"return",
"os",
".",
"path",
".",
"join",
"(",
"working_dir",
",",
"pid_filename",
")"
] | Get the PID file path. | [
"Get",
"the",
"PID",
"file",
"path",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L136-L141 | train | 225,352 |
blockstack/blockstack-core | blockstack/blockstackd.py | put_pidfile | def put_pidfile( pidfile_path, pid ):
"""
Put a PID into a pidfile
"""
with open( pidfile_path, "w" ) as f:
f.write("%s" % pid)
os.fsync(f.fileno())
return | python | def put_pidfile( pidfile_path, pid ):
"""
Put a PID into a pidfile
"""
with open( pidfile_path, "w" ) as f:
f.write("%s" % pid)
os.fsync(f.fileno())
return | [
"def",
"put_pidfile",
"(",
"pidfile_path",
",",
"pid",
")",
":",
"with",
"open",
"(",
"pidfile_path",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"%s\"",
"%",
"pid",
")",
"os",
".",
"fsync",
"(",
"f",
".",
"fileno",
"(",
")",
")"... | Put a PID into a pidfile | [
"Put",
"a",
"PID",
"into",
"a",
"pidfile"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L144-L152 | train | 225,353 |
blockstack/blockstack-core | blockstack/blockstackd.py | get_logfile_path | def get_logfile_path(working_dir):
"""
Get the logfile path for our service endpoint.
"""
logfile_filename = virtualchain_hooks.get_virtual_chain_name() + ".log"
return os.path.join( working_dir, logfile_filename ) | python | def get_logfile_path(working_dir):
"""
Get the logfile path for our service endpoint.
"""
logfile_filename = virtualchain_hooks.get_virtual_chain_name() + ".log"
return os.path.join( working_dir, logfile_filename ) | [
"def",
"get_logfile_path",
"(",
"working_dir",
")",
":",
"logfile_filename",
"=",
"virtualchain_hooks",
".",
"get_virtual_chain_name",
"(",
")",
"+",
"\".log\"",
"return",
"os",
".",
"path",
".",
"join",
"(",
"working_dir",
",",
"logfile_filename",
")"
] | Get the logfile path for our service endpoint. | [
"Get",
"the",
"logfile",
"path",
"for",
"our",
"service",
"endpoint",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L155-L160 | train | 225,354 |
blockstack/blockstack-core | blockstack/blockstackd.py | get_index_range | def get_index_range(working_dir):
"""
Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
cryptocurrency node knows about.
"""
bitcoind_session = get_bitcoind(new=True)
assert bitcoind_session is not None
first_block = None
last_block = None
wait = 1.0
while last_block is None and is_running():
first_block, last_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)
if first_block is None or last_block is None:
            # try to reconnect
log.error("Reconnect to bitcoind in {} seconds".format(wait))
time.sleep(wait)
wait = min(wait * 2.0 + random.random() * wait, 60)
bitcoind_session = get_bitcoind( new=True )
continue
else:
return first_block, last_block - NUM_CONFIRMATIONS
return None, None | python | def get_index_range(working_dir):
"""
Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
cryptocurrency node knows about.
"""
bitcoind_session = get_bitcoind(new=True)
assert bitcoind_session is not None
first_block = None
last_block = None
wait = 1.0
while last_block is None and is_running():
first_block, last_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)
if first_block is None or last_block is None:
            # try to reconnect
log.error("Reconnect to bitcoind in {} seconds".format(wait))
time.sleep(wait)
wait = min(wait * 2.0 + random.random() * wait, 60)
bitcoind_session = get_bitcoind( new=True )
continue
else:
return first_block, last_block - NUM_CONFIRMATIONS
return None, None | [
"def",
"get_index_range",
"(",
"working_dir",
")",
":",
"bitcoind_session",
"=",
"get_bitcoind",
"(",
"new",
"=",
"True",
")",
"assert",
"bitcoind_session",
"is",
"not",
"None",
"first_block",
"=",
"None",
"last_block",
"=",
"None",
"wait",
"=",
"1.0",
"while"... | Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
cryptocurrency node knows about. | [
"Get",
"the",
"bitcoin",
"block",
"index",
"range",
".",
"Mask",
"connection",
"failures",
"with",
"timeouts",
".",
"Always",
"try",
"to",
"reconnect",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L163-L197 | train | 225,355 |
blockstack/blockstack-core | blockstack/blockstackd.py | rpc_start | def rpc_start( working_dir, port, subdomain_index=None, thread=True ):
"""
Start the global RPC server thread
Returns the RPC server thread
"""
rpc_srv = BlockstackdRPCServer( working_dir, port, subdomain_index=subdomain_index )
log.debug("Starting RPC on port {}".format(port))
if thread:
rpc_srv.start()
return rpc_srv | python | def rpc_start( working_dir, port, subdomain_index=None, thread=True ):
"""
Start the global RPC server thread
Returns the RPC server thread
"""
rpc_srv = BlockstackdRPCServer( working_dir, port, subdomain_index=subdomain_index )
log.debug("Starting RPC on port {}".format(port))
if thread:
rpc_srv.start()
return rpc_srv | [
"def",
"rpc_start",
"(",
"working_dir",
",",
"port",
",",
"subdomain_index",
"=",
"None",
",",
"thread",
"=",
"True",
")",
":",
"rpc_srv",
"=",
"BlockstackdRPCServer",
"(",
"working_dir",
",",
"port",
",",
"subdomain_index",
"=",
"subdomain_index",
")",
"log",... | Start the global RPC server thread
Returns the RPC server thread | [
"Start",
"the",
"global",
"RPC",
"server",
"thread",
"Returns",
"the",
"RPC",
"server",
"thread"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2079-L2090 | train | 225,356 |
blockstack/blockstack-core | blockstack/blockstackd.py | rpc_chain_sync | def rpc_chain_sync(server_state, new_block_height, finish_time):
"""
Flush the global RPC server cache, and tell the rpc server that we've
reached the given block height at the given time.
"""
rpc_srv = server_state['rpc']
if rpc_srv is not None:
rpc_srv.cache_flush()
rpc_srv.set_last_index_time(finish_time) | python | def rpc_chain_sync(server_state, new_block_height, finish_time):
"""
Flush the global RPC server cache, and tell the rpc server that we've
reached the given block height at the given time.
"""
rpc_srv = server_state['rpc']
if rpc_srv is not None:
rpc_srv.cache_flush()
rpc_srv.set_last_index_time(finish_time) | [
"def",
"rpc_chain_sync",
"(",
"server_state",
",",
"new_block_height",
",",
"finish_time",
")",
":",
"rpc_srv",
"=",
"server_state",
"[",
"'rpc'",
"]",
"if",
"rpc_srv",
"is",
"not",
"None",
":",
"rpc_srv",
".",
"cache_flush",
"(",
")",
"rpc_srv",
".",
"set_l... | Flush the global RPC server cache, and tell the rpc server that we've
reached the given block height at the given time. | [
"Flush",
"the",
"global",
"RPC",
"server",
"cache",
"and",
"tell",
"the",
"rpc",
"server",
"that",
"we",
"ve",
"reached",
"the",
"given",
"block",
"height",
"at",
"the",
"given",
"time",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2093-L2101 | train | 225,357 |
blockstack/blockstack-core | blockstack/blockstackd.py | rpc_stop | def rpc_stop(server_state):
"""
Stop the global RPC server thread
"""
rpc_srv = server_state['rpc']
if rpc_srv is not None:
log.info("Shutting down RPC")
rpc_srv.stop_server()
rpc_srv.join()
log.info("RPC joined")
else:
log.info("RPC already joined")
server_state['rpc'] = None | python | def rpc_stop(server_state):
"""
Stop the global RPC server thread
"""
rpc_srv = server_state['rpc']
if rpc_srv is not None:
log.info("Shutting down RPC")
rpc_srv.stop_server()
rpc_srv.join()
log.info("RPC joined")
else:
log.info("RPC already joined")
server_state['rpc'] = None | [
"def",
"rpc_stop",
"(",
"server_state",
")",
":",
"rpc_srv",
"=",
"server_state",
"[",
"'rpc'",
"]",
"if",
"rpc_srv",
"is",
"not",
"None",
":",
"log",
".",
"info",
"(",
"\"Shutting down RPC\"",
")",
"rpc_srv",
".",
"stop_server",
"(",
")",
"rpc_srv",
".",
... | Stop the global RPC server thread | [
"Stop",
"the",
"global",
"RPC",
"server",
"thread"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2104-L2119 | train | 225,358 |
blockstack/blockstack-core | blockstack/blockstackd.py | gc_stop | def gc_stop():
"""
    Stop the optimistic GC thread
"""
global gc_thread
if gc_thread:
log.info("Shutting down GC thread")
gc_thread.signal_stop()
gc_thread.join()
log.info("GC thread joined")
gc_thread = None
else:
log.info("GC thread already joined") | python | def gc_stop():
"""
    Stop the optimistic GC thread
"""
global gc_thread
if gc_thread:
log.info("Shutting down GC thread")
gc_thread.signal_stop()
gc_thread.join()
log.info("GC thread joined")
gc_thread = None
else:
log.info("GC thread already joined") | [
"def",
"gc_stop",
"(",
")",
":",
"global",
"gc_thread",
"if",
"gc_thread",
":",
"log",
".",
"info",
"(",
"\"Shutting down GC thread\"",
")",
"gc_thread",
".",
"signal_stop",
"(",
")",
"gc_thread",
".",
"join",
"(",
")",
"log",
".",
"info",
"(",
"\"GC threa... | Stop a the optimistic GC thread | [
"Stop",
"a",
"the",
"optimistic",
"GC",
"thread"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2133-L2146 | train | 225,359 |
blockstack/blockstack-core | blockstack/blockstackd.py | api_start | def api_start(working_dir, host, port, thread=True):
"""
Start the global API server
Returns the API server thread
"""
api_srv = BlockstackdAPIServer( working_dir, host, port )
log.info("Starting API server on port {}".format(port))
if thread:
api_srv.start()
return api_srv | python | def api_start(working_dir, host, port, thread=True):
"""
Start the global API server
Returns the API server thread
"""
api_srv = BlockstackdAPIServer( working_dir, host, port )
log.info("Starting API server on port {}".format(port))
if thread:
api_srv.start()
return api_srv | [
"def",
"api_start",
"(",
"working_dir",
",",
"host",
",",
"port",
",",
"thread",
"=",
"True",
")",
":",
"api_srv",
"=",
"BlockstackdAPIServer",
"(",
"working_dir",
",",
"host",
",",
"port",
")",
"log",
".",
"info",
"(",
"\"Starting API server on port {}\"",
... | Start the global API server
Returns the API server thread | [
"Start",
"the",
"global",
"API",
"server",
"Returns",
"the",
"API",
"server",
"thread"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2157-L2167 | train | 225,360 |
blockstack/blockstack-core | blockstack/blockstackd.py | api_stop | def api_stop(server_state):
"""
Stop the global API server thread
"""
api_srv = server_state['api']
if api_srv is not None:
log.info("Shutting down API")
api_srv.stop_server()
api_srv.join()
log.info("API server joined")
else:
log.info("API already joined")
server_state['api'] = None | python | def api_stop(server_state):
"""
Stop the global API server thread
"""
api_srv = server_state['api']
if api_srv is not None:
log.info("Shutting down API")
api_srv.stop_server()
api_srv.join()
log.info("API server joined")
else:
log.info("API already joined")
server_state['api'] = None | [
"def",
"api_stop",
"(",
"server_state",
")",
":",
"api_srv",
"=",
"server_state",
"[",
"'api'",
"]",
"if",
"api_srv",
"is",
"not",
"None",
":",
"log",
".",
"info",
"(",
"\"Shutting down API\"",
")",
"api_srv",
".",
"stop_server",
"(",
")",
"api_srv",
".",
... | Stop the global API server thread | [
"Stop",
"the",
"global",
"API",
"server",
"thread"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2170-L2184 | train | 225,361 |
blockstack/blockstack-core | blockstack/blockstackd.py | atlas_init | def atlas_init(blockstack_opts, db, recover=False, port=None):
"""
Start up atlas functionality
"""
if port is None:
port = blockstack_opts['rpc_port']
# start atlas node
atlas_state = None
if is_atlas_enabled(blockstack_opts):
atlas_seed_peers = filter( lambda x: len(x) > 0, blockstack_opts['atlas_seeds'].split(","))
atlas_blacklist = filter( lambda x: len(x) > 0, blockstack_opts['atlas_blacklist'].split(","))
zonefile_dir = blockstack_opts['zonefiles']
my_hostname = blockstack_opts['atlas_hostname']
my_port = blockstack_opts['atlas_port']
initial_peer_table = atlasdb_init(blockstack_opts['atlasdb_path'], zonefile_dir, db, atlas_seed_peers, atlas_blacklist, validate=True, recover=recover)
atlas_peer_table_init(initial_peer_table)
atlas_state = atlas_node_init(my_hostname, my_port, blockstack_opts['atlasdb_path'], zonefile_dir, db.working_dir)
return atlas_state | python | def atlas_init(blockstack_opts, db, recover=False, port=None):
"""
Start up atlas functionality
"""
if port is None:
port = blockstack_opts['rpc_port']
# start atlas node
atlas_state = None
if is_atlas_enabled(blockstack_opts):
atlas_seed_peers = filter( lambda x: len(x) > 0, blockstack_opts['atlas_seeds'].split(","))
atlas_blacklist = filter( lambda x: len(x) > 0, blockstack_opts['atlas_blacklist'].split(","))
zonefile_dir = blockstack_opts['zonefiles']
my_hostname = blockstack_opts['atlas_hostname']
my_port = blockstack_opts['atlas_port']
initial_peer_table = atlasdb_init(blockstack_opts['atlasdb_path'], zonefile_dir, db, atlas_seed_peers, atlas_blacklist, validate=True, recover=recover)
atlas_peer_table_init(initial_peer_table)
atlas_state = atlas_node_init(my_hostname, my_port, blockstack_opts['atlasdb_path'], zonefile_dir, db.working_dir)
return atlas_state | [
"def",
"atlas_init",
"(",
"blockstack_opts",
",",
"db",
",",
"recover",
"=",
"False",
",",
"port",
"=",
"None",
")",
":",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"blockstack_opts",
"[",
"'rpc_port'",
"]",
"# start atlas node",
"atlas_state",
"=",
"No... | Start up atlas functionality | [
"Start",
"up",
"atlas",
"functionality"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2187-L2208 | train | 225,362 |
blockstack/blockstack-core | blockstack/blockstackd.py | read_pid_file | def read_pid_file(pidfile_path):
"""
Read the PID from the PID file
"""
try:
fin = open(pidfile_path, "r")
except Exception, e:
return None
else:
pid_data = fin.read().strip()
fin.close()
try:
pid = int(pid_data)
return pid
except:
return None | python | def read_pid_file(pidfile_path):
"""
Read the PID from the PID file
"""
try:
fin = open(pidfile_path, "r")
except Exception, e:
return None
else:
pid_data = fin.read().strip()
fin.close()
try:
pid = int(pid_data)
return pid
except:
return None | [
"def",
"read_pid_file",
"(",
"pidfile_path",
")",
":",
"try",
":",
"fin",
"=",
"open",
"(",
"pidfile_path",
",",
"\"r\"",
")",
"except",
"Exception",
",",
"e",
":",
"return",
"None",
"else",
":",
"pid_data",
"=",
"fin",
".",
"read",
"(",
")",
".",
"s... | Read the PID from the PID file | [
"Read",
"the",
"PID",
"from",
"the",
"PID",
"file"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2220-L2238 | train | 225,363 |
blockstack/blockstack-core | blockstack/blockstackd.py | check_server_running | def check_server_running(pid):
"""
Determine if the given process is running
"""
if pid == os.getpid():
# special case--we're in Docker or some other kind of container
# (or we got really unlucky and got the same PID twice).
# this PID does not correspond to another running server, either way.
return False
try:
os.kill(pid, 0)
return True
except OSError as oe:
if oe.errno == errno.ESRCH:
return False
else:
raise | python | def check_server_running(pid):
"""
Determine if the given process is running
"""
if pid == os.getpid():
# special case--we're in Docker or some other kind of container
# (or we got really unlucky and got the same PID twice).
# this PID does not correspond to another running server, either way.
return False
try:
os.kill(pid, 0)
return True
except OSError as oe:
if oe.errno == errno.ESRCH:
return False
else:
raise | [
"def",
"check_server_running",
"(",
"pid",
")",
":",
"if",
"pid",
"==",
"os",
".",
"getpid",
"(",
")",
":",
"# special case--we're in Docker or some other kind of container",
"# (or we got really unlucky and got the same PID twice).",
"# this PID does not correspond to another runn... | Determine if the given process is running | [
"Determine",
"if",
"the",
"given",
"process",
"is",
"running"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2241-L2258 | train | 225,364 |
blockstack/blockstack-core | blockstack/blockstackd.py | stop_server | def stop_server( working_dir, clean=False, kill=False ):
"""
Stop the blockstackd server.
"""
timeout = 1.0
dead = False
for i in xrange(0, 5):
# try to kill the main supervisor
pid_file = get_pidfile_path(working_dir)
if not os.path.exists(pid_file):
dead = True
break
pid = read_pid_file(pid_file)
if pid is not None:
try:
os.kill(pid, signal.SIGTERM)
except OSError, oe:
if oe.errno == errno.ESRCH:
# already dead
log.info("Process %s is not running" % pid)
try:
os.unlink(pid_file)
except:
pass
return
except Exception, e:
log.exception(e)
os.abort()
else:
log.info("Corrupt PID file. Please make sure all instances of this program have stopped and remove {}".format(pid_file))
os.abort()
# is it actually dead?
blockstack_opts = get_blockstack_opts()
srv = BlockstackRPCClient('localhost', blockstack_opts['rpc_port'], timeout=5, protocol='http')
try:
res = blockstack_ping(proxy=srv)
except socket.error as se:
# dead?
if se.errno == errno.ECONNREFUSED:
# couldn't connect, so infer dead
try:
os.kill(pid, 0)
log.info("Server %s is not dead yet..." % pid)
except OSError, oe:
log.info("Server %s is dead to us" % pid)
dead = True
break
else:
continue
log.info("Server %s is still running; trying again in %s seconds" % (pid, timeout))
time.sleep(timeout)
timeout *= 2
if not dead and kill:
# be sure to clean up the pidfile
log.info("Killing server %s" % pid)
clean = True
try:
os.kill(pid, signal.SIGKILL)
except Exception, e:
pass
if clean:
# blow away the pid file
try:
os.unlink(pid_file)
except:
pass
log.debug("Blockstack server stopped") | python | def stop_server( working_dir, clean=False, kill=False ):
"""
Stop the blockstackd server.
"""
timeout = 1.0
dead = False
for i in xrange(0, 5):
# try to kill the main supervisor
pid_file = get_pidfile_path(working_dir)
if not os.path.exists(pid_file):
dead = True
break
pid = read_pid_file(pid_file)
if pid is not None:
try:
os.kill(pid, signal.SIGTERM)
except OSError, oe:
if oe.errno == errno.ESRCH:
# already dead
log.info("Process %s is not running" % pid)
try:
os.unlink(pid_file)
except:
pass
return
except Exception, e:
log.exception(e)
os.abort()
else:
log.info("Corrupt PID file. Please make sure all instances of this program have stopped and remove {}".format(pid_file))
os.abort()
# is it actually dead?
blockstack_opts = get_blockstack_opts()
srv = BlockstackRPCClient('localhost', blockstack_opts['rpc_port'], timeout=5, protocol='http')
try:
res = blockstack_ping(proxy=srv)
except socket.error as se:
# dead?
if se.errno == errno.ECONNREFUSED:
# couldn't connect, so infer dead
try:
os.kill(pid, 0)
log.info("Server %s is not dead yet..." % pid)
except OSError, oe:
log.info("Server %s is dead to us" % pid)
dead = True
break
else:
continue
log.info("Server %s is still running; trying again in %s seconds" % (pid, timeout))
time.sleep(timeout)
timeout *= 2
if not dead and kill:
# be sure to clean up the pidfile
log.info("Killing server %s" % pid)
clean = True
try:
os.kill(pid, signal.SIGKILL)
except Exception, e:
pass
if clean:
# blow away the pid file
try:
os.unlink(pid_file)
except:
pass
log.debug("Blockstack server stopped") | [
"def",
"stop_server",
"(",
"working_dir",
",",
"clean",
"=",
"False",
",",
"kill",
"=",
"False",
")",
":",
"timeout",
"=",
"1.0",
"dead",
"=",
"False",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"5",
")",
":",
"# try to kill the main supervisor",
"pid_fi... | Stop the blockstackd server. | [
"Stop",
"the",
"blockstackd",
"server",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2261-L2340 | train | 225,365 |
blockstack/blockstack-core | blockstack/blockstackd.py | genesis_block_load | def genesis_block_load(module_path=None):
"""
Make sure the genesis block is good to go.
Load and instantiate it.
"""
if os.environ.get('BLOCKSTACK_GENESIS_BLOCK_PATH') is not None:
log.warning('Using envar-given genesis block')
module_path = os.environ['BLOCKSTACK_GENESIS_BLOCK_PATH']
genesis_block = None
genesis_block_stages = None
if module_path:
log.debug('Load genesis block from {}'.format(module_path))
genesis_block_path = module_path
try:
genesis_block_mod = imp.load_source('genesis_block', genesis_block_path)
genesis_block = genesis_block_mod.GENESIS_BLOCK
genesis_block_stages = genesis_block_mod.GENESIS_BLOCK_STAGES
if BLOCKSTACK_TEST:
print ''
print 'genesis block'
print json.dumps(genesis_block, indent=4, sort_keys=True)
print ''
except Exception as e:
log.exception(e)
log.fatal('Failed to load genesis block')
os.abort()
else:
log.debug('Load built-in genesis block')
genesis_block = get_genesis_block()
genesis_block_stages = get_genesis_block_stages()
try:
for stage in genesis_block_stages:
jsonschema.validate(GENESIS_BLOCK_SCHEMA, stage)
jsonschema.validate(GENESIS_BLOCK_SCHEMA, genesis_block)
set_genesis_block(genesis_block)
set_genesis_block_stages(genesis_block_stages)
log.debug('Genesis block has {} stages'.format(len(genesis_block_stages)))
for i, stage in enumerate(genesis_block_stages):
log.debug('Stage {} has {} row(s)'.format(i+1, len(stage['rows'])))
except Exception as e:
log.fatal("Invalid genesis block")
os.abort()
return True | python | def genesis_block_load(module_path=None):
"""
Make sure the genesis block is good to go.
Load and instantiate it.
"""
if os.environ.get('BLOCKSTACK_GENESIS_BLOCK_PATH') is not None:
log.warning('Using envar-given genesis block')
module_path = os.environ['BLOCKSTACK_GENESIS_BLOCK_PATH']
genesis_block = None
genesis_block_stages = None
if module_path:
log.debug('Load genesis block from {}'.format(module_path))
genesis_block_path = module_path
try:
genesis_block_mod = imp.load_source('genesis_block', genesis_block_path)
genesis_block = genesis_block_mod.GENESIS_BLOCK
genesis_block_stages = genesis_block_mod.GENESIS_BLOCK_STAGES
if BLOCKSTACK_TEST:
print ''
print 'genesis block'
print json.dumps(genesis_block, indent=4, sort_keys=True)
print ''
except Exception as e:
log.exception(e)
log.fatal('Failed to load genesis block')
os.abort()
else:
log.debug('Load built-in genesis block')
genesis_block = get_genesis_block()
genesis_block_stages = get_genesis_block_stages()
try:
for stage in genesis_block_stages:
jsonschema.validate(GENESIS_BLOCK_SCHEMA, stage)
jsonschema.validate(GENESIS_BLOCK_SCHEMA, genesis_block)
set_genesis_block(genesis_block)
set_genesis_block_stages(genesis_block_stages)
log.debug('Genesis block has {} stages'.format(len(genesis_block_stages)))
for i, stage in enumerate(genesis_block_stages):
log.debug('Stage {} has {} row(s)'.format(i+1, len(stage['rows'])))
except Exception as e:
log.fatal("Invalid genesis block")
os.abort()
return True | [
"def",
"genesis_block_load",
"(",
"module_path",
"=",
"None",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'BLOCKSTACK_GENESIS_BLOCK_PATH'",
")",
"is",
"not",
"None",
":",
"log",
".",
"warning",
"(",
"'Using envar-given genesis block'",
")",
"module_p... | Make sure the genesis block is good to go.
Load and instantiate it. | [
"Make",
"sure",
"the",
"genesis",
"block",
"is",
"good",
"to",
"go",
".",
"Load",
"and",
"instantiate",
"it",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2431-L2484 | train | 225,366 |
blockstack/blockstack-core | blockstack/blockstackd.py | server_setup | def server_setup(working_dir, port=None, api_port=None, indexer_enabled=None, indexer_url=None, api_enabled=None, recover=False):
"""
Set up the server.
Start all subsystems, write pid file, set up signal handlers, set up DB.
Returns a server instance.
"""
if not is_genesis_block_instantiated():
# default genesis block
genesis_block_load()
blockstack_opts = get_blockstack_opts()
blockstack_api_opts = get_blockstack_api_opts()
pid_file = get_pidfile_path(working_dir)
indexer_enabled = indexer_enabled if indexer_enabled is not None else blockstack_opts['enabled']
api_enabled = api_enabled if api_enabled is not None else blockstack_api_opts['enabled']
indexer_url = indexer_url if indexer_url is not None else blockstack_api_opts.get('indexer_url', None)
# sanity check
if api_enabled and not indexer_url:
print("FATAL: no 'indexer_url' in the config file, and no --indexer_url given in the arguments")
sys.exit(1)
if port is None:
port = blockstack_opts['rpc_port']
if api_port is None:
api_port = blockstack_api_opts['api_port']
# set up signals
signal.signal( signal.SIGINT, blockstack_signal_handler )
signal.signal( signal.SIGQUIT, blockstack_signal_handler )
signal.signal( signal.SIGTERM, blockstack_signal_handler )
# put pid file
put_pidfile(pid_file, os.getpid())
# clear indexing state
set_indexing(working_dir, False)
# process overrides
if blockstack_opts['enabled'] != indexer_enabled:
log.debug("Override blockstack.enabled to {}".format(indexer_enabled))
blockstack_opts['enabled'] = indexer_enabled
set_blockstack_opts(blockstack_opts)
if blockstack_api_opts['enabled'] != api_enabled:
log.debug("Override blockstack-api.enabled to {}".format(indexer_enabled))
blockstack_api_opts['enabled'] = api_enabled
set_blockstack_api_opts(blockstack_api_opts)
if blockstack_api_opts['indexer_url'] != indexer_url:
log.debug("Override blockstack-api.indexer_url to {}".format(indexer_url))
blockstack_api_opts['indexer_url'] = indexer_url
set_blockstack_api_opts(blockstack_api_opts)
# start API servers
rpc_srv = None
api_srv = None
atlas_state = None
subdomain_state = None
if blockstack_opts['enabled']:
# get db state
db = get_or_instantiate_db_state(working_dir)
# set up atlas state, if we're an indexer
atlas_state = atlas_init(blockstack_opts, db, port=port, recover=recover)
db.close()
# set up subdomains state
subdomain_state = subdomains_init(blockstack_opts, working_dir, atlas_state)
# start atlas node
if atlas_state:
atlas_node_start(atlas_state)
# start back-plane API server
rpc_srv = rpc_start(working_dir, port, subdomain_index=subdomain_state, thread=False)
if blockstack_api_opts['enabled']:
# start public RESTful API server
api_srv = api_start(working_dir, blockstack_api_opts['api_host'], api_port, thread=False)
if rpc_srv:
rpc_srv.start()
if api_srv:
api_srv.start()
# start GC
gc_start()
set_running(True)
# clear any stale indexing state
set_indexing(working_dir, False)
log.debug("Server setup: API = {}, Indexer = {}, Indexer URL = {}".format(blockstack_api_opts['enabled'], blockstack_opts['enabled'], blockstack_api_opts['indexer_url']))
ret = {
'working_dir': working_dir,
'atlas': atlas_state,
'subdomains': subdomain_state,
'subdomains_initialized': False,
'rpc': rpc_srv,
'api': api_srv,
'pid_file': pid_file,
'port': port,
'api_port': api_port
}
return ret | python | def server_setup(working_dir, port=None, api_port=None, indexer_enabled=None, indexer_url=None, api_enabled=None, recover=False):
"""
Set up the server.
Start all subsystems, write pid file, set up signal handlers, set up DB.
Returns a server instance.
"""
if not is_genesis_block_instantiated():
# default genesis block
genesis_block_load()
blockstack_opts = get_blockstack_opts()
blockstack_api_opts = get_blockstack_api_opts()
pid_file = get_pidfile_path(working_dir)
indexer_enabled = indexer_enabled if indexer_enabled is not None else blockstack_opts['enabled']
api_enabled = api_enabled if api_enabled is not None else blockstack_api_opts['enabled']
indexer_url = indexer_url if indexer_url is not None else blockstack_api_opts.get('indexer_url', None)
# sanity check
if api_enabled and not indexer_url:
print("FATAL: no 'indexer_url' in the config file, and no --indexer_url given in the arguments")
sys.exit(1)
if port is None:
port = blockstack_opts['rpc_port']
if api_port is None:
api_port = blockstack_api_opts['api_port']
# set up signals
signal.signal( signal.SIGINT, blockstack_signal_handler )
signal.signal( signal.SIGQUIT, blockstack_signal_handler )
signal.signal( signal.SIGTERM, blockstack_signal_handler )
# put pid file
put_pidfile(pid_file, os.getpid())
# clear indexing state
set_indexing(working_dir, False)
# process overrides
if blockstack_opts['enabled'] != indexer_enabled:
log.debug("Override blockstack.enabled to {}".format(indexer_enabled))
blockstack_opts['enabled'] = indexer_enabled
set_blockstack_opts(blockstack_opts)
if blockstack_api_opts['enabled'] != api_enabled:
log.debug("Override blockstack-api.enabled to {}".format(indexer_enabled))
blockstack_api_opts['enabled'] = api_enabled
set_blockstack_api_opts(blockstack_api_opts)
if blockstack_api_opts['indexer_url'] != indexer_url:
log.debug("Override blockstack-api.indexer_url to {}".format(indexer_url))
blockstack_api_opts['indexer_url'] = indexer_url
set_blockstack_api_opts(blockstack_api_opts)
# start API servers
rpc_srv = None
api_srv = None
atlas_state = None
subdomain_state = None
if blockstack_opts['enabled']:
# get db state
db = get_or_instantiate_db_state(working_dir)
# set up atlas state, if we're an indexer
atlas_state = atlas_init(blockstack_opts, db, port=port, recover=recover)
db.close()
# set up subdomains state
subdomain_state = subdomains_init(blockstack_opts, working_dir, atlas_state)
# start atlas node
if atlas_state:
atlas_node_start(atlas_state)
# start back-plane API server
rpc_srv = rpc_start(working_dir, port, subdomain_index=subdomain_state, thread=False)
if blockstack_api_opts['enabled']:
# start public RESTful API server
api_srv = api_start(working_dir, blockstack_api_opts['api_host'], api_port, thread=False)
if rpc_srv:
rpc_srv.start()
if api_srv:
api_srv.start()
# start GC
gc_start()
set_running(True)
# clear any stale indexing state
set_indexing(working_dir, False)
log.debug("Server setup: API = {}, Indexer = {}, Indexer URL = {}".format(blockstack_api_opts['enabled'], blockstack_opts['enabled'], blockstack_api_opts['indexer_url']))
ret = {
'working_dir': working_dir,
'atlas': atlas_state,
'subdomains': subdomain_state,
'subdomains_initialized': False,
'rpc': rpc_srv,
'api': api_srv,
'pid_file': pid_file,
'port': port,
'api_port': api_port
}
return ret | [
"def",
"server_setup",
"(",
"working_dir",
",",
"port",
"=",
"None",
",",
"api_port",
"=",
"None",
",",
"indexer_enabled",
"=",
"None",
",",
"indexer_url",
"=",
"None",
",",
"api_enabled",
"=",
"None",
",",
"recover",
"=",
"False",
")",
":",
"if",
"not",... | Set up the server.
Start all subsystems, write pid file, set up signal handlers, set up DB.
Returns a server instance. | [
"Set",
"up",
"the",
"server",
".",
"Start",
"all",
"subsystems",
"write",
"pid",
"file",
"set",
"up",
"signal",
"handlers",
"set",
"up",
"DB",
".",
"Returns",
"a",
"server",
"instance",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2487-L2599 | train | 225,367 |
blockstack/blockstack-core | blockstack/blockstackd.py | server_shutdown | def server_shutdown(server_state):
"""
Shut down server subsystems.
Remove PID file.
"""
set_running( False )
# stop API servers
rpc_stop(server_state)
api_stop(server_state)
# stop atlas node
server_atlas_shutdown(server_state)
# stopping GC
gc_stop()
# clear PID file
try:
if os.path.exists(server_state['pid_file']):
os.unlink(server_state['pid_file'])
except:
pass
return True | python | def server_shutdown(server_state):
"""
Shut down server subsystems.
Remove PID file.
"""
set_running( False )
# stop API servers
rpc_stop(server_state)
api_stop(server_state)
# stop atlas node
server_atlas_shutdown(server_state)
# stopping GC
gc_stop()
# clear PID file
try:
if os.path.exists(server_state['pid_file']):
os.unlink(server_state['pid_file'])
except:
pass
return True | [
"def",
"server_shutdown",
"(",
"server_state",
")",
":",
"set_running",
"(",
"False",
")",
"# stop API servers",
"rpc_stop",
"(",
"server_state",
")",
"api_stop",
"(",
"server_state",
")",
"# stop atlas node",
"server_atlas_shutdown",
"(",
"server_state",
")",
"# stop... | Shut down server subsystems.
Remove PID file. | [
"Shut",
"down",
"server",
"subsystems",
".",
"Remove",
"PID",
"file",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2614-L2638 | train | 225,368 |
blockstack/blockstack-core | blockstack/blockstackd.py | run_server | def run_server(working_dir, foreground=False, expected_snapshots=GENESIS_SNAPSHOT, port=None, api_port=None, use_api=None, use_indexer=None, indexer_url=None, recover=False):
"""
Run blockstackd. Optionally daemonize.
Return 0 on success
Return negative on error
"""
global rpc_server
global api_server
indexer_log_path = get_logfile_path(working_dir)
logfile = None
if not foreground:
if os.path.exists(indexer_log_path):
logfile = open(indexer_log_path, 'a')
else:
logfile = open(indexer_log_path, 'a+')
child_pid = daemonize(logfile)
if child_pid < 0:
log.error("Failed to daemonize: {}".format(child_pid))
return -1
if child_pid > 0:
# we're the parent
log.debug("Running in the background as PID {}".format(child_pid))
sys.exit(0)
server_state = server_setup(working_dir, port=port, api_port=api_port, indexer_enabled=use_indexer, indexer_url=indexer_url, api_enabled=use_api, recover=recover)
atexit.register(server_shutdown, server_state)
rpc_server = server_state['rpc']
blockstack_opts = get_blockstack_opts()
blockstack_api_opts = get_blockstack_api_opts()
if blockstack_opts['enabled']:
log.debug("Begin Indexing")
while is_running():
try:
running = index_blockchain(server_state, expected_snapshots=expected_snapshots)
except Exception, e:
log.exception(e)
log.error("FATAL: caught exception while indexing")
os.abort()
# wait for the next block
deadline = time.time() + REINDEX_FREQUENCY
while time.time() < deadline and is_running():
try:
time.sleep(1.0)
except:
# interrupt
break
log.debug("End Indexing")
elif blockstack_api_opts['enabled']:
log.debug("Begin serving REST requests")
while is_running():
try:
time.sleep(1.0)
except:
# interrupt
break
log.debug("End serving REST requests")
server_shutdown(server_state)
# close logfile
if logfile is not None:
logfile.flush()
logfile.close()
return 0 | python | def run_server(working_dir, foreground=False, expected_snapshots=GENESIS_SNAPSHOT, port=None, api_port=None, use_api=None, use_indexer=None, indexer_url=None, recover=False):
"""
Run blockstackd. Optionally daemonize.
Return 0 on success
Return negative on error
"""
global rpc_server
global api_server
indexer_log_path = get_logfile_path(working_dir)
logfile = None
if not foreground:
if os.path.exists(indexer_log_path):
logfile = open(indexer_log_path, 'a')
else:
logfile = open(indexer_log_path, 'a+')
child_pid = daemonize(logfile)
if child_pid < 0:
log.error("Failed to daemonize: {}".format(child_pid))
return -1
if child_pid > 0:
# we're the parent
log.debug("Running in the background as PID {}".format(child_pid))
sys.exit(0)
server_state = server_setup(working_dir, port=port, api_port=api_port, indexer_enabled=use_indexer, indexer_url=indexer_url, api_enabled=use_api, recover=recover)
atexit.register(server_shutdown, server_state)
rpc_server = server_state['rpc']
blockstack_opts = get_blockstack_opts()
blockstack_api_opts = get_blockstack_api_opts()
if blockstack_opts['enabled']:
log.debug("Begin Indexing")
while is_running():
try:
running = index_blockchain(server_state, expected_snapshots=expected_snapshots)
except Exception, e:
log.exception(e)
log.error("FATAL: caught exception while indexing")
os.abort()
# wait for the next block
deadline = time.time() + REINDEX_FREQUENCY
while time.time() < deadline and is_running():
try:
time.sleep(1.0)
except:
# interrupt
break
log.debug("End Indexing")
elif blockstack_api_opts['enabled']:
log.debug("Begin serving REST requests")
while is_running():
try:
time.sleep(1.0)
except:
# interrupt
break
log.debug("End serving REST requests")
server_shutdown(server_state)
# close logfile
if logfile is not None:
logfile.flush()
logfile.close()
return 0 | [
"def",
"run_server",
"(",
"working_dir",
",",
"foreground",
"=",
"False",
",",
"expected_snapshots",
"=",
"GENESIS_SNAPSHOT",
",",
"port",
"=",
"None",
",",
"api_port",
"=",
"None",
",",
"use_api",
"=",
"None",
",",
"use_indexer",
"=",
"None",
",",
"indexer_... | Run blockstackd. Optionally daemonize.
Return 0 on success
Return negative on error | [
"Run",
"blockstackd",
".",
"Optionally",
"daemonize",
".",
"Return",
"0",
"on",
"success",
"Return",
"negative",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2641-L2716 | train | 225,369 |
blockstack/blockstack-core | blockstack/blockstackd.py | setup | def setup(working_dir, interactive=False):
"""
Do one-time initialization.
Call this to set up global state.
"""
# set up our implementation
log.debug("Working dir: {}".format(working_dir))
if not os.path.exists( working_dir ):
os.makedirs( working_dir, 0700 )
node_config = load_configuration(working_dir)
if node_config is None:
sys.exit(1)
log.debug("config\n{}".format(json.dumps(node_config, indent=4, sort_keys=True)))
return node_config | python | def setup(working_dir, interactive=False):
"""
Do one-time initialization.
Call this to set up global state.
"""
# set up our implementation
log.debug("Working dir: {}".format(working_dir))
if not os.path.exists( working_dir ):
os.makedirs( working_dir, 0700 )
node_config = load_configuration(working_dir)
if node_config is None:
sys.exit(1)
log.debug("config\n{}".format(json.dumps(node_config, indent=4, sort_keys=True)))
return node_config | [
"def",
"setup",
"(",
"working_dir",
",",
"interactive",
"=",
"False",
")",
":",
"# set up our implementation",
"log",
".",
"debug",
"(",
"\"Working dir: {}\"",
".",
"format",
"(",
"working_dir",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
... | Do one-time initialization.
Call this to set up global state. | [
"Do",
"one",
"-",
"time",
"initialization",
".",
"Call",
"this",
"to",
"set",
"up",
"global",
"state",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2719-L2734 | train | 225,370 |
blockstack/blockstack-core | blockstack/blockstackd.py | reconfigure | def reconfigure(working_dir):
"""
Reconfigure blockstackd.
"""
configure(working_dir, force=True, interactive=True)
print "Blockstack successfully reconfigured."
sys.exit(0) | python | def reconfigure(working_dir):
"""
Reconfigure blockstackd.
"""
configure(working_dir, force=True, interactive=True)
print "Blockstack successfully reconfigured."
sys.exit(0) | [
"def",
"reconfigure",
"(",
"working_dir",
")",
":",
"configure",
"(",
"working_dir",
",",
"force",
"=",
"True",
",",
"interactive",
"=",
"True",
")",
"print",
"\"Blockstack successfully reconfigured.\"",
"sys",
".",
"exit",
"(",
"0",
")"
] | Reconfigure blockstackd. | [
"Reconfigure",
"blockstackd",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2737-L2743 | train | 225,371 |
blockstack/blockstack-core | blockstack/blockstackd.py | verify_database | def verify_database(trusted_consensus_hash, consensus_block_height, untrusted_working_dir, trusted_working_dir, start_block=None, expected_snapshots={}):
"""
Verify that a database is consistent with a
known-good consensus hash.
Return True if valid.
Return False if not
"""
db = BlockstackDB.get_readwrite_instance(trusted_working_dir)
consensus_impl = virtualchain_hooks
return virtualchain.state_engine_verify(trusted_consensus_hash, consensus_block_height, consensus_impl, untrusted_working_dir, db, start_block=start_block, expected_snapshots=expected_snapshots) | python | def verify_database(trusted_consensus_hash, consensus_block_height, untrusted_working_dir, trusted_working_dir, start_block=None, expected_snapshots={}):
"""
Verify that a database is consistent with a
known-good consensus hash.
Return True if valid.
Return False if not
"""
db = BlockstackDB.get_readwrite_instance(trusted_working_dir)
consensus_impl = virtualchain_hooks
return virtualchain.state_engine_verify(trusted_consensus_hash, consensus_block_height, consensus_impl, untrusted_working_dir, db, start_block=start_block, expected_snapshots=expected_snapshots) | [
"def",
"verify_database",
"(",
"trusted_consensus_hash",
",",
"consensus_block_height",
",",
"untrusted_working_dir",
",",
"trusted_working_dir",
",",
"start_block",
"=",
"None",
",",
"expected_snapshots",
"=",
"{",
"}",
")",
":",
"db",
"=",
"BlockstackDB",
".",
"ge... | Verify that a database is consistent with a
known-good consensus hash.
Return True if valid.
Return False if not | [
"Verify",
"that",
"a",
"database",
"is",
"consistent",
"with",
"a",
"known",
"-",
"good",
"consensus",
"hash",
".",
"Return",
"True",
"if",
"valid",
".",
"Return",
"False",
"if",
"not"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2746-L2755 | train | 225,372 |
blockstack/blockstack-core | blockstack/blockstackd.py | check_and_set_envars | def check_and_set_envars( argv ):
"""
Go through argv and find any special command-line flags
that set environment variables that affect multiple modules.
If any of them are given, then set them in this process's
environment and re-exec the process without the CLI flags.
argv should be like sys.argv: argv[0] is the binary
Does not return on re-exec.
Returns {args} on success
Returns False on error.
"""
special_flags = {
'--debug': {
'arg': False,
'envar': 'BLOCKSTACK_DEBUG',
'exec': True,
},
'--verbose': {
'arg': False,
'envar': 'BLOCKSTACK_DEBUG',
'exec': True,
},
'--testnet-id': {
'arg': True,
'envar': 'BLOCKSTACK_TESTNET_ID',
'exec': True,
},
'--testnet-start-block': {
'arg': True,
'envar': 'BLOCKSTACK_TESTNET_START_BLOCK',
'exec': True,
},
'--working_dir': {
'arg': True,
'argname': 'working_dir',
'exec': False,
},
'--working-dir': {
'arg': True,
'argname': 'working_dir',
'exec': False,
},
}
cli_envs = {}
cli_args = {}
new_argv = []
stripped_argv = []
do_exec = False
i = 0
while i < len(argv):
arg = argv[i]
value = None
for special_flag in special_flags.keys():
if not arg.startswith(special_flag):
continue
if special_flags[special_flag]['arg']:
if '=' in arg:
argparts = arg.split("=")
value_parts = argparts[1:]
arg = argparts[0]
value = '='.join(value_parts)
elif i + 1 < len(argv):
value = argv[i+1]
i += 1
else:
print >> sys.stderr, "%s requires an argument" % special_flag
return False
else:
# just set
value = "1"
break
i += 1
if value is not None:
if 'envar' in special_flags[special_flag]:
# recognized
cli_envs[ special_flags[special_flag]['envar'] ] = value
if 'argname' in special_flags[special_flag]:
# recognized as special argument
cli_args[ special_flags[special_flag]['argname'] ] = value
new_argv.append(arg)
new_argv.append(value)
if special_flags[special_flag]['exec']:
do_exec = True
else:
# not recognized
new_argv.append(arg)
stripped_argv.append(arg)
if do_exec:
# re-exec
for cli_env, cli_env_value in cli_envs.items():
os.environ[cli_env] = cli_env_value
if os.environ.get("BLOCKSTACK_DEBUG") is not None:
print "Re-exec as {}".format(" ".join(new_argv))
os.execv(new_argv[0], new_argv)
log.debug("Stripped argv: {}".format(' '.join(stripped_argv)))
return cli_args, stripped_argv | python | def check_and_set_envars( argv ):
"""
Go through argv and find any special command-line flags
that set environment variables that affect multiple modules.
If any of them are given, then set them in this process's
environment and re-exec the process without the CLI flags.
argv should be like sys.argv: argv[0] is the binary
Does not return on re-exec.
Returns {args} on success
Returns False on error.
"""
special_flags = {
'--debug': {
'arg': False,
'envar': 'BLOCKSTACK_DEBUG',
'exec': True,
},
'--verbose': {
'arg': False,
'envar': 'BLOCKSTACK_DEBUG',
'exec': True,
},
'--testnet-id': {
'arg': True,
'envar': 'BLOCKSTACK_TESTNET_ID',
'exec': True,
},
'--testnet-start-block': {
'arg': True,
'envar': 'BLOCKSTACK_TESTNET_START_BLOCK',
'exec': True,
},
'--working_dir': {
'arg': True,
'argname': 'working_dir',
'exec': False,
},
'--working-dir': {
'arg': True,
'argname': 'working_dir',
'exec': False,
},
}
cli_envs = {}
cli_args = {}
new_argv = []
stripped_argv = []
do_exec = False
i = 0
while i < len(argv):
arg = argv[i]
value = None
for special_flag in special_flags.keys():
if not arg.startswith(special_flag):
continue
if special_flags[special_flag]['arg']:
if '=' in arg:
argparts = arg.split("=")
value_parts = argparts[1:]
arg = argparts[0]
value = '='.join(value_parts)
elif i + 1 < len(argv):
value = argv[i+1]
i += 1
else:
print >> sys.stderr, "%s requires an argument" % special_flag
return False
else:
# just set
value = "1"
break
i += 1
if value is not None:
if 'envar' in special_flags[special_flag]:
# recognized
cli_envs[ special_flags[special_flag]['envar'] ] = value
if 'argname' in special_flags[special_flag]:
# recognized as special argument
cli_args[ special_flags[special_flag]['argname'] ] = value
new_argv.append(arg)
new_argv.append(value)
if special_flags[special_flag]['exec']:
do_exec = True
else:
# not recognized
new_argv.append(arg)
stripped_argv.append(arg)
if do_exec:
# re-exec
for cli_env, cli_env_value in cli_envs.items():
os.environ[cli_env] = cli_env_value
if os.environ.get("BLOCKSTACK_DEBUG") is not None:
print "Re-exec as {}".format(" ".join(new_argv))
os.execv(new_argv[0], new_argv)
log.debug("Stripped argv: {}".format(' '.join(stripped_argv)))
return cli_args, stripped_argv | [
"def",
"check_and_set_envars",
"(",
"argv",
")",
":",
"special_flags",
"=",
"{",
"'--debug'",
":",
"{",
"'arg'",
":",
"False",
",",
"'envar'",
":",
"'BLOCKSTACK_DEBUG'",
",",
"'exec'",
":",
"True",
",",
"}",
",",
"'--verbose'",
":",
"{",
"'arg'",
":",
"F... | Go through argv and find any special command-line flags
that set environment variables that affect multiple modules.
If any of them are given, then set them in this process's
environment and re-exec the process without the CLI flags.
argv should be like sys.argv: argv[0] is the binary
Does not return on re-exec.
Returns {args} on success
Returns False on error. | [
"Go",
"through",
"argv",
"and",
"find",
"any",
"special",
"command",
"-",
"line",
"flags",
"that",
"set",
"environment",
"variables",
"that",
"affect",
"multiple",
"modules",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2758-L2875 | train | 225,373 |
blockstack/blockstack-core | blockstack/blockstackd.py | load_expected_snapshots | def load_expected_snapshots( snapshots_path ):
"""
Load expected consensus hashes from a .snapshots file.
Return the snapshots as a dict on success
Return None on error
"""
# use snapshots?
snapshots_path = os.path.expanduser(snapshots_path)
expected_snapshots = {}
# legacy chainstate?
try:
with open(snapshots_path, "r") as f:
snapshots_json = f.read()
snapshots_data = json.loads(snapshots_json)
assert 'snapshots' in snapshots_data.keys(), "Not a valid snapshots file"
# extract snapshots: map int to consensus hash
for (block_id_str, consensus_hash) in snapshots_data['snapshots'].items():
expected_snapshots[ int(block_id_str) ] = str(consensus_hash)
log.debug("Loaded expected snapshots from legacy JSON {}; {} entries".format(snapshots_path, len(expected_snapshots)))
return expected_snapshots
except ValueError as ve:
log.debug("Snapshots file {} is not JSON".format(snapshots_path))
except Exception as e:
if os.environ.get('BLOCKSTACK_DEBUG') == '1':
log.exception(e)
log.debug("Failed to read expected snapshots from '{}'".format(snapshots_path))
return None
try:
# sqlite3 db?
db_con = virtualchain.StateEngine.db_connect(snapshots_path)
expected_snapshots = virtualchain.StateEngine.get_consensus_hashes(None, None, db_con=db_con, completeness_check=False)
log.debug("Loaded expected snapshots from chainstate DB {}, {} entries".format(snapshots_path, len(expected_snapshots)))
return expected_snapshots
except:
log.debug("{} does not appear to be a chainstate DB".format(snapshots_path))
return None | python | def load_expected_snapshots( snapshots_path ):
"""
Load expected consensus hashes from a .snapshots file.
Return the snapshots as a dict on success
Return None on error
"""
# use snapshots?
snapshots_path = os.path.expanduser(snapshots_path)
expected_snapshots = {}
# legacy chainstate?
try:
with open(snapshots_path, "r") as f:
snapshots_json = f.read()
snapshots_data = json.loads(snapshots_json)
assert 'snapshots' in snapshots_data.keys(), "Not a valid snapshots file"
# extract snapshots: map int to consensus hash
for (block_id_str, consensus_hash) in snapshots_data['snapshots'].items():
expected_snapshots[ int(block_id_str) ] = str(consensus_hash)
log.debug("Loaded expected snapshots from legacy JSON {}; {} entries".format(snapshots_path, len(expected_snapshots)))
return expected_snapshots
except ValueError as ve:
log.debug("Snapshots file {} is not JSON".format(snapshots_path))
except Exception as e:
if os.environ.get('BLOCKSTACK_DEBUG') == '1':
log.exception(e)
log.debug("Failed to read expected snapshots from '{}'".format(snapshots_path))
return None
try:
# sqlite3 db?
db_con = virtualchain.StateEngine.db_connect(snapshots_path)
expected_snapshots = virtualchain.StateEngine.get_consensus_hashes(None, None, db_con=db_con, completeness_check=False)
log.debug("Loaded expected snapshots from chainstate DB {}, {} entries".format(snapshots_path, len(expected_snapshots)))
return expected_snapshots
except:
log.debug("{} does not appear to be a chainstate DB".format(snapshots_path))
return None | [
"def",
"load_expected_snapshots",
"(",
"snapshots_path",
")",
":",
"# use snapshots?",
"snapshots_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"snapshots_path",
")",
"expected_snapshots",
"=",
"{",
"}",
"# legacy chainstate?",
"try",
":",
"with",
"open",
... | Load expected consensus hashes from a .snapshots file.
Return the snapshots as a dict on success
Return None on error | [
"Load",
"expected",
"consensus",
"hashes",
"from",
"a",
".",
"snapshots",
"file",
".",
"Return",
"the",
"snapshots",
"as",
"a",
"dict",
"on",
"success",
"Return",
"None",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2878-L2923 | train | 225,374 |
blockstack/blockstack-core | blockstack/blockstackd.py | do_genesis_block_audit | def do_genesis_block_audit(genesis_block_path=None, key_id=None):
"""
Loads and audits the genesis block, optionally using an alternative key
"""
signing_keys = GENESIS_BLOCK_SIGNING_KEYS
if genesis_block_path is not None:
# alternative genesis block
genesis_block_load(genesis_block_path)
if key_id is not None:
# alternative signing key
gpg2_path = find_gpg2()
assert gpg2_path, 'You need to install gpg2'
p = subprocess.Popen([gpg2_path, '-a', '--export', key_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
log.error('Failed to load key {}\n{}'.format(key_id, err))
return False
signing_keys = { key_id: out.strip() }
res = genesis_block_audit(get_genesis_block_stages(), key_bundle=signing_keys)
if not res:
log.error('Genesis block is NOT signed by {}'.format(', '.join(signing_keys.keys())))
return False
return True | python | def do_genesis_block_audit(genesis_block_path=None, key_id=None):
"""
Loads and audits the genesis block, optionally using an alternative key
"""
signing_keys = GENESIS_BLOCK_SIGNING_KEYS
if genesis_block_path is not None:
# alternative genesis block
genesis_block_load(genesis_block_path)
if key_id is not None:
# alternative signing key
gpg2_path = find_gpg2()
assert gpg2_path, 'You need to install gpg2'
p = subprocess.Popen([gpg2_path, '-a', '--export', key_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
log.error('Failed to load key {}\n{}'.format(key_id, err))
return False
signing_keys = { key_id: out.strip() }
res = genesis_block_audit(get_genesis_block_stages(), key_bundle=signing_keys)
if not res:
log.error('Genesis block is NOT signed by {}'.format(', '.join(signing_keys.keys())))
return False
return True | [
"def",
"do_genesis_block_audit",
"(",
"genesis_block_path",
"=",
"None",
",",
"key_id",
"=",
"None",
")",
":",
"signing_keys",
"=",
"GENESIS_BLOCK_SIGNING_KEYS",
"if",
"genesis_block_path",
"is",
"not",
"None",
":",
"# alternative genesis block",
"genesis_block_load",
"... | Loads and audits the genesis block, optionally using an alternative key | [
"Loads",
"and",
"audits",
"the",
"genesis",
"block",
"optionally",
"using",
"an",
"alternative",
"key"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2926-L2952 | train | 225,375 |
blockstack/blockstack-core | blockstack/blockstackd.py | setup_recovery | def setup_recovery(working_dir):
"""
Set up the recovery metadata so we can fully recover secondary state,
like subdomains.
"""
db = get_db_state(working_dir)
bitcoind_session = get_bitcoind(new=True)
assert bitcoind_session is not None
_, current_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)
assert current_block, 'Failed to connect to bitcoind'
set_recovery_range(working_dir, db.lastblock, current_block - NUM_CONFIRMATIONS)
return True | python | def setup_recovery(working_dir):
"""
Set up the recovery metadata so we can fully recover secondary state,
like subdomains.
"""
db = get_db_state(working_dir)
bitcoind_session = get_bitcoind(new=True)
assert bitcoind_session is not None
_, current_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)
assert current_block, 'Failed to connect to bitcoind'
set_recovery_range(working_dir, db.lastblock, current_block - NUM_CONFIRMATIONS)
return True | [
"def",
"setup_recovery",
"(",
"working_dir",
")",
":",
"db",
"=",
"get_db_state",
"(",
"working_dir",
")",
"bitcoind_session",
"=",
"get_bitcoind",
"(",
"new",
"=",
"True",
")",
"assert",
"bitcoind_session",
"is",
"not",
"None",
"_",
",",
"current_block",
"=",... | Set up the recovery metadata so we can fully recover secondary state,
like subdomains. | [
"Set",
"up",
"the",
"recovery",
"metadata",
"so",
"we",
"can",
"fully",
"recover",
"secondary",
"state",
"like",
"subdomains",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2954-L2967 | train | 225,376 |
blockstack/blockstack-core | blockstack/blockstackd.py | check_recovery | def check_recovery(working_dir):
"""
Do we need to recover on start-up?
"""
recovery_start_block, recovery_end_block = get_recovery_range(working_dir)
if recovery_start_block is not None and recovery_end_block is not None:
local_current_block = virtualchain_hooks.get_last_block(working_dir)
if local_current_block <= recovery_end_block:
return True
# otherwise, we're outside the recovery range and we can clear it
log.debug('Chain state is at block {}, and is outside the recovery window {}-{}'.format(local_current_block, recovery_start_block, recovery_end_block))
clear_recovery_range(working_dir)
return False
else:
# not recovering
return False | python | def check_recovery(working_dir):
"""
Do we need to recover on start-up?
"""
recovery_start_block, recovery_end_block = get_recovery_range(working_dir)
if recovery_start_block is not None and recovery_end_block is not None:
local_current_block = virtualchain_hooks.get_last_block(working_dir)
if local_current_block <= recovery_end_block:
return True
# otherwise, we're outside the recovery range and we can clear it
log.debug('Chain state is at block {}, and is outside the recovery window {}-{}'.format(local_current_block, recovery_start_block, recovery_end_block))
clear_recovery_range(working_dir)
return False
else:
# not recovering
return False | [
"def",
"check_recovery",
"(",
"working_dir",
")",
":",
"recovery_start_block",
",",
"recovery_end_block",
"=",
"get_recovery_range",
"(",
"working_dir",
")",
"if",
"recovery_start_block",
"is",
"not",
"None",
"and",
"recovery_end_block",
"is",
"not",
"None",
":",
"l... | Do we need to recover on start-up? | [
"Do",
"we",
"need",
"to",
"recover",
"on",
"start",
"-",
"up?"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2970-L2987 | train | 225,377 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.success_response | def success_response(self, method_resp, **kw):
"""
Make a standard "success" response,
which contains some ancilliary data.
Also, detect if this node is too far behind the Bitcoin blockchain,
and if so, convert this into an error message.
"""
resp = {
'status': True,
'indexing': config.is_indexing(self.working_dir),
'lastblock': virtualchain_hooks.get_last_block(self.working_dir),
}
resp.update(kw)
resp.update(method_resp)
if self.is_stale():
# our state is stale
resp['stale'] = True
resp['warning'] = 'Daemon has not reindexed since {}'.format(self.last_indexing_time)
return resp | python | def success_response(self, method_resp, **kw):
"""
Make a standard "success" response,
which contains some ancilliary data.
Also, detect if this node is too far behind the Bitcoin blockchain,
and if so, convert this into an error message.
"""
resp = {
'status': True,
'indexing': config.is_indexing(self.working_dir),
'lastblock': virtualchain_hooks.get_last_block(self.working_dir),
}
resp.update(kw)
resp.update(method_resp)
if self.is_stale():
# our state is stale
resp['stale'] = True
resp['warning'] = 'Daemon has not reindexed since {}'.format(self.last_indexing_time)
return resp | [
"def",
"success_response",
"(",
"self",
",",
"method_resp",
",",
"*",
"*",
"kw",
")",
":",
"resp",
"=",
"{",
"'status'",
":",
"True",
",",
"'indexing'",
":",
"config",
".",
"is_indexing",
"(",
"self",
".",
"working_dir",
")",
",",
"'lastblock'",
":",
"... | Make a standard "success" response,
which contains some ancilliary data.
Also, detect if this node is too far behind the Bitcoin blockchain,
and if so, convert this into an error message. | [
"Make",
"a",
"standard",
"success",
"response",
"which",
"contains",
"some",
"ancilliary",
"data",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L515-L537 | train | 225,378 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.load_name_info | def load_name_info(self, db, name_record):
"""
Get some extra name information, given a db-loaded name record.
Return the updated name_record
"""
name = str(name_record['name'])
name_record = self.sanitize_rec(name_record)
namespace_id = get_namespace_from_name(name)
namespace_record = db.get_namespace(namespace_id, include_history=False)
if namespace_record is None:
namespace_record = db.get_namespace_reveal(namespace_id, include_history=False)
if namespace_record is None:
# name can't exist (this can be arrived at if we're resolving a DID)
return None
# when does this name expire (if it expires)?
if namespace_record['lifetime'] != NAMESPACE_LIFE_INFINITE:
deadlines = BlockstackDB.get_name_deadlines(name_record, namespace_record, db.lastblock)
if deadlines is not None:
name_record['expire_block'] = deadlines['expire_block']
name_record['renewal_deadline'] = deadlines['renewal_deadline']
else:
# only possible if namespace is not yet ready
name_record['expire_block'] = -1
name_record['renewal_deadline'] = -1
else:
name_record['expire_block'] = -1
name_record['renewal_deadline'] = -1
if name_record['expire_block'] > 0 and name_record['expire_block'] <= db.lastblock:
name_record['expired'] = True
else:
name_record['expired'] = False
# try to get the zonefile as well
if 'value_hash' in name_record and name_record['value_hash'] is not None:
conf = get_blockstack_opts()
if is_atlas_enabled(conf):
zfdata = self.get_zonefile_data(name_record['value_hash'], conf['zonefiles'])
if zfdata is not None:
zfdata = base64.b64encode(zfdata)
name_record['zonefile'] = zfdata
return name_record | python | def load_name_info(self, db, name_record):
"""
Get some extra name information, given a db-loaded name record.
Return the updated name_record
"""
name = str(name_record['name'])
name_record = self.sanitize_rec(name_record)
namespace_id = get_namespace_from_name(name)
namespace_record = db.get_namespace(namespace_id, include_history=False)
if namespace_record is None:
namespace_record = db.get_namespace_reveal(namespace_id, include_history=False)
if namespace_record is None:
# name can't exist (this can be arrived at if we're resolving a DID)
return None
# when does this name expire (if it expires)?
if namespace_record['lifetime'] != NAMESPACE_LIFE_INFINITE:
deadlines = BlockstackDB.get_name_deadlines(name_record, namespace_record, db.lastblock)
if deadlines is not None:
name_record['expire_block'] = deadlines['expire_block']
name_record['renewal_deadline'] = deadlines['renewal_deadline']
else:
# only possible if namespace is not yet ready
name_record['expire_block'] = -1
name_record['renewal_deadline'] = -1
else:
name_record['expire_block'] = -1
name_record['renewal_deadline'] = -1
if name_record['expire_block'] > 0 and name_record['expire_block'] <= db.lastblock:
name_record['expired'] = True
else:
name_record['expired'] = False
# try to get the zonefile as well
if 'value_hash' in name_record and name_record['value_hash'] is not None:
conf = get_blockstack_opts()
if is_atlas_enabled(conf):
zfdata = self.get_zonefile_data(name_record['value_hash'], conf['zonefiles'])
if zfdata is not None:
zfdata = base64.b64encode(zfdata)
name_record['zonefile'] = zfdata
return name_record | [
"def",
"load_name_info",
"(",
"self",
",",
"db",
",",
"name_record",
")",
":",
"name",
"=",
"str",
"(",
"name_record",
"[",
"'name'",
"]",
")",
"name_record",
"=",
"self",
".",
"sanitize_rec",
"(",
"name_record",
")",
"namespace_id",
"=",
"get_namespace_from... | Get some extra name information, given a db-loaded name record.
Return the updated name_record | [
"Get",
"some",
"extra",
"name",
"information",
"given",
"a",
"db",
"-",
"loaded",
"name",
"record",
".",
"Return",
"the",
"updated",
"name_record"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L574-L620 | train | 225,379 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.get_name_DID_info | def get_name_DID_info(self, name):
"""
Get a name's DID info
Returns None if not found
"""
db = get_db_state(self.working_dir)
did_info = db.get_name_DID_info(name)
if did_info is None:
return {'error': 'No such name', 'http_status': 404}
return did_info | python | def get_name_DID_info(self, name):
"""
Get a name's DID info
Returns None if not found
"""
db = get_db_state(self.working_dir)
did_info = db.get_name_DID_info(name)
if did_info is None:
return {'error': 'No such name', 'http_status': 404}
return did_info | [
"def",
"get_name_DID_info",
"(",
"self",
",",
"name",
")",
":",
"db",
"=",
"get_db_state",
"(",
"self",
".",
"working_dir",
")",
"did_info",
"=",
"db",
".",
"get_name_DID_info",
"(",
"name",
")",
"if",
"did_info",
"is",
"None",
":",
"return",
"{",
"'erro... | Get a name's DID info
Returns None if not found | [
"Get",
"a",
"name",
"s",
"DID",
"info",
"Returns",
"None",
"if",
"not",
"found"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L712-L722 | train | 225,380 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_name_DID | def rpc_get_name_DID(self, name, **con_info):
"""
Given a name or subdomain, return its DID.
"""
did_info = None
if check_name(name):
did_info = self.get_name_DID_info(name)
elif check_subdomain(name):
did_info = self.get_subdomain_DID_info(name)
else:
return {'error': 'Invalid name or subdomain', 'http_status': 400}
if did_info is None:
return {'error': 'No DID for this name', 'http_status': 404}
did = make_DID(did_info['name_type'], did_info['address'], did_info['index'])
return self.success_response({'did': did}) | python | def rpc_get_name_DID(self, name, **con_info):
"""
Given a name or subdomain, return its DID.
"""
did_info = None
if check_name(name):
did_info = self.get_name_DID_info(name)
elif check_subdomain(name):
did_info = self.get_subdomain_DID_info(name)
else:
return {'error': 'Invalid name or subdomain', 'http_status': 400}
if did_info is None:
return {'error': 'No DID for this name', 'http_status': 404}
did = make_DID(did_info['name_type'], did_info['address'], did_info['index'])
return self.success_response({'did': did}) | [
"def",
"rpc_get_name_DID",
"(",
"self",
",",
"name",
",",
"*",
"*",
"con_info",
")",
":",
"did_info",
"=",
"None",
"if",
"check_name",
"(",
"name",
")",
":",
"did_info",
"=",
"self",
".",
"get_name_DID_info",
"(",
"name",
")",
"elif",
"check_subdomain",
... | Given a name or subdomain, return its DID. | [
"Given",
"a",
"name",
"or",
"subdomain",
"return",
"its",
"DID",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L734-L750 | train | 225,381 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_DID_record | def rpc_get_DID_record(self, did, **con_info):
"""
Given a DID, return the name or subdomain it corresponds to
"""
if not isinstance(did, (str,unicode)):
return {'error': 'Invalid DID: not a string', 'http_status': 400}
try:
did_info = parse_DID(did)
except:
return {'error': 'Invalid DID', 'http_status': 400}
res = None
if did_info['name_type'] == 'name':
res = self.get_name_DID_record(did)
elif did_info['name_type'] == 'subdomain':
res = self.get_subdomain_DID_record(did)
if 'error' in res:
return {'error': res['error'], 'http_status': res.get('http_status', 404)}
return self.success_response({'record': res['record']}) | python | def rpc_get_DID_record(self, did, **con_info):
"""
Given a DID, return the name or subdomain it corresponds to
"""
if not isinstance(did, (str,unicode)):
return {'error': 'Invalid DID: not a string', 'http_status': 400}
try:
did_info = parse_DID(did)
except:
return {'error': 'Invalid DID', 'http_status': 400}
res = None
if did_info['name_type'] == 'name':
res = self.get_name_DID_record(did)
elif did_info['name_type'] == 'subdomain':
res = self.get_subdomain_DID_record(did)
if 'error' in res:
return {'error': res['error'], 'http_status': res.get('http_status', 404)}
return self.success_response({'record': res['record']}) | [
"def",
"rpc_get_DID_record",
"(",
"self",
",",
"did",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"isinstance",
"(",
"did",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid DID: not a string'",
",",
"'http_st... | Given a DID, return the name or subdomain it corresponds to | [
"Given",
"a",
"DID",
"return",
"the",
"name",
"or",
"subdomain",
"it",
"corresponds",
"to"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L805-L826 | train | 225,382 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_blockstack_ops_at | def rpc_get_blockstack_ops_at(self, block_id, offset, count, **con_info):
"""
Get the name operations that occured in the given block.
Does not include account operations.
Returns {'nameops': [...]} on success.
Returns {'error': ...} on error
"""
if not check_block(block_id):
return {'error': 'Invalid block height', 'http_status': 400}
if not check_offset(offset):
return {'error': 'Invalid offset', 'http_status': 400}
if not check_count(count, 10):
return {'error': 'Invalid count', 'http_status': 400}
db = get_db_state(self.working_dir)
nameops = db.get_all_blockstack_ops_at(block_id, offset=offset, count=count)
db.close()
log.debug("{} name operations at block {}, offset {}, count {}".format(len(nameops), block_id, offset, count))
ret = []
for nameop in nameops:
assert 'opcode' in nameop, 'BUG: missing opcode in {}'.format(json.dumps(nameop, sort_keys=True))
canonical_op = self.sanitize_rec(nameop)
ret.append(canonical_op)
return self.success_response({'nameops': ret}) | python | def rpc_get_blockstack_ops_at(self, block_id, offset, count, **con_info):
"""
Get the name operations that occured in the given block.
Does not include account operations.
Returns {'nameops': [...]} on success.
Returns {'error': ...} on error
"""
if not check_block(block_id):
return {'error': 'Invalid block height', 'http_status': 400}
if not check_offset(offset):
return {'error': 'Invalid offset', 'http_status': 400}
if not check_count(count, 10):
return {'error': 'Invalid count', 'http_status': 400}
db = get_db_state(self.working_dir)
nameops = db.get_all_blockstack_ops_at(block_id, offset=offset, count=count)
db.close()
log.debug("{} name operations at block {}, offset {}, count {}".format(len(nameops), block_id, offset, count))
ret = []
for nameop in nameops:
assert 'opcode' in nameop, 'BUG: missing opcode in {}'.format(json.dumps(nameop, sort_keys=True))
canonical_op = self.sanitize_rec(nameop)
ret.append(canonical_op)
return self.success_response({'nameops': ret}) | [
"def",
"rpc_get_blockstack_ops_at",
"(",
"self",
",",
"block_id",
",",
"offset",
",",
"count",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"check_block",
"(",
"block_id",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid block height'",
",",
"'http_st... | Get the name operations that occured in the given block.
Does not include account operations.
Returns {'nameops': [...]} on success.
Returns {'error': ...} on error | [
"Get",
"the",
"name",
"operations",
"that",
"occured",
"in",
"the",
"given",
"block",
".",
"Does",
"not",
"include",
"account",
"operations",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L976-L1005 | train | 225,383 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_blockstack_ops_hash_at | def rpc_get_blockstack_ops_hash_at( self, block_id, **con_info ):
"""
Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error
"""
if not check_block(block_id):
return {'error': 'Invalid block height', 'http_status': 400}
db = get_db_state(self.working_dir)
ops_hash = db.get_block_ops_hash( block_id )
db.close()
return self.success_response( {'ops_hash': ops_hash} ) | python | def rpc_get_blockstack_ops_hash_at( self, block_id, **con_info ):
"""
Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error
"""
if not check_block(block_id):
return {'error': 'Invalid block height', 'http_status': 400}
db = get_db_state(self.working_dir)
ops_hash = db.get_block_ops_hash( block_id )
db.close()
return self.success_response( {'ops_hash': ops_hash} ) | [
"def",
"rpc_get_blockstack_ops_hash_at",
"(",
"self",
",",
"block_id",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"check_block",
"(",
"block_id",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid block height'",
",",
"'http_status'",
":",
"400",
"}",
... | Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error | [
"Get",
"the",
"hash",
"over",
"the",
"sequence",
"of",
"names",
"and",
"namespaces",
"altered",
"at",
"the",
"given",
"block",
".",
"Used",
"by",
"SNV",
"clients",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1008-L1023 | train | 225,384 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.get_bitcoind_info | def get_bitcoind_info(self):
"""
Get bitcoind info. Try the cache, and on cache miss,
fetch from bitcoind and cache.
"""
cached_bitcoind_info = self.get_cached_bitcoind_info()
if cached_bitcoind_info:
return cached_bitcoind_info
bitcoind_opts = default_bitcoind_opts( virtualchain.get_config_filename(virtualchain_hooks, self.working_dir), prefix=True )
bitcoind = get_bitcoind( new_bitcoind_opts=bitcoind_opts, new=True )
if bitcoind is None:
return {'error': 'Internal server error: failed to connect to bitcoind'}
try:
info = bitcoind.getinfo()
assert 'error' not in info
assert 'blocks' in info
self.set_cached_bitcoind_info(info)
return info
except Exception as e:
raise | python | def get_bitcoind_info(self):
"""
Get bitcoind info. Try the cache, and on cache miss,
fetch from bitcoind and cache.
"""
cached_bitcoind_info = self.get_cached_bitcoind_info()
if cached_bitcoind_info:
return cached_bitcoind_info
bitcoind_opts = default_bitcoind_opts( virtualchain.get_config_filename(virtualchain_hooks, self.working_dir), prefix=True )
bitcoind = get_bitcoind( new_bitcoind_opts=bitcoind_opts, new=True )
if bitcoind is None:
return {'error': 'Internal server error: failed to connect to bitcoind'}
try:
info = bitcoind.getinfo()
assert 'error' not in info
assert 'blocks' in info
self.set_cached_bitcoind_info(info)
return info
except Exception as e:
raise | [
"def",
"get_bitcoind_info",
"(",
"self",
")",
":",
"cached_bitcoind_info",
"=",
"self",
".",
"get_cached_bitcoind_info",
"(",
")",
"if",
"cached_bitcoind_info",
":",
"return",
"cached_bitcoind_info",
"bitcoind_opts",
"=",
"default_bitcoind_opts",
"(",
"virtualchain",
".... | Get bitcoind info. Try the cache, and on cache miss,
fetch from bitcoind and cache. | [
"Get",
"bitcoind",
"info",
".",
"Try",
"the",
"cache",
"and",
"on",
"cache",
"miss",
"fetch",
"from",
"bitcoind",
"and",
"cache",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1078-L1102 | train | 225,385 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.get_consensus_info | def get_consensus_info(self):
"""
Get block height and consensus hash. Try the cache, and
on cache miss, fetch from the db
"""
cached_consensus_info = self.get_cached_consensus_info()
if cached_consensus_info:
return cached_consensus_info
db = get_db_state(self.working_dir)
ch = db.get_current_consensus()
block = db.get_current_block()
db.close()
cinfo = {'consensus_hash': ch, 'block_height': block}
self.set_cached_consensus_info(cinfo)
return cinfo | python | def get_consensus_info(self):
"""
Get block height and consensus hash. Try the cache, and
on cache miss, fetch from the db
"""
cached_consensus_info = self.get_cached_consensus_info()
if cached_consensus_info:
return cached_consensus_info
db = get_db_state(self.working_dir)
ch = db.get_current_consensus()
block = db.get_current_block()
db.close()
cinfo = {'consensus_hash': ch, 'block_height': block}
self.set_cached_consensus_info(cinfo)
return cinfo | [
"def",
"get_consensus_info",
"(",
"self",
")",
":",
"cached_consensus_info",
"=",
"self",
".",
"get_cached_consensus_info",
"(",
")",
"if",
"cached_consensus_info",
":",
"return",
"cached_consensus_info",
"db",
"=",
"get_db_state",
"(",
"self",
".",
"working_dir",
"... | Get block height and consensus hash. Try the cache, and
on cache miss, fetch from the db | [
"Get",
"block",
"height",
"and",
"consensus",
"hash",
".",
"Try",
"the",
"cache",
"and",
"on",
"cache",
"miss",
"fetch",
"from",
"the",
"db"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1105-L1121 | train | 225,386 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_account_tokens | def rpc_get_account_tokens(self, address, **con_info):
"""
Get the types of tokens that an account owns
Returns the list on success
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
token_list = db.get_account_tokens(address)
db.close()
return self.success_response({'token_types': token_list}) | python | def rpc_get_account_tokens(self, address, **con_info):
"""
Get the types of tokens that an account owns
Returns the list on success
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
token_list = db.get_account_tokens(address)
db.close()
return self.success_response({'token_types': token_list}) | [
"def",
"rpc_get_account_tokens",
"(",
"self",
",",
"address",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"check_account_address",
"(",
"address",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid address'",
",",
"'http_status'",
":",
"400",
"}",
"# m... | Get the types of tokens that an account owns
Returns the list on success | [
"Get",
"the",
"types",
"of",
"tokens",
"that",
"an",
"account",
"owns",
"Returns",
"the",
"list",
"on",
"success"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1300-L1315 | train | 225,387 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_account_balance | def rpc_get_account_balance(self, address, token_type, **con_info):
"""
Get the balance of an address for a particular token type
Returns the value on success
Returns 0 if the balance is 0, or if there is no address
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_token_type(token_type):
return {'error': 'Invalid token type', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account = db.get_account(address, token_type)
if account is None:
return self.success_response({'balance': 0})
balance = db.get_account_balance(account)
if balance is None:
balance = 0
db.close()
return self.success_response({'balance': balance}) | python | def rpc_get_account_balance(self, address, token_type, **con_info):
"""
Get the balance of an address for a particular token type
Returns the value on success
Returns 0 if the balance is 0, or if there is no address
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_token_type(token_type):
return {'error': 'Invalid token type', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account = db.get_account(address, token_type)
if account is None:
return self.success_response({'balance': 0})
balance = db.get_account_balance(account)
if balance is None:
balance = 0
db.close()
return self.success_response({'balance': balance}) | [
"def",
"rpc_get_account_balance",
"(",
"self",
",",
"address",
",",
"token_type",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"check_account_address",
"(",
"address",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid address'",
",",
"'http_status'",
":"... | Get the balance of an address for a particular token type
Returns the value on success
Returns 0 if the balance is 0, or if there is no address | [
"Get",
"the",
"balance",
"of",
"an",
"address",
"for",
"a",
"particular",
"token",
"type",
"Returns",
"the",
"value",
"on",
"success",
"Returns",
"0",
"if",
"the",
"balance",
"is",
"0",
"or",
"if",
"there",
"is",
"no",
"address"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1318-L1344 | train | 225,388 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.export_account_state | def export_account_state(self, account_state):
"""
Make an account state presentable to external consumers
"""
return {
'address': account_state['address'],
'type': account_state['type'],
'credit_value': '{}'.format(account_state['credit_value']),
'debit_value': '{}'.format(account_state['debit_value']),
'lock_transfer_block_id': account_state['lock_transfer_block_id'],
'block_id': account_state['block_id'],
'vtxindex': account_state['vtxindex'],
'txid': account_state['txid'],
} | python | def export_account_state(self, account_state):
"""
Make an account state presentable to external consumers
"""
return {
'address': account_state['address'],
'type': account_state['type'],
'credit_value': '{}'.format(account_state['credit_value']),
'debit_value': '{}'.format(account_state['debit_value']),
'lock_transfer_block_id': account_state['lock_transfer_block_id'],
'block_id': account_state['block_id'],
'vtxindex': account_state['vtxindex'],
'txid': account_state['txid'],
} | [
"def",
"export_account_state",
"(",
"self",
",",
"account_state",
")",
":",
"return",
"{",
"'address'",
":",
"account_state",
"[",
"'address'",
"]",
",",
"'type'",
":",
"account_state",
"[",
"'type'",
"]",
",",
"'credit_value'",
":",
"'{}'",
".",
"format",
"... | Make an account state presentable to external consumers | [
"Make",
"an",
"account",
"state",
"presentable",
"to",
"external",
"consumers"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1347-L1360 | train | 225,389 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_account_record | def rpc_get_account_record(self, address, token_type, **con_info):
"""
Get the current state of an account
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_token_type(token_type):
return {'error': 'Invalid token type', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account = db.get_account(address, token_type)
db.close()
if account is None:
return {'error': 'No such account', 'http_status': 404}
state = self.export_account_state(account)
return self.success_response({'account': state}) | python | def rpc_get_account_record(self, address, token_type, **con_info):
"""
Get the current state of an account
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_token_type(token_type):
return {'error': 'Invalid token type', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account = db.get_account(address, token_type)
db.close()
if account is None:
return {'error': 'No such account', 'http_status': 404}
state = self.export_account_state(account)
return self.success_response({'account': state}) | [
"def",
"rpc_get_account_record",
"(",
"self",
",",
"address",
",",
"token_type",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"check_account_address",
"(",
"address",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid address'",
",",
"'http_status'",
":",... | Get the current state of an account | [
"Get",
"the",
"current",
"state",
"of",
"an",
"account"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1363-L1385 | train | 225,390 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_account_at | def rpc_get_account_at(self, address, block_height, **con_info):
"""
Get the account's statuses at a particular block height.
Returns the sequence of history states on success
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_block(block_height):
return {'error': 'Invalid start block', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account_states = db.get_account_at(address, block_height)
db.close()
# return credit_value and debit_value as strings, so the unwitting JS developer doesn't get confused
# as to why large balances get mysteriously converted to doubles.
ret = [self.export_account_state(hist) for hist in account_states]
return self.success_response({'history': ret}) | python | def rpc_get_account_at(self, address, block_height, **con_info):
"""
Get the account's statuses at a particular block height.
Returns the sequence of history states on success
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_block(block_height):
return {'error': 'Invalid start block', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account_states = db.get_account_at(address, block_height)
db.close()
# return credit_value and debit_value as strings, so the unwitting JS developer doesn't get confused
# as to why large balances get mysteriously converted to doubles.
ret = [self.export_account_state(hist) for hist in account_states]
return self.success_response({'history': ret}) | [
"def",
"rpc_get_account_at",
"(",
"self",
",",
"address",
",",
"block_height",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"check_account_address",
"(",
"address",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid address'",
",",
"'http_status'",
":",
... | Get the account's statuses at a particular block height.
Returns the sequence of history states on success | [
"Get",
"the",
"account",
"s",
"statuses",
"at",
"a",
"particular",
"block",
"height",
".",
"Returns",
"the",
"sequence",
"of",
"history",
"states",
"on",
"success"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1414-L1436 | train | 225,391 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_consensus_hashes | def rpc_get_consensus_hashes( self, block_id_list, **con_info ):
"""
Return the consensus hashes at multiple block numbers
Return a dict mapping each block ID to its consensus hash.
Returns {'status': True, 'consensus_hashes': dict} on success
Returns {'error': ...} on success
"""
if type(block_id_list) != list:
return {'error': 'Invalid block heights', 'http_status': 400}
if len(block_id_list) > 32:
return {'error': 'Too many block heights', 'http_status': 400}
for bid in block_id_list:
if not check_block(bid):
return {'error': 'Invalid block height', 'http_status': 400}
db = get_db_state(self.working_dir)
ret = {}
for block_id in block_id_list:
ret[block_id] = db.get_consensus_at(block_id)
db.close()
return self.success_response( {'consensus_hashes': ret} ) | python | def rpc_get_consensus_hashes( self, block_id_list, **con_info ):
"""
Return the consensus hashes at multiple block numbers
Return a dict mapping each block ID to its consensus hash.
Returns {'status': True, 'consensus_hashes': dict} on success
Returns {'error': ...} on success
"""
if type(block_id_list) != list:
return {'error': 'Invalid block heights', 'http_status': 400}
if len(block_id_list) > 32:
return {'error': 'Too many block heights', 'http_status': 400}
for bid in block_id_list:
if not check_block(bid):
return {'error': 'Invalid block height', 'http_status': 400}
db = get_db_state(self.working_dir)
ret = {}
for block_id in block_id_list:
ret[block_id] = db.get_consensus_at(block_id)
db.close()
return self.success_response( {'consensus_hashes': ret} ) | [
"def",
"rpc_get_consensus_hashes",
"(",
"self",
",",
"block_id_list",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"type",
"(",
"block_id_list",
")",
"!=",
"list",
":",
"return",
"{",
"'error'",
":",
"'Invalid block heights'",
",",
"'http_status'",
":",
"400",
... | Return the consensus hashes at multiple block numbers
Return a dict mapping each block ID to its consensus hash.
Returns {'status': True, 'consensus_hashes': dict} on success
Returns {'error': ...} on success | [
"Return",
"the",
"consensus",
"hashes",
"at",
"multiple",
"block",
"numbers",
"Return",
"a",
"dict",
"mapping",
"each",
"block",
"ID",
"to",
"its",
"consensus",
"hash",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1653-L1678 | train | 225,392 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.get_zonefile_data | def get_zonefile_data( self, zonefile_hash, zonefile_dir ):
"""
Get a zonefile by hash
Return the serialized zonefile on success
Return None on error
"""
# check cache
atlas_zonefile_data = get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=False )
if atlas_zonefile_data is not None:
# check hash
zfh = get_zonefile_data_hash( atlas_zonefile_data )
if zfh != zonefile_hash:
log.debug("Invalid local zonefile %s" % zonefile_hash )
remove_atlas_zonefile_data( zonefile_hash, zonefile_dir )
else:
log.debug("Zonefile %s is local" % zonefile_hash)
return atlas_zonefile_data
return None | python | def get_zonefile_data( self, zonefile_hash, zonefile_dir ):
"""
Get a zonefile by hash
Return the serialized zonefile on success
Return None on error
"""
# check cache
atlas_zonefile_data = get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=False )
if atlas_zonefile_data is not None:
# check hash
zfh = get_zonefile_data_hash( atlas_zonefile_data )
if zfh != zonefile_hash:
log.debug("Invalid local zonefile %s" % zonefile_hash )
remove_atlas_zonefile_data( zonefile_hash, zonefile_dir )
else:
log.debug("Zonefile %s is local" % zonefile_hash)
return atlas_zonefile_data
return None | [
"def",
"get_zonefile_data",
"(",
"self",
",",
"zonefile_hash",
",",
"zonefile_dir",
")",
":",
"# check cache",
"atlas_zonefile_data",
"=",
"get_atlas_zonefile_data",
"(",
"zonefile_hash",
",",
"zonefile_dir",
",",
"check",
"=",
"False",
")",
"if",
"atlas_zonefile_data... | Get a zonefile by hash
Return the serialized zonefile on success
Return None on error | [
"Get",
"a",
"zonefile",
"by",
"hash",
"Return",
"the",
"serialized",
"zonefile",
"on",
"success",
"Return",
"None",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1694-L1713 | train | 225,393 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_get_zonefiles_by_block | def rpc_get_zonefiles_by_block( self, from_block, to_block, offset, count, **con_info ):
"""
Get information about zonefiles announced in blocks [@from_block, @to_block]
@offset - offset into result set
@count - max records to return, must be <= 100
Returns {'status': True, 'lastblock' : blockNumber,
'zonefile_info' : [ { 'block_height' : 470000,
'txid' : '0000000',
'zonefile_hash' : '0000000' } ] }
"""
conf = get_blockstack_opts()
if not is_atlas_enabled(conf):
return {'error': 'Not an atlas node', 'http_status': 400}
if not check_block(from_block):
return {'error': 'Invalid from_block height', 'http_status': 400}
if not check_block(to_block):
return {'error': 'Invalid to_block height', 'http_status': 400}
if not check_offset(offset):
return {'error': 'invalid offset', 'http_status': 400}
if not check_count(count, 100):
return {'error': 'invalid count', 'http_status': 400}
zonefile_info = atlasdb_get_zonefiles_by_block(from_block, to_block, offset, count, path=conf['atlasdb_path'])
if 'error' in zonefile_info:
return zonefile_info
return self.success_response( {'zonefile_info': zonefile_info } ) | python | def rpc_get_zonefiles_by_block( self, from_block, to_block, offset, count, **con_info ):
"""
Get information about zonefiles announced in blocks [@from_block, @to_block]
@offset - offset into result set
@count - max records to return, must be <= 100
Returns {'status': True, 'lastblock' : blockNumber,
'zonefile_info' : [ { 'block_height' : 470000,
'txid' : '0000000',
'zonefile_hash' : '0000000' } ] }
"""
conf = get_blockstack_opts()
if not is_atlas_enabled(conf):
return {'error': 'Not an atlas node', 'http_status': 400}
if not check_block(from_block):
return {'error': 'Invalid from_block height', 'http_status': 400}
if not check_block(to_block):
return {'error': 'Invalid to_block height', 'http_status': 400}
if not check_offset(offset):
return {'error': 'invalid offset', 'http_status': 400}
if not check_count(count, 100):
return {'error': 'invalid count', 'http_status': 400}
zonefile_info = atlasdb_get_zonefiles_by_block(from_block, to_block, offset, count, path=conf['atlasdb_path'])
if 'error' in zonefile_info:
return zonefile_info
return self.success_response( {'zonefile_info': zonefile_info } ) | [
"def",
"rpc_get_zonefiles_by_block",
"(",
"self",
",",
"from_block",
",",
"to_block",
",",
"offset",
",",
"count",
",",
"*",
"*",
"con_info",
")",
":",
"conf",
"=",
"get_blockstack_opts",
"(",
")",
"if",
"not",
"is_atlas_enabled",
"(",
"conf",
")",
":",
"r... | Get information about zonefiles announced in blocks [@from_block, @to_block]
@offset - offset into result set
@count - max records to return, must be <= 100
Returns {'status': True, 'lastblock' : blockNumber,
'zonefile_info' : [ { 'block_height' : 470000,
'txid' : '0000000',
'zonefile_hash' : '0000000' } ] } | [
"Get",
"information",
"about",
"zonefiles",
"announced",
"in",
"blocks",
"["
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1845-L1875 | train | 225,394 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.peer_exchange | def peer_exchange(self, peer_host, peer_port):
"""
Exchange peers.
Add the given peer to the list of new peers to consider.
Return the list of healthy peers
"""
# get peers
peer_list = atlas_get_live_neighbors( "%s:%s" % (peer_host, peer_port) )
if len(peer_list) > atlas_max_neighbors():
random.shuffle(peer_list)
peer_list = peer_list[:atlas_max_neighbors()]
log.info("Enqueue remote peer {}:{}".format(peer_host, peer_port))
atlas_peer_enqueue( "%s:%s" % (peer_host, peer_port))
log.debug("Live peers reply to %s:%s: %s" % (peer_host, peer_port, peer_list))
return peer_list | python | def peer_exchange(self, peer_host, peer_port):
"""
Exchange peers.
Add the given peer to the list of new peers to consider.
Return the list of healthy peers
"""
# get peers
peer_list = atlas_get_live_neighbors( "%s:%s" % (peer_host, peer_port) )
if len(peer_list) > atlas_max_neighbors():
random.shuffle(peer_list)
peer_list = peer_list[:atlas_max_neighbors()]
log.info("Enqueue remote peer {}:{}".format(peer_host, peer_port))
atlas_peer_enqueue( "%s:%s" % (peer_host, peer_port))
log.debug("Live peers reply to %s:%s: %s" % (peer_host, peer_port, peer_list))
return peer_list | [
"def",
"peer_exchange",
"(",
"self",
",",
"peer_host",
",",
"peer_port",
")",
":",
"# get peers",
"peer_list",
"=",
"atlas_get_live_neighbors",
"(",
"\"%s:%s\"",
"%",
"(",
"peer_host",
",",
"peer_port",
")",
")",
"if",
"len",
"(",
"peer_list",
")",
">",
"atl... | Exchange peers.
Add the given peer to the list of new peers to consider.
Return the list of healthy peers | [
"Exchange",
"peers",
".",
"Add",
"the",
"given",
"peer",
"to",
"the",
"list",
"of",
"new",
"peers",
"to",
"consider",
".",
"Return",
"the",
"list",
"of",
"healthy",
"peers"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1878-L1894 | train | 225,395 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPC.rpc_atlas_peer_exchange | def rpc_atlas_peer_exchange(self, remote_peer, **con_info):
"""
Accept a remotely-given atlas peer, and return our list
of healthy peers. The remotely-given atlas peer will only
be considered if the caller is localhost; otherwise, the caller's
socket-given information will be used. This is to prevent
a malicious node from filling up this node's peer table with
junk.
Returns at most atlas_max_neighbors() peers
Returns {'status': True, 'peers': ...} on success
Returns {'error': ...} on failure
"""
conf = get_blockstack_opts()
if not conf.get('atlas', False):
return {'error': 'Not an atlas node', 'http_status': 404}
# take the socket-given information if this is not localhost
client_host = con_info['client_host']
client_port = con_info['client_port']
peer_host = None
peer_port = None
LOCALHOST = ['127.0.0.1', '::1', 'localhost']
if client_host not in LOCALHOST:
# we don't allow a non-localhost peer to insert an arbitrary host
peer_host = client_host
peer_port = client_port
else:
try:
peer_host, peer_port = url_to_host_port(remote_peer)
assert peer_host
assert peer_port
except:
# invalid
return {'error': 'Invalid remote peer address', 'http_status': 400}
peers = self.peer_exchange(peer_host, peer_port)
return self.success_response({'peers': peers}) | python | def rpc_atlas_peer_exchange(self, remote_peer, **con_info):
"""
Accept a remotely-given atlas peer, and return our list
of healthy peers. The remotely-given atlas peer will only
be considered if the caller is localhost; otherwise, the caller's
socket-given information will be used. This is to prevent
a malicious node from filling up this node's peer table with
junk.
Returns at most atlas_max_neighbors() peers
Returns {'status': True, 'peers': ...} on success
Returns {'error': ...} on failure
"""
conf = get_blockstack_opts()
if not conf.get('atlas', False):
return {'error': 'Not an atlas node', 'http_status': 404}
# take the socket-given information if this is not localhost
client_host = con_info['client_host']
client_port = con_info['client_port']
peer_host = None
peer_port = None
LOCALHOST = ['127.0.0.1', '::1', 'localhost']
if client_host not in LOCALHOST:
# we don't allow a non-localhost peer to insert an arbitrary host
peer_host = client_host
peer_port = client_port
else:
try:
peer_host, peer_port = url_to_host_port(remote_peer)
assert peer_host
assert peer_port
except:
# invalid
return {'error': 'Invalid remote peer address', 'http_status': 400}
peers = self.peer_exchange(peer_host, peer_port)
return self.success_response({'peers': peers}) | [
"def",
"rpc_atlas_peer_exchange",
"(",
"self",
",",
"remote_peer",
",",
"*",
"*",
"con_info",
")",
":",
"conf",
"=",
"get_blockstack_opts",
"(",
")",
"if",
"not",
"conf",
".",
"get",
"(",
"'atlas'",
",",
"False",
")",
":",
"return",
"{",
"'error'",
":",
... | Accept a remotely-given atlas peer, and return our list
of healthy peers. The remotely-given atlas peer will only
be considered if the caller is localhost; otherwise, the caller's
socket-given information will be used. This is to prevent
a malicious node from filling up this node's peer table with
junk.
Returns at most atlas_max_neighbors() peers
Returns {'status': True, 'peers': ...} on success
Returns {'error': ...} on failure | [
"Accept",
"a",
"remotely",
"-",
"given",
"atlas",
"peer",
"and",
"return",
"our",
"list",
"of",
"healthy",
"peers",
".",
"The",
"remotely",
"-",
"given",
"atlas",
"peer",
"will",
"only",
"be",
"considered",
"if",
"the",
"caller",
"is",
"localhost",
";",
... | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1917-L1957 | train | 225,396 |
blockstack/blockstack-core | blockstack/blockstackd.py | BlockstackdRPCServer.stop_server | def stop_server(self):
"""
Stop serving. Also stops the thread.
"""
if self.rpc_server is not None:
try:
self.rpc_server.socket.shutdown(socket.SHUT_RDWR)
except:
log.warning("Failed to shut down server socket")
self.rpc_server.shutdown() | python | def stop_server(self):
"""
Stop serving. Also stops the thread.
"""
if self.rpc_server is not None:
try:
self.rpc_server.socket.shutdown(socket.SHUT_RDWR)
except:
log.warning("Failed to shut down server socket")
self.rpc_server.shutdown() | [
"def",
"stop_server",
"(",
"self",
")",
":",
"if",
"self",
".",
"rpc_server",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"rpc_server",
".",
"socket",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"except",
":",
"log",
".",
"warning",
... | Stop serving. Also stops the thread. | [
"Stop",
"serving",
".",
"Also",
"stops",
"the",
"thread",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2017-L2027 | train | 225,397 |
blockstack/blockstack-core | blockstack/lib/nameset/virtualchain_hooks.py | get_last_block | def get_last_block(working_dir):
"""
Get the last block processed
Return the integer on success
Return None on error
"""
# make this usable even if we haven't explicitly configured virtualchain
impl = sys.modules[__name__]
return BlockstackDB.get_lastblock(impl, working_dir) | python | def get_last_block(working_dir):
"""
Get the last block processed
Return the integer on success
Return None on error
"""
# make this usable even if we haven't explicitly configured virtualchain
impl = sys.modules[__name__]
return BlockstackDB.get_lastblock(impl, working_dir) | [
"def",
"get_last_block",
"(",
"working_dir",
")",
":",
"# make this usable even if we haven't explicitly configured virtualchain ",
"impl",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"return",
"BlockstackDB",
".",
"get_lastblock",
"(",
"impl",
",",
"working_dir",
... | Get the last block processed
Return the integer on success
Return None on error | [
"Get",
"the",
"last",
"block",
"processed",
"Return",
"the",
"integer",
"on",
"success",
"Return",
"None",
"on",
"error"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/virtualchain_hooks.py#L119-L128 | train | 225,398 |
blockstack/blockstack-core | blockstack/lib/nameset/virtualchain_hooks.py | get_or_instantiate_db_state | def get_or_instantiate_db_state(working_dir):
"""
Get a read-only handle to the DB.
Instantiate it first if it doesn't exist.
DO NOT CALL WHILE INDEXING
Returns the handle on success
Raises on error
"""
# instantiates
new_db = BlockstackDB.borrow_readwrite_instance(working_dir, -1)
BlockstackDB.release_readwrite_instance(new_db, -1)
return get_db_state(working_dir) | python | def get_or_instantiate_db_state(working_dir):
"""
Get a read-only handle to the DB.
Instantiate it first if it doesn't exist.
DO NOT CALL WHILE INDEXING
Returns the handle on success
Raises on error
"""
# instantiates
new_db = BlockstackDB.borrow_readwrite_instance(working_dir, -1)
BlockstackDB.release_readwrite_instance(new_db, -1)
return get_db_state(working_dir) | [
"def",
"get_or_instantiate_db_state",
"(",
"working_dir",
")",
":",
"# instantiates",
"new_db",
"=",
"BlockstackDB",
".",
"borrow_readwrite_instance",
"(",
"working_dir",
",",
"-",
"1",
")",
"BlockstackDB",
".",
"release_readwrite_instance",
"(",
"new_db",
",",
"-",
... | Get a read-only handle to the DB.
Instantiate it first if it doesn't exist.
DO NOT CALL WHILE INDEXING
Returns the handle on success
Raises on error | [
"Get",
"a",
"read",
"-",
"only",
"handle",
"to",
"the",
"DB",
".",
"Instantiate",
"it",
"first",
"if",
"it",
"doesn",
"t",
"exist",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/virtualchain_hooks.py#L162-L177 | train | 225,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.