| code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
table = self._table
lpw, rpw = table.left_padding_widths, table.right_padding_widths
wep = table.width_exceed_policy
list_of_rows = []
if (wep is WidthExceedPolicy.WEP_STRIP or
wep is WidthExceedPolicy.WEP_ELLIPSIS):
# Let's strip the row
... | def _get_row_within_width(self, row) | Process a row so that it is clamped by column_width.
Parameters
----------
row : array_like
A single row.
Returns
-------
list of list:
List representation of the `row` after it has been processed
according to width exceed policy. | 2.512804 | 2.47726 | 1.014348 |
width = (self._table.column_widths[column_index]
- self._table.left_padding_widths[column_index]
- self._table.right_padding_widths[column_index])
if termwidth(row_item) <= width:
return row_item
else:
if width - len(delimiter) ... | def _clamp_string(self, row_item, column_index, delimiter='') | Clamp `row_item` to fit in column referred by column_index.
This method considers padding and appends the delimiter if `row_item`
needs to be truncated.
Parameters
----------
row_item: str
String which should be clamped.
column_index: int
Index ... | 3.273836 | 3.531307 | 0.927089 |
try:
responses = set()
for text in wrap(string=str(message), length=wrap_length):
response = self.http_session.post(
url="{}/im/sendIM".format(self.api_base_url),
data={
"r": uuid.uuid4(),
... | def send_im(self, target, message, mentions=None, parse=None, update_msg_id=None, wrap_length=5000) | Send text message.
:param target: Target user UIN or chat ID.
:param message: Message text.
:param mentions: Iterable with UINs to mention in message.
:param parse: Iterable with several values from :class:`icq.constant.MessageParseType` specifying which message
items should... | 4.001891 | 3.659959 | 1.093425 |
return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence) | def random_choice(sequence) | Same as :meth:`random.choice`, but also supports :class:`set` type to be passed as sequence. | 5.383836 | 3.775205 | 1.426104 |
if context is None or not isinstance(context, dict):
context = {}
markdown_html = _transform_markdown_into_html(text)
sanitised_markdown_html = _sanitise_markdown_html(markdown_html)
return mark_safe(sanitised_markdown_html) | def render_markdown(text, context=None) | Turn markdown into HTML. | 3.528884 | 3.383115 | 1.043087 |
warning = (
"wagtailmarkdown.utils.render() is deprecated. Use "
"wagtailmarkdown.utils.render_markdown() instead."
)
warnings.warn(warning, WagtailMarkdownDeprecationWarning, stacklevel=2)
return render_markdown(text, context) | def render(text, context=None) | Deprecated call to render_markdown(). | 3.139538 | 2.629669 | 1.193891 |
def run(self, parent, blocks):
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
rows = block[2:]
# Get format type (bordered by pipes or not)
border = False
if header.startswith('|'):
border = T... | Parse a table block and build table. | null | null | null | |
def _build_row(self, row, parent, align, border):
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
# We use align here rather than cells to ensure every row
# contains the... | Given a row of text, build table cells. | null | null | null | |
def _split_row(self, row, border):
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|') | split a row of text into list of cells. | null | null | null | |
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors.add('table',
TableProcessor(md.parser),
'<hashheader') | Add an instance of TableProcessor to BlockParser. | null | null | null | |
static_dirs = set()
for finder in settings.STATICFILES_FINDERS:
finder = finders.get_finder(finder)
if hasattr(finder, 'storages'):
for storage in finder.storages.values():
static_dirs.add(storage.location)
if hasattr(finder, 'storage'):
st... | def get_all_static() | Get all the static files directories found by ``STATICFILES_FINDERS``
:return: set of paths (top-level folders only) | 2.568366 | 2.213956 | 1.16008 |
if self.infile is None and "{infile}" in self.command:
if self.filename is None:
self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
self.infile.write(self.content.encode(self.default_encoding))
self.infile.flush()
... | def input(self, **kwargs) | Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469 | 4.476688 | 4.071254 | 1.099585 |
'''this hashes all types to a hash without collisions. python's hashing algorithms are not cross-type compatible but hashing tuples with the type as the first element seems to do the trick'''
obj_type = type(obj)
try:
# this works for hashables
return hash((obj_type, obj))
except:
... | def graph_hash(obj) | this hashes all types to a hash without collisions. python's hashing algorithms are not cross-type compatible but hashing tuples with the type as the first element seems to do the trick | 18.128866 | 5.621908 | 3.224682 |
''' use this function to store a python object in the database '''
assert not isinstance(item, RamGraphDBNode)
item_hash = graph_hash(item)
if item_hash not in self.nodes:
self.nodes[item_hash] = RamGraphDBNode(item)
return self.nodes[item_hash] | def store_item(self, item) | use this function to store a python object in the database | 4.933382 | 4.234187 | 1.165131 |
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src).link(name, self.store_item(dst)) | def store_relation(self, src, name, dst) | use this to store a relation between two objects | 7.117507 | 6.282228 | 1.132959 |
''' can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src '''
self.__require_string__(relation)
if src in self and target in self:
self._get_item_node(src).unlink(relation, self._get_item_node(... | def delete_relation(self, src, relation, target) | can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src | 8.572301 | 4.235788 | 2.02378 |
''' removes an item from the db '''
for relation, dst in self.relations_of(item, True):
self.delete_relation(item, relation, dst)
#print(item, relation, dst)
for src, relation in self.relations_to(item, True):
self.delete_relation(src, relation, item)
... | def delete_item(self, item) | removes an item from the db | 3.647026 | 3.704955 | 0.984364 |
''' list all relations that originate from target '''
relations = (target if isinstance(target, RamGraphDBNode) else self._get_item_node(target)).outgoing
if include_object:
for k in relations:
for v in relations[k]:
if hasattr(v, 'obj'): # filter d... | def relations_of(self, target, include_object=False) | list all relations that originate from target | 8.474442 | 6.957356 | 1.218055 |
''' list all relations pointing at an object '''
relations = self._get_item_node(target).incoming
if include_object:
for k in relations:
for v in relations[k]:
if hasattr(v, 'obj'): # filter dead links
yield v.obj, k
... | def relations_to(self, target, include_object=False) | list all relations pointing at an object | 7.014351 | 6.030647 | 1.163118 |
''' display the entire set of objects with their (id, value, node) '''
for key in self.nodes:
node = self.nodes[key]
value = node.obj
print(key, '-', repr(value), '-', node) | def show_objects(self) | display the entire set of objects with their (id, value, node) | 10.201654 | 4.436408 | 2.29953 |
''' list every relation in the database as (src, relation, dst) '''
for node in self.iter_nodes():
for relation, target in self.relations_of(node.obj, True):
yield node.obj, relation, target | def list_relations(self) | list every relation in the database as (src, relation, dst) | 7.305188 | 5.109131 | 1.42983 |
''' display every relation in the database as (src, relation, dst) '''
for src_node in self.iter_nodes():
for relation in src_node.outgoing:
for dst_node in src_node.outgoing[relation]:
print(repr(src_node.obj), '-', relation, '-', repr(dst_node.obj)) | def show_relations(self) | display every relation in the database as (src, relation, dst) | 4.556345 | 3.246459 | 1.403481 |
''' use this to filter VLists, simply provide a filter function and what relation to apply it to '''
assert type(relation).__name__ in {'str','unicode'}, 'where needs the first arg to be a string'
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(i for i in self i... | def where(self, relation, filter_fn) | use this to filter VLists, simply provide a filter function and what relation to apply it to | 8.659863 | 4.820359 | 1.796518 |
''' use this to filter VLists, simply provide a filter function to filter the current found objects '''
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(i for i in self if filter_fn(i())) | def _where(self, filter_fn) | use this to filter VLists, simply provide a filter function to filter the current found objects | 11.839749 | 3.595491 | 3.292944 |
'''use this to filter VLists with kv pairs'''
out = self
for k,v in kwargs.items():
out = out.where(k, lambda i:i==v)
return out | def _where(self, **kwargs) | use this to filter VLists with kv pairs | 9.212753 | 3.674157 | 2.507447 |
''' creates a file at the given path and sets the permissions to user only read/write '''
from os.path import isfile
if not isfile(path): # only do the following if the file doesn't exist yet
from os import chmod
from stat import S_IRUSR, S_IWUSR
open(path, "... | def _create_file(path='') | creates a file at the given path and sets the permissions to user only read/write | 4.714748 | 3.626764 | 1.299988 |
''' use this function to store a python object in the database '''
#print('storing item', item)
item_id = self._id_of(item)
#print('item_id', item_id)
if item_id is None:
#print('storing item', item)
blob = self.serialize(item)
with self._write... | def store_item(self, item) | use this function to store a python object in the database | 4.532237 | 4.094862 | 1.106811 |
''' removes an item from the db '''
for relation in self.relations_of(item):
self.delete_relation(item, relation)
for origin, relation in self.relations_to(item, True):
self.delete_relation(origin, relation, item)
with self._write_lock:
self._execute('... | def delete_item(self, item) | removes an item from the db | 5.861659 | 5.974267 | 0.981151 |
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src)
self.store_item(dst)
with self._write_lock:
#print(locals())
... | def store_relation(self, src, name, dst) | use this to store a relation between two objects | 5.132977 | 4.88001 | 1.051837 |
''' deletes a single relation between objects '''
self.__require_string__(relation)
src_id = self._id_of(src)
dst_id = self._id_of(dst)
with self._write_lock:
self._execute('''
DELETE from relations where src=? and name=? and dst=?
''', (sr... | def _delete_single_relation(self, src, relation, dst) | deletes a single relation between objects | 4.3648 | 4.399338 | 0.992149 |
''' can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src '''
self.__require_string__(relation)
if len(targets):
for i in targets:
self._delete_single_relation(src, relation, i)... | def delete_relation(self, src, relation, *targets) | can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src | 4.864101 | 3.040769 | 1.599628 |
''' returns back all elements the target has a relation to '''
query = 'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?' # src is id not source :/
for i in self._execute(query, (relation, self.... | def find(self, target, relation) | returns back all elements the target has a relation to | 7.11492 | 5.922777 | 1.201281 |
''' list all relations that originate from target '''
if include_object:
_ = self._execute('''
select relations.name, ob2.code from relations, objects as ob1, objects as ob2 where relations.src=ob1.id and ob2.id=relations.dst and ob1.code=?
''', (self.serialize(tar... | def relations_of(self, target, include_object=False) | list all relations that originate from target | 3.420922 | 3.084463 | 1.109082 |
''' list all relations pointing at an object '''
if include_object:
_ = self._execute('''
select name, (select code from objects where id=src) from relations where dst=?
''', (self._id_of(target),))
for i in _:
yield self.deserialize(i[... | def relations_to(self, target, include_object=False) | list all relations pointing at an object | 4.218765 | 3.876534 | 1.088283 |
''' generate tuples containing (relation, object_that_applies) '''
return gen.chain( ((r,i) for i in self.find(target,r)) for r in self.relations_of(target) ) | def connections_of(self, target) | generate tuples containing (relation, object_that_applies) | 12.569838 | 5.298519 | 2.37233 |
''' list the entire set of objects with their (id, serialized_form, actual_value) '''
for i in self._execute('select * from objects'):
_id, code = i
yield _id, code, self.deserialize(code) | def list_objects(self) | list the entire set of objects with their (id, serialized_form, actual_value) | 15.150829 | 5.078071 | 2.98358 |
''' list every relation in the database as (src, relation, dst) '''
_ = self._execute('select * from relations').fetchall()
for i in _:
#print(i)
src, name, dst = i
src = self.deserialize(
next(self._execute('select code from objects where id=?... | def list_relations(self) | list every relation in the database as (src, relation, dst) | 3.856265 | 3.279962 | 1.175704 |
# OSX
if platform.system() == "Darwin":
# scraped from /usr/include, not exported by python's socket module
TCP_KEEPALIVE = 0x10
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interva... | def set_keep_alive(self, sock, after_idle_sec=5, interval_sec=60,
max_fails=5) | This function instructs the TCP socket to send a heart beat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after 1 second (after_idle_sec) of idleness, then... | 1.888818 | 1.934698 | 0.976286 |
buf_len = len(self.buf)
replies = []
reply = b""
chop = 0
skip = 0
i = 0
buf_len = len(self.buf)
for i in range(0, buf_len):
ch = self.buf[i:i + 1]
if skip:
skip -= 1
i += 1
c... | def parse_buf(self, encoding="unicode") | Since TCP is a stream-orientated protocol, responses aren't guaranteed
to be complete when they arrive. The buffer stores all the data and
this function splits the data into replies based on the new line
delimiter. | 2.895297 | 2.743449 | 1.055349 |
# Socket is disconnected.
if not self.connected:
return
# Recv chunks until network buffer is empty.
repeat = 1
wait = 0.2
chunk_no = 0
max_buf = self.max_buf
max_chunks = self.max_chunks
if fixed_limit is not None:
... | def get_chunks(self, fixed_limit=None, encoding="unicode") | This is the function which handles retrieving new data chunks. Its
main logic is avoiding a recv call blocking forever and halting
the program flow. To do this, it manages errors and keeps an eye
on the buffer to avoid overflows and DoS attacks.
http://stackoverflow.com/questions/16745... | 3.730265 | 3.624535 | 1.029171 |
def validate_node(self, node_ip, node_port=None, same_nodes=1):
self.debug_print("Validating: " + node_ip)
# Is this a valid IP?
if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
self.debug_print("Invalid node ip in validate node")
return 0
# Is this ... | Don't accept connections from self to passive server
or connections to already connected nodes. | null | null | null | |
def bootstrap(self):
# Disable bootstrap.
if not self.enable_bootstrap:
return None
# Avoid raping the rendezvous server.
t = time.time()
if self.last_bootstrap is not None:
if t - self.last_bootstrap <= rendezvous_interval:
... | When the software is first started, it needs to retrieve
a list of nodes to connect to the network to. This function
asks the server for N nodes which consists of at least N
passive nodes and N simultaneous nodes. The simultaneous
nodes are prioritized if the node_type for the machin... | null | null | null | |
def advertise(self):
# Advertise is disabled.
if not self.enable_advertise:
self.debug_print("Advertise is disbled!")
return None
# Direct net server is reserved for direct connections only.
if self.net_type == "direct" and self.node_type == "p... | This function tells the rendezvous server that our node is ready to
accept connections from other nodes on the P2P network that run the
bootstrap function. It's only used when net_type == p2p | null | null | null | |
def determine_node(self):
# Manually set node_type as simultaneous.
if self.node_type == "simultaneous":
if self.nat_type != "unknown":
return "simultaneous"
# Get IP of binding interface.
unspecific_bind = ["0.0.0.0", "127.0.0.1", "localho... | Determines the type of node based on a combination of forwarding
reachability and NAT type. | null | null | null | |
def start(self):
self.debug_print("Starting networking.")
self.debug_print("Make sure to iterate over replies if you need"
" connection alive management!")
# Register a cnt + c handler
signal.signal(signal.SIGINT, self.stop)
# Save W... | This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class. | null | null | null | |
def stop(self, signum=None, frame=None):
self.debug_print("Stopping networking.")
if self.passive is not None:
try:
self.passive.shutdown(1)
except:
pass
self.passive.close()
self.passive = None
if self.... | Just let the threads timeout by themselves.
Otherwise mutex deadlocks could occur.
for unl_thread in self.unl.unl_threads:
unl_thread.exit() | null | null | null | |
def send_remote_port(self):
msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port))
self.send_line(msg) | Sends the remote port mapped for the connection.
This port is surprisingly often the same as the locally
bound port for an endpoint because a lot of NAT types
preserve the port. | null | null | null | |
def cleanup_candidates(self, node_ip):
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
elapsed = int(time.time() - candidate["time"])
if elapsed > self.challege_timeout:
... | Removes old TCP hole punching candidates for a
designated node if a certain amount of time has passed
since they last connected. | null | null | null | |
def propogate_candidates(self, node_ip):
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Not connected.
if not candidate["con"].connected:
continue
... | Used to propagate new candidates to passive simultaneous
nodes. | null | null | null | |
def synchronize_simultaneous(self, node_ip):
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] ... | Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings r... | null | null | null | |
def connectionLost(self, reason):
try:
self.connected = False
if debug:
print(self.log_entry("CLOSED =", "none"))
# Every five minutes: cleanup
t = time.time()
if time.time() - self.factory.last_cleanup >= self.cleanu... | Mostly handles clean-up of node + candidate structures.
Avoids memory exhaustion for a large number of connections. | null | null | null | |
def get_external_ip(self):
random.shuffle(self.server_list)
myip = ''
for server in self.server_list[:3]:
myip = self.fetch(server)
if myip != '':
return myip
else:
continue
return '' | This function gets your IP from a random server | null | null | null | |
def fetch(self, server):
t = None
socket_default_timeout = socket.getdefaulttimeout()
opener = urllib.build_opener()
opener.addheaders = [('User-agent',
"Mozilla/5.0 (X11; Linux x86_64; rv:24.0)"
" Gecko/20100101... | This function gets your IP from a specific server | null | null | null | |
def connect(self, their_unl, events, force_master=1, hairpin=1,
nonce="0" * 64):
parms = (their_unl, events, force_master, hairpin, nonce)
t = Thread(target=self.connect_handler, args=parms)
t.start()
self.unl_threads.append(t) | A new thread is spawned because many of the connection techniques
rely on sleep to determine connection outcome or to synchronise hole
punching techniques. If the sleep is in its own thread it won't
block main execution. | null | null | null | |
n = self.statx_n(self.data_points)
if n < 1:
return Decimal("0")
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
for k in range(0, self.clean_steps):
min_val = avg - sdev
... | def calculate_clock_skew(self) | Compute average and standard deviation
using all the data points. | 2.595875 | 2.50814 | 1.03498 |
def attend_fight(self, mappings, node_ip, predictions, ntp):
# Bind listen server socket.
mappings = self.add_listen_sock(mappings)
log.debug(mappings)
# Walk to fight.
self.simultaneous_cons = []
predictions = predictions.split(" ")
self.simu... | This function is for starting and managing a fight
once the details are known. It also handles the
task of returning any valid connections (if any) that
may be returned from threads in the simultaneous_fight function. | null | null | null | |
def sequential_connect(self):
# Connect to rendezvous server.
try:
mappings = sequential_bind(self.mapping_no + 1, self.interface)
con = self.server_connect(mappings[0]["sock"])
except Exception as e:
log.debug(e)
log.debug("this... | Sequential connect is designed to return a connection to the
Rendezvous Server but it does so in a way that the local port
ranges (both for the server and used for subsequent hole
punching) are allocated sequentially and predictably. This is
because Delta+1 type NATs only preserve th... | null | null | null | |
def simultaneous_listen(self):
# Close socket.
if self.server_con is not None:
self.server_con.s.close()
self.server_con = None
# Reset predictions + mappings.
self.mappings = None
self.predictions = None
# Connect to rendez... | This function is called by passive simultaneous nodes who
wish to establish themselves as such. It sets up a connection
to the Rendezvous Server to monitor for new hole punching requests. | null | null | null | |
def predict_mappings(self, mappings):
if self.nat_type not in self.predictable_nats:
msg = "Can't predict mappings for non-predictable NAT type."
raise Exception(msg)
for mapping in mappings:
mapping["bound"] = mapping["sock"].getsockname()[1]
... | This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. Current support for preserving and
delta type mapping behaviour. | null | null | null | |
def throw_punch(self, args, tries=1):
# Parse arguments.
if len(args) != 3:
return 0
sock, node_ip, remote_port = args
if sock is None or node_ip is None or remote_port is None:
return 0
# Generous timeout.
con = Sock(blocking... | Attempt to open a hole by TCP hole punching. This
function is called by the simultaneous fight function
and it's the code that handles doing the actual hole
punching / connecting. | null | null | null | |
def simultaneous_fight(self, my_mappings, node_ip, predictions, origin_ntp):
# Get current network time accurate to
# ~50 ms over WAN (apparently.)
p = request_priority_execution()
log.debug("Getting NTP")
if self.sys_clock is not None:
our_ntp = sel... | TCP hole punching algorithm. It uses network time servers to
synchronize two nodes to connect to each other on their
predicted remote ports at the exact same time.
One thing to note is how sensitive TCP hole punching is to
timing. To open a successful connection both sides need to
... | null | null | null | |
def simultaneous_challenge(self, node_ip, node_port, proto):
parts = self.sequential_connect()
if parts is None:
log.debug("Sequential connect failed")
return None
con, mappings, predictions = parts
# Tell server to list ourselves as a candidat... | Used by active simultaneous nodes to attempt to initiate
a simultaneous open to a compatible node after retrieving
its details from bootstrapping. The function advertises
itself as a potential candidate to the server for the
designated node_ip. It also waits for a response from the
... | null | null | null | |
def parse_remote_port(self, reply):
remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
if not len(remote_port):
remote_port = 0
else:
remote_port = int(remote_port[0][1])
if remote_port < 1 or remote_port > 65535:
... | Parses a remote port from a Rendezvous Server's
response. | null | null | null | |
if port is None or port < 1024 or port > 65535:
port = random.randint(1024, 65535)
assert(1024 <= port <= 65535)
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', port)) # Try to open port
except socket.error as e:
... | def get_unused_port(port=None) | Checks if port is already in use. | 2.170149 | 2.105793 | 1.030561 |
if sys.version_info < (3, 0, 0):
if type(interface) == str:
interface = unicode(interface)
else:
if type(interface) == bytes:
interface = interface.decode("utf-8")
# Get ID of interface that handles WAN stuff.
default_gateway = get_default_gateway(interface)
... | def get_lan_ip(interface="default") | Execution may reach here if the host is using
virtual interfaces on Linux and there are no gateways
which suggests the host is a VPS or server. In this
case | 2.958377 | 2.987049 | 0.990401 |
if n == 2:
try:
ip = myip()
ip = extract_ip(ip)
if is_ip_valid(ip):
return ip
except Exception as e:
print(str(e))
return None
# Fail-safe: use centralized server for IP lookup.
from pyp2p.net import forwardin... | def get_wan_ip(n=0) | That IP module sucks. Occasionally it returns an IP address behind
cloudflare which probably happens when cloudflare tries to proxy your web
request because it thinks you're trying to DoS. It's better if we just run
our own infrastructure. | 3.138692 | 3.089561 | 1.015902 |
def get_gateway_addr():
try:
import netifaces
return netifaces.gateways()["default"][netifaces.AF_INET][0]
except ImportError:
shell_command = 'netstat -rn'
if os.name == "posix":
pattern = \
re.compile('(?:default|0\.0\.0\.0|::/0)\s+([\w... | Use netifaces to get the gateway address, if we can't import it then
fall back to a hack to obtain the current gateway automatically, since
Python has no interface to sysctl().
This may or may not be the gateway we should be contacting.
It does not guarantee correct results.
... | null | null | null | |
def get_gateway_socket(gateway):
if not gateway:
raise NATPMPNetworkError(NATPMP_GATEWAY_NO_VALID_GATEWAY,
error_str(NATPMP_GATEWAY_NO_VALID_GATEWAY))
response_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
response_socket.setblocking(0)
re... | Takes a gateway address string and returns a non-blocking UDP
socket to communicate with its NAT-PMP implementation on
NATPMP_PORT.
e.g. addr = get_gateway_socket('10.0.1.1') | null | null | null | |
def get_public_address(gateway_ip=None, retry=9):
if gateway_ip is None:
gateway_ip = get_gateway_addr()
addr_request = PublicAddressRequest()
addr_response = send_request_with_retry(gateway_ip, addr_request,
response_data_class=
... | A high-level function that returns the public interface IP of
the current host by querying the NAT-PMP gateway. IP is
returned as string.
Takes two possible keyword arguments:
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to usin... | null | null | null | |
def map_tcp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
return map_port(NATPMP_PROTOCOL_TCP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) | A high-level wrapper to map_port() that requests a mapping
for a public TCP port on the NAT to a private TCP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mappi... | null | null | null | |
def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) | A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mappi... | null | null | null | |
def map_port(protocol, public_port, private_port, lifetime=3600,
gateway_ip=None, retry=9, use_exception=True):
if protocol not in [NATPMP_PROTOCOL_UDP, NATPMP_PROTOCOL_TCP]:
raise ValueError("Must be either NATPMP_PROTOCOL_UDP or "
"NATPMP_PROTOCOL_TCP")
... | A function to map public_port to private_port of protocol.
Returns the complete response on success.
protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP
public_port - the public port of the mapping requested
private_port - the private port of the mapping request... | null | null | null | |
proto = proto.upper()
valid_protos = ["TCP", "UDP"]
if proto not in valid_protos:
raise Exception("Invalid protocol for forwarding.")
valid_ports = range(1, 65535)
if src_port not in valid_ports:
raise Exception("Invalid port for forwarding.")
... | def forward_port(self, proto, src_port, dest_ip, dest_port=None) | Creates a new mapping for the default gateway to forward ports.
Source port is from the perspective of the original client.
For example, if a client tries to connect to us on port 80,
the source port is port 80. The destination port isn't
necessarily 80, however. We might wish to run our... | 2.452584 | 2.423999 | 1.011792 |
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next) | def start(self) | Start scheduling | 7.062679 | 6.388937 | 1.105455 |
if self.handle is not None:
self.handle.cancel()
self.handle = self.future = self.croniter = None | def stop(self) | Stop scheduling | 7.641145 | 6.621982 | 1.153906 |
self.initialize()
self.future = asyncio.Future(loop=self.loop)
self.handle = self.loop.call_at(self.get_next(), self.call_func, *args)
return self.future | def next(self, *args) | yield from .next() | 4.369461 | 4.202692 | 1.039681 |
if self.croniter is None:
self.time = time.time()
self.datetime = datetime.now(self.tz)
self.loop_time = self.loop.time()
self.croniter = croniter(self.spec, start_time=self.datetime) | def initialize(self) | Initialize croniter and related times | 4.609856 | 3.538367 | 1.30282 |
return self.loop_time + (self.croniter.get_next(float) - self.time) | def get_next(self) | Return next iteration time related to loop time | 14.83296 | 7.819675 | 1.896877 |
if self.handle is not None:
self.handle.cancel()
next_time = self.get_next()
self.handle = self.loop.call_at(next_time, self.call_next)
self.call_func() | def call_next(self) | Set next hop in the loop. Call task | 3.780114 | 3.69857 | 1.022047 |
asyncio.gather(
self.cron(*args, **kwargs),
loop=self.loop, return_exceptions=True
).add_done_callback(self.set_result) | def call_func(self, *args, **kwargs) | Called. Take care of exceptions using gather | 5.681886 | 4.81136 | 1.180931 |
result = result.result()[0]
if self.future is not None:
if isinstance(result, Exception):
self.future.set_exception(result)
else:
self.future.set_result(result)
self.future = None
elif isinstance(result, Exception):
... | def set_result(self, result) | Set future's result if needed (can be an exception).
Else raise if needed. | 2.633615 | 2.282123 | 1.15402 |
match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
if not match or not match.group(0).strip():
return
mdict = match.groupdict()
return sum(
MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None) | def timeparse(sval) | Parse a time expression, returning it as a number of seconds. If
possible, the return value will be an `int`; if this is not
possible, the return will be a `float`. Returns `None` if a time
expression cannot be parsed from the given string.
Arguments:
- `sval`: the string value to parse
>>> ti... | 4.466319 | 4.781457 | 0.934092 |
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
all_caps = line.upper() == line
words = re.split('[\t ]', line)
tc_line = []
for word in words:
if callback:
new_word = callback(word, all_caps=all_caps)
if n... | def titlecase(text, callback=None, small_first_last=True) | Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'. | 2.741679 | 2.753904 | 0.995561 |
'''Handler for command line invocation'''
# Try to handle any reasonable thing thrown at this.
# Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout
# and treat any subsequent arguments as a space separated string to
# be titlecased (so it still works if people forget quotes)
pars... | def cmd() | Handler for command line invocation | 3.09459 | 3.063864 | 1.010029 |
parser.add_option(
'--eradicate-aggressive',
default=False,
help=(
'Enables aggressive mode for eradicate; '
'this may result in false positives'
),
action='store_true',
type=None,
) | def add_options(cls, parser: OptionManager) -> None | ``flake8`` api method to register new plugin options.
See :class:`.Configuration` docs for detailed options reference.
Arguments:
parser: ``flake8`` option parser instance. | 6.128753 | 5.935016 | 1.032643 |
if self.filename != STDIN:
buffer = StringIO()
options = _Options(aggressive=self.options.eradicate_aggressive)
fix_file(self.filename, options, buffer)
traceback = buffer.getvalue()
if traceback:
yield 1, 0, self._error(trace... | def run(self) -> Generator[Tuple[int, int, str, type], None, None] | Runs the checker.
``fix_file()`` only mutates the buffer object.
It is the only way to find out if some error happened. | 7.345452 | 6.33246 | 1.159968 |
yielded = set()
for value in g:
if value not in yielded:
yield value
yielded.add(value) | def unique(g) | Yield values yielded by ``g``, removing any duplicates.
Example
-------
>>> list(unique(iter([1, 3, 1, 2, 3])))
[1, 3, 2] | 3.113294 | 4.090847 | 0.761039 |
for type_ in t.mro():
try:
return vars(type_)[name]
except KeyError:
pass
raise AttributeError(name) | def static_get_type_attr(t, name) | Get a type attribute statically, circumventing the descriptor protocol. | 3.637497 | 3.471002 | 1.047967 |
message = "\nclass {C} received conflicting default implementations:".format(
C=typename,
)
for attrname, interfaces in conflicts.items():
message += dedent(
).format(
attr=attrname,
interfaces=bulleted_list(sorted(map(getname, interfaces))),... | def _conflicting_defaults(typename, conflicts) | Format an error message for conflicting default implementations.
Parameters
----------
typename : str
Name of the type for which we're producing an error.
conflicts : dict[str -> list[Interface]]
Map from strings to interfaces providing a default with that name.
Returns
-------... | 9.150398 | 8.540711 | 1.071386 |
missing = []
mistyped = {}
mismatched = {}
for name, iface_sig in self._signatures.items():
try:
# Don't invoke the descriptor protocol here so that we get
# staticmethod/classmethod/property objects instead of the
# fu... | def _diff_signatures(self, type_) | Diff our method signatures against the methods provided by type_.
Parameters
----------
type_ : type
The type to check.
Returns
-------
missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature] # noqa
``missing`` is a l... | 4.683328 | 3.892531 | 1.203157 |
raw_missing, mistyped, mismatched = self._diff_signatures(type_)
# See if we have defaults for missing methods.
missing = []
defaults_to_use = {}
for name in raw_missing:
try:
defaults_to_use[name] = self._defaults[name].implementation
... | def verify(self, type_) | Check whether a type implements ``self``.
Parameters
----------
type_ : type
The type to check.
Raises
------
TypeError
If ``type_`` doesn't conform to our interface.
Returns
-------
None | 5.1785 | 5.973287 | 0.866943 |
assert missing or mistyped or mismatched, "Implementation wasn't invalid."
message = "\nclass {C} failed to implement interface {I}:".format(
C=getname(t),
I=getname(self),
)
if missing:
message += dedent(
).forma... | def _invalid_implementation(self, t, missing, mistyped, mismatched) | Make a TypeError explaining why ``t`` doesn't implement our interface. | 2.648067 | 2.661037 | 0.995126 |
if name is None:
name = existing_class.__name__ + 'Interface'
if subset is None:
subset = set(dir(existing_class)) - TRIVIAL_CLASS_ATTRIBUTES
return InterfaceMeta(
name,
(Interface,),
{name: static_get_type_attr(existing_clas... | def from_class(cls, existing_class, subset=None, name=None) | Create an interface from an existing class.
Parameters
----------
existing_class : type
The type from which to extract an interface.
subset : list[str], optional
List of methods that should be included in the interface.
Default is to use all attribute... | 4.446798 | 4.62382 | 0.961715 |
return all([
positionals_compatible(
takewhile(is_positional, impl_sig.parameters.values()),
takewhile(is_positional, iface_sig.parameters.values()),
),
keywords_compatible(
valfilter(complement(is_positional), impl_sig.parameters),
valfil... | def compatible(impl_sig, iface_sig) | Check whether ``impl_sig`` is compatible with ``iface_sig``.
Parameters
----------
impl_sig : inspect.Signature
The signature of the implementation function.
iface_sig : inspect.Signature
The signature of the interface function.
In general, an implementation is compatible with an i... | 3.51894 | 3.865681 | 0.910303 |
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs) | def aggregate_group_loop(*args, **kwargs) | wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops. | 5.232708 | 4.204672 | 1.244499 |
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
return 0
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
steps += 1
return steps | def step_count(group_idx) | Return the amount of index changes within group_idx. | 3.056719 | 2.819071 | 1.0843 |
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
r... | def step_indices(group_idx) | Return the edges of areas within group_idx, which are filled with the same value. | 3.123912 | 3.075365 | 1.015786 |
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
... | def callable(cls, nans=False, reverse=False, scalar=False) | Compile a jitted function doing the hard part of the job | 4.077472 | 4.108189 | 0.992523 |
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
... | def callable(self, nans=False) | Compile a jitted function and loop it over the sorted data. | 3.701173 | 3.461611 | 1.069205 |
alias = dict((k, k) for k in funcs_common)
alias.update(_alias_str)
alias.update((fn, fn) for fn in _alias_builtin.values())
alias.update(_alias_builtin)
for d in extra:
alias.update(d)
alias.update((k, k) for k in set(alias.values()))
# Treat nan-functions as firstclass member ... | def get_aliasing(*extra) | This assembles the dict mapping strings and functions to the list of
supported function names:
e.g. alias['add'] = 'sum' and alias[sorted] = 'sort'
This function should only be called during import. | 5.775379 | 5.650438 | 1.022112 |