code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def _send(self, message):
    """Send a metric message to the graphite server over the open socket.

    Optionally lowercases the message, normalizes it via str2listtuple,
    and wraps any socket failure in GraphiteSendException, chaining the
    original exception (PEP 3134) so the root cause is not lost.
    """
    if self.lowercase_metric_names:
        message = message.lower()
    message = self.str2listtuple(message)
    try:
        self.socket.sendall(message)
    except socket.gaierror as error:
        raise GraphiteSendException(
            "Failed to send data to %s, with error: %s" %
            (self.addr, error)) from error
    except socket.error as error:
        raise GraphiteSendException(
            "Socket closed before able to send data to %s, "
            "with error: %s" %
            (self.addr, error)) from error
    except Exception as error:
        raise GraphiteSendException(
            "Unknown error while trying to send data down socket to %s, "
            "error: %s" %
            (self.addr, error)) from error
    return "sent %d long pickled message" % len(message)  # | Given a message send it to the graphite server. |
def stop_process(self, process, timeout=None):
    """Begin a graceful shutdown of a single managed process.

    Flags the process record for termination, records an optional
    deadline, and delivers SIGINT to the underlying subprocess.
    """
    process["terminate"] = True
    deadline = None if timeout is None else time.time() + timeout
    if deadline is not None:
        process["terminate_at"] = deadline
    process["subprocess"].send_signal(signal.SIGINT)  # | Initiates a graceful stop of one process |
def _cleaning(clusL, path):
    """Reduce loci clusters once, caching the result as a pickle under `path`.

    On later runs the cached "list_obj_red.pk" backup is loaded instead of
    recomputing.  # NOTE(review): pickle.load assumes the backup file is
    # trusted; do not point `path` at user-controlled data.
    """
    backup = op.join(path, "list_obj_red.pk")
    if not op.exists(backup):
        clus_obj = reduceloci(clusL, path)
        # Cache the reduced clusters so reruns can skip this step.
        with open(backup, 'wb') as output:
            pickle.dump(clus_obj, output, pickle.HIGHEST_PROTOCOL)
        return clus_obj
    else:
        logger.info("Loading previous reduced clusters")
        with open(backup, 'rb') as in_handle:
            clus_obj = pickle.load(in_handle)
        return clus_obj | Load saved cluster and jump to next step |
def close(self):
super(LockingDatabase, self).close()
if not self.readonly:
self.release_lock() | Closes the database, releasing lock. |
def _parse_ostriz(ostriz_data):
if not ostriz_data:
raise NothingToDoException("No data to import")
results = []
found_build = None
last_finish_time = [0]
for test_path, test_data in six.iteritems(ostriz_data):
curr_build = test_data.get("build")
if not curr_build:
continue
if not found_build:
found_build = curr_build
if found_build != curr_build:
continue
if not test_data.get("statuses"):
continue
_append_record(test_data, results, test_path)
_comp_finish_time(test_data, last_finish_time)
if last_finish_time[0]:
logger.info("Last result finished at %s", last_finish_time[0])
testrun_id = _get_testrun_id(found_build)
return xunit_exporter.ImportedData(results=results, testrun=testrun_id) | Reads the content of the input JSON and returns testcases results. |
def sanitize_random(value):
if not value:
return value
return ''.join(random.choice(CHARACTERS) for _ in range(len(value))) | Random string of same length as the given value. |
def read(message):
require_compatible_version(message.physt_compatible)
a_dict = _dict_from_v0342(message)
return create_from_dict(a_dict, "Message") | Convert a parsed protobuf message into a histogram. |
def _one_q_pauli_prep(label, index, qubit):
if index not in [0, 1]:
raise ValueError(f'Bad Pauli index: {index}')
if label == 'X':
if index == 0:
return Program(RY(pi / 2, qubit))
else:
return Program(RY(-pi / 2, qubit))
elif label == 'Y':
if index == 0:
return Program(RX(-pi / 2, qubit))
else:
return Program(RX(pi / 2, qubit))
elif label == 'Z':
if index == 0:
return Program()
else:
return Program(RX(pi, qubit))
raise ValueError(f'Bad Pauli label: {label}') | Prepare the index-th eigenstate of the pauli operator given by label. |
def document_ids(self):
matches = [PCC_DOCID_RE.match(os.path.basename(fname))
for fname in pcc.tokenization]
return sorted(match.groups()[0] for match in matches) | returns a list of document IDs used in the PCC |
def _deploy(self):
    """Deploy the environment and wait for all hooks to finish executing.

    The timeout (seconds) comes from AMULET_SETUP_TIMEOUT, default 900.
    Raises an amulet FAIL status on timeout; any other exception
    propagates unchanged (the original `except Exception: raise` clause
    was a no-op and has been removed).
    """
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout)
        )  # | Deploy environment and wait for all hooks to finish executing. |
def calculate_width_widget(width, margin=None, margin_left=None, margin_right=None):
    """Compute the usable widget width after subtracting margins.

    Side-specific margins fall back to the shared `margin` value; each
    margin is coerced with int() before subtraction.  Returns None when
    no positive width remains.
    """
    left = margin if margin_left is None else margin_left
    right = margin if margin_right is None else margin_right
    for side_margin in (left, right):
        if side_margin is not None:
            width -= int(side_margin)
    return width if width > 0 else None  # | Calculate actual widget width based on given margins. |
def extract_arc(archive, compression, cmd, verbosity, interactive, outdir):
    """Build the command list and Popen kwargs for extracting an ARC archive."""
    command = [cmd, 'x', os.path.abspath(archive)]
    return (command, {'cwd': outdir})  # | Extract a ARC archive. |
def requirements(collector):
out = sys.stdout
artifact = collector.configuration['dashmat'].artifact
if artifact not in (None, "", NotSpecified):
if isinstance(artifact, six.string_types):
out = open(artifact, 'w')
else:
out = artifact
for active in collector.configuration['__imported__'].values():
for requirement in active.requirements():
out.write("{0}\n".format(requirement)) | Just print out the requirements |
def enable_fullquicklook(self):
    """Switch to full-quicklook mode.

    Re-enables the quicklook display and disables the individual
    cpu/gpu/mem/memswap panels.
    """
    self.args.disable_quicklook = False
    for panel in ('cpu', 'gpu', 'mem', 'memswap'):
        setattr(self.args, 'disable_%s' % panel, True)  # | Disable the full quicklook mode |
def updateAllKeys(self):
    """Refresh every keyframe widget with its key's current value."""
    sorted_keys = self.sorted_key_list()
    for widget, key in zip(self.kf_list, sorted_keys):
        widget.update(key, self.dct[key])  # | Update times for all keys in the layout. |
def update_from_dict(self, attribute_dict):
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict) | Method overriden from the base class |
def collate_fn(batch, train=True):
premise_batch, _ = pad_batch([row['premise'] for row in batch])
hypothesis_batch, _ = pad_batch([row['hypothesis'] for row in batch])
label_batch = torch.stack([row['label'] for row in batch])
transpose = (lambda b: b.t_().squeeze(0).contiguous())
return (transpose(premise_batch), transpose(hypothesis_batch), transpose(label_batch)) | list of tensors to a batch tensors |
def fix_reference_name(name, blacklist=None):
    """Return a syntax-valid Python reference name from an arbitrary name.

    Non-alphanumeric characters are removed, leading non-letter
    characters are stripped, an empty result falls back to "data", and a
    3-digit numeric suffix is appended until the name avoids everything
    in `blacklist`.  (Dead `name = str(name)` line and the E731 lambda
    from the original have been removed; behavior is unchanged.)
    """
    name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
    # The pattern only fails while the first char is not a letter, so
    # stripping one char per iteration is sufficient and terminates.
    while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name):
        name = name[1:]
    if not name:
        name = "data"
    if blacklist is not None and name in blacklist:
        index = 0
        while '%s%03d' % (name, index) in blacklist:
            index += 1
        name = '%s%03d' % (name, index)
    return name  # | Return a syntax-valid Python reference name from an arbitrary name |
def resume():
if not settings.platformCompatible():
return False
(output, error) = subprocess.Popen(["osascript", "-e", RESUME], stdout=subprocess.PIPE).communicate() | Tell iTunes to resume |
def rmse_2pi(x, xhat):
real_diff = from_0_to_pi(x - xhat)
np.square(real_diff, out=real_diff)
sum_ = np.sum(real_diff)
return sqrt(sum_ / len(x)) | Calcualte rmse between vector or matrix x and xhat, ignoring mod of 2pi. |
def _input_templates(self):
foo = self._config.read([self._pathfile])
if len(foo) == 1:
for k, v in self._config.items('paths'):
self.templates[k] = v
else:
raise ValueError("Could not read {0}!".format(self._pathfile))
return | Read the path template file. |
def _qstr(self, question):
"we need to cope with a list, or a list of lists"
parts = []
for entry in question:
if type(entry) is list:
parts.append(self._qstr(entry))
else:
parts.append('"%s"<%d>' % (self._count_data.get_candidate_title(entry), entry))
return ', '.join(parts) | we need to cope with a list, or a list of lists |
def _sanity_check_no_nested_folds(ir_blocks):
fold_seen = False
for block in ir_blocks:
if isinstance(block, Fold):
if fold_seen:
raise AssertionError(u'Found a nested Fold contexts: {}'.format(ir_blocks))
else:
fold_seen = True
elif isinstance(block, Unfold):
if not fold_seen:
raise AssertionError(u'Found an Unfold block without a matching Fold: '
u'{}'.format(ir_blocks))
else:
fold_seen = False | Assert that there are no nested Fold contexts, and that every Fold has a matching Unfold. |
def load(self, callback=None, errback=None, reload=False):
if not reload and self.data:
raise MonitorException('monitor already loaded')
def success(result, *args):
self.data = result
if callback:
return callback(self)
else:
return self
return self._rest.retrieve(self.data['id'], callback=success,
errback=errback) | Load monitor data from the API. |
def dumps(self, script):
"Return a compressed representation of script as a binary string."
string = BytesIO()
self._dump(script, string, self._protocol, self._version)
return string.getvalue() | Return a compressed representation of script as a binary string. |
def build_header(self, title):
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out | Generate the header for the Markdown file. |
def name_to_system_object(self, name):
    """Return the SystemObject registered under `name`, or None.

    String lookups are only permitted when allow_name_referencing is
    enabled; Object instances are converted to their string name first.
    (The original's no-op `name = name` assignment has been removed.)
    """
    if isinstance(name, str):
        if not self.allow_name_referencing:
            raise NameError('System.allow_name_referencing is set to False, cannot convert string to name')
    elif isinstance(name, Object):
        name = str(name)
    return self.namespace.get(name, None)  # | Give SystemObject instance corresponding to the name |
def _fetch_json(self, url, payload):
params = {
'data': json.dumps(payload),
'headers': {'content-type': 'application/json'},
'params': {'sid': self.sma_sid} if self.sma_sid else None,
}
for _ in range(3):
try:
with async_timeout.timeout(3):
res = yield from self._aio_session.post(
self._url + url, **params)
return (yield from res.json()) or {}
except asyncio.TimeoutError:
continue
return {'err': "Could not connect to SMA at {} (timeout)"
.format(self._url)} | Fetch json data for requests. |
def write(self, descrs):
names = []
for descr in descrs:
mo = re.match(self.long_regex, descr)
if mo:
names.append(mo.group(mo.lastindex) + descr[mo.end():])
else:
names.append(descr)
return names | Convert descriptions into names |
def _clear_state(self, seed=None):
self.start_time = time()
self.run_stats = []
self.best_index = -1
self.best_score = -1
self.best_config = None
self.search_space = None
if seed is not None:
self.rng = random.Random(seed) | Clears the state, starts clock |
def tls_session_update(self, msg_str):
self.tls_session.handshake_messages.append(msg_str)
self.tls_session.handshake_messages_parsed.append(self) | Covers both post_build- and post_dissection- context updates. |
def a_temp_file():
    """Yield an open NamedTemporaryFile and guarantee it is deleted afterwards."""
    path = None
    try:
        handle = tempfile.NamedTemporaryFile(delete=False)
        path = handle.name
        yield handle
    finally:
        # delete=False means we own cleanup, even when the body raised.
        if path and os.path.exists(path):
            os.remove(path)  # | Yield the name of a temporary file and ensure it's removed after use |
def _adjust_prt_flds(self, kws_xlsx, desc2nts, shade_hdrgos):
if "prt_flds" in kws_xlsx:
return kws_xlsx["prt_flds"]
dont_print = set(['hdr_idx', 'is_hdrgo', 'is_usrgo'])
prt_flds_adjusted = []
nt_flds = self.sortobj.get_fields(desc2nts)
for nt_fld in nt_flds:
if nt_fld not in dont_print:
if nt_fld == "format_txt":
if shade_hdrgos is True:
prt_flds_adjusted.append(nt_fld)
else:
prt_flds_adjusted.append(nt_fld)
kws_xlsx['prt_flds'] = prt_flds_adjusted | Print user-requested fields or provided fields minus info fields. |
def _get_fastq_files(ldetail, read, fastq_dir):
return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"],
"Sample_%s" % ldetail["name"],
"%s_*_%s_*.fastq.gz" % (ldetail["name"], read))) | Retrieve fastq files corresponding to the sample and read number. |
def add_weight(cls):
@functools.wraps(cls.add_weight)
def _add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
**kwargs):
if isinstance(initializer, tf.keras.layers.Layer):
weight = initializer(shape, dtype)
self._trainable_weights.extend(initializer.trainable_weights)
self._non_trainable_weights.extend(initializer.non_trainable_weights)
if regularizer is not None:
def loss_fn():
with tf.name_scope(name + '/Regularizer'):
return regularizer(initializer(shape, dtype))
self.add_loss(loss_fn)
return weight
return super(cls, self).add_weight(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
**kwargs)
cls.add_weight = _add_weight
return cls | Decorator for Layers, overriding add_weight for trainable initializers. |
def connection_service_name(service, *args):
if isinstance(service, str):
return service
return normalize_string(type(service).__name__) | the name of a service that manages the connection between services |
def servicenames(self):
    """Give the set of service names available in this folder.

    Names are the final path component of each service entry, with any
    trailing slash removed.
    """
    # Set comprehension instead of set([...]) (flake8-comprehensions C403).
    return {service['name'].rstrip('/').split('/')[-1]
            for service in self._json_struct.get('services', [])}  # | Give the list of services available in this folder. |
def autofill(ctx, f):
auto_fill_days = ctx.obj['settings']['auto_fill_days']
if not auto_fill_days:
ctx.obj['view'].view.err("The parameter `auto_fill_days` must be set "
"to use this command.")
return
today = datetime.date.today()
last_day = calendar.monthrange(today.year, today.month)
last_date = datetime.date(today.year, today.month, last_day[1])
timesheet_collection = get_timesheet_collection_for_context(
ctx, f
)
t = timesheet_collection.latest()
t.prefill(auto_fill_days, last_date)
t.save()
ctx.obj['view'].msg("Your entries file has been filled.") | Fills your timesheet up to today, for the defined auto_fill_days. |
def reject( self ):
if ( self == XConfigDialog._instance ):
XConfigDialog._instance = None
super(XConfigDialog, self).reject() | Overloads the reject method to clear up the instance variable. |
def register(self, name: str):
    """Register a hook slot under `name`.

    Raises ValueError on duplicate registration and forwards the new
    name to the event dispatcher when one is attached.
    """
    if name in self._callbacks:
        raise ValueError('Hook already registered')
    self._callbacks[name] = None
    dispatcher = self._event_dispatcher
    if dispatcher is not None:
        dispatcher.register(name)  # | Register hooks that can be connected. |
def create_mysql_mysqlconnector(username, password, host, port, database, **kwargs):
return create_engine(
_create_mysql_mysqlconnector(username, password, host, port, database),
**kwargs
) | create an engine connected to a mysql database using mysqlconnector. |
def get(self, thing: type):
if thing in self.instances:
return self.instances[thing]
if thing in self.factories:
fact = self.factories[thing]
ret = self.get(fact)
if hasattr(fact, '__di__') and fact.__di__['singleton']:
self.instances[thing] = ret
return ret
if inspect.isclass(thing):
return self._call_class_init(thing)
elif callable(thing):
return self.call(thing)
raise DiayException('cannot resolve: %r' % thing) | Get an instance of some type. |
def remove_edge_from_heap(self, segment_ids):
self._initialize_heap()
key = normalize_edge(segment_ids)
if key in self.edge_map:
self.edge_map[key][0] = None
self.num_valid_edges -= 1 | Remove an edge from the heap. |
def read(self, filepath):
    """Read the metadata values from `filepath` (UTF-8 text).

    Uses a context manager instead of the original try/finally so the
    file is closed even when read_file raises.
    """
    with codecs.open(filepath, 'r', encoding='utf-8') as fp:
        self.read_file(fp)  # | Read the metadata values from a file path. |
def raenc(self, key):
    """Remove `key` from the Altair encoding dict, warning when it is absent."""
    try:
        del self.altair_encode[key]
    except KeyError:
        self.warning("Key " + key + " not found in Altair encoding dict")  # | Remove an entry from the altair encoding dict |
def max(self):
return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True)) | Compute the max across records. |
async def readline(self) -> bytes:
if self._at_eof:
return b''
if self._unread:
line = self._unread.popleft()
else:
line = await self._content.readline()
if line.startswith(self._boundary):
sline = line.rstrip(b'\r\n')
boundary = self._boundary
last_boundary = self._boundary + b'--'
if sline == boundary or sline == last_boundary:
self._at_eof = True
self._unread.append(line)
return b''
else:
next_line = await self._content.readline()
if next_line.startswith(self._boundary):
line = line[:-len(self._newline)]
self._unread.append(next_line)
return line | Reads body part by line by line. |
def filechunk(f, chunksize):
while True:
chunk = tuple(itertools.islice(f, chunksize))
if not chunk:
return
yield np.loadtxt(iter(chunk), dtype=np.float64) | Iterator that allow for piecemeal processing of a file. |
def levenshtein_distance(self, a, b):
    """Compute the Levenshtein (edit) distance between strings a and b.

    Uses the classic two-row dynamic programming formulation, iterating
    over the longer string so the row length stays minimal.
    """
    if len(a) > len(b):
        a, b = b, a
    n, m = len(a), len(b)
    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            insert_cost = previous[j] + 1
            delete_cost = current[j - 1] + 1
            # Bool adds as 0/1: substitution is free on matching chars.
            substitute_cost = previous[j - 1] + (a[j - 1] != b[i - 1])
            current[j] = min(insert_cost, delete_cost, substitute_cost)
    return current[n]  # | This calculates the Levenshtein distance between a and b. |
def _get_request_obj(csr):
text = _text_or_file(csr)
text = get_pem_entry(text, pem_type='CERTIFICATE REQUEST')
return M2Crypto.X509.load_request_string(text) | Returns a CSR object based on PEM text. |
def detach_client(self, app):
connection = self.get_connection()
if connection:
connection.detach_and_close()
self.invalidate() | Detach the client that belongs to this CLI. |
def _get_spyderplugins(plugin_path, is_io, modnames, modlist):
if not osp.isdir(plugin_path):
return
for name in os.listdir(plugin_path):
if not name.startswith(PLUGIN_PREFIX):
continue
if is_io != name.startswith(IO_PREFIX):
continue
forbidden_suffixes = ['dist-info', 'egg.info', 'egg-info', 'egg-link',
'kernels']
if any([name.endswith(s) for s in forbidden_suffixes]):
continue
_import_plugin(name, plugin_path, modnames, modlist) | Scan the directory `plugin_path` for plugin packages and loads them. |
def authInsert(user, role, group, site):
    """Authorization function for general insert.

    Grants access when no role is required, or when one of the user's
    role/group pairs appears in the colon-separated role mapping.
    Uses dict.items() instead of the Python-2-only iteritems().

    NOTE(review): `group` and `site` are unused here; kept for interface
    compatibility with other auth functions.
    """
    if not role:
        return True
    for user_role, attrs in user['roles'].items():
        for grp in attrs['group']:
            if user_role in role.get(grp, '').split(':'):
                return True
    return False  # | Authorization function for general insert |
def _authstr(self, auth):
if type(auth) is dict:
return '{' + ','.join(["{0}:{1}".format(k, auth[k]) for k in sorted(auth.keys())]) + '}'
return auth | Convert auth to str so that it can be hashed |
def mean_oob_mae_weight(trees):
weights = []
active_trees = []
for tree in trees:
oob_mae = tree.out_of_bag_mae
if oob_mae is None or oob_mae.mean is None:
continue
weights.append(oob_mae.mean)
active_trees.append(tree)
if not active_trees:
return
weights = normalize(weights)
return zip(weights, active_trees) | Returns weights proportional to the out-of-bag mean absolute error for each tree. |
def request(self, method, url, body=None, headers=None, *args, **kwargs):
self._vcr_request = Request(
method=method,
uri=self._uri(url),
body=body,
headers=headers or {}
)
log.debug('Got {}'.format(self._vcr_request))
self._sock = VCRFakeSocket() | Persist the request metadata in self._vcr_request |
def remove_if_exist(path):
    """Delete a file, or a directory tree, when it exists.

    Returns True when something was removed and False otherwise; a
    missing path never raises.
    """
    if not os.path.exists(path):
        return False
    if os.path.isdir(path):
        shutil.rmtree(path)
        return True
    if os.path.isfile(path):
        os.remove(path)
        return True
    # Exists, but is neither a regular file nor a directory.
    return False  # | Delete a file or a directory recursively if it exists, else no exception is raised |
def show_data(self, item):
child, cookie = self.mainview_tree.GetFirstChild(item)
child_list = []
while child.IsOk():
child_list.append(child)
child, cookie = self.mainview_tree.GetNextChild(item, cookie)
lc = self.nodeview_lc
lc.DeleteAllItems()
for i, child in enumerate(child_list):
text = self.mainview_tree.GetItemText(child)
try:
k, v = [s.strip() for s in text.split(':')]
except ValueError:
k, v = text, '...'
idx = lc.InsertItem(MAXNROW, v)
lc.SetItem(idx, 1, k) | show data key-value in ListCtrl for tree item |
def append(self, value):
self.database.run_script(
'array_append',
keys=[self.key],
args=[value]) | Append a new value to the end of the array. |
def stub():
    """Parse legacy CGI form fields (userid, passwd, group).

    NOTE(review): dead leftover code -- none of the parsed values are
    used or returned, and the cgi module is deprecated (removed in
    Python 3.13); candidate for deletion.
    """
    form = cgi.FieldStorage()
    userid = form['userid'].value
    password = form['passwd'].value
    group = form['group'].value | Just some left over code |
def domain_list(gandi):
domains = gandi.dns.list()
for domain in domains:
gandi.echo(domain['fqdn'])
return domains | List domains manageable by REST API. |
def dmag(self,band):
if self.mags is None:
raise ValueError('dmag is not defined because primary mags are not defined for this population.')
return self.stars['{}_mag'.format(band)] - self.mags[band] | Magnitude difference between primary star and BG stars |
def max_intensity(self, time):
ti = np.where(time == self.times)[0][0]
return self.timesteps[ti].max() | Calculate the maximum intensity found at a timestep. |
def send_stun(self, message, addr):
self.__log_debug('> %s %s', addr, message)
self.transport.sendto(bytes(message), addr) | Send a STUN message. |
def _init_update_po_files(self, domains):
for language in settings.TRANSLATIONS:
for domain, options in domains.items():
if language == options['default']: continue
if os.path.isfile(_po_path(language, domain)):
self._update_po_file(language, domain, options['pot'])
else:
self._init_po_file(language, domain, options['pot']) | Update or initialize the `.po` translation files |
def parse(self):
while True:
status, self._buffer, packet = Packet.parse_msg(self._buffer)
if status == PARSE_RESULT.INCOMPLETE:
return status
if status == PARSE_RESULT.OK and packet:
packet.received = datetime.datetime.now()
if isinstance(packet, UTETeachInPacket) and self.teach_in:
response_packet = packet.create_response_packet(self.base_id)
self.logger.info('Sending response to UTE teach-in.')
self.send(response_packet)
if self.__callback is None:
self.receive.put(packet)
else:
self.__callback(packet)
self.logger.debug(packet) | Parses messages and puts them to receive queue |
def format_help(self, description):
for bold in ("``", "*"):
parts = []
if description is None:
description = ""
for i, s in enumerate(description.split(bold)):
parts.append(s if i % 2 == 0 else "<b>%s</b>" % s)
description = "".join(parts)
description = urlize(description, autoescape=False)
return mark_safe(description.replace("\n", "<br>")) | Format the setting's description into HTML. |
def js(request):
userid = authenticated_userid(request)
user = markupsafe.Markup("'%s'")%userid if userid else "null"
redirect_paramater = request.registry['persona.redirect_url_parameter']
came_from = '%s%s' % (request.host_url,
request.GET.get(redirect_paramater, request.path_qs))
data = {
'user': user,
'login': request.route_path(request.registry['persona.login_route']),
'logout': request.route_path(request.registry['persona.logout_route']),
'csrf_token': request.session.get_csrf_token(),
'came_from': came_from,
'request_params': markupsafe.Markup(request.registry['persona.request_params']),
}
template = markupsafe.Markup(pkg_resources.resource_string('pyramid_persona', 'templates/persona.js').decode())
return template % data | Returns the javascript needed to run persona |
def format_progress(i, n):
    """Return a text progress bar plus "i/n" counts and a percentage."""
    fraction = float(i) / n if n != 0 else 0
    LEN_BAR = 25
    filled = int(round(fraction * LEN_BAR))
    bar = '+' * filled + '.' * (LEN_BAR - filled)
    return '[{0!s}] {1:d}/{2:d} - {3:.1f}%'.format(bar, i, n, fraction * 100)  # | Returns string containing a progress bar, a percentage, etc. |
def negotiate_header(url):
hostname = urlparse(url).hostname
_, krb_context = kerberos.authGSSClientInit('HTTP@%s' % hostname)
yield threads.deferToThread(kerberos.authGSSClientStep,
krb_context, '')
negotiate_details = kerberos.authGSSClientResponse(krb_context)
defer.returnValue('Negotiate ' + negotiate_details) | Return the "Authorization" HTTP header value to use for this URL. |
def _separable_approx3(h, N=1):
return np.cumsum([np.einsum("i,j,k", fz, fy, fx) for fz, fy, fx in _separable_series3(h, N)], 0) | returns the N first approximations to the 3d function h |
def verify_message(self, message):
if verify_checksum(
message,
self.in_checksum.get(message.id, 0),
):
self.in_checksum[message.id] = message.checksum[1]
if message.flags == FlagsType.none:
self.in_checksum.pop(message.id)
else:
self.in_checksum.pop(message.id, None)
raise InvalidChecksumError(
description="Checksum does not match!",
id=message.id,
) | Verify the checksum of the message. |
def save_json(self, fname='servers.json'):
rows = sorted(self.keys())
with open(fname, 'wt') as fp:
json.dump([self[k] for k in rows], fp, indent=1) | Write out to a CSV file. |
def plot(self):
plt.plot(self.bin_edges, self.hist, self.bin_edges, self.best_pdf) | Plot the empirical histogram versus best-fit distribution's PDF. |
def dist_factory(path_item, entry, only):
lower = entry.lower()
is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
return (
distributions_from_metadata
if is_meta else
find_distributions
if not only and _is_egg_path(entry) else
resolve_egg_link
if not only and lower.endswith('.egg-link') else
NoDists()
) | Return a dist_factory for a path_item and entry |
def complete_hit(self, text, line, begidx, endidx):
return [i for i in PsiturkNetworkShell.hit_commands if \
i.startswith(text)] | Tab-complete hit command. |
def _get_observer_fun(self, prop_name):
def _observer_fun(self, model, old, new):
if self._itsme:
return
self._on_prop_changed()
_observer_fun.__name__ = "property_%s_value_change" % prop_name
return _observer_fun | This is the code for an value change observer |
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None:
for name, param in model.named_parameters():
if name in histogram_parameters:
self.add_train_histogram("parameter_histogram/" + name, param) | Send histograms of parameters to tensorboard. |
def _format_strings(self):
values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values | we by definition have a TZ |
def memoize(func):
    """Classic memoize decorator for free (non-method) functions.

    NOTE(review): the cache key joins str(arg) values, so distinct
    argument tuples can collide (e.g. ("a__b",) vs ("a", "b")); kept
    as-is to preserve behavior with unhashable arguments.
    """
    cache = {}
    @functools.wraps(func)
    def wrapper(*args):
        key = "__".join(str(arg) for arg in args)
        try:
            return cache[key]
        except KeyError:
            cache[key] = func(*args)
            return cache[key]
    return wrapper  # | Classic memoize decorator for non-class methods |
def roster(opts, runner=None, utils=None, whitelist=None):
return LazyLoader(
_module_dirs(opts, 'roster'),
opts,
tag='roster',
whitelist=whitelist,
pack={
'__runner__': runner,
'__utils__': utils,
},
) | Returns the roster modules |
def language(self, language_code=None):
if language_code is None:
language_code = appsettings.PARLER_LANGUAGES.get_default_language()
self._language = language_code
return self | Set the language code to assign to objects retrieved using this QuerySet. |
def _pretty_size(size):
units = [' G', ' M', ' K', ' B']
while units and size >= 1000:
size = size / 1024.0
units.pop()
return '{0}{1}'.format(round(size, 1), units[-1]) | Print sizes in a similar fashion as eclean |
def _add_goterms_kws(self, go2obj_user, kws_gos):
if 'go2color' in kws_gos:
for goid in kws_gos['go2color'].keys():
self._add_goterms(go2obj_user, goid) | Add more GOTerms to go2obj_user, if requested and relevant. |
def calculate_tx_fee(tx_size: int) -> Decimal:
    """Return the transaction fee for a size given in bytes.

    Fee is 0.01 per kilobyte, with a floor of 0.001.  Decimals are now
    constructed from strings/ints rather than binary floats, so the
    result is exact (Decimal(0.02) != Decimal('0.02')).
    """
    per_kb_cost = Decimal('0.01')
    min_fee = Decimal('0.001')
    fee = Decimal(tx_size) / 1000 * per_kb_cost
    return max(fee, min_fee)  # | return tx fee from tx size in bytes |
def most_confused(self, min_val:int=1, slice_size:int=1)->Collection[Tuple[str,str,int]]:
"Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
cm = self.confusion_matrix(slice_size=slice_size)
np.fill_diagonal(cm, 0)
res = [(self.data.classes[i],self.data.classes[j],cm[i,j])
for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True) | Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences. |
def visibleCols(self):
'List of `Column` which are not hidden.'
return self.keyCols + [c for c in self.columns if not c.hidden and not c.keycol] | List of `Column` which are not hidden. |
def pdf(self, x_test):
N,D = self.data.shape
x_test = np.asfortranarray(x_test)
x_test = x_test.reshape([-1, D])
pdfs = self._individual_pdfs(x_test)
if self.fully_dimensional:
pdfs = np.sum(np.prod(pdfs, axis=-1)*self.weights[None, :], axis=-1)
else:
pdfs = np.prod(np.sum(pdfs*self.weights[None,:,None], axis=-2), axis=-1)
return(pdfs) | Computes the probability density function at all x_test |
def get(self, class_name):
for transform in self.cpu_transforms + self.gpu_transforms:
if transform.__class__.__name__ == class_name:
return transform | Get a transform in the chain from its name. |
def sizes(count, offset=0, max_chunk=500):
    """Yield (chunk, offset) pairs for count/offset pagination.

    With count=None, produces max_chunk-sized pages forever; otherwise
    pages until `count` items have been covered.
    """
    if count is None:
        while True:
            yield max_chunk, offset
            offset += max_chunk
    else:
        remaining = count
        while remaining:
            chunk = min(remaining, max_chunk)
            remaining = max(0, remaining - max_chunk)
            yield chunk, offset
            offset += chunk  # | Helper to iterate over remote data via count & offset pagination. |
def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
else:
self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
self.value = msg.SerializeToString() | Packs the specified message into current Any message. |
def _attach_dummy_intf_rtr(self, tenant_id, tenant_name, rtr_id):
serv_obj = self.get_service_obj(tenant_id)
fw_dict = serv_obj.get_fw_dict()
fw_id = fw_dict.get('fw_id')
rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + (
fw_id[len(fw_id) - 4:])
net_id, subnet_id = self.os_helper.create_network(
rtr_nwk, tenant_id, self.servicedummy_ip_subnet)
if net_id is None or subnet_id is None:
return None, None
net_dict = {}
net_dict['name'] = rtr_nwk
self.store_net_db(tenant_id, net_id, net_dict, 'SUCCESS')
subnet_lst = set()
subnet_lst.add(subnet_id)
if rtr_id is None:
self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id,
net_id)
return None, None
ret = self.os_helper.add_intf_router(rtr_id, tenant_id, subnet_lst)
if not ret:
self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id,
net_id)
return None, None
return net_id, subnet_id | Function to create a dummy router and interface. |
def __update_mouse(self, milliseconds):
for button in self.gui_buttons:
was_hovering = button.is_mouse_hovering
button.update(milliseconds)
if was_hovering == False and button.is_mouse_hovering:
old_index = self.current_index
self.current_index = self.gui_buttons.index(button)
self.__handle_selections(old_index, self.current_index)
elif Ragnarok.get_world().Mouse.is_clicked(self.mouse_select_button) and button.is_mouse_hovering:
button.clicked_action() | Use the mouse to control selection of the buttons. |
def nodes_dump(self):
self._flush_nodes()
for (graph, node, branch, turn,tick, extant) in self.sql('nodes_dump'):
yield (
self.unpack(graph),
self.unpack(node),
branch,
turn,
tick,
bool(extant)
) | Dump the entire contents of the nodes table. |
def add_line(self, logevent):
key = None
self.empty = False
self.groups.setdefault(key, list()).append(logevent) | Append log line to this plot type. |
def parse_authorization_header(authorization_header):
    """Parse an OAuth authorization header into a list of (key, value) tuples.

    Raises ValueError for non-OAuth schemes or malformed parameter lists.
    """
    scheme = 'OAuth '.lower()
    prefix = authorization_header[:len(scheme)]
    if prefix.lower().startswith(scheme):
        items = parse_http_list(authorization_header[len(scheme):])
        try:
            return list(parse_keqv_list(items).items())
        except (IndexError, ValueError):
            pass
    raise ValueError('Malformed authorization header')  # | Parse an OAuth authorization header into a list of 2-tuples |
def run(api_port=8082, address=None, unix_socket=None, scheduler=None):
if scheduler is None:
scheduler = Scheduler()
scheduler.load()
_init_api(
scheduler=scheduler,
api_port=api_port,
address=address,
unix_socket=unix_socket,
)
pruner = tornado.ioloop.PeriodicCallback(scheduler.prune, 60000)
pruner.start()
def shutdown_handler(signum, frame):
exit_handler()
sys.exit(0)
@atexit.register
def exit_handler():
logger.info("Scheduler instance shutting down")
scheduler.dump()
stop()
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)
if os.name == 'nt':
signal.signal(signal.SIGBREAK, shutdown_handler)
else:
signal.signal(signal.SIGQUIT, shutdown_handler)
logger.info("Scheduler starting up")
tornado.ioloop.IOLoop.instance().start() | Runs one instance of the API server. |
def set(self, oid, typevalue):
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable) | Call the default or user setter function if available |
def _is_not_archived(sysmeta_pyxb):
if _is_archived(sysmeta_pyxb):
raise d1_common.types.exceptions.InvalidSystemMetadata(
0,
'Archived flag was set. A new object created via create() or update() '
'cannot already be archived. pid="{}"'.format(
d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
),
identifier=d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
) | Assert that ``sysmeta_pyxb`` does not have have the archived flag set. |
def _redact_secret(
data: Union[Dict, List],
) -> Union[Dict, List]:
if isinstance(data, dict):
stack = [data]
else:
stack = []
while stack:
current = stack.pop()
if 'secret' in current:
current['secret'] = '<redacted>'
else:
stack.extend(
value
for value in current.values()
if isinstance(value, dict)
)
return data | Modify `data` in-place and replace keys named `secret`. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.