| code (string, length 51–2.34k) | docstring (string, length 11–171) |
|---|---|
def terminal_type(cls):
    """Return the terminal platform family for the current interpreter.

    Matches sys.platform against known substrings and returns one of
    'linux', 'darwin', 'cygwin' or 'windows'; falls back to
    'UNDEFINED_TERMINAL_TYPE' when none matches.
    """
    platform_name = sys.platform
    for marker in ('linux', 'darwin', 'cygwin', 'windows'):
        if marker in platform_name:
            return marker
    return 'UNDEFINED_TERMINAL_TYPE'
def pandoc(args, filein=None, fileout=None):
    """Execute pandoc with the given argument string.

    ``filein`` and ``fileout`` are optional input and ``-o`` output paths;
    ``args`` is split on whitespace and appended to the command line.
    Returns pandoc's stdout decoded as UTF-8.
    Raises PandocError when pandoc exits with a non-zero return code.
    """
    cmd = [u'pandoc']
    if filein:
        cmd.append(filein)
    if fileout:
        cmd.append('-o')
        cmd.append(fileout)
    cmd.extend(args.split())
    # NOTE(review): stderr is not piped, so `err` below is always None and
    # the error message prints 'None' — confirm whether stderr=PIPE was intended.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode:
        raise PandocError('pandoc exited with return code {}\n{}'.format(proc.returncode, str(err)))
    return out.decode('utf-8') | Execute pandoc with the given arguments |
def load_mode_builder(obs_mode, node):
    """Load the observing mode's OB builder from a config *node*.

    A string 'builder' entry is imported and bound to obs_mode.build_ob.
    Otherwise, 'builder_options' (a list of dicts) is scanned for a
    'result_of' entry which populates obs_mode.build_ob_options.
    Returns the (mutated) obs_mode.
    """
    # NOTE(review): 'builder_options' is only consulted when 'builder' is
    # absent (it is in the else branch) — confirm both are meant to be exclusive.
    nval1 = node.get('builder')
    if nval1 is not None:
        if isinstance(nval1, str):
            newmethod = import_object(nval1)
            # Bind the imported function as a method of this obs_mode instance.
            obs_mode.build_ob = newmethod.__get__(obs_mode)
        else:
            raise TypeError('builder must be None or a string')
    else:
        nval2 = node.get('builder_options')
        if nval2 is not None:
            if isinstance(nval2, list):
                for opt_dict in nval2:
                    if 'result_of' in opt_dict:
                        fields = opt_dict['result_of']
                        obs_mode.build_ob_options = ResultOf(**fields)
                        # Only the first 'result_of' entry is used.
                        break
            else:
                raise TypeError('builder_options must be None or a list')
    return obs_mode | Load observing mode OB builder |
def _init_flds_cur(self):
    """Choose the ordered subset of fields to print from all available fields."""
    # Core columns always come first; the primary p-value column name is
    # configurable via self.pval_fld.
    fields = ['GO', 'NS', 'enrichment', self.pval_fld, 'dcnt', 'tinfo', 'depth',
              'ratio_in_study', 'ratio_in_pop', 'name']
    # Any secondary p-value columns (p_*) other than the primary one.
    fields += [f for f in self.flds_all
               if f.startswith('p_') and f != self.pval_fld]
    fields += ['study_count', 'study_items']
    return fields
def ms_rotate(self, viewer, event, data_x, data_y, msg=True):
    """Rotate the image by dragging the cursor left or right.

    Dispatches on event.state: 'move' continues an in-progress rotation,
    'down' records the drag start point and current rotation, anything
    else clears the onscreen message.  Always returns True (event handled).
    """
    if not self.canrotate:
        return True
    # The msg flag can be overridden by the 'msg_rotate' setting.
    msg = self.settings.get('msg_rotate', msg)
    x, y = self.get_win_xy(viewer)
    if event.state == 'move':
        self._rotate_xy(viewer, x, y)
    elif event.state == 'down':
        if msg:
            viewer.onscreen_message("Rotate (drag around center)",
                                    delay=1.0)
        self._start_x, self._start_y = x, y
        self._start_rot = viewer.get_rotation()
    else:
        viewer.onscreen_message(None)
    return True | Rotate the image by dragging the cursor left or right. |
def validate_relation_data(self, sentry_unit, relation, expected):
    """Validate actual relation data against *expected* relation data.

    relation is a (relation_name, related_unit) pair passed to the sentry.
    Returns whatever self._validate_dict_data reports.
    """
    actual = sentry_unit.relation(relation[0], relation[1])
    return self._validate_dict_data(expected, actual) | Validate actual relation data based on expected relation data. |
def _assemble_regulate_activity(stmt):
    """Assemble a RegulateActivity statement into an English sentence."""
    relation = ' activates ' if stmt.is_activation else ' inhibits '
    sentence = (_assemble_agent_str(stmt.subj)
                + relation
                + _assemble_agent_str(stmt.obj))
    return _make_sentence(sentence)
def delete_node_1ton(node_list, begin, node, end):
    """Delete *node* (which has 1 input and n outputs) from the graph.

    Rewires *begin* (the producer) to each consumer in *end*, fixes
    successor/precedence links, removes *node* from *node_list* and
    returns the updated list.  When *end* is None it defaults to the
    node's successors; a single consumer may be passed without a list.
    """
    if end is None:
        # BUG FIX: the original began this branch with
        # `assert end is not None`, which can never hold when end IS None,
        # making the documented default unreachable.  The clear intent is
        # to fall back to the node's successors.
        end = node.successor
    elif not isinstance(end, list):
        end = [end]
    if any(e_.in_or_out for e_ in end):
        # Some consumer is a graph boundary node: redirect at the producer.
        begin.out_redirect(node.single_input, node.single_output)
    else:
        for consumer in end:
            target_var_name = node.single_input
            assert target_var_name in begin.output.values()
            consumer.in_redirect(node.single_output, target_var_name)
    # Splice the node out of the successor chain and fix back-links.
    begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor
    for consumer in end:
        consumer.precedence = [begin if v_ == node else v_ for v_ in consumer.precedence]
    node_list.remove(node)
    return node_list
def _reuse_pre_installed_setuptools(env, installer):
    """Return whether a pre-installed setuptools distribution should be reused.

    Compares the environment's setuptools version against the installer's
    bundled one and honours the config reuse_old/reuse_best/reuse_future
    flags.  Returns True (after printing a notice) when reusing; returns
    None implicitly otherwise.
    """
    if not env.setuptools_version:
        return
    reuse_old = config.reuse_old_setuptools
    reuse_best = config.reuse_best_setuptools
    reuse_future = config.reuse_future_setuptools
    # reuse_comment doubles as the decision flag: None means "do not reuse".
    reuse_comment = None
    if reuse_old or reuse_best or reuse_future:
        pv_old = parse_version(env.setuptools_version)
        pv_new = parse_version(installer.setuptools_version())
        if pv_old < pv_new:
            if reuse_old:
                reuse_comment = "%s+ recommended" % (
                    installer.setuptools_version(),)
        elif pv_old > pv_new:
            if reuse_future:
                reuse_comment = "%s+ required" % (
                    installer.setuptools_version(),)
        elif reuse_best:
            # Versions are equal: empty comment still means "reuse".
            reuse_comment = ""
    if reuse_comment is None:
        return
    if reuse_comment:
        reuse_comment = " (%s)" % (reuse_comment,)
    print("Reusing pre-installed setuptools %s distribution%s." % (
        env.setuptools_version, reuse_comment))
    return True | Return whether a pre-installed setuptools distribution should be reused. |
def update_docs(self, iface, module):
    """Update the documentation of *iface* from the module's predocs.

    predocs is keyed by '<module>.<iface>' and holds a
    (raw_doc, docstart, docend) triple.
    """
    key = "{}.{}".format(module.name, iface.name)
    if key in module.predocs:
        iface.docstring = self.docparser.to_doc(module.predocs[key][0], iface.name)
        iface.docstart, iface.docend = (module.predocs[key][1], module.predocs[key][2]) | Updates the documentation for the specified interface using the module predocs. |
def remove_router_from_hosting_device(self, client, hosting_device_id,
                                      router_id):
    """Remove a router from a hosting device via the REST client.

    Issues a DELETE against the hosting-device L3-routers sub-resource.
    """
    res_path = hostingdevice.HostingDevice.resource_path
    # NOTE(review): two %-args are interpolated, so res_path (or
    # DEVICE_L3_ROUTERS) must itself contain a %s for hosting_device_id —
    # verify against the resource_path definition.
    return client.delete((res_path + DEVICE_L3_ROUTERS + "/%s") % (
        hosting_device_id, router_id)) | Remove a router from hosting_device. |
def interrupt(self):
    """Invoked on a write into the IR of the RendererDevice.

    Services the request when bit 0 of device register 9 is set, then
    clears the interrupt register.
    """
    if(self.device.read(9) & 0x01):
        self.handle_request()
    self.device.clear_IR() | Invoked on a write operation into the IR of the RendererDevice. |
def _start_server(self, *args):
    """Run the node-local server, with SSL when a certificate is configured.

    Logs a warning when running without SSL and a critical message when
    the port cannot be opened (e.g. a privileged port without permission).
    """
    self.log("Starting server", args)
    secure = self.certificate is not None
    if secure:
        self.log("Running SSL server with cert:", self.certificate)
    else:
        self.log("Running insecure server without SSL. Do not use without SSL proxy in production!", lvl=warn)
    try:
        self.server = Server(
            (self.host, self.port),
            secure=secure,
            certfile=self.certificate
        ).register(self)
    except PermissionError:
        self.log('Could not open (privileged?) port, check '
                 'permissions!', lvl=critical) | Run the node local server |
def matches(target, entry):
    """Return True when *target* matches the whitelist *entry*.

    Elements are compared pairwise (zip_longest pads the shorter side with
    None); a falsy entry element acts as a wildcard.  The entry's first two
    elements must both be truthy for it to count as a match at all.
    """
    for t, e in itertools.zip_longest(target, entry):
        if e and t != e:
            return False
    # BUG FIX: the original returned `entry[0] and entry[1]`, leaking the
    # raw second element to callers instead of the documented boolean.
    return bool(entry[0] and entry[1])
def _ExtractHuntIdFromPath(entry, event):
    """Extract a Hunt ID from an APIAuditEntry's HTTP request path.

    Sets ``event.urn`` only when the path contains a ``hunt/<id>`` segment;
    otherwise the event is left untouched.
    """
    match = re.match(r".*hunt/([^/]+).*", entry.http_request_path)
    if match is not None:
        hunt_id = match.group(1)
        event.urn = "aff4:/hunts/{}".format(hunt_id)
def delete_url(self, url):
    """Delete local files previously downloaded from *url*.

    Handles both the compressed and decompressed variants: cached paths
    are removed from the _local_paths index, and any file still present
    at the computed local path is deleted as well.
    """
    for decompress in [False, True]:
        key = (url, decompress)
        if key in self._local_paths:
            path = self._local_paths[key]
            remove(path)
            del self._local_paths[key]
        # Also remove files that exist on disk but were never cached
        # in this session (download=False avoids re-fetching).
        path = self.local_path(
            url, decompress=decompress, download=False)
        if exists(path):
            remove(path) | Delete local files downloaded from given URL |
def create(cls, bucket, key, value):
    """Create a new tag for *bucket* inside a nested DB transaction.

    Returns the new (added but possibly not yet committed) tag object.
    """
    with db.session.begin_nested():
        obj = cls(
            bucket_id=as_bucket_id(bucket),
            key=key,
            value=value
        )
        db.session.add(obj)
    return obj | Create a new tag for bucket. |
def render_field_previews(self, id_and_obj_list, admin, request, field_name):
    """Render an HTML <ul> of preview links for the given objects.

    For each (id, obj) pair the preview is resolved through a chain of
    fallbacks: admin.preview -> obj.preview -> self.preview_<field_name>
    -> self.render_field_default.  Any exception in the chain is rendered
    via render_field_error.  Returns '' when the list is empty.
    """
    obj_preview_list = []
    for obj_id, obj in id_and_obj_list:
        try:
            if obj is None:
                # Object could not be resolved: render an error entry.
                obj_preview = self.render_field_error(
                    obj_id, obj, None, request
                )
            else:
                try:
                    obj_preview = admin.preview(obj, request)
                except AttributeError:
                    try:
                        obj_preview = obj.preview(request)
                    except AttributeError:
                        try:
                            obj_preview = getattr(self, 'preview_{0}'.format(
                                field_name))(obj, request)
                        except AttributeError:
                            obj_preview = self.render_field_default(obj, request)
            obj_link = admin_link(obj, inner_html=obj_preview)
        except Exception as ex:
            obj_link = self.render_field_error(obj_id, obj, ex, request)
        obj_preview_list.append(obj_link)
    li_html_list = [u'<li>{0}</li>'.format(preview)
                    for preview in obj_preview_list]
    if li_html_list:
        return u'<ul>{0}</ul>'.format(u''.join(li_html_list))
    else:
        return '' | Override this to customise the preview representation of all objects. |
def cl_mutect(self, params, tmp_dir):
    """Build the command line to run the MuTect paired algorithm.

    Combines the configured JVM options (memory adjusted downward by 1.1x)
    with default tmp-dir JVM opts and the stringified caller params.
    """
    gatk_jar = self._get_jar("muTect", ["mutect"])
    jvm_opts = config_utils.adjust_opts(self._jvm_opts,
                                        {"algorithm": {"memory_adjust":
                                                       {"magnitude": 1.1, "direction": "decrease"}}})
    return ["java"] + jvm_opts + get_default_jvm_opts(tmp_dir) + \
        ["-jar", gatk_jar] + [str(x) for x in params] | Define parameters to run the mutect paired algorithm. |
def _find_stages(self):
    """Find the limits of each Dockerfile stage.

    Walks the parsed structure in reverse so that each FROM closes the
    stage whose end (and last USER instruction) was seen after it.
    Returns stages in file order, each with from/end structures and the
    stage's last USER content (or None).
    """
    stages = []
    end = last_user_found = None
    for part in reversed(self.dfp.structure):
        if end is None:
            # First part seen after a FROM (scanning backwards) ends the stage.
            end = part
        if part['instruction'] == 'USER' and not last_user_found:
            last_user_found = part['content']
        if part['instruction'] == 'FROM':
            stages.insert(0, {'from_structure': part,
                              'end_structure': end,
                              'stage_user': last_user_found})
            end = last_user_found = None
    return stages | Find limits of each Dockerfile stage |
def flush(self):
    """Flush the queue, emitting the current buffered state, then clear it.

    NOTE(review): notify fires only when _emit_partial is False AND the
    buffer is NOT full — that reads inverted relative to the docstring
    ("emit the current queue"); confirm the intended condition.
    """
    if not self._emit_partial and len(self._state) != self._state.maxlen:
        self.notify(tuple(self._state))
    self._state.clear() | Flush the queue - this will emit the current queue |
def _cron_id(cron):
    """SAFETYBELT: return the cron entry's identifier as a string.

    Falls back to SALT_CRON_NO_IDENTIFIER when the entry has no
    'identifier'; returns None implicitly only when even that fallback
    is falsy.
    """
    cid = None
    if cron['identifier']:
        cid = cron['identifier']
    else:
        cid = SALT_CRON_NO_IDENTIFIER
    if cid:
        return _ensure_string(cid) | SAFETYBELT, Only set if we really have an identifier |
def _store_credentials(self, username, password, remember=False):
    """Store GitHub credentials for future use.

    The username goes to config; the password goes to the system keyring.
    If the keyring write fails, the user is warned (when message boxes are
    enabled) and remember is reset to False before being persisted.
    """
    if username and password and remember:
        CONF.set('main', 'report_error/username', username)
        try:
            keyring.set_password('github', username, password)
        except Exception:
            if self._show_msgbox:
                QMessageBox.warning(self.parent_widget,
                                    _('Failed to store password'),
                                    _('It was not possible to securely '
                                      'save your password. You will be '
                                      'prompted for your Github '
                                      'credentials next time you want '
                                      'to report an issue.'))
            remember = False
    CONF.set('main', 'report_error/remember_me', remember) | Store credentials for future use. |
def _parse_tag(self, name):
    """Parse a fast-import 'tag' command into a TagCommand.

    Reads the from-ref, the tagger (who may be omitted), and the message
    payload from the input stream.
    """
    from_ = self._get_from(b'tag')
    tagger = self._get_user_info(b'tag', b'tagger',
                                 accept_just_who=True)
    message = self._get_data(b'tag', b'message')
    return commands.TagCommand(name, from_, tagger, message) | Parse a tag command. |
def sequences_from_fasta(path):
    """Extract multiple sequences from a FASTA file.

    Returns a dict mapping each record's description line to its sequence.
    Later records with a duplicate description silently overwrite earlier ones.
    """
    from Bio import SeqIO
    return {x.description: x.seq for x in SeqIO.parse(path, 'fasta')} | Extract multiple sequences from a FASTA file. |
def headers_as_dict(cls, resp):
    """Turn a response's headers into a dict of name -> stripped value.

    Handles both the Python 2 (resp.msg.headers, 'Name: value' strings)
    and Python 3 (resp.msg._headers pair list) representations.
    """
    if six.PY2:
        pairs = [header.split(':', 1) for header in resp.msg.headers]
        return dict([(k, v.strip()) for k, v in pairs])
    else:
        # NOTE(review): relies on the private _headers attribute of
        # email.message.Message — fragile across stdlib versions.
        return dict([(k, v.strip()) for k, v in resp.msg._headers]) | Turns an array of response headers into a dictionary |
def can_undo(self):
    """Return True when at least one undoable action is recorded."""
    if self._undo:
        return True
    return bool(self._open and self._open[0])
def cached(size):
    """A caching decorator based on parameter objects.

    ``size`` bounds the underlying _Cached store.  The extra lambda keeps
    the decorated attribute a plain function — NOTE(review): presumably so
    it still binds as an instance method via the function descriptor
    protocol (a _Cached instance might not); it does hide the wrapped
    function's metadata (no functools.wraps).  Confirm before simplifying.
    """
    def decorator(func):
        cached_func = _Cached(func, size)
        return lambda *a, **kw: cached_func(*a, **kw)
    return decorator | A caching decorator based on parameter objects |
def _align_mem(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform bwa-mem alignment, piping straight into BAM conversion.

    Builds a shell pipeline 'bwa mem ... | tobam' and runs it, checking
    afterwards that the transactional output is non-empty and of a
    reasonable size relative to the input fastq.  Returns out_file.
    """
    # pair_file != "" signals paired-end mode to the BAM converter.
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        cmd = ("unset JAVA_HOME && "
               "%s | %s" % (_get_bwa_mem_cmd(data, out_file, ref_file, fastq_file, pair_file), tobam_cl))
        do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None,
               [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)])
    return out_file | Perform bwa-mem alignment on supported read lengths. |
def __catalina_home():
    """Locate the Tomcat home directory (paths differ between packagings).

    Returns the first glob match that contains a ``bin`` subdirectory,
    or False when no candidate is found.
    """
    for pattern in ('/usr/share/tomcat*', '/opt/tomcat'):
        for candidate in glob.glob(pattern):
            if os.path.isdir(candidate + "/bin"):
                return candidate
    return False
def short_repr(item, max_length=15):
item = repr(item)
if len(item) > max_length:
item = '{}...{}'.format(item[:max_length - 3], item[-1])
return item | Short representation of item if it is too long |
def filter_string(n: Node, query: str) -> str:
    """Filter node *n* with *query* and ensure the returned value is a str."""
    return _scalariter2item(n, query, str) | Filter and ensure that the returned value is of string type. |
def update_contributions(sender, instance, action, model, pk_set, **kwargs):
    """Create a contribution for each author being added to an article.

    Only reacts to the m2m 'pre_add' signal action; everything else is a no-op.
    """
    if action != 'pre_add':
        return
    for author in model.objects.filter(pk__in=pk_set):
        update_content_contributions(instance, author)
def parse_access_token(self):
    """Extract the secret and token values from the access_token file.

    The file contains 'key=value' lines; the first line's value is the
    secret and the second the token.  When the file is missing, the
    request/access token flow is started instead.
    """
    access_file = os.path.join(self.file_path, 'access_token')
    if os.path.isfile(access_file):
        access_list = list()
        with open(access_file, 'r') as access_token:
            for line in access_token:
                value, data = line.split('=')
                access_list.append(data.rstrip())
        # Order in the file: secret first, then token.
        self.access_secret = access_list[0]
        self.access_token = access_list[1]
    else:
        print('Missing access_token')
        self.get_request_token()
        self.get_access_token() | Extract the secret and token values from the access_token file |
def prepare(self, ansi='', ensure_trailing_newline=False):
    """Load the contents of *ansi* into this object.

    Converts the ANSI text into (body, styles), optionally appending a
    trailing newline, and caches the render attributes in self._attrs,
    which is also returned.
    """
    body, styles = self.apply_regex(ansi)
    if ensure_trailing_newline and _needs_extra_newline(body):
        body += '\n'
    self._attrs = {
        'dark_bg': self.dark_bg,
        'line_wrap': self.line_wrap,
        'font_size': self.font_size,
        'body': body,
        'styles': styles,
    }
    return self._attrs | Load the contents of 'ansi' into this object |
def flatten_unique(l: Iterable) -> List:
    """Return the unique non-iterable items of *l*, flattened, in first-seen order.

    Nested iterables are flattened recursively; strings are treated as
    atomic items rather than iterables of characters.
    """
    seen = OrderedDict()
    for item in l:
        if isinstance(item, Iterable) and not isinstance(item, str):
            for sub_item in flatten_unique(item):
                seen[sub_item] = None
        else:
            seen[item] = None
    return list(seen)
def setLocation(self, x, y):
    """Set this object's location, coercing both coordinates to int.

    Returns self to allow call chaining.
    """
    self.x, self.y = int(x), int(y)
    return self
def lastId(self) -> BaseReference:
    """Return the last child's id of the current TextualNode.

    Returns None when the (known) child list is empty; raises
    NotImplementedError when child ids are not available at all.
    """
    if self.childIds is not None:
        if len(self.childIds) > 0:
            return self.childIds[-1]
        return None
    else:
        raise NotImplementedError | Last child's id of current TextualNode |
def FileHacks(self):
    """Apply platform hacks so special paths look like a normal filesystem.

    On Windows, '/' is mapped to the list of logical drives, and raw
    device paths (\\.\X) get a pseudo-infinite size and 512-byte
    alignment.  On macOS, raw /dev/disk* devices are handled the same way.
    """
    if sys.platform == "win32":
        import win32api
        if self.path == "/":
            # Enumerate logical drives; GetLogicalDriveStrings is NUL-separated.
            self.files = win32api.GetLogicalDriveStrings().split("\x00")
            self.files = [drive.rstrip("\\") for drive in self.files if drive]
        elif re.match(r"/*\\\\.\\[^\\]+\\?$", self.path) is not None:
            # Raw device: size is unknown, so use the maximum and force
            # sector-aligned access.
            self.size = 0x7fffffffffffffff
            self.path = self.path.rstrip("\\")
            self.alignment = 512
    elif sys.platform == "darwin":
        if re.match("/dev/r?disk.*", self.path):
            self.size = 0x7fffffffffffffff
            self.alignment = 512 | Hacks to make the filesystem look normal. |
def extend(self, content, zorder):
    """Append *content* (a list) to the layer stored under *zorder*."""
    self._content.setdefault(zorder, []).extend(content)
def build_stop_ids(shape_id):
    """Create the pair of stop IDs ('stp<SEP><shape_id><SEP>0' / '...1')."""
    return [cs.SEP.join(['stp', shape_id, str(i)]) for i in (0, 1)]
def build_specfile_sections(spec):
    """Build the sections of an RPM specfile from the *spec* dict.

    Mandatory sections (DESCRIPTION) are compiled first, then defaults
    are filled in for the prep/build/install/clean scriptlets before the
    optional sections are compiled.  Returns the assembled text.
    """
    str = ""
    mandatory_sections = {
        'DESCRIPTION' : '\n%%description\n%s\n\n', }
    str = str + SimpleTagCompiler(mandatory_sections).compile( spec )
    optional_sections = {
        'DESCRIPTION_'        : '%%description -l %s\n%s\n\n',
        'CHANGELOG'           : '%%changelog\n%s\n\n',
        'X_RPM_PREINSTALL'    : '%%pre\n%s\n\n',
        'X_RPM_POSTINSTALL'   : '%%post\n%s\n\n',
        'X_RPM_PREUNINSTALL'  : '%%preun\n%s\n\n',
        'X_RPM_POSTUNINSTALL' : '%%postun\n%s\n\n',
        'X_RPM_VERIFY'        : '%%verify\n%s\n\n',
        'X_RPM_PREP'          : '%%prep\n%s\n\n',
        'X_RPM_BUILD'         : '%%build\n%s\n\n',
        'X_RPM_INSTALL'       : '%%install\n%s\n\n',
        'X_RPM_CLEAN'         : '%%clean\n%s\n\n',
        }
    # Default scriptlets guard against RPM_BUILD_ROOT being '/' before rm -rf.
    if 'X_RPM_PREP' not in spec:
        spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
    if 'X_RPM_BUILD' not in spec:
        spec['X_RPM_BUILD'] = '[ ! -e "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && mkdir "$RPM_BUILD_ROOT"'
    if 'X_RPM_INSTALL' not in spec:
        spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
    if 'X_RPM_CLEAN' not in spec:
        spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
    str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )
    return str | Builds the sections of a rpm specfile. |
def _pdist(p):
    """Return a PowerLaw or ExponentialCutoffPowerLaw built from parameters *p*.

    p[:5] unpacks to (index, ref [TeV], ampl [1e30/eV], cutoff [TeV], beta);
    a zero cutoff selects the plain power law.
    """
    index, ref, ampl, cutoff, beta = p[:5]
    if cutoff == 0.0:
        pdist = models.PowerLaw(
            ampl * 1e30 * u.Unit("1/eV"), ref * u.TeV, index
        )
    else:
        pdist = models.ExponentialCutoffPowerLaw(
            ampl * 1e30 * u.Unit("1/eV"),
            ref * u.TeV,
            index,
            cutoff * u.TeV,
            beta=beta,
        )
    return pdist | Return PL or ECPL instance based on parameters p |
def connect_event_handlers(self):
    """Connect close/pause event handlers to the matplotlib figure canvas."""
    self.figure.canvas.mpl_connect('close_event', self.evt_release)
    # NOTE(review): 'pause_event' is not a standard matplotlib event name —
    # presumably a custom event emitted elsewhere in this project; verify.
    self.figure.canvas.mpl_connect('pause_event', self.evt_toggle_pause) | Connects event handlers to the figure. |
def handle_profile_save(self, sender, instance, **kwargs):
    """Custom signal handler: delegate a profile save to the owning user's save handler."""
    self.handle_save(instance.user.__class__, instance.user) | Custom handler for user profile save |
def scipy_sparse_to_spmatrix(A):
    """Convert a scipy sparse matrix to a cvxopt spmatrix via COO triplets."""
    coo = A.tocoo()
    SP = spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=A.shape)
    return SP | Efficient conversion from scipy sparse matrix to cvxopt sparse matrix |
def _get_appoptics(options):
    """Return an AppOptics connection built from 'api_token'/'api_url' options."""
    conn = appoptics_metrics.connect(
        options.get('api_token'),
        sanitizer=appoptics_metrics.sanitize_metric_name,
        hostname=options.get('api_url'))
    log.info("Connected to appoptics.")
    return conn | Return an appoptics connection object. |
def _missing_imageinfo(self):
    """Return the deduplicated list of image filenames missing a 'url'.

    NOTE(review): returns None (not an empty list) when self.data has no
    'image' key at all — callers must handle both.
    """
    if 'image' not in self.data:
        return
    missing = []
    for img in self.data['image']:
        if 'url' not in img:
            missing.append(img['file'])
    return list(set(missing)) | returns list of image filenames that are missing info |
def strip_prompt_login(path):
    """Strip 'login' from the 'prompt' query parameter of *path*.

    If removing 'login' leaves the parameter empty it is dropped
    entirely; URLs without a 'prompt' parameter are returned unchanged.
    """
    uri = urlsplit(path)
    query_params = parse_qs(uri.query)
    # BUG FIX: the original used .get('prompt', '')[0], which raised
    # IndexError on URLs without a 'prompt' parameter ('' has no [0] only
    # when empty); default to a one-element list instead.
    prompt_list = query_params.get('prompt', [''])[0].split()
    if 'login' in prompt_list:
        prompt_list.remove('login')
        query_params['prompt'] = ' '.join(prompt_list)
        if not query_params['prompt']:
            del query_params['prompt']
        uri = uri._replace(query=urlencode(query_params, doseq=True))
    return urlunsplit(uri)
def generate_output_path(args, project_path):
    """Generate a default, timestamped output directory under <project_path>/results.

    The directory name embeds seconds plus microseconds so that
    consecutive runs do not collide.  *args* is currently unused.
    """
    timestamp = time.strftime('%Y.%m.%d_%H.%M.%S', time.localtime())
    microsec = datetime.now().microsecond
    dirname = 'results_{}_{}'.format(timestamp, microsec)
    return os.path.join(project_path, 'results', dirname)
def next(self, type=None):
    """Return the next word in the sentence with the given type.

    With type=None the immediately following word is returned.  Returns
    None implicitly when no matching word follows the current index.
    """
    for candidate in self.sentence[self.index + 1:]:
        if type in (candidate.type, None):
            return candidate
def _check_import_source():
    """Check that the TLGU software corpus is present; import it if not.

    Logs and re-raises any failure during the corpus import.
    """
    path_rel = '~/cltk_data/greek/software/greek_software_tlgu/tlgu.h'
    path = os.path.expanduser(path_rel)
    if not os.path.isfile(path):
        try:
            corpus_importer = CorpusImporter('greek')
            corpus_importer.import_corpus('greek_software_tlgu')
        except Exception as exc:
            logger.error('Failed to import TLGU: %s', exc)
            raise | Check if tlgu imported, if not import it. |
async def wait_for_election_success(cls):
    """Await until the cluster has elected a leader.

    Returns immediately when a leader is already known; otherwise parks
    on a future resolved by the election machinery.
    """
    if cls.leader is None:
        cls.leader_future = asyncio.Future(loop=cls.loop)
        await cls.leader_future | Await this function if your cluster must have a leader |
def _validate_required(self, attributes):
    """Ensure all required attributes are present in *attributes*.

    Raises ValueError naming both the required set and the attributes
    that are actually missing (the original message omitted the latter,
    making failures hard to diagnose).
    """
    missing = set(self._required) - set(attributes)
    if missing:
        raise ValueError(
            "Not all required attributes fulfilled. Required: {required}. "
            "Missing: {missing}".format(required=set(self._required),
                                        missing=missing)
        )
def refresh(self):
    """Re-fetch all current network names (with ids) from the client.

    Replaces the mapping's contents with Name -> Id pairs; a missing
    client makes this a no-op.
    """
    if not self._client:
        return
    current_networks = self._client.networks()
    self.clear()
    self.update((net['Name'], net['Id'])
                for net in current_networks) | Fetches all current network names from the client, along with their id. |
def clicked(self, event):
    """Print the group name and event count of the clicked histogram bin.

    Reads the group/count/bin metadata stashed on the matplotlib artist
    (the _mt_* attributes set when the plot was built).
    """
    group = event.artist._mt_group
    n = event.artist._mt_n
    dt = num2date(event.artist._mt_bin)
    print("%4i %s events in %s sec beginning at %s"
          % (n, group, self.bucketsize, dt.strftime("%b %d %H:%M:%S"))) | Print group name and number of items in bin. |
def __parse_affiliations_yml(self, affiliations):
    """Parse an identity's affiliations from a YAML-derived list of dicts.

    Each entry needs an 'organization' name (empty raises
    InvalidFormatError; 'unknown' is skipped) and optional 'start'/'end'
    dates defaulting to the open period bounds.  Validates the resulting
    enrollment periods before returning them.
    """
    enrollments = []
    for aff in affiliations:
        name = self.__encode(aff['organization'])
        if not name:
            error = "Empty organization name"
            msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
            raise InvalidFormatError(cause=msg)
        elif name.lower() == 'unknown':
            continue
        org = Organization(name=name)
        # NOTE(review): a freshly constructed Organization is presumably
        # never None, making this guard dead code — verify and remove.
        if org is None:
            continue
        if 'start' in aff:
            start_date = self.__force_datetime(aff['start'])
        else:
            start_date = MIN_PERIOD_DATE
        if 'end' in aff:
            end_date = self.__force_datetime(aff['end'])
        else:
            end_date = MAX_PERIOD_DATE
        enrollment = Enrollment(start=start_date, end=end_date,
                                organization=org)
        enrollments.append(enrollment)
    self.__validate_enrollment_periods(enrollments)
    return enrollments | Parse identity's affiliations from a yaml dict. |
def _decorate_axes(ax, freq, kwargs):
    """Initialize *ax* for time-series plotting at the given frequency.

    Attaches plot-data/frequency bookkeeping attributes to the axes and
    appends the current label (from kwargs) to the legend label list.
    """
    if not hasattr(ax, '_plot_data'):
        ax._plot_data = []
    ax.freq = freq
    xaxis = ax.get_xaxis()
    xaxis.freq = freq
    if not hasattr(ax, 'legendlabels'):
        ax.legendlabels = [kwargs.get('label', None)]
    else:
        ax.legendlabels.append(kwargs.get('label', None))
    # Reset cached view/date info so it is recomputed for this frequency.
    ax.view_interval = None
    ax.date_axis_info = None | Initialize axes for time-series plotting |
def _get_name_map(saltenv='base'):
    """Return the map of full package names to winrepo-recognized names.

    On Python 3 the repo map is returned as-is; on Python 2 it is copied
    key-by-key (historically to normalize unicode keys).
    """
    u_name_map = {}
    name_map = get_repo_data(saltenv).get('name_map', {})
    if not six.PY2:
        return name_map
    for k in name_map:
        u_name_map[k] = name_map[k]
    return u_name_map | Return a reverse map of full pkg names to the names recognized by winrepo. |
def bundlestate_to_str(state):
    """Convert a pelix bundle state integer to its human-readable name.

    Unknown values yield "Unknown state (<value>)".
    """
    state_names = {
        pelix.Bundle.INSTALLED: "INSTALLED",
        pelix.Bundle.ACTIVE: "ACTIVE",
        pelix.Bundle.RESOLVED: "RESOLVED",
        pelix.Bundle.STARTING: "STARTING",
        pelix.Bundle.STOPPING: "STOPPING",
        pelix.Bundle.UNINSTALLED: "UNINSTALLED",
    }
    try:
        return state_names[state]
    except KeyError:
        return "Unknown state ({0})".format(state)
def layout(self, rect=None, width=0, height=0, fontsize=11):
    """Re-layout a reflowable document (e.g. EPUB) to the given geometry.

    Raises ValueError on closed or encrypted documents; page references
    are invalidated and internal data re-initialized afterwards.
    """
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    val = _fitz.Document_layout(self, rect, width, height, fontsize)
    # Old page objects are stale after a re-layout.
    self._reset_page_refs()
    self.initData()
    return val | Re-layout a reflowable document. |
def setter_generator(field_name):
    """Generate a set_<field_name> method for a translated model field.

    The generated method stores *value* on the translation object for the
    given language code (creating it when needed).
    """
    def set_translation_field(cls, value, language_code=None):
        translation = cls.get_translation(language_code, True)
        setattr(translation, field_name, value)
    set_translation_field.short_description = "set " + field_name
    return set_translation_field
def convert_time(time):
    """Convert a 12-hour time string (e.g. '3:30 p.m.') into 24-hour 'HH:MM'.

    Inputs without an am/pm part are returned unchanged.  Both 'H:MM xm'
    and bare 'H xm' forms are accepted.
    """
    split_time = time.split()
    try:
        # Normalize 'p.m.' / 'a.m.' to 'pm' / 'am'.
        am_pm = split_time[1].replace('.', '')
        time_str = '{0} {1}'.format(split_time[0], am_pm)
    except IndexError:
        # No am/pm marker: nothing to convert.
        return time
    try:
        time_obj = datetime.strptime(time_str, '%I:%M %p')
    except ValueError:
        time_obj = datetime.strptime(time_str, '%I %p')
    # BUG FIX: the original formatted with '%H:%M %p', producing output
    # like '15:30 PM' — an am/pm suffix is meaningless in 24-hour time.
    return time_obj.strftime('%H:%M')
def api_walk(uri, per_page=100, key="login"):
    """Walk all pages of a paginated GitHub API URI until content runs out.

    Collects *key* from each response item (with special handling for
    user logins) and returns the deduplicated values.
    NOTE(review): list(set(...)) discards the original ordering.
    """
    page = 1
    result = []
    while True:
        response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page))
        if len(response) == 0:
            break
        else:
            page += 1
        for r in response:
            if key == USER_LOGIN:
                result.append(user_login(r))
            else:
                result.append(r[key])
    return list(set(result)) | For a GitHub URI, walk all the pages until there's no more content |
def add_checkpoint_file(self, filename):
    """Add *filename* as a checkpoint file for this DAG job (no duplicates)."""
    if filename not in self.__checkpoint_files:
        self.__checkpoint_files.append(filename) | Add filename as a checkpoint file for this DAG job. |
def complete(text, state):
    """Readline completer: on tab press, return the next possible completion.

    state == 0 (re)computes the candidate list: control commands for
    ':'-prefixed lines, otherwise local paths, history words and — at the
    start of the line — executables on PATH.  A leading '!' is stripped
    for matching and restored on the results.  Returns None when the
    candidates are exhausted.
    """
    global completion_results
    if state == 0:
        line = readline.get_line_buffer()
        if line.startswith(':'):
            completion_results = complete_control_command(line, text)
        else:
            if line.startswith('!') and text and line.startswith(text):
                dropped_exclam = True
                text = text[1:]
            else:
                dropped_exclam = False
            completion_results = []
            completion_results += complete_local_path(text)
            l = len(text)
            completion_results += [w + ' ' for w in history_words if
                                   len(w) > l and w.startswith(text)]
            # Only offer PATH commands when completing the first word.
            if readline.get_begidx() == 0:
                completion_results += [w + ' ' for w in user_commands_in_path
                                       if len(w) > l and w.startswith(text)]
            completion_results = remove_dupes(completion_results)
            if dropped_exclam:
                completion_results = ['!' + r for r in completion_results]
    if state < len(completion_results):
        return completion_results[state]
    completion_results = None
    return None | On tab press, return the next possible completion |
def handle_input(self, proxy, event_type, event, refcon):
    """Handle a macOS Quartz input event and forward it down the pipe.

    Button-like event types are handled first, scroll wheels (type 22)
    separately; absolute and relative motion are always processed, and a
    sync marker terminates the batch written to the pipe.
    """
    self.update_timeval()
    self.events = []
    # 1-4: mouse button down/up; 25-27: other button events.
    if event_type in (1, 2, 3, 4, 25, 26, 27):
        self.handle_button(event, event_type)
    if event_type == 22:
        self.handle_scrollwheel(event)
    self.handle_absolute(event)
    self.handle_relative(event)
    self.events.append(self.sync_marker(self.timeval))
    self.write_to_pipe(self.events) | Handle an input event. |
def identify_pycbc_live(origin, filepath, fileobj, *args, **kwargs):
    """Identify a PyCBC Live file: an HDF5 file whose basename matches PYCBC_FILENAME."""
    if identify_hdf5(origin, filepath, fileobj, *args, **kwargs) and (
            filepath is not None and PYCBC_FILENAME.match(basename(filepath))):
        return True
    return False | Identify a PyCBC Live file as an HDF5 with the correct name |
def _parse_regr_response(self, response, uri=None, new_authzr_uri=None,
                         terms_of_service=None):
    """Parse an ACME registration response into a RegistrationResource.

    Header links override the terms-of-service and new-authorization
    URIs; a missing 'next' link (when no fallback was given) raises
    ClientError.  Returns a deferred JSON decode chained into the
    resource constructor.
    """
    links = _parse_header_links(response)
    if u'terms-of-service' in links:
        terms_of_service = links[u'terms-of-service'][u'url']
    if u'next' in links:
        new_authzr_uri = links[u'next'][u'url']
    if new_authzr_uri is None:
        raise errors.ClientError('"next" link missing')
    return (
        response.json()
        .addCallback(
            lambda body:
                messages.RegistrationResource(
                    body=messages.Registration.from_json(body),
                    uri=self._maybe_location(response, uri=uri),
                    new_authzr_uri=new_authzr_uri,
                    terms_of_service=terms_of_service))
    ) | Parse a registration response from the server. |
def string_to_sign(self):
    """Build the AWS SigV4 "string to sign" for this request.

    Joins the algorithm tag, request timestamp, credential scope and the
    hex SHA-256 of the canonical request with newlines.
    """
    hashed_request = sha256(self.canonical_request.encode("utf-8")).hexdigest()
    return "\n".join((AWS4_HMAC_SHA256,
                      self.request_timestamp,
                      self.credential_scope,
                      hashed_request))
def _clean_cleaned_data(self):
    """Remove auxiliary fields ('_position', '_ref_node_id') from cleaned_data.

    Returns (position_type, reference_node_id); the reference node id
    defaults to 0 when absent.
    """
    reference_node_id = self.cleaned_data.pop('_ref_node_id', 0)
    position_type = self.cleaned_data.pop('_position')
    return position_type, reference_node_id
def idf2txt(txt):
    """Convert IDF text to a simple normalized text form.

    Strips comments, splits into ';'-terminated objects and ','-separated
    fields, trims/parses each field, sorts the objects, and re-emits one
    field per line with ',' separators and ';' terminators.
    """
    astr = nocomment(txt)
    objs = astr.split(';')
    objs = [obj.split(',') for obj in objs]
    objs = [[line.strip() for line in obj] for obj in objs]
    objs = [[_tofloat(line) for line in obj] for obj in objs]
    # Tuples so the object list is sortable/stable.
    objs = [tuple(obj) for obj in objs]
    objs.sort()
    lst = []
    for obj in objs:
        for field in obj[:-1]:
            lst.append('%s,' % (field, ))
        lst.append('%s;\n' % (obj[-1], ))
    return '\n'.join(lst) | convert the idf text to a simple text |
def pre_operations(self, mode=None):
    """Return the pre-operations of the version mode selected by *mode*."""
    selected_mode = self._get_version_mode(mode=mode)
    return selected_mode.pre_operations
def arg_int(name, default=None):
    """Fetch request query argument *name* as an int, or *default* on failure.

    TypeError covers a missing argument (None); ValueError covers
    non-numeric values.
    """
    try:
        v = request.args.get(name)
        return int(v)
    except (ValueError, TypeError):
        return default | Fetch a query argument, as an integer. |
def kill_clients():
    """Force-kill drivers (and timers/semaphores) for particular clients.

    POST form field 'clients' is a comma-separated list; the 'kill_dead'
    query flag additionally reaps drivers that are no longer logged in.
    Returns the remaining active clients.
    """
    clients = request.form.get('clients').split(',')
    kill_dead = request.args.get('kill_dead', default=False)
    kill_dead = kill_dead and kill_dead in ['true', '1']
    if not kill_dead and not clients:
        return jsonify({'Error': 'no clients provided'})
    for client in list(drivers.keys()):
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # (kill_dead and not logged_in) or (client in clients) — confirm
        # that is the intended grouping.
        if kill_dead and not drivers[client].is_logged_in() or client in clients:
            drivers.pop(client).quit()
            try:
                timers[client].stop()
                timers[client] = None
                release_semaphore(client)
                semaphores[client] = None
            # NOTE(review): bare except silently swallows all cleanup
            # errors (including KeyboardInterrupt) — narrow if possible.
            except:
                pass
    return get_active_clients() | Force kill driver and other objects for a perticular clien |
def generate_additional_context(self, matching_datasets):
    """Return template context with the top three tags for the matched datasets."""
    top_tags = Tag.objects.filter(
        dataset__in=matching_datasets
    ).annotate(
        tag_count=Count('word')
    ).order_by('-tag_count')[:3]
    return {
        'top_tags': top_tags
    } | Return top tags for a source. |
def _request_toc_element(self, index):
    """Request information about TOC item *index* over CRTP.

    Uses the V2 item command (16-bit little-endian index split across two
    bytes) when the protocol supports it, else the legacy single-byte
    command; the expected reply mirrors the request payload.
    """
    logger.debug('Requesting index %d on port %d', index, self.port)
    pk = CRTPPacket()
    if self._useV2:
        pk.set_header(self.port, TOC_CHANNEL)
        pk.data = (CMD_TOC_ITEM_V2, index & 0x0ff, (index >> 8) & 0x0ff)
        self.cf.send_packet(pk, expected_reply=(
            CMD_TOC_ITEM_V2, index & 0x0ff, (index >> 8) & 0x0ff))
    else:
        pk.set_header(self.port, TOC_CHANNEL)
        pk.data = (CMD_TOC_ELEMENT, index)
        self.cf.send_packet(pk, expected_reply=(CMD_TOC_ELEMENT, index)) | Request information about a specific item in the TOC |
def sys_mem_limit(self):
    """Determine the default memory limit for the current service unit.

    armv7l machines get 2700M; everything else gets 4G.
    """
    if platform.machine() in ['armv7l']:
        _mem_limit = self.human_to_bytes('2700M')
    else:
        # Default to 4GB for all other architectures.
        _mem_limit = self.human_to_bytes('4G')
    return _mem_limit | Determine the default memory limit for the current service unit. |
def close(self, filehandle):
    """Release a reference to *filehandle*, closing LRU handles over the cap.

    Decrements the handle's refcount, then walks the history list and
    closes zero-refcount handles until the open count fits self.size.
    Note: the `filehandle` parameter is reused as the loop variable below.
    """
    with self.lock:
        if filehandle in self.files:
            self.files[filehandle] -= 1
        index = 0
        size = len(self.past)
        while size > self.size and index < size:
            filehandle = self.past[index]
            if self.files[filehandle] == 0:
                # No more users: actually close and forget this handle.
                filehandle.close()
                del self.files[filehandle]
                del self.past[index]
                size -= 1
            else:
                index += 1 | Close openend file if no longer used. |
def _swaplch(LCH):
    """Reverse the order of an LCH numpy dstack or tuple for analysis.

    A 3-plane ndarray is re-stacked as (H, C, L); anything else is
    treated as an (L, C, H) sequence and returned as an (H, C, L) tuple.
    """
    try:
        L, C, H = np.dsplit(LCH, 3)
        return np.dstack((H, C, L))
    # BUG FIX: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; Exception keeps the fallback intact.
    except Exception:
        L, C, H = LCH
        return H, C, L
def _pull_content_revision_parent(self):
    """Fetch and cache page content, revision id and parent id in one request.

    Only queries MediaWiki when no revision id is cached yet; returns the
    (content, revision_id, parent_id) triple either way.
    """
    if self._revision_id is None:
        query_params = {
            "prop": "extracts|revisions",
            "explaintext": "",
            "rvprop": "ids",
        }
        query_params.update(self.__title_query_param())
        request = self.mediawiki.wiki_request(query_params)
        page_info = request["query"]["pages"][self.pageid]
        self._content = page_info["extract"]
        self._revision_id = page_info["revisions"][0]["revid"]
        self._parent_id = page_info["revisions"][0]["parentid"]
    return self._content, self._revision_id, self._parent_id | combine the pulling of these three properties |
def load_data(self, filename, *args, **kwargs):
    """Load parameterized data spread across multiple XLS sheets.

    Attaches the parameter values (with units) under the parameter name,
    then concatenates each per-sheet data column ('<key>_<n>') into a
    single unit-tagged array per key.
    """
    data = super(ParameterizedXLS, self).load_data(filename)
    parameter_name = self.parameterization['parameter']['name']
    parameter_values = self.parameterization['parameter']['values']
    parameter_units = str(self.parameterization['parameter']['units'])
    data[parameter_name] = parameter_values * UREG(parameter_units)
    num_sheets = len(self.parameterization['parameter']['sheets'])
    for key in self.parameterization['data']:
        units = str(self.parameterization['data'][key].get('units')) or ''
        datalist = []
        # NOTE: xrange means this module targets Python 2.
        for n in xrange(num_sheets):
            k = key + '_' + str(n)
            datalist.append(data[k].reshape((1, -1)))
            data.pop(k)
        data[key] = np.concatenate(datalist, axis=0) * UREG(units)
    return data | Load parameterized data from different sheets. |
def getContactByUsername(cls, username):
    """Convenience classmethod returning the Contact bound to *username*.

    Returns None when no contact matches.  NOTE(review): when multiple
    contacts match, an error is logged and a LIST of objects is returned
    instead of a single one — callers must handle both shapes.
    """
    pc = api.portal.get_tool("portal_catalog")
    contacts = pc(portal_type=cls.portal_type,
                  getUsername=username)
    if len(contacts) == 0:
        return None
    if len(contacts) > 1:
        logger.error("User '{}' is bound to multiple Contacts '{}'".format(
            username, ",".join(map(lambda c: c.Title, contacts))))
        return map(lambda x: x.getObject(), contacts)
    return contacts[0].getObject() | Convenience Classmethod which returns a Contact by a Username |
def _get_entities(self, user, ids):
    """Return the Entity queryset for *ids* that *user* may view.

    Raises ParseError listing any requested ids that are missing or not
    visible to the user.
    """
    queryset = get_objects_for_user(user, 'view_entity', Entity.objects.filter(id__in=ids))
    actual_ids = queryset.values_list('id', flat=True)
    missing_ids = list(set(ids) - set(actual_ids))
    if missing_ids:
        raise exceptions.ParseError(
            "Entities with the following ids not found: {}" .format(', '.join(map(str, missing_ids)))
        )
    return queryset | Return entities queryset based on provided entity ids. |
def clone(src, **kwargs):
    """Shallow-clone *src*, optionally overriding attributes via keyword args.

    The clone is created without running __init__, then its __dict__ is
    populated from the source's attributes merged with the overrides.
    """
    duplicate = object.__new__(type(src))
    duplicate.__dict__.update({**src.__dict__, **kwargs})
    return duplicate
def _is_allowed_command(self, command):
    """Raise InvalidCommand if *command* is not allowed on this endpoint."""
    cmds = self._meta_data['allowed_commands']
    if command not in self._meta_data['allowed_commands']:
        error_message = "The command value {0} does not exist. " \
            "Valid commands are {1}".format(command, cmds)
        raise InvalidCommand(error_message) | Checking if the given command is allowed on a given endpoint. |
def add_new_devices_callback(self, callback):
    """Register *callback* to be invoked when new devices are added."""
    self._new_devices_callbacks.append(callback)
    _LOGGER.debug('Added new devices callback to %s', callback) | Register as callback for when new devices are added. |
def create_router(self, context, router):
    """Create a new router in the DB, then create it on the Arista HW.

    On a hardware failure the DB entry is rolled back (deleted) and the
    original exception re-raised.
    """
    new_router = super(AristaL3ServicePlugin, self).create_router(
        context,
        router)
    try:
        self.driver.create_router(context, new_router)
        return new_router
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error creating router on Arista HW router=%s "),
                      new_router)
            # Compensating delete keeps DB and hardware consistent.
            super(AristaL3ServicePlugin, self).delete_router(
                context,
                new_router['id']
            ) | Create a new router entry in DB, and create it Arista HW. |
def _update_config_file(username, password, email, url, config_path):
try:
config = json.load(open(config_path, "r"))
except ValueError:
config = dict()
if not config.get('auths'):
config['auths'] = dict()
if not config['auths'].get(url):
config['auths'][url] = dict()
encoded_credentials = dict(
auth=base64.b64encode(username + b':' + password),
email=email
)
config['auths'][url] = encoded_credentials
try:
json.dump(config, open(config_path, "w"), indent=5, sort_keys=True)
except Exception as exc:
raise exceptions.AnsibleContainerConductorException(
u"Failed to write registry config to {0} - {1}".format(config_path, exc)
) | Update the config file with the authorization. |
def on_menu_clear_interpretation(self, event):
    """Discard every per-specimen interpretation and reset the GUI.

    For each specimen, ``pars`` is rebuilt from scratch keeping only the
    lab DC field and the identifying names; the aggregated sample/site
    results and the tmin/tmax selection widgets are cleared, then the
    current specimen's figure is redrawn.
    """
    for sp in list(self.Data.keys()):
        # Drop the old interpretation dict entirely before rebuilding it.
        del self.Data[sp]['pars']
        self.Data[sp]['pars'] = {}
        # Preserve the metadata needed to re-interpret this specimen.
        self.Data[sp]['pars']['lab_dc_field'] = self.Data[sp]['lab_dc_field']
        self.Data[sp]['pars']['er_specimen_name'] = self.Data[sp]['er_specimen_name']
        self.Data[sp]['pars']['er_sample_name'] = self.Data[sp]['er_sample_name']
    # Aggregated results are stale once specimen interpretations are gone.
    self.Data_samples = {}
    self.Data_sites = {}
    self.tmin_box.SetValue("")
    self.tmax_box.SetValue("")
    self.clear_boxes()
    self.draw_figure(self.s) | clear all current interpretations. |
def _job_to_text(self, job):
    """Render a serialized job dict as human-readable multi-line text."""
    scheduled = self._format_date(job.get('next_run', None))
    # Each task rendering is separated by a blank line.
    task_text = ''.join(
        self._task_to_text(task) + '\n\n' for task in job.get('tasks', []))
    lines = [
        'Job name: %s' % job.get('name', None),
        'Cron schedule: %s' % job.get('cron_schedule', None),
        'Next run: %s' % scheduled,
        '',
        'Parent ID: %s' % job.get('parent_id', None),
        'Job ID: %s' % job.get('job_id', None),
        '',
        'Tasks Detail',
        '',
        task_text,
    ]
    return '\n'.join(lines)
def main():
    """Send a Slack notification to a configured channel.

    Parses CLI arguments and posts the configured Slack message, but
    only when the target environment looks like production.
    """
    logging.basicConfig(format=LOGGING_FORMAT)
    log = logging.getLogger(__name__)

    parser = argparse.ArgumentParser()
    add_debug(parser)
    add_app(parser)
    add_env(parser)
    add_properties(parser)
    args = parser.parse_args()

    logging.getLogger(__package__.split(".")[0]).setLevel(args.debug)
    log.debug('Parsed arguements: %s', args)

    if "prod" in args.env:
        log.info("Sending slack message, production environment")
        slacknotify = SlackNotification(app=args.app, env=args.env, prop_path=args.properties)
        slacknotify.post_message()
    else:
        log.info('No slack message sent, not a production environment')
def disconnect(self):
    """Stop and close the kazoo Zookeeper client connection."""
    logger.info("Disconnecting from Zookeeper.")
    # stop() ends the client's connection loop; close() releases resources.
    self.client.stop()
    self.client.close() | Stops and closes the kazoo connection. |
def get(self):
    """Render the List-of-Analyses overview page (index.html)."""
    # self.info is expanded into extra template variables alongside the
    # version string and per-analysis metadata.
    return self.render(
        'index.html',
        databench_version=DATABENCH_VERSION,
        meta_infos=self.meta_infos(),
        **self.info
    ) | Render the List-of-Analyses overview page. |
def _Start_refresh_timer(self):
if self._refreshPeriod > 60:
interval = self._refreshPeriod - 60
else:
interval = 60
self._refreshTimer = Timer(self._refreshPeriod, self.Refresh)
self._refreshTimer.setDaemon(True)
self._refreshTimer.start() | Internal method to support auto-refresh functionality. |
def strtype(self):
    """Return a string naming this element's type and kind.

    Formats as ``dtype(kind)`` when a kind is set, otherwise just the
    bare dtype.
    """
    if self.kind is None:
        return self.dtype
    return "{}({})".format(self.dtype, self.kind)
def load_shared_data(path: typing.Union[str, None]) -> dict:
    """Load shared data from a JSON file stored on disk.

    :param path: path of the JSON file, or None for no shared data
    :return: the loaded dictionary (empty when *path* is None)
    :raises FileNotFoundError: when *path* does not exist
    :raises IOError: when the file cannot be read or parsed as JSON
    :raises ValueError: when the JSON document is not an object
    """
    if path is None:
        return {}
    if not os.path.exists(path):
        raise FileNotFoundError('No such shared data file "{}"'.format(path))
    try:
        with open(path, 'r') as fp:
            data = json.load(fp)
    except Exception as error:
        # Fix: chain the original error so the root cause (bad JSON,
        # permission problem, ...) is not lost from the traceback.
        raise IOError(
            'Unable to read shared data file "{}"'.format(path)
        ) from error
    if not isinstance(data, dict):
        raise ValueError('Shared data must load into a dictionary object')
    return data
def allowed_values(self):
    """Return the list of allowed values, fetching and caching it lazily.

    Raw values from the scraper are wrapped in DimensionValue unless
    they already are instances of it.
    """
    if self._allowed_values is None:
        self._allowed_values = ValueList()
        for raw in self.scraper._fetch_allowed_values(self):
            if isinstance(raw, DimensionValue):
                wrapped = raw
            else:
                wrapped = DimensionValue(raw, Dimension())
            self._allowed_values.append(wrapped)
    return self._allowed_values
def _shutdown(self):
    """Private method.  Tear down this SConf context.

    Closes the log stream, removes the temporary SConfSourceBuilder
    from the environment, deactivates the context, records the
    generated config.h text, and restores the environment's original
    file-system object.
    """
    global sconf_global, _ac_config_hs
    if not self.active:
        raise SCons.Errors.UserError("Finish may be called only once!")
    if self.logstream is not None and not dryrun:
        self.logstream.write("\n")
        self.logstream.close()
        self.logstream = None
    # Remove the builder that was installed for the configure checks.
    blds = self.env['BUILDERS']
    del blds['SConfSourceBuilder']
    self.env.Replace( BUILDERS=blds )
    self.active = 0
    sconf_global = None
    if not self.config_h is None:
        # Remember the config.h text so it can be written out later.
        _ac_config_hs[self.config_h] = self.config_h_text
    self.env.fs = self.lastEnvFs | Private method. Reset to non-piped spawn |
def my_archieve(self):
    """Return the my_archieve attribute of the BFD file being processed.

    Raises BfdException when the underlying BFD pointer has not been
    initialized.
    """
    if self._ptr:
        return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.MY_ARCHIEVE)
    raise BfdException("BFD not initialized")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.