code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def box_text(text, width, offset=0):
box = " " * offset + "-" * (width+2) + "\n"
box += " " * offset + "|" + text.center(width) + "|" + "\n"
box += " " * offset + "-" * (width+2)
return box | Return text inside an ascii textbox |
def match_comment(self):
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False | matches the multiline version of a comment |
def _change_kind(self, post_uid):
post_data = self.get_post_data()
logger.info('admin post update: {0}'.format(post_data))
MPost.update_misc(post_uid, kind=post_data['kcat'])
update_category(post_uid, post_data)
self.redirect('/{0}/{1}'.format(router_post[post_data['kcat']], post_uid)) | To modify the category of the post, and kind. |
def acknowledge_streamer(self, index, ack, force):
if index >= len(self.graph.streamers):
return _pack_sgerror(SensorGraphError.STREAMER_NOT_ALLOCATED)
old_ack = self.streamer_acks.get(index, 0)
if ack != 0:
if ack <= old_ack and not force:
return _pack_sgerror(SensorGraphError.OLD_ACKNOWLEDGE_UPDATE)
self.streamer_acks[index] = ack
current_ack = self.streamer_acks.get(index, 0)
return self._seek_streamer(index, current_ack) | Acknowledge a streamer value as received from the remote side. |
def merge_nested_environment_dicts(env_dicts, env_name=None, env_root=None):
if all(isinstance(val, (six.string_types, list))
for (_key, val) in env_dicts.items()):
return flatten_path_lists(env_dicts, env_root)
if env_name is None:
if env_dicts.get('*'):
return flatten_path_lists(env_dicts.get('*'), env_root)
raise AttributeError("Provided config key:val pairs %s aren't usable with no environment provided" % env_dicts)
if not env_dicts.get('*') and not env_dicts.get(env_name):
raise AttributeError("Provided config key:val pairs %s aren't usable with environment %s" % (env_dicts, env_name))
combined_dicts = merge_dicts(env_dicts.get('*', {}),
env_dicts.get(env_name, {}))
return flatten_path_lists(combined_dicts, env_root) | Return single-level dictionary from dictionary of dictionaries. |
def match(tgt, opts=None):
if not opts:
opts = __opts__
if HAS_RANGE:
range_ = seco.range.Range(opts['range_server'])
try:
return opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: %s', exc)
return False
return False | Matches based on range cluster |
def register_foreign_device(self, addr, ttl):
if _debug: BIPBBMD._debug("register_foreign_device %r %r", addr, ttl)
if isinstance(addr, Address):
pass
elif isinstance(addr, str):
addr = Address(addr)
else:
raise TypeError("addr must be a string or an Address")
for fdte in self.bbmdFDT:
if addr == fdte.fdAddress:
break
else:
fdte = FDTEntry()
fdte.fdAddress = addr
self.bbmdFDT.append( fdte )
fdte.fdTTL = ttl
fdte.fdRemain = ttl + 5
return 0 | Add a foreign device to the FDT. |
def _write(self, stream, text, byte_order):
if text:
self._write_txt(stream)
else:
if self._have_list:
self._write_bin(stream, byte_order)
else:
stream.write(self.data.astype(self.dtype(byte_order),
copy=False).data) | Write the data to a PLY file. |
def open(self, filename, mode='r', bufsize=-1):
sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize) | Open a file on the remote system and return a file-like object. |
def setup_buttons(self):
self.fromcursor_btn = create_toolbutton(
self, icon=ima.icon('fromcursor'), tip=_('Go to cursor position'),
triggered=self.treewidget.go_to_cursor_position)
buttons = [self.fromcursor_btn]
for action in [self.treewidget.collapse_all_action,
self.treewidget.expand_all_action,
self.treewidget.restore_action,
self.treewidget.collapse_selection_action,
self.treewidget.expand_selection_action]:
buttons.append(create_toolbutton(self))
buttons[-1].setDefaultAction(action)
return buttons | Setup the buttons of the outline explorer widget toolbar. |
def to_dict(self):
return {'id': self.m_key + "_" + str(self.id_knwKB),
'key': self.m_key,
'value': self.m_value,
'kbid': self.kb.id if self.kb else None,
'kbname': self.kb.name if self.kb else None} | Return a dict representation of KnwKBRVAL. |
def table_path(cls, project, instance, table):
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/tables/{table}",
project=project,
instance=instance,
table=table,
) | Return a fully-qualified table string. |
def to_plot_units(self, data, dtype_vert=False):
if dtype_vert == 'vert_av' or not dtype_vert:
conv_factor = self.units.plot_units_conv
elif dtype_vert == ('vert_int'):
conv_factor = self.units.vert_int_plot_units_conv
else:
raise ValueError("dtype_vert value `{0}` not recognized. Only "
"bool(dtype_vert) = False, 'vert_av', and "
"'vert_int' supported.".format(dtype_vert))
if isinstance(data, dict):
return {key: val*conv_factor for key, val in data.items()}
return data*conv_factor | Convert the given data to plotting units. |
def success(self, objects, message=None):
if self.is_context_only(objects):
return self.redirect(message=_("Changes saved."))
ids = map(api.get_id, objects)
if not message:
message = _("Saved items: {}").format(", ".join(ids))
return self.redirect(message=message) | Redirects the user to success page with informative message |
def _load_meta_cache(self):
try:
if self._should_invalidate_cache():
os.remove(self._cache_filename)
else:
with open(self._cache_filename, 'rb') as f:
self._document_meta = compat.pickle.load(f)
except (OSError, IOError, compat.pickle.PickleError,
ImportError, AttributeError):
pass | Try to load metadata from file. |
def story_archive(request, slug, template_name='staffmembers/story_archive.html'):
member = get_object_or_404(StaffMember, slug__iexact=slug, is_active=True)
stories = []
if hasattr(member, 'story_set'):
from story.settings import PUBLISHED_STATUS
stories = member.story_set.filter(publish_date__lte=datetime.now()
).filter(status__exact=PUBLISHED_STATUS
).order_by('-publish_date')
return render_to_response(template_name,
{'member': member,
'stories': stories},
context_instance=RequestContext(request)) | Return the list of stories written by this staff member |
def accept_quality(accept, default=1):
quality = default
if accept and ";" in accept:
accept, rest = accept.split(";", 1)
accept_quality = RE_ACCEPT_QUALITY.search(rest)
if accept_quality:
quality = float(accept_quality.groupdict().get('quality', quality).strip())
return (quality, accept.strip()) | Separates out the quality score from the accepted content_type |
def directory_files(path):
for entry in os.scandir(path):
if not entry.name.startswith('.') and entry.is_file():
yield entry.name | Yield directory file names. |
def detect_vcs():
location = os.path.abspath('.')
while True:
for vcs in Git, Mercurial, Bazaar, Subversion:
if vcs.detect(location):
return vcs
parent = os.path.dirname(location)
if parent == location:
raise Failure("Couldn't find version control data"
" (git/hg/bzr/svn supported)")
location = parent | Detect the version control system used for the current directory. |
def local_bifurcation_angles(neurites, neurite_type=NeuriteType.all):
return map_sections(_bifurcationfunc.local_bifurcation_angle,
neurites,
neurite_type=neurite_type,
iterator_type=Tree.ibifurcation_point) | Get a list of local bifurcation angles in a collection of neurites |
def validate(self, instance, value):
if isinstance(value, datetime.datetime):
return value
if not isinstance(value, string_types):
self.error(
instance=instance,
value=value,
extra='Cannot convert non-strings to datetime.',
)
try:
return self.from_json(value)
except ValueError:
self.error(
instance=instance,
value=value,
extra='Invalid format for converting to datetime.',
) | Check if value is a valid datetime object or JSON datetime string |
def _SendTerminationMessage(self, status=None):
if not self.runner_args.request_state.session_id:
return
if status is None:
status = rdf_flows.GrrStatus()
client_resources = self.context.client_resources
user_cpu = client_resources.cpu_usage.user_cpu_time
sys_cpu = client_resources.cpu_usage.system_cpu_time
status.cpu_time_used.user_cpu_time = user_cpu
status.cpu_time_used.system_cpu_time = sys_cpu
status.network_bytes_sent = self.context.network_bytes_sent
status.child_session_id = self.session_id
request_state = self.runner_args.request_state
request_state.response_count += 1
msg = rdf_flows.GrrMessage(
session_id=request_state.session_id,
request_id=request_state.id,
response_id=request_state.response_count,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
type=rdf_flows.GrrMessage.Type.STATUS,
payload=status)
self.queue_manager.QueueResponse(msg)
self.QueueNotification(session_id=request_state.session_id) | This notifies the parent flow of our termination. |
def read_kioslaverc (kde_config_dir):
data = {}
filename = os.path.join(kde_config_dir, "kioslaverc")
with open(filename) as fd:
for line in fd:
line = line.rstrip()
if line.startswith('['):
in_proxy_settings = line.startswith("[Proxy Settings]")
elif in_proxy_settings:
if '=' not in line:
continue
key, value = line.split('=', 1)
key = key.strip()
value = value.strip()
if not key:
continue
key = loc_ro.sub("", key).strip()
if not key:
continue
add_kde_setting(key, value, data)
resolve_kde_settings(data)
return data | Read kioslaverc into data dictionary. |
def render_to_response(self, form, **kwargs):
page = self.get_page(form)
data = {
'iTotalRecords': page.paginator.count,
'iTotalDisplayRecords': page.paginator.count,
'sEcho': form.cleaned_data['sEcho'],
'aaData': self.get_rows(page.object_list),
}
return self.json_response(data) | Render Datatables expected JSON format |
def add_network_to_dhcp_agent(self, dhcp_agent, body=None):
return self.post((self.agent_path + self.DHCP_NETS) % dhcp_agent,
body=body) | Adds a network to dhcp agent. |
def mean_crossings(X):
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
mnx = np.zeros((N, D))
for i in range(D):
pos = X[:, :, i] > 0
npos = ~pos
c = (pos[:, :-1] & npos[:, 1:]) | (npos[:, :-1] & pos[:, 1:])
mnx[:, i] = np.count_nonzero(c, axis=1)
return mnx | Computes number of mean crossings for each variable in a segmented time series |
def can_execute(self):
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps) | True if we can execute the callback. |
def path(self):
out = self.datetime.strftime("%Y-%m-%d")
out += " "
ssm = (
self.datetime - self.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
).total_seconds()
out += str(int(ssm)).zfill(5)
return out | Timestamp for placing into filepaths. |
def int32(x):
if x>0xFFFFFFFF:
raise OverflowError
if x>0x7FFFFFFF:
x=int(0x100000000-x)
if x<2147483648:
return -x
else:
return -2147483648
return x | Return a signed or unsigned int |
def clean_slug(self):
source = self.cleaned_data.get('slug', '')
lang_choice = self.language_code
if not source:
source = slugify(self.cleaned_data.get('title', ''))
qs = Post._default_manager.active_translations(lang_choice).language(lang_choice)
used = list(qs.values_list('translations__slug', flat=True))
slug = source
i = 1
while slug in used:
slug = '%s-%s' % (source, i)
i += 1
return slug | Generate a valid slug, in case the given one is taken |
def split_vcf(in_file, ref_file, config, out_dir=None):
if out_dir is None:
out_dir = os.path.join(os.path.dirname(in_file), "split")
out_files = []
with open(ref.fasta_idx(ref_file, config)) as in_handle:
for line in in_handle:
chrom, size = line.split()[:2]
out_file = os.path.join(out_dir,
os.path.basename(replace_suffix(append_stem(in_file, "-%s" % chrom), ".vcf")))
subset_vcf(in_file, (chrom, 0, size), out_file, config)
out_files.append(out_file)
return out_files | Split a VCF file into separate files by chromosome. |
def next_tzolkin_haab(tzolkin, haab, jd):
haabcount = _haab_count(*to_haab(jd))
haab_desired_count = _haab_count(*haab)
haab_days = (haab_desired_count - haabcount) % 365
possible_haab = set(h + haab_days for h in range(0, 18980, 365))
tzcount = _tzolkin_count(*to_tzolkin(jd))
tz_desired_count = _tzolkin_count(*tzolkin)
tzolkin_days = (tz_desired_count - tzcount) % 260
possible_tz = set(t + tzolkin_days for t in range(0, 18980, 260))
try:
return possible_tz.intersection(possible_haab).pop() + jd
except KeyError:
raise IndexError("That Haab'-Tzolk'in combination isn't possible") | For a given haab-tzolk'in combination, and a Julian day count, find the next occurrance of the combination after the date |
def calcTm(seq, mv_conc=50, dv_conc=0, dntp_conc=0.8, dna_conc=50,
max_nn_length=60, tm_method='santalucia',
salt_corrections_method='santalucia'):
tm_meth = _tm_methods.get(tm_method)
if tm_meth is None:
raise ValueError('{} is not a valid tm calculation method'.format(
tm_method))
salt_meth = _salt_corrections_methods.get(salt_corrections_method)
if salt_meth is None:
raise ValueError('{} is not a valid salt correction method'.format(
salt_corrections_method))
args = [pjoin(PRIMER3_HOME, 'oligotm'),
'-mv', str(mv_conc),
'-dv', str(dv_conc),
'-n', str(dntp_conc),
'-d', str(dna_conc),
'-tp', str(tm_meth),
'-sc', str(salt_meth),
seq]
tm = subprocess.check_output(args, stderr=DEV_NULL,
env=os.environ)
return float(tm) | Return the tm of `seq` as a float. |
def authorize(self):
with salt.utils.files.fopen(self.path, 'w+') as fp_:
fp_.write(str(int(time.time())))
return True | Prepare the master to expect a signing request |
def prompt_terminal(path):
def rlinput(prompt_msg, prefill=''):
import readline
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt_msg)
finally:
readline.set_startup_hook()
filepath, extension = os.path.splitext(path)
basename = os.path.basename(filepath)
dirname = os.path.dirname(filepath)
new_basename = rlinput('Filename: ', basename)
while os.path.exists(os.path.join(dirname, new_basename + extension)) and \
new_basename != basename:
new_basename = rlinput('Sorry, please try again... Filename: ',
basename)
if new_basename == '':
new_basename = basename
return os.path.join(dirname, new_basename + extension) | Prompt for a new filename via terminal. |
def _int_size_to_type(size):
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType | Return the Catalyst datatype from the size of integers. |
def create_workspace(self) -> None:
def create_clicked(text):
if text:
command = Workspace.CreateWorkspaceCommand(self, text)
command.perform()
self.document_controller.push_undo_command(command)
self.pose_get_string_message_box(caption=_("Enter a name for the workspace"), text=_("Workspace"),
accepted_fn=create_clicked, accepted_text=_("Create"),
message_box_id="create_workspace") | Pose a dialog to name and create a workspace. |
def print_result_from_timeit(stmt='pass', setup='pass', number=1000000):
units = ["s", "ms", "us", "ns"]
duration = timeit(stmt, setup, number=int(number))
avg_duration = duration / float(number)
thousands = int(math.floor(math.log(avg_duration, 1000)))
print("Total time: %fs. Average run: %.3f%s." % (
duration, avg_duration * (1000 ** -thousands), units[-thousands])) | Clean function to know how much time took the execution of one statement |
def setup_logfile_raw(self, logfile, mode='w'):
self.logfile_raw = open(logfile, mode=mode) | start logging raw bytes to the given logfile, without timestamps |
def salt_api():
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.api
sapi = salt.cli.api.SaltAPI()
sapi.start() | The main function for salt-api |
def params(self):
if self._GETPOST is None:
self._GETPOST = MultiDict(self.GET)
self._GETPOST.update(dict(self.POST))
return self._GETPOST | A combined MultiDict with POST and GET parameters. |
def conflicts_with(self, section):
"Returns True if the given section conflicts with this time range."
for p in section.periods:
t = (p.int_days, p.start, p.end)
if t in self:
return True
return False | Returns True if the given section conflicts with this time range. |
def _get_tgt_length(self, var):
if var.type == "g" or var.type == "m":
return float("inf")
else:
identity_info = self.hdp.get_tx_identity_info(var.ac)
if not identity_info:
raise HGVSDataNotAvailableError(
"No identity info available for {ac}".format(ac=var.ac))
tgt_len = sum(identity_info["lengths"])
return tgt_len | Get the total length of the whole reference sequence |
def _modify_event(self, event_name, method, func):
if event_name not in self.ALL_EVENTS:
raise TypeError(('event_name ("%s") can only be one of the '
'following: %s') % (event_name,
repr(self.ALL_EVENTS)))
if not isinstance(func, collections.Callable):
raise TypeError(('func must be callable to be added as an '
'observer.'))
getattr(self._events[event_name], method)(func) | Wrapper to call a list's method from one of the events |
def find_identifier(self):
features = self.data['features']
n = len(features)
feature = features[0]
if 'id' in feature and len(set(feat['id'] for feat in features)) == n:
return 'feature.id'
for key in feature.get('properties', []):
if len(set(feat['properties'][key] for feat in features)) == n:
return 'feature.properties.{}'.format(key)
if self.embed:
for i, feature in enumerate(self.data['features']):
feature['id'] = str(i)
return 'feature.id'
raise ValueError(
'There is no unique identifier for each feature and because '
'`embed=False` it cannot be added. Consider adding an `id` '
'field to your geojson data or set `embed=True`. '
) | Find a unique identifier for each feature, create it if needed. |
def write_csv_line(mol, csv_writer, options):
status_field = options.status_field
line = []
id = mol.GetProp('id')
if id is not None:
line.append(id)
else:
line.append('n/a')
line.append(mol.GetProp(status_field))
queryList = mol.properties.keys()
for queryLabel in queryList:
line.append(mol.properties[queryLabel])
csv_writer.writerow(line) | Parse mol object and write a line to the csv file |
def on_scroll(self):
try:
sw = self.view['sw_scroller']
except KeyError:
return False
vadj = sw.get_vadjustment()
if vadj is None: return False
val = vadj.get_value()
if val >= vadj.upper - vadj.page_size:
self.view.show_vscrollbar()
return False
vadj.set_value(val+0.5)
return True | Called to scroll text |
def until_state(self, state, timeout=None):
if state not in self._valid_states:
raise ValueError('State must be one of {0}, not {1}'
.format(self._valid_states, state))
if state != self._state:
if timeout:
return with_timeout(self._ioloop.time() + timeout,
self._waiting_futures[state],
self._ioloop)
else:
return self._waiting_futures[state]
else:
f = tornado_Future()
f.set_result(True)
return f | Return a tornado Future that will resolve when the requested state is set |
def enqueue_jobs(self):
self.log.debug('Checking for scheduled jobs')
jobs = self.get_jobs_to_queue()
for job in jobs:
self.enqueue_job(job)
self.connection.expire(self.scheduler_key, int(self._interval) + 10)
return jobs | Move scheduled jobs into queues. |
def validate_model_specification_file(file_path: str) -> str:
if not os.path.isfile(file_path):
raise ConfigurationError('If you provide a model specification file, it must be a file. '
f'You provided {file_path}')
extension = file_path.split('.')[-1]
if extension not in ['yaml', 'yml']:
raise ConfigurationError(f'Model specification files must be in a yaml format. You provided {extension}')
yaml.full_load(file_path)
return file_path | Ensures the provided file is a yaml file |
def write_data_to_file(data, filepath):
try:
os.makedirs(os.path.dirname(filepath), 0o700)
except OSError:
pass
write_to_disk(filepath, content=data) | Write data to file |
def iter_ROOT_classes():
class_index = "http://root.cern.ch/root/html/ClassIndex.html"
for s in minidom.parse(urlopen(class_index)).getElementsByTagName("span"):
if ("class", "typename") in s.attributes.items():
class_name = s.childNodes[0].nodeValue
try:
yield getattr(QROOT, class_name)
except AttributeError:
pass | Iterator over all available ROOT classes |
def DropTables(self):
rows, _ = self.ExecuteQuery(
"SELECT table_name FROM information_schema.tables "
"WHERE table_schema='%s'" % self.database_name)
for row in rows:
self.ExecuteQuery("DROP TABLE `%s`" % row["table_name"]) | Drop all existing tables. |
def delta_stream(self, target='HEAD', limit=None,
sort=pygit2.GIT_SORT_TIME | pygit2.GIT_SORT_REVERSE,
after=None, before=None):
if target == 'HEAD':
target = self.repo.head.target
commits = []
for commit in self.repo.walk(target, sort):
cdate = commit_date(commit)
log.debug(
"processing commit id:%s date:%s parents:%d msg:%s",
str(commit.id)[:6], cdate.isoformat(),
len(commit.parents), commit.message)
if after and cdate > after:
continue
if before and cdate < before:
continue
commits.append(commit)
if limit and len(commits) > limit:
break
if limit:
self.initialize_tree(commits[limit].tree)
commits.pop(-1)
for commit in commits:
for policy_change in self._process_stream_commit(commit):
yield policy_change | Return an iterator of policy changes along a commit lineage in a repo. |
def file_matches_regexps(filename, patterns):
return any(re.match(pat, filename) for pat in patterns) | Does this filename match any of the regular expressions? |
def version():
namespace = {}
with open(os.path.join('mrcfile', 'version.py')) as f:
exec(f.read(), namespace)
return namespace['__version__'] | Get the version number without importing the mrcfile package. |
def add(self, client_id, email_address, name, access_level, password):
body = {
"EmailAddress": email_address,
"Name": name,
"AccessLevel": access_level,
"Password": password}
response = self._post("/clients/%s/people.json" %
client_id, json.dumps(body))
return json_to_py(response) | Adds a person to a client. Password is optional and if not supplied, an invitation will be emailed to the person |
def write_nginx_config(self):
log = logging.getLogger('ipsv.models.sites.site')
if not os.path.exists(self.root):
log.debug('Creating HTTP root directory: %s', self.root)
os.makedirs(self.root, 0o755)
server_block = ServerBlock(self)
server_config_path = os.path.join(_cfg.get('Paths', 'NginxSitesAvailable'), self.domain.name)
if not os.path.exists(server_config_path):
log.debug('Creating new configuration path: %s', server_config_path)
os.makedirs(server_config_path, 0o755)
server_config_path = os.path.join(server_config_path, '{fn}.conf'.format(fn=self.slug))
if os.path.exists(server_config_path):
log.info('Server block configuration file already exists, overwriting: %s', server_config_path)
os.remove(server_config_path)
log.info('Writing Nginx server block configuration file')
with open(server_config_path, 'w') as f:
f.write(server_block.template) | Write the Nginx configuration file for this Site |
def validate(self):
for client in self.clients:
for key in REQUIRED_KEYS:
if key not in client:
raise MissingConfigValue(key)
if 'revision_file' not in client:
client.revision_file = DEFAULT_REVISION_FILEPATH.format(
client.key
) | Check the value of the config attributes. |
def find_data(path, warn=True):
    """Return the absolute path to a data file within the data directory.

    When *warn* is true and the file is missing, log a download hint —
    but only for paths registered in the module-level PACKAGES list.
    """
    full_path = os.path.join(get_data_dir(), path)
    if warn and not os.path.isfile(full_path):
        # Warn only for known downloadable packages; unknown paths are
        # returned silently even when absent.
        for package in PACKAGES:
            if path == package.path:
                log.warn('%s doesn\'t exist. Run `cde data download` to get it.' % path)
                break
    return full_path | Return the absolute path to a data file within the data directory. |
def clone(self, date=None, data=None, name=None):
name = name or self.name
data = data if data is not None else self.values()
ts = self.__class__(name)
ts._dtype = self._dtype
if date is None:
ts.make(self.keys(), data, raw=True)
else:
ts.make(date, data)
return ts | Create a clone of timeseries |
def eval(self, script, keys=[], args=[]):
return self.execute(b'EVAL', script, len(keys), *(keys + args)) | Execute a Lua script server side. |
async def connect(self, connection):
if self._user_connected:
self._log.info('User is already connected!')
return
self._connection = connection
await self._connect()
self._user_connected = True | Connects to the specified given connection using the given auth key. |
def do_metric_create_raw(mc, args):
try:
mc.metrics.create(**args.jsonbody)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
else:
print('Successfully created metric') | Create metric from raw json body. |
def send_unsubscribe(self, dup, topics):
pkt = MqttPkt()
pktlen = 2 + sum([2+len(topic) for topic in topics])
pkt.command = NC.CMD_UNSUBSCRIBE | (dup << 3) | (1 << 1)
pkt.remaining_length = pktlen
ret = pkt.alloc()
if ret != NC.ERR_SUCCESS:
return ret
mid = self.mid_generate()
pkt.write_uint16(mid)
for topic in topics:
pkt.write_string(topic)
return self.packet_queue(pkt) | Send unsubscribe COMMAND to server. |
def run(self) -> None:
while not self.cleanup:
try:
result, test, additional_info = self.result_queue.get(timeout=1)
except queue.Empty:
continue
self.result_queue.task_done()
if result == TestState.serialization_failure:
test = self.tests[test]
warnings.warn("Serialization error: {} on test {}".format(
additional_info, test), SerializationWarning)
test(self)
else:
self.testsRun += 1
if result == TestState.success:
self.addSuccess(test)
elif result == TestState.failure:
self.addFailure(test, additional_info)
elif result == TestState.error:
self.addError(test, additional_info)
elif result == TestState.skipped:
self.addSkip(test, additional_info)
elif result == TestState.expected_failure:
self.addExpectedFailure(test, additional_info)
elif result == TestState.unexpected_success:
self.addUnexpectedSuccess(test)
else:
raise Exception("This is not a valid test type :", result) | processes entries in the queue until told to stop |
def run_track(track,
result_hosts=None,
crate_root=None,
output_fmt=None,
logfile_info=None,
logfile_result=None,
failfast=False,
sample_mode='reservoir'):
with Logger(output_fmt=output_fmt,
logfile_info=logfile_info,
logfile_result=logfile_result) as log:
executor = Executor(
track_dir=os.path.dirname(track),
log=log,
result_hosts=result_hosts,
crate_root=crate_root,
fail_fast=failfast,
sample_mode=sample_mode
)
error = executor.execute(toml.load(track))
if error:
sys.exit(1) | Execute a track file |
def restart(self, all=False):
if all:
data = {'type': self.type}
else:
data = {'ps': self.process}
r = self._h._http_resource(
method='POST',
resource=('apps', self.app.name, 'ps', 'restart'),
data=data
)
r.raise_for_status() | Restarts the given process. |
def set(self, option, value):
if self.config is None:
self.config = {}
self.config[option] = value | Sets an option to a value. |
def remove_group(self, group):
if not isinstance(group, Group):
raise TypeError("group must be Group")
if group not in self.groups:
raise ValueError("Group doesn't exist / is not bound to this database.")
num_entries = len(group.entries)
for i in xrange(num_entries):
self.remove_entry(group.entries[0])
num_children = len(group.children)
for i in xrange(num_children):
self.remove_group(group.children[0])
group.parent.children.remove(group)
self.groups.remove(group) | Remove the specified group. |
def _custom_icon(self, name, **kwargs):
options = dict(_default_options, **kwargs)
if name in self.painters:
painter = self.painters[name]
return self._icon_by_painter(painter, options)
else:
return QIcon() | Return the custom icon corresponding to the given name. |
def error(self, msg):
self._error = True
self._progress.printMsg('XML parse error: %s' % msg, error=True) | Callback run when a recoverable parsing error occurs |
def fmt_cell(self, value, width, cell_formating, **text_formating):
strptrn = " {:" + '{:s}{:d}'.format(cell_formating.get('align', '<'), width) + "s} "
strptrn = self.fmt_text(strptrn, **text_formating)
return strptrn.format(value) | Format sigle table cell. |
def _handle_template(self, token):
params = []
default = 1
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TemplateParamSeparator):
if not params:
name = self._pop()
param = self._handle_parameter(default)
params.append(param)
if not param.showkey:
default += 1
elif isinstance(token, tokens.TemplateClose):
if not params:
name = self._pop()
return Template(name, params)
else:
self._write(self._handle_token(token))
raise ParserError("_handle_template() missed a close token") | Handle a case where a template is at the head of the tokens. |
def htmlize_list(items):
out = ["<ul>"]
for item in items:
out.append("<li>" + htmlize(item) + "</li>")
out.append("</ul>")
return "\n".join(out) | Turn a python list into an html list. |
def _dequantize(q, params):
if not params.quantize:
return q
return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale | Dequantize q according to params. |
def create_with_virtualenv(self, interpreter, virtualenv_options):
args = ['virtualenv', '--python', interpreter, self.env_path]
args.extend(virtualenv_options)
if not self.pip_installed:
args.insert(3, '--no-pip')
try:
helpers.logged_exec(args)
self.env_bin_path = os.path.join(self.env_path, 'bin')
except FileNotFoundError as error:
logger.error('Virtualenv is not installed. It is needed to create a virtualenv with '
'a different python version than fades (got {})'.format(error))
raise FadesError('virtualenv not found')
except helpers.ExecutionError as error:
error.dump_to_log(logger)
raise FadesError('virtualenv could not be run')
except Exception as error:
logger.exception("Error creating virtualenv: %s", error)
raise FadesError('General error while running virtualenv') | Create a virtualenv using the virtualenv lib. |
def cyan(cls):
"Make the text foreground color cyan."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_CYAN
cls._set_text_attributes(wAttributes) | Make the text foreground color cyan. |
def restore_initial_state(self):
self.list.clear()
self.is_visible = False
widgets = self.widgets_by_path
if not self.edit.clicked_outside:
for path in self.initial_cursors:
cursor = self.initial_cursors[path]
if path in widgets:
self.set_editor_cursor(widgets[path], cursor)
if self.initial_widget in self.paths_by_widget:
index = self.paths.index(self.initial_path)
self.sig_goto_file.emit(index) | Restores initial cursors and initial active editor. |
def _stop_process(p, name):
if p.poll() is not None:
print("{} is already stopped.".format(name))
return
p.terminate()
time.sleep(0.1)
if p.poll() is not None:
print("{} is terminated.".format(name))
return
p.kill()
print("{} is killed.".format(name)) | Stop process, by applying terminate and kill. |
def render(self, view, context=None):
return self.runtime.render(self, view, context) | Render `view` with this block's runtime and the supplied `context` |
def _has_bcftools_germline_stats(data):
stats_file = tz.get_in(["summary", "qc"], data)
if isinstance(stats_file, dict):
stats_file = tz.get_in(["variants", "base"], stats_file)
if not stats_file:
stats_file = ""
return stats_file.find("bcftools_stats_germline") > 0 | Check for the presence of a germline stats file, CWL compatible. |
def delete(self, key):
validate_is_bytes(key)
self.root_hash = self._set(self.root_hash, encode_to_bin(key), b'') | Equals to setting the value to None |
def map(self, fn, *seq):
"Perform a map operation distributed among the workers. Will "
"block until done."
results = Queue()
args = zip(*seq)
for seq in args:
j = SimpleJob(results, fn, seq)
self.put(j)
r = []
for i in range(len(list(args))):
r.append(results.get())
return r | Perform a map operation distributed among the workers. Will |
def _processDDL(self):
sql_statements = self._generateDDL()
logging.info('Generating sqllite tables')
for stmt in sql_statements:
c = self.conn.cursor()
c.execute(stmt)
self.conn.commit() | Generate and process table SQL, SQLLite version |
def relevant_rules_for_match(self, action, subject):
matches = []
for rule in self.rules:
rule.expanded_actions = self.expand_actions(rule.actions)
if rule.is_relevant(action, subject):
matches.append(rule)
return self.optimize(matches[::-1]) | retrive match action and subject |
def _calculate_dispersion(X: Union[pd.DataFrame, np.ndarray], labels: np.ndarray, centroids: np.ndarray) -> float:
    """Calculate the dispersion between actual points and their assigned centroids.

    Returns the sum of squared distances between each row of *X* and the
    centroid selected by its label.
    """
    # Fix: iterating a DataFrame directly yields column labels, not rows,
    # so the annotated DataFrame input was broken. Convert first, then
    # vectorize (the original double np.sum over a Python list was redundant).
    X = np.asarray(X)
    return float(np.sum(np.abs(X - centroids[labels]) ** 2))
def charge(self, data):
    """Second stage of an OPR request: submit the charge with the stored or supplied token."""
    payload = {
        "token": data.get("token", self._response["token"]),
        "confirm_token": data.get("confirm_token"),
    }
    return self._process('opr/charge', payload)
def getDefaultParList(self):
    """Return a par list just like ours, but with all values reset to their defaults."""
    if self.filename is None:
        # Never saved yet: the current list already holds the defaults.
        self.filename = self.getDefaultSaveFilename(stub=True)
        return copy.deepcopy(self.__paramList)
    defaults = ConfigObjPars(self.filename, associatedPkg=self.__assocPkg,
                             setAllToDefaults=True, strict=False)
    return defaults.getParList()
def between_or_equal_to(y, z):
    """Match values x with y <= x <= z, float-tolerant at both endpoints."""
    def _within(x):
        if y <= x <= z:
            return True
        return _equal_or_float_equal(x, y) or _equal_or_float_equal(x, z)
    return _combinable(_within)
def load_config(args, config_path=".inlineplz.yml"):
    """Load inline-plz config from yaml config file with reasonable defaults.

    Merges the loaded config into *args*, fills in default ignore paths, and
    when the default path yields no config, retries once from args.config_dir.
    Returns the (possibly updated) args namespace.
    """
    config = {}
    try:
        with open(config_path) as configfile:
            config = yaml.safe_load(configfile) or {}
        if config:
            print("Loaded config from {}".format(config_path))
            pprint.pprint(config)
    except (IOError, OSError, yaml.YAMLError):
        # Fix: yaml.parser.ParserError missed yaml.scanner.ScannerError (the
        # usual failure on malformed YAML); YAMLError is the common base of
        # all PyYAML parse errors. Best-effort: log and continue with {}.
        traceback.print_exc()
    args = update_from_config(args, config)
    args.ignore_paths = args.__dict__.get("ignore_paths") or [
        "node_modules",
        ".git",
        ".tox",
        "godeps",
        "vendor",
        "site-packages",
        "venv",
        ".env",
        "spec",
        "migrate",
        "bin",
        "fixtures",
        "cassettes",
        ".cache",
        ".idea",
        ".pytest_cache",
        "__pycache__",
        "dist",
    ]
    if config_path != ".inlineplz.yml":
        # Already on the fallback path; don't recurse again.
        return args
    if args.config_dir and not config:
        new_config_path = os.path.join(args.config_dir, config_path)
        if os.path.exists(new_config_path):
            return load_config(args, new_config_path)
    return args
def process_save(X, y, tokenizer, proc_data_path, max_len=400, train=False, ngrams=None, limit_top_tokens=None):
    """Tokenize and pad texts, wrap them into a Dataset, and save to *proc_data_path*.

    When training, optionally restricts the vocabulary to the top tokens and
    attaches the tokenizer to the saved Dataset.
    """
    if train and limit_top_tokens is not None:
        tokenizer.apply_encoding_options(limit_top_tokens=limit_top_tokens)
    encoded = tokenizer.encode_texts(X)
    if ngrams is not None:
        encoded = tokenizer.add_ngrams(encoded, n=ngrams, train=train)
    padded = tokenizer.pad_sequences(encoded, fixed_token_seq_length=max_len)
    dataset = Dataset(padded, y, tokenizer=tokenizer) if train else Dataset(padded, y)
    dataset.save(proc_data_path)
def construct_mapping(self, node, deep=False):
    """Build YAML mapping nodes as ODict so key insertion order is kept."""
    result = ODict()
    for k_node, v_node in node.value:
        k = self.construct_object(k_node, deep=deep)
        v = self.construct_object(v_node, deep=deep)
        result[k] = v
    return result
def newDocPI(self, name, content):
    """Creation of a processing instruction element owned by this document."""
    raw = libxml2mod.xmlNewDocPI(self._o, name, content)
    if raw is None:
        raise treeError('xmlNewDocPI() failed')
    return xmlNode(_obj=raw)
def batch(input_iter, batch_size=32):
    """Yield successive lists of up to *batch_size* items from an iterable."""
    source = iter(input_iter)
    while True:
        chunk = list(itertools.islice(source, batch_size))
        if not chunk:
            return
        yield chunk
def __set_log_file_name(self):
    """Automatically set the logFileName attribute.

    Creates the parent directory if needed, then rolls to a numbered file
    ("basename_N.ext") while the existing candidate exceeds the size limit.
    """
    parent, _ = os.path.split(self.__logFileBasename)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    candidate = "%s.%s" % (self.__logFileBasename, self.__logFileExtension)
    suffix = 0
    while os.path.isfile(candidate):
        # Reuse the current file if it is still under the size limit (MB).
        if os.stat(candidate).st_size / 1e6 < self.__maxlogFileSize:
            break
        suffix += 1
        candidate = "%s_%d.%s" % (self.__logFileBasename, suffix, self.__logFileExtension)
    self.__logFileName = candidate
    self.__logFileStream = None
def sorted(cls, items, orders):
    """Return the elements in `items` sorted according to `orders`.

    Fix: the `cmp=` keyword argument was removed in Python 3; adapt the
    comparison function with functools.cmp_to_key (also works on 2.7).
    The inner `sorted(...)` resolves to the builtin, not this classmethod.
    """
    import functools  # local import keeps the module's import block untouched
    return sorted(items, key=functools.cmp_to_key(cls.multipleOrderComparison(orders)))
def from_memdb_file(path):
    """Creates a sourcemap View from a MemDB at the given file path."""
    encoded_path = to_bytes(path)
    ptr = rustcall(_lib.lsm_view_from_memdb_file, encoded_path)
    return View._from_ptr(ptr)
def _check_realign(data):
    """Check for realignment, which is not supported in GATK4.

    Raises ValueError when realignment is requested while GATK4 is enabled.
    `tools_off` may be absent, None, a list of tool names, or a bare string.
    """
    # Fix: an explicit `tools_off: None` crashed `"gatk4" not in None`.
    tools_off = data["algorithm"].get("tools_off") or []
    # Membership covers both the list form and the bare-string form
    # ("gatk4" in "gatk4" is True), so the original's second equality
    # check was redundant.
    if "gatk4" not in tools_off:
        if data["algorithm"].get("realign"):
            raise ValueError("In sample %s, realign specified but it is not supported for GATK4. "
                             "Realignment is generally not necessary for most variant callers." %
                             (dd.get_sample_name(data)))
def _create_entry(entry: dict, source: Source = None) -> dict:
    """Filter and normalize the output of a pipdeptree entry.

    Hoists name/version out of the nested "package" dict, optionally attaches
    index URL and sha256 hashes from *source*, and strips redundant keys from
    each dependency. Mutates and returns *entry*.
    """
    package = entry["package"]
    entry["package_name"] = package.pop("package_name")
    entry["package_version"] = package.pop("installed_version")
    if source:
        entry["index_url"] = source.url
        hashes = []
        for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
            hashes.append(item["sha256"])
        entry["sha256"] = hashes
    entry.pop("package")
    for dependency in entry["dependencies"]:
        dependency.pop("key", None)
        dependency.pop("installed_version", None)
    return entry
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.