code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def pages(self, limit=0):
    """Return the page iterator, optionally capping it at *limit* pages.

    A positive *limit* is written onto the iterator before it is handed
    back; zero (the default) leaves the iterator unrestricted.
    """
    cap = limit
    if cap > 0:
        self.iterator.limit = cap
    return self.iterator
def query_all_issues(after):
    """Hit the GitHub API for all closed issues after the given date.

    Pages through `query_issues` (1-based page numbers from `count`)
    until an empty page is returned, accumulating all results.
    """
    page = count(1)
    data = []
    while True:
        page_data = query_issues(next(page), after)
        if not page_data:
            break
        data.extend(page_data)
    return data | Hits the github API for all closed issues after the given date, returns the data. |
def create_cell_renderer_combo(self, tree_view, title="title", assign=0, editable=False, model=None, function=None):
    """Create a Gtk.CellRendererCombo column and append it to *tree_view*.

    :param tree_view: Gtk.TreeView receiving the new column.
    :param title: column header text.
    :param assign: model column index bound to the renderer's text.
    :param editable: whether the combo is user-editable.
    :param model: optional Gtk model backing the combo choices.
    :param function: optional callback connected to the "edited" signal.
    """
    renderer_combo = Gtk.CellRendererCombo()
    renderer_combo.set_property('editable', editable)
    if model:
        renderer_combo.set_property('model', model)
    if function:
        renderer_combo.connect("edited", function)
    # combo text always comes from column 0 of the combo's own model
    renderer_combo.set_property("text-column", 0)
    renderer_combo.set_property("has-entry", False)
    column = Gtk.TreeViewColumn(title, renderer_combo, text=assign)
    tree_view.append_column(column) | Function creates a CellRendererCombo with title, model |
def field_specific_errors(self):
    """Return this row's field-scoped validation errors.

    Filters the ``NON_FIELD_ERRORS`` bucket out of ``error_dict``,
    leaving a mapping of field name -> errors.
    """
    errors = {}
    for field, messages in self.error_dict.items():
        if field != NON_FIELD_ERRORS:
            errors[field] = messages
    return errors
def update(self):
    """Find the backing record and update it from this object's values.

    Copies every attribute listed in ``self._properties`` onto a freshly
    locked model instance, persists it via ``obj.update()`` and returns it.
    """
    # get_for_update presumably takes a row lock -- confirm against the model API
    obj = self.__model__.objects.get_for_update(id=self.id)
    for name, value in self.__dict__.items():
        if name in self._properties:
            setattr(obj, name, value)
    obj.update()
    return obj | Finds record and update it based in serializer values |
def load(fname):
    """Load an embedding dump generated by `save`.

    On Python 3 the pickle is decoded with latin1 so Python-2-written
    byte strings round-trip. A 2-tuple vocabulary becomes a
    CountedVocabulary (words + counts); otherwise an OrderedVocabulary
    is built from the plain word list.
    """
    content = _open(fname).read()
    if PY2:
        state = pickle.loads(content)
    else:
        state = pickle.loads(content, encoding='latin1')
    voc, vec = state
    if len(voc) == 2:
        words, counts = voc
        word_count = dict(zip(words, counts))
        vocab = CountedVocabulary(word_count=word_count)
    else:
        vocab = OrderedVocabulary(voc)
    return Embedding(vocabulary=vocab, vectors=vec) | Load an embedding dump generated by `save` |
def nextSunrise(jd, lat, lon):
    """Return the Julian day of the next sunrise after *jd* at (lat, lon)."""
    return swe.sweNextTransit(const.SUN, jd, lat, lon, 'RISE') | Returns the JD of the next sunrise. |
def _ostaunicode(src):
    """Create an OSTA compressed unicode byte string from *src*.

    Tries latin-1 first (compression id 0x08, 8-bit characters); falls
    back to big-endian UTF-16 (compression id 0x10) when the text does
    not fit in latin-1. Returns the compression-id byte + encoded text.
    """
    if have_py_3:
        bytename = src
    else:
        # on py2, normalize the byte string to unicode before encoding
        bytename = src.decode('utf-8')
    try:
        enc = bytename.encode('latin-1')
        encbyte = b'\x08'
    except (UnicodeEncodeError, UnicodeDecodeError):
        enc = bytename.encode('utf-16_be')
        encbyte = b'\x10'
    return encbyte + enc | Internal function to create an OSTA byte string from a source string. |
def unwrap(self):
    """Return a GLFWgammaramp built from this ramp's channel arrays.

    Copies the red/green/blue channels into plain lists; when
    NORMALIZE_GAMMA_RAMPS is set, 16-bit values are scaled to [0, 1].
    """
    red = [self.red[i] for i in range(self.size)]
    green = [self.green[i] for i in range(self.size)]
    blue = [self.blue[i] for i in range(self.size)]
    if NORMALIZE_GAMMA_RAMPS:
        red = [value / 65535.0 for value in red]
        green = [value / 65535.0 for value in green]
        blue = [value / 65535.0 for value in blue]
    return self.GLFWgammaramp(red, green, blue) | Returns a GLFWgammaramp object. |
def _slugify_internal_collection_name(self, json_repr):
    """Parse *json_repr*, find the collection's name, and return its slug.

    Returns None when the JSON cannot be coerced into a collection.
    """
    collection = self._coerce_json_to_collection(json_repr)
    if collection is None:
        return None
    internal_name = collection['name']
    return slugify(internal_name) | Parse the JSON, find its name, return a slug of its name |
def as_dict(self):
    """Return a JSON-friendly dict representation of this object.

    Includes @module/@class markers for round-tripping; nested
    spin_mode/smearing/algorithm objects are serialized recursively
    (algorithm may be None).
    """
    d = {}
    d["@module"] = self.__class__.__module__
    d["@class"] = self.__class__.__name__
    d["spin_mode"] = self.spin_mode.as_dict()
    d["smearing"] = self.smearing.as_dict()
    d["algorithm"] = self.algorithm.as_dict() if self.algorithm else None
    d["nband"] = self.nband
    d["fband"] = self.fband
    d["charge"] = self.charge
    d["comment"] = self.comment
    return d | json friendly dict representation |
def config(self, name, suffix):
    """Return the config value for ``name``/``suffix``.

    The lookup key is ``NAME_SUFFIX`` upper-cased with dashes mapped to
    underscores. Values in ``self._config`` win; otherwise the process
    environment is consulted (a missing variable raises KeyError).
    """
    key = ('%s_%s' % (name, suffix)).upper().replace('-', '_')
    try:
        return self._config[key]
    except KeyError:
        return os.environ[key]
def submit_sample_url(self, url, params=None, _extra_params=None):
    """Submit a sample at a given URL for analysis.

    :param url: URL of the sample to analyze.
    :param params: optional user parameters; copied before mutation so the
        caller's dict is never changed.
    :param _extra_params: optional extra parameters forwarded to ``_submit``.
    :return: result of ``self._submit``.
    """
    # None defaults instead of mutable {} defaults: a shared default dict
    # would leak state across calls if it were ever mutated.
    if params is None:
        params = {}
    if _extra_params is None:
        _extra_params = {}
    self._check_user_parameters(params)
    params = copy.copy(params)
    params['sample-url'] = url
    return self._submit(params, _extra_params=_extra_params)
def change_number_matches(self, current_match=0, total_matches=0):
    """Update the "N of M" match counter label.

    Shows "current of total" while stepping through matches, just the
    total when only a count is known, and a "no matches" message
    otherwise. Strings go through gettext (_) for localization.
    """
    if current_match and total_matches:
        matches_string = u"{} {} {}".format(current_match, _(u"of"),
                                            total_matches)
        self.number_matches_text.setText(matches_string)
    elif total_matches:
        matches_string = u"{} {}".format(total_matches, _(u"matches"))
        self.number_matches_text.setText(matches_string)
    else:
        self.number_matches_text.setText(_(u"no matches")) | Change number of match and total matches. |
def _febrl_links(df):
    """Return the true-match MultiIndex of a FEBRL dataset.

    FEBRL record ids look like ``rec-<n>-...``; rows sharing the same
    ``<n>`` are duplicates of one original. Pairs are formed by a
    self-merge on that key, keeping each unordered pair once
    (index_x > index_y).
    """
    index = df.index.to_series()
    keys = index.str.extract(r'rec-(\d+)', expand=True)[0]
    index_int = numpy.arange(len(df))
    df_helper = pandas.DataFrame({
        'key': keys,
        'index': index_int
    })
    pairs_df = df_helper.merge(
        df_helper, on='key'
    )[['index_x', 'index_y']]
    pairs_df = pairs_df[pairs_df['index_x'] > pairs_df['index_y']]
    # NOTE(review): `labels=` is the legacy pandas MultiIndex argument;
    # newer pandas renamed it to `codes=` -- confirm the pinned version.
    return pandas.MultiIndex(
        levels=[df.index.values, df.index.values],
        labels=[pairs_df['index_x'].values, pairs_df['index_y'].values],
        names=[None, None],
        verify_integrity=False
    ) | Get the links of a FEBRL dataset. |
def stop_containers(self):
    """Stop (SIGKILL) all containers used by this backend instance.

    Containers are popped from the tracked list as they are killed;
    Docker API errors (e.g. already-dead containers) are ignored.
    """
    while len(self._containers):
        container = self._containers.pop()
        try:
            container.kill(signal.SIGKILL)
        except docker.errors.APIError:
            pass | Stops all containers used by this instance of the backend. |
def _simple_command(self, command, arg=None, **kwargs):
    """Send *command* (with optional *arg*) and return the simple response.

    Extra keyword arguments are forwarded to the protocol's response
    handler.
    """
    self._protocol.send_command(command, arg)
    return self._protocol.handle_simple_responses(**kwargs) | Send a simple command. |
def list_security_group_rules(self, retrieve_all=True, **_params):
    """Fetch the project's security group rules.

    Thin wrapper over the generic ``list`` helper; *_params* become
    query-string filters.
    """
    return self.list('security_group_rules',
                     self.security_group_rules_path,
                     retrieve_all, **_params) | Fetches a list of all security group rules for a project. |
def intinlist(lst):
    """Return True if any item of *lst* can be converted with ``int()``.

    Items that cannot be converted -- including non-string/non-numeric
    values such as None, which previously raised an uncaught TypeError --
    are simply skipped. Returns False for an empty list.
    """
    for item in lst:
        try:
            int(item)  # conversion result was never used; only success matters
            return True
        except (ValueError, TypeError):
            pass
    return False
def _create_config(self):
    """Create the default configuration dictionary for this page.

    Every value is wrapped in a single-element list (the config format's
    convention); author defaults from the site config, status starts as
    draft, and a fresh uuid4 hex becomes the page id.
    """
    configinfo = {'creation_date': [ datetime.datetime.now().date().isoformat()],
                  'author': [self.site.site_config['default_author']],
                  'status': [u'draft'],
                  'lang': [u''],
                  'tags': [u''],
                  'title': [self._title],
                  'slug': [self._slug],
                  'theme': [u''],
                  'template': [u''],
                  'page_id': [uuid.uuid4().hex]
                  }
    return configinfo | Create the default configuration dictionary for this page. |
def ping():
    """Ping CozyDB with the existing credentials.

    Returns True when the ``/cozy/`` endpoint answers, False when the
    connection is refused (the error is printed for diagnostics).
    """
    try:
        curl_couchdb('/cozy/')
        ping = True
    # `except X as e` / print() replace the Python-2-only
    # `except X, e` / `print e` forms, keeping 2.6+ and 3.x compatible.
    except requests.exceptions.ConnectionError as error:
        print(error)
        ping = False
    return ping
def _delete(collection_name, spec, opts, flags):
    """Build an OP_DELETE wire message.

    Returns (message_bytes, encoded_spec_length): a 32-bit zero header,
    the C-string collection name, packed flags, then the BSON-encoded
    selector.
    """
    encoded = _dict_to_bson(spec, False, opts)
    return b"".join([
        _ZERO_32,
        _make_c_string(collection_name),
        _pack_int(flags),
        encoded]), len(encoded) | Get an OP_DELETE message. |
def logp_partial_gradient(self, variable, calculation_set=None):
    """Return the logp gradient of this deterministic w.r.t. *variable*.

    Zero (matching variable's shape) when either node is not continuous.
    Otherwise sums the children's gradients and pushes them through the
    Jacobian of every parent slot bound to *variable*.
    """
    if self.verbose > 0:
        print_('\t' + self.__name__ + ': logp_partial_gradient accessed.')
    if not (datatypes.is_continuous(variable)
            and datatypes.is_continuous(self)):
        return zeros(shape(variable.value))
    # chain rule: accumulate child gradients first
    gradient = builtins.sum(
        [child.logp_partial_gradient(self,
                                     calculation_set) for child in self.children])
    totalGradient = 0
    for parameter, value in six.iteritems(self.parents):
        if value is variable:
            totalGradient += self.apply_jacobian(
                parameter, variable, gradient)
    return np.reshape(totalGradient, shape(variable.value)) | gets the logp gradient of this deterministic with respect to variable |
def argmax(self):
    """Return the index of the maximum, ignoring NaNs (cached in attrs).

    Each chunk reports its local argmax (as a global index) and value;
    the winner across chunks is stored under ``attrs["argmax"]``.
    """
    if "argmax" not in self.attrs.keys():
        def f(dataset, s):
            # per-chunk worker: local nan-argmax translated to global indices
            arr = dataset[s]
            try:
                # NOTE: despite the name, `amin` holds the ARGMAX position
                amin = np.nanargmax(arr)
            except ValueError:
                # all-NaN chunk: fall back to position 0
                amin = 0
            idx = np.unravel_index(amin, arr.shape)
            val = arr[idx]
            return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)
        chunk_res = self.chunkwise(f)
        idxs = [i[0] for i in chunk_res.values()]
        vals = [i[1] for i in chunk_res.values()]
        self.attrs["argmax"] = idxs[np.nanargmax(vals)]
    return tuple(self.attrs["argmax"]) | Index of the maximum, ignorning nans. |
def sample(self, histogram_logits):
    """Greedy action from a q-value histogram.

    Exponentiates the log-probabilities, takes the expectation over the
    support atoms (last dim), then argmaxes over actions (dim 1).
    """
    histogram_probs = histogram_logits.exp()
    atoms = self.support_atoms.view(1, 1, self.atoms)
    return (histogram_probs * atoms).sum(dim=-1).argmax(dim=1) | Sample from a greedy strategy with given q-value histogram |
def _contains_cftime_datetimes(array) -> bool:
try:
from cftime import datetime as cftime_datetime
except ImportError:
return False
else:
if array.dtype == np.dtype('O') and array.size > 0:
sample = array.ravel()[0]
if isinstance(sample, dask_array_type):
sample = sample.compute()
if isinstance(sample, np.ndarray):
sample = sample.item()
return isinstance(sample, cftime_datetime)
else:
return False | Check if an array contains cftime.datetime objects |
def as_dict(self):
    """Return a dict of this operation (op name, position, field) for JSON."""
    return {self.FLD_OP: self._op.name,
            self.FLD_MARK: self._pos,
            self.FLD_FLD: self._fld} | Representation as a dict for JSON serialization. |
def to_cls(self):
    """Resolve and return ``_to_cls`` as an entity class when possible.

    A string value is looked up in the registry and cached back onto
    ``_to_cls``; a failed lookup (AssertionError) leaves the string
    untouched.
    """
    target = self._to_cls
    if isinstance(target, str):
        try:
            self._to_cls = fetch_entity_cls_from_registry(target)
        except AssertionError:
            pass
    return self._to_cls
def fw_create(self, data, fw_name=None, cache=False):
    """Top level FW create function.

    Delegates to ``_fw_create``; any failure is logged rather than
    propagated (best-effort create).
    """
    LOG.debug("FW create %s", data)
    try:
        self._fw_create(fw_name, data, cache)
    except Exception as exc:
        LOG.error("Exception in fw_create %s", str(exc)) | Top level FW create function. |
def remove_file(self):
    """Remove the archived file associated with this DP.

    Raises RuntimeError when there is no path or the file has not been
    archived. Removal failures are printed, not raised (best effort).
    """
    if not self.fullpath or not self.archived:
        raise RuntimeError('no archived file to remove')
    try:
        os.remove(self.fullpath)
    except OSError as exc:
        # was a bare `except:` -- only swallow filesystem errors, never
        # KeyboardInterrupt/SystemExit
        print("Error removing %s: %s" % (self.fullpath, exc))
def format_item(item, template, name='item'):
    """Render *template* to a string with *item* bound as *name* in context."""
    ctx = {name: item}
    return render_template_to_string(template, **ctx) | Render a template to a string with the provided item in context. |
def message_info(message):
    """Return a one-line human-readable description of a JSON-RPC message.

    Distinguishes method calls, notifications, error replies (with or
    without an id) and plain method returns, for debugging purposes.
    """
    method = message.get('method')
    msgid = message.get('id')
    error = message.get('error')
    if method and msgid is not None:
        return 'method call "{}", id = "{}"'.format(method, msgid)
    if method:
        return 'notification "{}"'.format(method)
    if error is not None and msgid is not None:
        return 'error reply to id = "{}"'.format(msgid)
    if error is not None:
        code = error.get('code', '(none)')
        return 'error reply: {}'.format(errorcode.get(code, code))
    return 'method return for id = "{}"'.format(msgid)
def _sanitize_numbers(uncleaned_numbers):
cleaned_numbers = []
for x in uncleaned_numbers:
try:
cleaned_numbers.append(int(x))
except ValueError:
cleaned_numbers.append(x)
return cleaned_numbers | Convert strings to integers if possible |
def requestSubsystem(self, subsystem):
    """Request an SSH subsystem; returns a Deferred firing on the reply."""
    data = common.NS(subsystem)
    return self.sendRequest('subsystem', data, wantReply=True) | Request a subsystem and return a deferred reply. |
def setup(self, redis_conn=None, host='localhost', port=6379):
if redis_conn is None:
if host is not None and port is not None:
self.redis_conn = redis.Redis(host=host, port=port)
else:
raise Exception("Please specify some form of connection "
"to Redis")
else:
self.redis_conn = redis_conn
self.redis_conn.info() | Set up the redis connection |
def dict(self):
    """Convert this object to a plain dictionary.

    Walks every public, non-reserved attribute; DictClass values are
    serialized recursively, lists and dicts are rebuilt element-wise,
    everything else is copied as-is.
    """
    return_obj = {}
    for attr in dir(self):
        if not attr.startswith('__') and attr not in self.__reserved:
            if isinstance(getattr(self, attr), list):
                return_val = []
                for item in getattr(self, attr):
                    if isinstance(item, DictClass):
                        # NOTE(review): list items use builtin dict(item) while
                        # dict values below use item.dict() -- possibly a bug;
                        # confirm DictClass supports mapping-style conversion.
                        return_val.append(dict(item))
                    else:
                        return_val.append(item)
            elif isinstance(getattr(self, attr), dict):
                return_val = {}
                for key, item in getattr(self, attr).items():
                    if isinstance(item, DictClass):
                        return_val[key] = item.dict()
                    else:
                        return_val[key] = item
            elif isinstance(getattr(self, attr), DictClass):
                return_val = getattr(self, attr).dict()
            else:
                return_val = getattr(self, attr)
            return_obj[attr] = return_val
    return return_obj | converts the class to a dictionary object |
def load_buildfile(self, target):
    """Pull a build file for *target* from git.

    Resolves the repo from the shared repo state and fetches the
    configured buildfile; git failures are logged and re-raised as
    BrokenGraph.
    """
    log.info('Loading: %s', target)
    filepath = os.path.join(target.path, app.get_options().buildfile_name)
    try:
        repo = self.repo_state.GetRepo(target.repo)
        return repo.get_file(filepath)
    except gitrepo.GitError as err:
        log.error('Failed loading %s: %s', target, err)
        raise error.BrokenGraph('Sadface.') | Pull a build file from git. |
def vhost_remove(cls, name):
    """Delete virtual host *name* from a webaccelerator.

    Starts the remote delete operation, shows progress until it
    finishes, and returns the operation handle.
    """
    oper = cls.call('hosting.rproxy.vhost.delete', name)
    cls.echo('Deleting your virtual host %s' % name)
    cls.display_progress(oper)
    cls.echo('Your virtual host have been removed')
    return oper | Delete a vhost in a webaccelerator |
def _from_dict(cls, _dict):
args = {}
if 'start_time' in _dict:
args['start_time'] = _dict.get('start_time')
else:
raise ValueError(
'Required property \'start_time\' not present in WordAlternativeResults JSON'
)
if 'end_time' in _dict:
args['end_time'] = _dict.get('end_time')
else:
raise ValueError(
'Required property \'end_time\' not present in WordAlternativeResults JSON'
)
if 'alternatives' in _dict:
args['alternatives'] = [
WordAlternativeResult._from_dict(x)
for x in (_dict.get('alternatives'))
]
else:
raise ValueError(
'Required property \'alternatives\' not present in WordAlternativeResults JSON'
)
return cls(**args) | Initialize a WordAlternativeResults object from a json dictionary. |
def export_data( self ):
    """Export this structure to a byte array.

    Scrubs/validates every field and refreshes dependencies first, then
    writes each field into a zeroed buffer of the full size and finally
    applies checksum/check updates.
    """
    klass = self.__class__
    output = bytearray( b'\x00'*self.get_size() )
    queue = []
    # pass 1: normalize and validate all fields before serializing
    for name in klass._fields:
        self.scrub_field( name )
        self.validate_field( name )
    self.update_deps()
    # pass 2: serialize each field into the shared buffer
    for name in klass._fields:
        klass._fields[name].update_buffer_with_value(
            self._field_data[name], output, parent=self
        )
    # pass 3: checksums and other derived checks
    for name, check in klass._checks.items():
        check.update_buffer( output, parent=self )
    return output | Export data to a byte array. |
def formatBodyNode(root,path):
    """Format *root* in place for use as the body node.

    Names it "body", computes its weight, records *path*, and clears
    its class. Returns the same (mutated) node.
    """
    body = root
    body.name = "body"
    body.weight = calcFnWeight(body)
    body.path = path
    body.pclass = None
    return body | Format the root node for use as the body node. |
def publish_json(self, channel, obj):
    """Serialize *obj* to JSON and post it to *channel* via ``publish``."""
    payload = json.dumps(obj)
    return self.publish(channel, payload)
def delete(*args):
    """Context-style generator: use the named objects, then delete them.

    After the ``yield`` resumes, each arg (a name string or an object
    whose __name__ is used) is removed from the calling module.
    """
    from syn.base_utils import this_module
    # npop=3 walks up the stack to the caller's module -- confirm depth
    # if the call site changes.
    mod = this_module(npop=3)
    yield
    for arg in args:
        name = arg
        if not isinstance(name, STR):
            name = arg.__name__
        delattr(mod, name) | For using then deleting objects. |
def provision(self, tool: Tool) -> docker.models.containers.Container:
    """Provision a mountable Docker container for *tool*.

    Raises when the tool's image is not installed; otherwise creates
    (but does not start) a container from it.
    """
    if not self.is_installed(tool):
        raise Exception("tool is not installed: {}".format(tool.name))
    client = self.__installation.docker
    return client.containers.create(tool.image) | Provisions a mountable Docker container for a given tool. |
def init_datamembers(self, rec):
    """Attach empty containers to *rec* for each enabled optional attribute.

    synonym -> list, xref/subset -> set, comment -> empty string, and
    relationship -> a forward and reverse dict pair.
    """
    defaults = (
        ('synonym', list),
        ('xref', set),
        ('subset', set),
        ('comment', str),
    )
    enabled = self.optional_attrs
    for attr, factory in defaults:
        if attr in enabled:
            setattr(rec, attr, factory())
    if 'relationship' in enabled:
        rec.relationship = {}
        rec.relationship_rev = {}
async def json(self, *, encoding: str=None,
               loads: JSONDecoder=DEFAULT_JSON_DECODER,
               content_type: Optional[str]='application/json') -> Any:
    """Read and decode the JSON response body.

    Reads the body if not yet buffered, rejects unexpected mimetypes
    with ContentTypeError (unless content_type is falsy), autodetects
    the encoding when not given, and decodes with *loads*.
    """
    if self._body is None:
        await self.read()
    if content_type:
        ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
        if not _is_expected_content_type(ctype, content_type):
            raise ContentTypeError(
                self.request_info,
                self.history,
                message=('Attempt to decode JSON with '
                         'unexpected mimetype: %s' % ctype),
                headers=self.headers)
    if encoding is None:
        encoding = self.get_encoding()
    return loads(self._body.decode(encoding)) | Read and decodes JSON response. |
def real_ip(self):
    """Return (and lazily cache) the actual public IP of this host."""
    if self._real_ip is None:
        response = get(ICANHAZIP)
        self._real_ip = self._get_response_text(response)
    return self._real_ip | The actual public IP of this host. |
def hide_navbar_items(portal):
    """Hide the configured root items from portal navigation.

    Only ids in NAV_BAR_ITEMS_TO_HIDE that actually exist in the portal
    are touched; each gets excluded from nav and reindexed.
    """
    logger.info("*** Hide Navigation Items ***")
    object_ids = portal.objectIds()
    object_ids = filter(lambda id: id in object_ids, NAV_BAR_ITEMS_TO_HIDE)
    for object_id in object_ids:
        item = portal[object_id]
        item.setExcludeFromNav(True)
        item.reindexObject() | Hide root items in navigation |
def main():
    """Entry point for remove_template: clean FILENAME and print the result.

    Wraps stdout in a UTF-8 writer on Python 2 so unicode output works.
    Returns 0 on success.
    """
    if sys.version_info[0] < 3:
        sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
    options = docopt.docopt(__doc__,
                            help=True,
                            version='template_remover v%s' % __VERSION__)
    print(template_remover.clean(io.open(options['FILENAME']).read()))
    return 0 | Entry point for remove_template. |
def _tail_profile(self, db, interval):
    """Generator tailing MongoDB's system.profile collection.

    Waits until the first profile document exists, then polls every
    *interval* seconds yielding documents in timestamp order, resuming
    from the last seen 'ts'.
    """
    latest_doc = None
    while latest_doc is None:
        time.sleep(interval)
        latest_doc = db['system.profile'].find_one()
    current_time = latest_doc['ts']
    while True:
        time.sleep(interval)
        # $gte re-reads the boundary doc; duplicates are possible at edges
        cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
        for doc in cursor:
            current_time = doc['ts']
            yield doc | Tails the system.profile collection |
def ffill(arr, dim=None, limit=None):
    """Forward-fill missing values along *dim* using bottleneck.push.

    *limit* caps the number of consecutive NaNs filled; by default the
    whole axis length is allowed (fill everything).
    """
    import bottleneck as bn
    axis = arr.get_axis_num(dim)
    _limit = limit if limit is not None else arr.shape[axis]
    return apply_ufunc(bn.push, arr,
                       dask='parallelized',
                       keep_attrs=True,
                       output_dtypes=[arr.dtype],
                       kwargs=dict(n=_limit, axis=axis)).transpose(*arr.dims) | forward fill missing values |
def walk_json(d, func):
    """Recursively apply *func* to every leaf of nested structure *d*.

    Mappings come back as OrderedDict, lists as lists; any other value
    is treated as a leaf and replaced by ``func(value)``.
    """
    if isinstance(d, Mapping):
        return OrderedDict(
            (key, walk_json(child, func)) for key, child in d.items())
    if isinstance(d, list):
        return [walk_json(child, func) for child in d]
    return func(d)
def aead_filename(aead_dir, key_handle, public_id):
    """Return the AEAD path for *public_id*: dir/key_handle/<id pairs>/id."""
    parts = [aead_dir, key_handle] + pyhsm.util.group(public_id, 2) + [public_id]
    return os.path.join(*parts) | Return the filename of the AEAD for this public_id. |
def init_limit(self, key, lower=None, upper=None, limit=False):
    """Check ``self.__dict__[key]`` against per-element bounds.

    Logs an ERROR for every element above *upper* or below *lower*;
    when *limit* is True the offending element is clamped to the bound.
    """
    # agtb/altb return per-element flags (0. means "within bound")
    above = agtb(self.__dict__[key], upper)
    for idx, item in enumerate(above):
        if item == 0.:
            continue
        maxval = upper[idx]
        self.log(
            '{0} <{1}.{2}> above its maximum of {3}.'.format(
                self.name[idx], self._name, key, maxval), ERROR)
        if limit:
            self.__dict__[key][idx] = maxval
    below = altb(self.__dict__[key], lower)
    for idx, item in enumerate(below):
        if item == 0.:
            continue
        minval = lower[idx]
        self.log(
            '{0} <{1}.{2}> below its minimum of {3}.'.format(
                self.name[idx], self._name, key, minval), ERROR)
        if limit:
            self.__dict__[key][idx] = minval | check if data is within limits. reset if violates |
def generate_specifications(self, count=1):
    """Return a mapping of AR number -> keyword-indexed specification.

    For each of *count* slots, looks up the copy-from source; missing
    sources yield an empty spec, otherwise the source's results ranges
    are keyed by their "keyword" field.
    """
    out = {}
    copy_from = self.get_copy_from()
    for arnum in range(count):
        source = copy_from.get(arnum)
        if source is None:
            out[arnum] = {}
            continue
        results_range = source.getResultsRange()
        specification = {}
        for rr in results_range:
            specification[rr.get("keyword")] = rr
        out[arnum] = specification
    return out | Returns a mapping of count -> specification |
def squeeze(attrs, inputs, proto_obj):
    """ONNX->MXNet squeeze: drop size-1 dims; renames 'axes' to 'axis'."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'axes' : 'axis'})
    return 'squeeze', new_attrs, inputs | Remove single-dimensional entries from the shape of a tensor. |
def permute(self, ba):
    """Permute the bitarray *ba* in place according to ``self.mapping``.

    ``ba[i]`` receives the value previously at ``ba[self.mapping[i]]``.
    The (mutated) array is also returned for convenience.
    """
    snapshot = ba.copy()
    # range() replaces the Python-2-only xrange(); works on both versions
    for dst in range(len(self.mapping)):
        ba[dst] = snapshot[self.mapping[dst]]
    return ba
def field_factory(base_class):
    """Wrap *base_class* with ``TranslationField`` and return the new class.

    The subclass is named ``Translation<BaseName>`` for readable reprs.
    """
    from .fields import TranslationField
    class TranslationFieldField(TranslationField, base_class):
        pass
    TranslationFieldField.__name__ = "Translation%s" % base_class.__name__
    return TranslationFieldField | Takes a field base class and wrap it with ``TranslationField`` class. |
def nmltostring(nml):
    """Convert a dict of dicts representing a Fortran namelist into a string.

    Each top-level key becomes an ``&group`` block terminated by ``/``;
    list/tuple values are comma-joined, strings are single-quoted,
    everything else is ``str()``'d. Raises ValueError if *nml* is not a
    dict.
    """
    if not isinstance(nml, dict):
        raise ValueError("nml should be a dict !")
    blocks = []
    for group_name, group in nml.items():
        namelist = ["&" + group_name]
        for k, v in group.items():
            if isinstance(v, (list, tuple)):
                namelist.append(k + " = " + ",".join(map(str, v)) + ",")
            elif is_string(v):
                namelist.append(k + " = '" + str(v) + "',")
            else:
                namelist.append(k + " = " + str(v) + ",")
        namelist.append("/")
        blocks.append("\n".join(namelist) + "\n")
    # single join instead of quadratic `curstr = curstr + ...` accumulation
    return "".join(blocks)
def elapsed(self):
    """Seconds from start to the most recent timing entry.

    Returns 0.0 when timing has not started.
    """
    if not self.started or self._start_time is None:
        return 0.0
    latest = self._timing_data[-1][0]
    return latest - self._start_time
def task_stop(self, **kw):
    """Mark the task selected by *kw* as stopped and return its fresh state."""
    id, task = self.get_task(**kw)
    self._execute(id, 'stop')
    return self.get_task(uuid=task['uuid'])[1] | Marks a task as stopped. |
def cmd_xcode(self, *args):
    """Open the generated Xcode project for this app."""
    app_name = self.buildozer.namify(self.buildozer.config.get('app',
        'package.name'))
    app_name = app_name.lower()
    # was `ios_dir = ios_dir = join(...)` -- duplicated assignment removed
    ios_dir = join(self.buildozer.platform_dir, 'kivy-ios')
    self.buildozer.cmd('open {}.xcodeproj'.format(
        app_name), cwd=join(ios_dir, '{}-ios'.format(app_name)))
def cleanup_lib(self):
    """Unload the previously loaded shared library.

    Skipped under OpenMP: dlclose-ing an OpenMP runtime can crash the
    process, so the handle is deliberately leaked in that case.
    """
    if not self.using_openmp:
        logging.debug('unloading shared library')
        _ctypes.dlclose(self.lib._handle) | unload the previously loaded shared library |
def saccadic_momentum_effect(durations, forward_angle,
                             summary_stat=nanmean):
    """Compute the mean fixation duration per forward-angle bin.

    NOTE(review): `e_angle` (the bin edges) is not a parameter -- it is
    resolved as a module-level global; confirm it is defined where this
    function lives, or it likely should be an argument.
    """
    durations_per_da = np.nan * np.ones((len(e_angle) - 1,))
    for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])):
        # select non-NaN durations whose angle falls in [bo, b1)
        idx = (
            bo <= forward_angle) & (
            forward_angle < b1) & (
            ~np.isnan(durations))
        durations_per_da[i] = summary_stat(durations[idx])
    return durations_per_da | Computes the mean fixation duration at forward angles. |
def _sample(probability_vec):
return map(int,
numpy.random.random(probability_vec.size) <= probability_vec) | Return random binary string, with given probabilities. |
def _remove_references(self, removed_part):
    """Remove labels pointing to *removed_part* and vice versa.

    Detaches the part from its parent, strips it from referrers outside
    its own subtree, and for Compounds recursively cleans the parts its
    labels point to, dropping labels whose targets live outside the
    removed subtree.
    """
    removed_part.parent = None
    referrers_to_remove = set()
    for referrer in removed_part.referrers:
        # referrers inside the removed subtree can keep their labels
        if removed_part not in referrer.ancestors():
            for label, referred_part in list(referrer.labels.items()):
                if referred_part is removed_part:
                    del referrer.labels[label]
                    referrers_to_remove.add(referrer)
    removed_part.referrers -= referrers_to_remove
    labels_to_delete = []
    if isinstance(removed_part, Compound):
        for label, part in list(removed_part.labels.items()):
            if not isinstance(part, Compound):
                # label maps to an iterable of parts: clean each one
                for p in part:
                    self._remove_references(p)
            elif removed_part not in part.ancestors():
                try:
                    part.referrers.discard(removed_part)
                except KeyError:
                    pass
            else:
                labels_to_delete.append(label)
    for label in labels_to_delete:
        removed_part.labels.pop(label, None) | Remove labels pointing to this part and vice versa. |
def _load_version(cls, state, version):
    """Load a previously saved SoundClassifier instance from *state*.

    Rebuilds the feature extractor and the custom MXNet classifier
    network (defaulting layer sizes to [100, 100] for old saves), then
    restores the saved network parameters.
    """
    from ._audio_feature_extractor import _get_feature_extractor
    from .._mxnet import _mxnet_utils
    state['_feature_extractor'] = _get_feature_extractor(state['feature_extractor_name'])
    num_classes = state['num_classes']
    num_inputs = state['_feature_extractor'].output_length
    # saves from before custom_layer_sizes existed fall back to the default
    if 'custom_layer_sizes' in state:
        custom_layer_sizes = list(map(int, state['custom_layer_sizes']))
    else:
        custom_layer_sizes = [100, 100]
    state['custom_layer_sizes'] = custom_layer_sizes
    net = SoundClassifier._build_custom_neural_network(num_inputs, num_classes, custom_layer_sizes)
    net_params = net.collect_params()
    ctx = _mxnet_utils.get_mxnet_context()
    _mxnet_utils.load_net_params_from_state(net_params, state['_custom_classifier'], ctx=ctx)
    state['_custom_classifier'] = net
    return SoundClassifier(state) | A function to load a previously saved SoundClassifier instance. |
def session(self):
    """Return a (lazily created, cached) SQLAlchemy session.

    The sessionmaker and session are built on first use; when a schema
    is configured, an after_begin listener pins search_path to it
    (PostgreSQL).
    """
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.event import listen
    if not self.Session:
        self.Session = sessionmaker(bind=self.engine)
    if not self._session:
        self._session = self.Session()
        if self._schema:
            def after_begin(session, transaction, connection):
                session.execute('SET search_path TO {}'.format(self._schema))
            listen(self._session, 'after_begin', after_begin)
    return self._session | Return a SqlAlchemy session. |
def analyses_info(self):
    """Load analyses metadata from the analyses folder into ``self.info``.

    Reads index.yaml (watched for autoreload), salts the version in
    debug mode so caches bust, and fills a missing description from the
    folder's README.
    """
    f_config = os.path.join(self.analyses_path, 'index.yaml')
    tornado.autoreload.watch(f_config)
    with io.open(f_config, 'r', encoding='utf8') as f:
        config = yaml.safe_load(f)
    self.info.update(config)
    if self.debug:
        # random suffix defeats client-side caching while developing
        self.info['version'] += '.debug-{:04X}'.format(
            int(random.random() * 0xffff))
    readme = Readme(self.analyses_path)
    if self.info['description'] is None:
        self.info['description'] = readme.text.strip()
    self.info['description_html'] = readme.html | Add analyses from the analyses folder. |
def _ask(question, default=None, data_type='str', show_hint=False):
    """Interactively ask the user for a value.

    :param question: prompt text.
    :param default: value returned on empty input.
    :param data_type: 'bool', 'str'/'unicode', or 'int'.
    :param show_hint: include default and type in the prompt.

    Booleans accept Y/J/N/1/0 (J = German "ja"); empty input returns the
    default.
    """
    data = default
    if data_type == 'bool':
        data = None
        default_string = "Y" if default else "N"
        while data not in ('Y', 'J', 'N', '1', '0'):
            data = input("%s? [%s]: " % (question, default_string)).upper()
            if data == '':
                return default
        return data in ('Y', 'J', '1')
    elif data_type in ('str', 'unicode'):
        if show_hint:
            msg = "%s? [%s] (%s): " % (question, default, data_type)
        else:
            msg = question
        data = input(msg)
        if len(data) == 0:
            data = default
    elif data_type == 'int':
        if show_hint:
            msg = "%s? [%s] (%s): " % (question, default, data_type)
        else:
            msg = question
        data = input(msg)
        if len(data) == 0:
            # NOTE(review): int(default) fails when default is None -- confirm
            # callers always pass an int default for data_type='int'
            data = int(default)
        else:
            data = int(data)
    return data | Interactively ask the user for data |
def slicenet_params1_tiny():
    """Shrunk slicenet hparams for fast local runs (small net, short warmup)."""
    hparams = slicenet_params1()
    hparams.attention_type = "simple"
    hparams.separability = 0
    hparams.hidden_size = 128
    hparams.num_hidden_layers = 2
    hparams.batch_size = 512
    hparams.learning_rate_warmup_steps = 200
    return hparams | Version for fast local runs. |
def skew_normal_expval(mu, tau, alpha):
    """Expectation of a skew-normal RV: mu + sqrt(2/(pi*tau)) * delta,
    where delta = alpha / sqrt(1 + alpha**2)."""
    delta = alpha / np.sqrt(1. + alpha ** 2)
    scale = np.sqrt(2 / pi / tau)
    return mu + scale * delta
def raw(self, from_, to, body):
    """Send a raw MIME message via the /raw endpoint.

    *to* must be an iterable of recipients; a bare string is rejected
    with TypeError. Returns the decoded JSON response.
    """
    if isinstance(to, string_types):
        raise TypeError('"to" parameter must be enumerable')
    return self._session.post('{}/raw'.format(self._url), json={
        'from': from_,
        'to': to,
        'body': body,
    }).json() | Send a raw MIME message. |
def parse_sm_config():
    """Parse SageMaker hyperparameter config; return False if absent.

    Reads /opt/ml/input/config/hyperparameters.json, strips the JSON
    string quoting, forwards a wandb_api_key into the environment, and
    best-effort casts remaining values to int/float.
    """
    sagemaker_config = "/opt/ml/input/config/hyperparameters.json"
    if os.path.exists(sagemaker_config):
        conf = {}
        conf["sagemaker_training_job_name"] = os.getenv('TRAINING_JOB_NAME')
        for k, v in six.iteritems(json.load(open(sagemaker_config))):
            cast = v.strip('"')
            if os.getenv("WANDB_API_KEY") is None and k == "wandb_api_key":
                os.environ["WANDB_API_KEY"] = cast
            else:
                # NOTE(review): these regexes accept interior '-'/'.'
                # (e.g. "1-2"), which would make int()/float() raise --
                # confirm upstream values are plain numerals
                if re.match(r'^[-\d]+$', cast):
                    cast = int(cast)
                elif re.match(r'^[-.\d]+$', cast):
                    cast = float(cast)
                conf[k] = cast
        return conf
    else:
        return False | Attempts to parse SageMaker configuration returning False if it can't find it |
def iter_links_element_text(cls, element):
    """Yield the element's text content as a LinkInfo, if any text exists."""
    if element.text:
        link_type = identify_link_type(element.text)
        yield LinkInfo(
            element=element, tag=element.tag, attrib=None,
            link=element.text,
            inline=False, linked=True,
            base_link=None,
            value_type='plain',
            link_type=link_type
        ) | Get the element text as a link. |
def find_focusable(node):
    """Return the first focusable window within *node*'s tree.

    Leaf nodes (no children) are focusable. Otherwise the focus chain is
    followed downward; a non-leaf node without a focus entry yields None
    (implicit return).
    """
    if not node.children:
        return node
    if node.focus:
        first_focus = node.focus[0]
        return find_focusable(node.children_dict[first_focus])
def file_containing_import(import_path, import_root):
    """Find the file that might contain *import_path*.

    Searches the import root (its directory when given a file) plus the
    cached stdlib paths, trying the longest dotted prefix first; package
    dirs resolve to their __init__.py. Returns None when nothing matches.
    """
    if not _import_paths:
        load_stdlib()
    if os.path.isfile(import_root):
        import_root = os.path.dirname(import_root)
    search_paths = [import_root] + _import_paths
    module_parts = import_path.split('.')
    # longest prefix first: `a.b.c` may be attribute access on module a.b
    for i in range(len(module_parts), 0, -1):
        module_path = os.path.join(*module_parts[:i])
        for sp in search_paths:
            p = os.path.join(sp, module_path)
            if os.path.isdir(p):
                return os.path.join(p, '__init__.py')
            elif os.path.isfile(p + '.py'):
                return p + '.py'
    return None | Finds the file that might contain the import_path. |
def process_role(ctx, param, value):
    """Click parameter callback: resolve *value* to a Role.

    Raises click.BadParameter when no role with that name exists.
    """
    role = Role.query.filter(Role.name == value).first()
    if not role:
        # BadParameter takes a single message; the original passed `value`
        # as a stray second positional arg (the ctx slot) and the '%s'
        # placeholder was never interpolated.
        raise click.BadParameter('Role with name \'%s\' not found.' % value)
    return role
def _glyph_for_complex_pattern(self, pattern):
    """Build the SBGN glyph(s) for a PySB ComplexPattern.

    Single-monomer patterns return the bare monomer glyph; multi-monomer
    patterns are wrapped in a 'complex' glyph whose member glyph ids are
    prefixed with the complex's agent id.
    """
    monomer_glyphs = []
    for monomer_pattern in pattern.monomer_patterns:
        glyph = self._glyph_for_monomer_pattern(monomer_pattern)
        monomer_glyphs.append(glyph)
    if len(monomer_glyphs) > 1:
        # matches_key influences agent id generation downstream
        pattern.matches_key = lambda: str(pattern)
        agent_id = self._make_agent_id(pattern)
        complex_glyph = \
            emaker.glyph(emaker.bbox(**self.complex_style),
                         class_('complex'), id=agent_id)
        for glyph in monomer_glyphs:
            glyph.attrib['id'] = agent_id + glyph.attrib['id']
            complex_glyph.append(glyph)
        return complex_glyph
    return monomer_glyphs[0] | Add glyph and member glyphs for a PySB ComplexPattern. |
def _imm_repr(self):
    """Default repr for an immutable: ``Type(k=v, ...)``.

    A ``*`` after the type name marks a non-persistent (transient) object.
    """
    return (type(self).__name__
            + ('(' if _imm_is_persist(self) else '*(')
            + ', '.join([k + '=' + str(v) for (k,v) in six.iteritems(imm_params(self))])
            + ')') | The default representation function for an immutable object. |
def _set_scatter_signature(self):
self._scatter_signature = (self.thet0, self.thet, self.phi0, self.phi,
self.alpha, self.beta, self.orient) | Mark the amplitude and scattering matrices as up to date. |
def match(self, *args, **kwargs):
    """Check whether the given call arguments satisfy this expectation.

    Short-circuits on the any-args flag; otherwise defers to the
    arguments rule's validation.
    """
    if self._any_args:
        return self._any_args
    return self._arguments_rule.validate(*args, **kwargs)
def _query(self, filename):
    """Return {'size': n} for *filename* on the remote, or -1 if missing."""
    log.Info('Querying size of %s' % filename)
    from jottalib.JFS import JFSNotFoundError, JFSIncompleteFile
    remote_path = posixpath.join(self.folder.path, filename)
    try:
        remote_file = self.client.getObject(remote_path)
    except JFSNotFoundError:
        return {'size': -1}
    return {
        'size': remote_file.size,
    } | Get size of filename |
def _build_matrix(p, q, deriv):
A = [([1 for _ in range(-p, q+1)])]
for i in range(1, p + q + 1):
A.append([j**i for j in range(-p, q+1)])
return np.array(A) | Constructs the equation system matrix for the finite difference coefficients |
def _setup_log_prefix(self, plugin_id=''):
self._logger_console_fmtter.prefix = '%s: ' % plugin_id
self._logger_console_fmtter.plugin_id = plugin_id
self._logger_file_fmtter.prefix = '*'
self._logger_file_fmtter.plugin_id = '%s: ' % plugin_id | Setup custom warning notification. |
def encode_metadata(self):
    """Return the Upload-Metadata entries per the Tus protocol.

    Each entry is "<key> <base64(value)>"; keys that are empty or
    contain whitespace/commas are rejected with ValueError.
    """
    encoded_list = []
    for key, value in iteritems(self.metadata):
        key_str = str(key)
        # Tus forbids empty keys and keys with spaces/commas
        if re.search(r'^$|[\s,]+', key_str):
            msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
            raise ValueError(msg.format(key_str))
        value_bytes = b(value)
        encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
    return encoded_list | Return list of encoded metadata as defined by the Tus protocol. |
def make(parser):
    """Configure *parser* for gathering auth keys when provisioning nodes.

    Adds one-or-more monitor host positionals and binds the gatherkeys
    handler as the subcommand function.
    """
    parser.add_argument(
        'mon',
        metavar='HOST',
        nargs='+',
        help='monitor host to pull keys from',
    )
    parser.set_defaults(
        func=gatherkeys,
    ) | Gather authentication keys for provisioning new nodes. |
def decrypt(secret, modN, d, blockSize):
    """Reverse of encrypt: RSA-decrypt each block and rebuild the string.

    Applies modular exponentiation (block^d mod N) per block, regroups
    the numeric blocks, and converts the number list back to text.
    """
    numBlocks = [modExp(blocks, d, modN) for blocks in secret]
    numList = blocks2numList(numBlocks, blockSize)
    return numList2string(numList) | reverse function of encrypt |
def stop_all(self):
    """Stop every live service instance, logging (not raising) failures."""
    for alias, service in self._service_objects.items():
        if service.is_alive:
            with expects.expect_no_raises(
                    'Failed to stop service "%s".' % alias):
                service.stop() | Stops all active service instances. |
def unmarshal_event(self, data: str, response_type):
    """Decode a K8s watch event from JSON string *data*.

    Keeps the raw object under 'raw_object'; unless the event is an
    error, deserializes 'object' into *response_type* (when given) and
    tracks the latest resourceVersion for resuming the watch.
    """
    js = json.loads(data)
    js['raw_object'] = js['object']
    if js['type'].lower() == 'error':
        return js
    if response_type is not None:
        js['object'] = self._api_client.deserialize(
            response=SimpleNamespace(data=json.dumps(js['raw_object'])),
            response_type=response_type
        )
        # resourceVersion may live on a typed object or a plain dict
        if hasattr(js['object'], 'metadata'):
            self.resource_version = js['object'].metadata.resource_version
        elif (isinstance(js['object'], dict) and
                'metadata' in js['object'] and
                'resourceVersion' in js['object']['metadata']):
            self.resource_version = js['object']['metadata']['resourceVersion']
    return js | Return the K8s response `data` in JSON format. |
def config_from_prefix(prefix):
    """Translate a zmq *prefix* string into build-config settings.

    'default'/'auto'/'' -> system detection; 'bundled'/'extension' ->
    build the bundled libzmq; anything else is taken as an install prefix.
    """
    lowered = prefix.lower()
    if lowered in ('default', 'auto', ''):
        return {'zmq_prefix': '',
                'libzmq_extension': False,
                'no_libzmq_extension': False}
    if lowered in ('bundled', 'extension'):
        return {'zmq_prefix': '',
                'libzmq_extension': True,
                'no_libzmq_extension': False}
    return {'zmq_prefix': prefix,
            'libzmq_extension': False,
            'no_libzmq_extension': True}
def _filter_queryset(self, perms, queryset):
    """Filter *queryset* to objects the request's user holds *perms* on.

    Falls back to AnonymousUser when there is no request.
    """
    user = self.request.user if self.request else AnonymousUser()
    return get_objects_for_user(user, perms, queryset) | Filter object objects by permissions of user in request. |
def next_id(self):
    """Return a unique message ID and advance the internal counter."""
    current = self._next_id
    self._next_id = current + 1
    return self._id_template.format(current)
def on_response(self, msg: Dict[str, str]) -> None:
    """Resolve the pending task matching a browser response message.

    Ignores messages without data; the task keyed by 'reqid' gets the
    data as its result unless it was already cancelled or done.
    """
    payload = msg.get('data', False)
    if not payload:
        return
    task = self.__tasks.pop(msg.get('reqid'), False)
    if task and not task.cancelled() and not task.done():
        task.set_result(msg.get('data'))
def isargument(self, node):
    """Check whether *node* aliases a function parameter.

    True when the node's id maps to at least one Name with Param context.
    NOTE(review): ``ast.Param`` only exists on Python 2 -- confirm this
    module targets py2 ASTs.
    """
    try:
        node_id, _ = self.node_to_id(node)
        return (node_id in self.name_to_nodes and
                any([isinstance(n, ast.Name) and
                     isinstance(n.ctx, ast.Param)
                     for n in self.name_to_nodes[node_id]]))
    except UnboundableRValue:
        return False | checks whether node aliases to a parameter. |
def _process_out_of_bounds(self, value, start, end):
    """Clip *value* into [start, end].

    datetime64 values are normalized to datetime for comparison (numeric
    bounds are converted to timestamps first); the original start/end
    objects are returned on clipping, not the converted forms.
    """
    if isinstance(value, np.datetime64):
        v = dt64_to_dt(value)
        if isinstance(start, (int, float)):
            start = convert_timestamp(start)
        if isinstance(end, (int, float)):
            end = convert_timestamp(end)
        s, e = start, end
        if isinstance(s, np.datetime64):
            s = dt64_to_dt(s)
        if isinstance(e, np.datetime64):
            e = dt64_to_dt(e)
    else:
        v, s, e = value, start, end
    if v < s:
        value = start
    elif v > e:
        value = end
    return value | Clips out of bounds values |
def disconnect(self):
    """Disconnect from the event stream.

    Signals the worker thread to stop, waits for it to exit, then
    resets the kill flag so the stream can be restarted.
    """
    _LOGGING.debug('Disconnecting from stream: %s', self.name)
    self.kill_thrd.set()
    self.thrd.join()
    _LOGGING.debug('Event stream thread for %s is stopped', self.name)
    self.kill_thrd.clear() | Disconnect from event stream. |
def update_entitlement(owner, repo, identifier, name, token, show_tokens):
    """Partially update an entitlement in a repository.

    Only the provided fields (name/token) are sent; API errors are
    re-raised by the catch helper and rate limits are honored. Returns
    the updated entitlement as a dict.
    """
    client = get_entitlements_api()
    data = {}
    if name is not None:
        data["name"] = name
    if token is not None:
        data["token"] = token
    with catch_raise_api_exception():
        data, _, headers = client.entitlements_partial_update_with_http_info(
            owner=owner,
            repo=repo,
            identifier=identifier,
            data=data,
            show_tokens=show_tokens,
        )
    ratelimits.maybe_rate_limit(client, headers)
    return data.to_dict() | Update an entitlement in a repository. |
def _parse_resource_declarations(cls, declarations, resource_map):
    """Parse the shared-resource declarations of a recipe.

    Each declaration dict must carry ``name`` and ``type``; optional
    ``description``/``autocreate`` are popped off and the remaining keys
    become the resource arguments (validated against the resource type's
    ARG_SCHEMA when present).

    Raises UnknownRecipeResourceType for unregistered types and
    RecipeFileInvalid for invalid arguments or duplicate names.
    """
    resources = {}
    for decl in declarations:
        name = decl.pop('name')
        typename = decl.pop('type')
        desc = decl.pop('description', None)
        autocreate = decl.pop('autocreate', False)
        args = decl
        res_type = resource_map.get(typename)
        if res_type is None:
            raise UnknownRecipeResourceType("Could not find shared resource type", type=typename, name=name)
        if hasattr(res_type, "ARG_SCHEMA"):
            try:
                args = res_type.ARG_SCHEMA.verify(args)
            except ValidationError as exc:
                # "declaration" was previously misspelled "declarttion"
                raise RecipeFileInvalid("Recipe file resource declaration has invalid parameters", resource=name, error_message=exc.msg, **exc.params)
        if name in resources:
            raise RecipeFileInvalid("Attempted to add two shared resources with the same name", name=name)
        # reuse the already-fetched res_type instead of a second map lookup
        res = ResourceDeclaration(name, res_type, args, autocreate, desc, typename)
        resources[name] = res
    return resources
def find_genome_length(self):
    """Store each strain's genome length (sum of its contig lengths)."""
    for sample in self.metadata:
        record = sample[self.analysistype]
        record.genome_length = sum(record.contig_lengths)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.