code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
# Compile a temporary GSUB table from the current feature file.
# Caches the compiled table on the compiler object (as `_gsub`) so repeated
# calls during one compilation pass reuse the same table.
def compileGSUB(self):
from ufo2ft.util import compileGSUB
compiler = self.context.compiler
if compiler is not None:
# Reuse a previously compiled table if the compiler already has one.
if hasattr(compiler, "_gsub"):
return compiler._gsub
glyphOrder = compiler.ttFont.getGlyphOrder()
else:
# No compiler: fall back to a sorted glyph order from the UFO font.
glyphOrder = sorted(self.context.font.keys())
gsub = compileGSUB(self.context.feaFile, glyphOrder)
if compiler and not hasattr(compiler, "_gsub"):
compiler._gsub = gsub
return gsub | Compile a temporary GSUB table from the current feature file. |
# Create an initial buildozer.spec in the current directory.
# Exits with status 1 rather than overwriting an existing spec file.
def cmd_init(self, *args):
if exists('buildozer.spec'):
print('ERROR: You already have a buildozer.spec file.')
exit(1)
# Copy the bundled template next to this module into the working directory.
copyfile(join(dirname(__file__), 'default.spec'), 'buildozer.spec')
print('File buildozer.spec created, ready to customize!') | Create a initial buildozer.spec in the current directory |
# Return whether the URL can be fetched according to the cached robots.txt
# parser for the URL's site. Assumes the parser for this site is already in
# self._parsers (KeyError otherwise) — TODO confirm caller guarantees this.
def can_fetch(self, url_info: URLInfo, user_agent: str):
key = self.url_info_key(url_info)
parser = self._parsers[key]
return parser.is_allowed(user_agent, url_info.url) | Return whether the URL can be fetched. |
def autosave_all(self):
    """Autosave every file currently opened in the editor stack."""
    total = self.stack.get_stack_count()
    position = 0
    while position < total:
        self.autosave(position)
        position += 1
# Renders the scene once every refresh (OpenVR + OpenGL).
# Blocks on the compositor for pose data, clears both the default and the
# offscreen framebuffer, then submits the same texture for both eyes.
def display(self):
"Renders the scene once every refresh"
self.compositor.waitGetPoses(self.poses, openvr.k_unMaxTrackedDeviceCount, None, 0)
hmd_pose0 = self.poses[openvr.k_unTrackedDeviceIndex_Hmd]
if not hmd_pose0.bPoseIsValid:
return
# NOTE(review): `if True:` is dead scaffolding (likely a debug toggle) —
# the branch always runs; consider removing the guard.
if True:
glClearColor(0.8, 0.4, 0.4, 0)
glClear(GL_COLOR_BUFFER_BIT)
glFlush()
glBindFramebuffer(GL_FRAMEBUFFER, self.fb)
glClearColor(0.8, 0.4, 0.4, 0)
glClear(GL_COLOR_BUFFER_BIT)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
# Same texture submitted for both eyes — presumably a mono placeholder;
# TODO confirm stereo rendering is intentional elsewhere.
self.compositor.submit(openvr.Eye_Left, self.texture)
self.compositor.submit(openvr.Eye_Right, self.texture)
glBindFramebuffer(GL_FRAMEBUFFER, 0) | Renders the scene once every refresh |
def generative(func):
    """Mark an instance method as generative.

    The wrapped method runs against a shallow copy of the instance, so the
    original object is never mutated and call chains can branch freely.
    """
    def wrap(inst, *args, **kw):
        duplicate = type(inst).__new__(type(inst))
        duplicate.__dict__ = dict(inst.__dict__)
        return func(duplicate, *args, **kw)
    return update_wrapper(wrap, func)
# Push portal-visible topic names to Transifex as a PO resource.
# Builds a gettext catalog from topic names, serializes it, and uploads it
# under the TOPIC_STRINGS_SLUG resource.
def push(self):
tx = Tx(self.tx_project_slug)
template = babel.messages.catalog.Catalog()
for topic in self.desk.topics():
# Only topics shown in the portal are translated.
if topic.show_in_portal:
template.add(topic.name)
template_po = StringIO()
babel.messages.pofile.write_po(template_po, template)
tx.create_or_update_resource(
self.TOPIC_STRINGS_SLUG,
DEFAULT_SOURCE_LANGUAGE,
"Help Center Topics",
template_po.getvalue(),
i18n_type='PO',
project_slug=self.tx_project_slug,
) | Push topics to Transifex. |
def restore_row(self, row, schema):
    """Restore a row fetched from SQL by casting each cell to its schema type.

    PostgreSQL array/object columns are returned natively by the driver and
    are therefore left untouched.
    """
    restored = list(row)
    is_postgres = self.__dialect == 'postgresql'
    for position, field in enumerate(schema.fields):
        if is_postgres and field.type in ('array', 'object'):
            continue
        restored[position] = field.cast_value(restored[position])
    return restored
# Update consumer offsets to explicit per-partition positions, then seek to
# make the consumer adopt them.
def update_consumer_offsets(self, partition_offsets):
self.logger.debug("Updating consumer offsets to: %s", partition_offsets)
for partition, offset in partition_offsets.items():
self.consumer.offsets[partition] = offset
# seek(0, 1): relative seek of 0 from current — presumably forces the
# consumer to re-read its offsets dict; TODO confirm client semantics.
self.consumer.seek(0, 1) | Update consumer offsets to explicit positions. |
# Snapshot the processing progress-bar state (steps, value, label, flag)
# into self.__processing_state so it can be restored later.
def __store_processing_state(self):
steps = self.Application_Progress_Status_processing.Processing_progressBar.maximum()
value = self.Application_Progress_Status_processing.Processing_progressBar.value()
message = self.Application_Progress_Status_processing.Processing_label.text()
state = self.__is_processing
self.__processing_state = steps, value, message, state | Stores the processing state. |
def _gen_keys_from_multicol_key(key_multicol, n_keys):
    """Generate single-column key names from a multicolumn key.

    Produces ``<key>001of00N`` ... ``<key>N of N`` style names, 1-based and
    zero-padded to three digits.
    """
    pattern = '{}{:03}of{:03}'
    return [pattern.format(key_multicol, ordinal, n_keys)
            for ordinal in range(1, n_keys + 1)]
def cache_key(self, request, method=None):
    """Build the page-cache key from the absolute URI and request method.

    ``method`` overrides ``request.method`` when given.
    """
    if method is None:
        method = request.method
    uri = request.build_absolute_uri()
    return "bettercache_page:%s:%s" % (uri, method)
def _requirements_to_dict(rs):
    """Convert supported CWL requirements into a list of output dicts.

    Only the first occurrence of each requirement type is kept.
    """
    out = []
    seen = set()

    def emit(kind, value):
        # De-duplicate by requirement type, first-wins.
        if kind not in seen:
            seen.add(kind)
            out.append({"requirement_type": kind, "value": value})

    for req in rs:
        if req["class"] == "DockerRequirement":
            if "docker" not in seen:
                emit("docker", req["dockerImageId"])
        elif req["class"] == "ResourceRequirement":
            if "coresMin" in req:
                emit("cpu", req["coresMin"])
            if "ramMin" in req:
                emit("memory", "%s MB" % req["ramMin"])
            if "tmpdirMin" in req:
                emit("disks", "local-disk %s HDD" % req["tmpdirMin"])
    return out
# Enable or disable the dialog's OK button based on form completeness.
def ok_button_status(self):
if not self.layer.currentLayer():
self.button_box.button(
QtWidgets.QDialogButtonBox.Ok).setEnabled(False)
# NOTE(review): `len(...) >= 0` is always true, so the output-form check
# is a no-op — probably meant `> 0`; confirm intent before changing.
elif (self.hazard_class_form.count() > 0
and self.layer.currentLayer().name()
and len(self.output_form.text()) >= 0):
self.button_box.button(
QtWidgets.QDialogButtonBox.Ok).setEnabled(True)
else:
self.button_box.button(
QtWidgets.QDialogButtonBox.Ok).setEnabled(False) | Function to enable or disable OK button. |
# Draws the invoice footer: placeholder bank/payment details rendered as a
# text object near the bottom of the page (reportlab canvas, cm units).
def draw_footer(canvas):
note = (
u'Bank Details: Street address, Town, County, POSTCODE',
u'Sort Code: 00-00-00 Account No: 00000000 (Quote invoice number).',
u'Please pay via bank transfer or cheque. All payments should be made in CURRENCY.',
u'Make cheques payable to Company Name Ltd.',
)
# Negative y: the canvas origin is presumably translated to the page top
# elsewhere — TODO confirm against the caller's coordinate setup.
textobject = canvas.beginText(1 * cm, -27 * cm)
for line in note:
textobject.textLine(line)
canvas.drawText(textobject) | Draws the invoice footer |
def _prep_mod_opts(self):
    """Return a copy of ``self.opts`` with the non-serializable bits removed.

    Currently only the ``logger`` key is stripped.
    """
    return {key: val
            for key, val in six.iteritems(self.opts)
            if key != 'logger'}
def describe_all(self, refresh=True):
    """Describe every table in the connected region.

    Returns a list with one description per table, in the order the
    connection reports them.
    """
    return [self.describe(table, refresh)
            for table in self.connection.list_tables()]
# Load each registered base model's Riak bucket into the bucket cache,
# keyed by bucket name. `super_context` is a module-level context object —
# presumably an admin/system context; TODO confirm.
def prepare_buckets(self):
for mdl in self.registry.get_base_models():
bucket = mdl(super_context).objects.adapter.bucket
self.buckets[bucket.name] = bucket | loads buckets to bucket cache. |
# Signal handler: delete the Relation object once its last Entity is removed.
# The check is deferred to transaction commit so the m2m state is final.
def delete_relation(sender, instance, **kwargs):
def process_signal(relation_id):
try:
relation = Relation.objects.get(pk=relation_id)
except Relation.DoesNotExist:
# Already gone — nothing to do.
return
if relation.entities.count() == 0:
relation.delete()
# Capture only the id (not the instance) to avoid stale object state at
# commit time.
transaction.on_commit(lambda: process_signal(instance.relation_id)) | Delete the Relation object when the last Entity is removed. |
def targetwords(self, index, targetwords, alignment):
    """Return the target words aligned to the source word at ``index``."""
    aligned = []
    for position in alignment[index]:
        aligned.append(targetwords[position])
    return aligned
# Filter Alien's repository data down to entries that match the running
# Slackware release and architecture (pkg vs pkg64 path component).
def rested_filter(name, location, size, unsize):
ver = slack_ver()
if _meta_.slack_rel == "current":
ver = "current"
path_pkg = "pkg"
if _meta_.arch == "x86_64":
path_pkg = "pkg64"
# Four parallel result lists, same order as the inputs.
(fname, flocation, fsize, funsize) = ([] for i in range(4))
for n, l, s, u in zip(name, location, size, unsize):
# Keep only packages whose path ends .../<path_pkg>/<ver>.
if path_pkg == l.split("/")[-2] and ver == l.split("/")[-1]:
fname.append(n)
flocation.append(l)
fsize.append(s)
funsize.append(u)
return [fname, flocation, fsize, funsize] | Filter Alien"s repository data |
# Thin pass-through: add a git remote for an instance's default vhost.
def attach(gandi, name, vhost, remote):
return gandi.paas.attach(name, vhost, remote) | Add remote for an instance's default vhost to the local git repository. |
def intersection(set1, set2):
    """Return the items of ``set1`` that also appear in ``set2``.

    Order and duplicates from ``set1`` are preserved; inputs may be any
    iterables (membership uses ``in``, so unhashable items are fine).
    """
    assert is_iterable(set1)
    assert is_iterable(set2)
    return [item for item in set1 if item in set2]
def _get(self, id):
    """Return the sorted keys and the value for a karma id."""
    value_sql = "SELECT karmavalue from karma_values where karmaid = ?"
    keys_sql = "SELECT karmakey from karma_keys where karmaid = ?"
    value = self.db.execute(value_sql, [id]).fetchall()[0][0]
    key_rows = self.db.execute(keys_sql, [id]).fetchall()
    keys = sorted(row[0] for row in key_rows)
    return keys, value
# Decode a XigtCorpus element from a stream of (event, element) pairs.
# Skips ahead to the first <igt> start (or corpus end), then lazily decodes
# the igts via a generator consumed by decode_xigtcorpus.
def default_decode(events, mode='full'):
event, elem = next(events)
# First element is the corpus root.
root = elem
while (event, elem.tag) not in [('start', 'igt'), ('end', 'xigt-corpus')]:
event, elem = next(events)
igts = None
if event == 'start' and elem.tag == 'igt':
igts = (
decode_igt(e)
for e in iter_elements(
'igt', events, root, break_on=[('end', 'xigt-corpus')]
)
)
xc = decode_xigtcorpus(root, igts=igts, mode=mode)
return xc | Decode a XigtCorpus element. |
# Find the partition owning `key`: hash the key, then binary-search the
# partition ring for the lower bound of that hash.
def _FindPartition(self, key):
hash_value = self.hash_generator.ComputeHash(key)
return self._LowerBoundSearch(self.partitions, hash_value) | Finds the partition from the byte array representation of the partition key. |
# Lazily build and cache replica info.
# NOTE(review): despite the docstring ("returns a list of replicas"), this
# builds a single dict and overwrites the same two keys on every loop
# iteration, so only the LAST replica survives. Likely it should append
# per-replica dicts to a list — confirm with callers before changing the
# return type.
def replicasResource(self):
if self._replicasResource is None:
self._replicasResource = {}
for replica in self.replicas:
self._replicasResource["replicaName"] = replica.name
self._replicasResource["replicaID"] = replica.guid
return self._replicasResource | returns a list of replices |
# Read an XML widget node and populate the tree item, recursing into
# "./child/object" descendants. Raises for classes missing from CLASS_MAP.
def populate_tree(self, master, parent, element,from_file=False):
data = WidgetDescr(None, None)
data.from_xml_node(element)
cname = data.get_class()
# Ensure ids are unique within the tree even if the XML repeats them.
uniqueid = self.get_unique_id(cname, data.get_id())
data.set_property('id', uniqueid)
if cname in builder.CLASS_MAP:
pwidget = self._insert_item(master, data,from_file=from_file)
xpath = "./child"
children = element.findall(xpath)
for child in children:
child_object = child.find('./object')
cwidget = self.populate_tree(pwidget, child, child_object,from_file=from_file)
return pwidget
else:
raise Exception('Class "{0}" not mapped'.format(cname)) | Reads xml nodes and populates tree item |
def from_string(self, value):
    """Parse a string into a list of floats or a list of (float, float) pairs.

    Accepts an optional surrounding ``[...]``. If the content starts with
    ``(`` it is parsed as comma-separated pairs, otherwise as plain floats.
    Raises ValueError for an odd pair count or fewer than two elements.
    """
    # Only strip brackets when both are present on the raw value.
    if value.startswith('[') and value.endswith(']'):
        text = value[1:-1].strip()
    else:
        text = value.strip()
    if text.startswith('('):
        tokens = text.split(',')
        if len(tokens) % 2 != 0:
            raise ValueError('not a valid list of pairs')
        result = [
            (float(tokens[k].strip()[1:].strip()),
             float(tokens[k + 1].strip()[:-1]))
            for k in range(0, len(tokens), 2)
        ]
    else:
        result = [float(piece) for piece in text.split(',')]
    if len(result) < 2:
        raise ValueError('invalid number of elements in list: ' + str(len(result)))
    return result
# Generate a span name from the gRPC server rpc call details, e.g.
# "/pkg.Service/Method" -> "<RECV_PREFIX>.pkg.Service.Method".
def _get_span_name(servicer_context):
# [1:] drops the leading '/' of the gRPC method path.
method_name = servicer_context._rpc_event.call_details.method[1:]
if isinstance(method_name, bytes):
method_name = method_name.decode('utf-8')
method_name = method_name.replace('/', '.')
return '{}.{}'.format(RECV_PREFIX, method_name) | Generates a span name based off of the gRPC server rpc_request_info |
# Split a properties mapping into plain properties and prefixed parameters.
# Returns (new_properties, parameters) where parameters is a sorted list of
# (name-without-prefix, value) tuples.
def _extract_parameters_from_properties(properties):
new_properties = {}
parameters = []
for key, value in six.iteritems(properties):
if key.startswith(_PARAMETER_PREFIX):
# NOTE: .replace removes the prefix wherever it occurs, not just at
# the start — harmless unless a key repeats the prefix internally.
parameters.append((key.replace(_PARAMETER_PREFIX, ""), value))
else:
new_properties[key] = value
return new_properties, sorted(parameters) | Extracts parameters from properties. |
def obj(x):
    """Two-dimensional Shubert function evaluated at ``x = (x0, x1)``.

    Each factor is sum_{j=1..5} j * cos((j+1)*xi + j); the result is the
    product of the two factors.
    """
    j = np.arange(1, 6)
    factors = [np.dot(j, np.cos((j + 1) * coord + j)) for coord in (x[0], x[1])]
    return factors[0] * factors[1]
# Return a dict mapping characterIds to their defining tags; a duplicate id
# is treated as an error in the SWF data.
def build_dictionary(self):
d = {}
for t in self.all_tags_of_type(DefinitionTag, recurse_into_sprites = False):
if t.characterId in d:
raise ValueError('illegal redefinition of character')
d[t.characterId] = t
return d | Return a dictionary of characterIds to their defining tags. |
# Ensure well-formedness of a key: must be a 2-tuple whose second element is
# a known datatype (module-level TYPES).
def _check_key(self, key):
if not len(key) == 2:
raise TypeError('invalid key: %r' % key)
elif key[1] not in TYPES:
raise TypeError('invalid datatype: %s' % key[1]) | Ensures well-formedness of a key. |
# Create bi-directional RDF triples for an object binding and recurse into
# its properties. Returns (subject, triples).
def triplify_object(binding):
triples = []
if binding.uri:
triples.append((binding.subject, RDF.type, binding.uri))
if binding.parent is not None:
parent = binding.parent.subject
if binding.parent.is_array:
# Arrays are transparent: link to the grandparent instead.
parent = binding.parent.parent.subject
triples.append((parent, binding.predicate, binding.subject))
# NOTE(review): if binding.reverse is set while binding.parent is None,
# `parent` is unbound here and this raises NameError — confirm whether
# reverse-only bindings can occur.
if binding.reverse is not None:
triples.append((binding.subject, binding.reverse, parent))
for prop in binding.properties:
_, prop_triples = triplify(prop)
triples.extend(prop_triples)
return binding.subject, triples | Create bi-directional bindings for object relationships. |
# Try platform conversion of list/tuple/ndarray input to a well-typed array
# (pandas internal helper; relies on lib.maybe_convert_objects).
def maybe_convert_platform(values):
if isinstance(values, (list, tuple)):
values = construct_1d_object_array_from_listlike(list(values))
if getattr(values, 'dtype', None) == np.object_:
# Unwrap pandas containers to their underlying ndarray first.
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(values)
return values | try to do platform conversion, allow ndarray or list here |
# CLI command: remove the raw unprocessed data for a dataset, forwarding
# parsed kwargs to the dataset handle's rm_raw.
def rm_raw(ctx, dataset, kwargs):
"removes the raw unprocessed data"
kwargs = parse_kwargs(kwargs)
data(dataset, **ctx.obj).rm_raw(**kwargs) | removes the raw unprocessed data |
# Replace-param-file read method: one-token lines set the parameter count,
# other lines create TargetParameter rows linked back to this file.
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
self.fileExtension = extension
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
if len(sline) == 1:
# Single token: the declared number of parameters (kept as str).
self.numParameters = sline[0]
else:
target = TargetParameter(targetVariable=sline[0],
varFormat=sline[1])
target.replaceParamFile = self | Replace Param File Read from File Method |
# Override: return the wrapped connection to the pool instead of closing it,
# and drop our reference so further use fails fast.
def close(self):
if self._wrapped_connection and self._pool:
logger.debug("Returning connection %s to pool %s" % (self._wrapped_connection, self._pool))
self._pool.putconn(self._wrapped_connection)
self._wrapped_connection = None | Override to return the connection to the pool rather than closing it. |
# Pass-through: switch this catalog and every provider session that supports
# it to the plenary asset view; sessions lacking the method are skipped.
def use_plenary_asset_view(self):
self._object_views['asset'] = PLENARY
for session in self._get_provider_sessions():
try:
session.use_plenary_asset_view()
except AttributeError:
pass | Pass through to provider AssetLookupSession.use_plenary_asset_view |
def lock_pid(self):
    """Get the pid stored in the lock file.

    Returns:
        The pid as an ``int``, or ``None`` when the lock file does not exist.
    """
    if not os.path.exists(self.lock_filename):
        return None
    # Context manager closes the handle promptly; the original
    # ``open(...).read()`` leaked the file descriptor to the GC.
    with open(self.lock_filename) as handle:
        return int(handle.read())
def _get_site_dummy_variables(self, vs30):
    """Return the Eurocode 8 site-class dummy variables (s_b, s_c, s_d).

    Each array matches ``vs30`` in shape/dtype and holds 1.0 where the
    corresponding velocity band applies, 0 elsewhere.
    """
    s_b = np.zeros_like(vs30)
    s_c = np.zeros_like(vs30)
    s_d = np.zeros_like(vs30)
    s_b[(vs30 >= 360.) & (vs30 < 800.)] = 1.0   # class B: stiff soil
    s_c[(vs30 >= 180.) & (vs30 < 360.)] = 1.0   # class C
    s_d[vs30 < 180] = 1.0                       # class D: soft soil
    return s_b, s_c, s_d
# Store the virtualenv metadata for the indicated installed_stuff, appending
# a timestamped JSON record to the cache under a file lock.
def store(self, installed_stuff, metadata, interpreter, options):
new_content = {
'timestamp': int(time.mktime(time.localtime())),
'installed': installed_stuff,
'metadata': metadata,
'interpreter': interpreter,
'options': options
}
logger.debug("Storing installed=%s metadata=%s interpreter=%s options=%s",
installed_stuff, metadata, interpreter, options)
with filelock(self.lockpath):
self._write_cache([json.dumps(new_content)], append=True) | Store the virtualenv metadata for the indicated installed_stuff. |
# Create a Column method delegating to the named JVM binary operator.
# `other` may be a Column (unwrap its _jc) or a plain literal.
def _bin_op(name, doc="binary operator"):
def _(self, other):
jc = other._jc if isinstance(other, Column) else other
njc = getattr(self._jc, name)(jc)
return Column(njc)
_.__doc__ = doc
return _ | Create a method for given binary operator |
# Extract email addresses from text using the module-level extraction and
# validation regexes; input is lowercased first.
def extract_email(text):
result = list()
# _regex_extract_email presumably has capture groups, hence the nested
# iteration over each match tuple — TODO confirm pattern shape.
for tp in re.findall(_regex_extract_email, text.lower()):
for email in tp:
if re.match(_regex_validate_email, email):
result.append(email)
return result | Extract email from text. |
# Return all active analysis categories as a DisplayList of (UID, Title),
# sorted by sortable_title via the bika_setup_catalog.
def getAnalysisCategories(self):
bsc = api.get_tool("bika_setup_catalog")
cats = []
for st in bsc(portal_type="AnalysisCategory",
is_active=True,
sort_on="sortable_title"):
cats.append((st.UID, st.Title))
return DisplayList(cats) | Return all available analysis categories |
# Parse a matching TAP plan line into a Plan, or Unknown when the plan
# carries a non-skip directive.
def _parse_plan(self, match):
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are valid on a plan line.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) | Parse a matching plan line. |
# Return the predicted ADEV of this noise type at the given tau:
# sqrt(prefactor^2 * tau^c) with c from the noise model.
def adev(self, tau0, tau):
prefactor = self.adev_from_qd(tau0=tau0, tau=tau)
c = self.c_avar()
avar = pow(prefactor, 2)*pow(tau, c)
return np.sqrt(avar) | return predicted ADEV of noise-type at given tau |
# Return the hashes from the contiguous run of hash-comment lines directly
# above `line_number` in a requirements file.
def hashes_above(path, line_number):
def hash_lists(path):
# Yield, for each "countable" line, the hashes accumulated since the
# previous countable line.
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match:
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes
hashes = []
elif PIP_COUNTS_COMMENTS:
# Older pip counts comment lines toward line numbers.
yield []
# line_number is 1-based; islice is 0-based, hence the -1.
return next(islice(hash_lists(path), line_number - 1, None)) | Yield hashes from contiguous comment lines before line ``line_number``. |
# Return whether the provided GRR client id is Fleetspeak-enabled, consulting
# either the relational DB or the legacy AFF4 store.
def IsFleetspeakEnabledClient(grr_id, token=None):
if grr_id is None:
return False
if data_store.RelationalDBEnabled():
md = data_store.REL_DB.ReadClientMetadata(grr_id)
if not md:
return False
return md.fleetspeak_enabled
else:
with aff4.FACTORY.Create(
rdf_client.ClientURN(grr_id),
aff4.AFF4Object.classes["VFSGRRClient"],
mode="r",
token=token) as client:
return bool(client.Get(client.Schema.FLEETSPEAK_ENABLED)) | Returns whether the provided GRR id is a Fleetspeak client. |
# Process a ThirdPartyLibraryDetails instance: append custom -D CMake flags
# for recognized include dirs and library files.
def processLibraryDetails(details):
for includeDir in details.includeDirs:
for pattern in CUSTOM_FLAGS_FOR_INCLUDE_DIRS:
if pattern in includeDir:
flag = '-D' + CUSTOM_FLAGS_FOR_INCLUDE_DIRS[pattern] + '=' + includeDir
details.cmakeFlags.append(flag)
for lib in details.libs:
filename = os.path.basename(lib)
(name, ext) = os.path.splitext(filename)
libName = name.replace('lib', '') if name.startswith('lib') else name
# NOTE(review): rstrip strips any trailing chars from this SET, so e.g.
# "z1" would lose its final letter run too — confirm this matches the
# intended version-suffix stripping.
libName = libName.rstrip('_-1234567890')
if libName in CUSTOM_FLAGS_FOR_LIBS:
flag = '-D' + CUSTOM_FLAGS_FOR_LIBS[libName] + '=' + lib
details.cmakeFlags.append(flag) | Processes the supplied ThirdPartyLibraryDetails instance and sets any custom CMake flags |
# Queue setting this key of this edge to this value at (branch, turn, tick),
# rejecting duplicate timestamps with TimeError.
def edge_val_set(self, graph, orig, dest, idx, key, branch, turn, tick, value):
if (branch, turn, tick) in self._btts:
raise TimeError
self._btts.add((branch, turn, tick))
# Pack identifiers/values for storage; idx and time fields stay raw.
graph, orig, dest, key, value = map(self.pack, (graph, orig, dest, key, value))
self._edgevals2set.append(
(graph, orig, dest, idx, key, branch, turn, tick, value)
) | Set this key of this edge to this value. |
# Enumerate records by index: on-disk lines first (decoding lazily), then
# any remaining in-memory records past the last on-disk line.
def _enum_attached_rows(self, indices):
records = self._records
i = 0
for i, line in self._enum_lines():
if i in indices:
row = records[i]
if row is None:
# Not cached yet — decode straight from the raw line.
row = decode_row(line)
yield (i, row)
# NOTE(review): the second loop starts at the LAST on-disk index i, so
# index i can be yielded twice when it is in `indices` and its record
# is non-None — confirm whether range(i + 1, ...) was intended.
for j in range(i, len(records)):
if j in indices:
if records[j] is not None:
yield (j, records[j]) | Enumerate on-disk and in-memory records. |
# Return (low, high) 1-based Excel row indices bounding dates in [start, end].
# Scans column 0 forward for the first date >= start, then backward for the
# last date <= end. Both are 0 when no row qualifies.
def __findRange(self, excelLib, start, end):
inc = 1
low = 0
high = 0
dates = excelLib.readCol(0, 1)
for index, date in enumerate(dates):
if int(start) <= int(date):
low = index + inc
break
if low:
for index, date in reversed(list(enumerate(dates))):
if int(date) <= int(end):
high = index + inc
break
return low, high | return low and high as excel range |
# Fetch the CDM remote header and initialize the dataset from it.
def _read_header(self):
self._header = self.cdmrf.fetch_header()
self.load_from_stream(self._header) | Get the needed header information to initialize dataset. |
# Reconstruct a domain entity from a snapshot; returns None (implicitly)
# when the snapshot state is empty (e.g. a discarded entity).
def entity_from_snapshot(snapshot):
# NOTE: "AbstractSnapshop" is the project's (typo'd) class name.
assert isinstance(snapshot, AbstractSnapshop), type(snapshot)
if snapshot.state is not None:
entity_class = resolve_topic(snapshot.topic)
return reconstruct_object(entity_class, snapshot.state) | Reconstructs domain entity from given snapshot. |
# Send a mouse-button-down event via Quartz, tracking double/triple click
# state (clicks within 0.3s at the same position on the same button).
def press(button=LEFT):
location = get_position()
button_code, button_down, _, _ = _button_mapping[button]
e = Quartz.CGEventCreateMouseEvent(
None,
button_down,
location,
button_code)
# Bump the click count (capped at 3) for rapid same-button, same-spot
# presses so the OS sees double/triple clicks.
if _last_click["time"] is not None and datetime.datetime.now() - _last_click["time"] < datetime.timedelta(seconds=0.3) and _last_click["button"] == button and _last_click["position"] == location:
_last_click["click_count"] = min(3, _last_click["click_count"]+1)
else:
_last_click["click_count"] = 1
Quartz.CGEventSetIntegerValueField(
e,
Quartz.kCGMouseEventClickState,
_last_click["click_count"])
Quartz.CGEventPost(Quartz.kCGHIDEventTap, e)
_button_state[button] = True
_last_click["time"] = datetime.datetime.now()
_last_click["button"] = button
_last_click["position"] = location | Sends a down event for the specified button, using the provided constants |
def _context_source_file_url(path_or_url):
    """Return a URL for a remote or local context CSV file.

    http(s) URLs pass through; absolute paths get a ``file://`` prefix;
    relative paths are resolved against the current working directory.
    """
    if path_or_url.startswith('http'):
        return path_or_url
    if not path_or_url.startswith('/'):
        path_or_url = os.path.join(os.path.realpath(os.getcwd()), path_or_url)
    return "file://" + path_or_url
# Determine if we should allocate a port for the given process id: rejects
# invalid pids, init (pid 1), and processes that no longer exist.
def _should_allocate_port(pid):
if pid <= 0:
log.info('Not allocating a port to invalid pid')
return False
if pid == 1:
log.info('Not allocating a port to init.')
return False
try:
# Signal 0 probes process existence without sending anything.
os.kill(pid, 0)
except ProcessLookupError:
log.info('Not allocating a port to a non-existent process')
return False
return True | Determine if we should allocate a port for use by the given process id. |
# Add units/description metadata attributes to a DataArray, rewording units
# for vertically integrated output.
def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):
if dtype_out_vert == 'vert_int':
if units != '':
# NOTE(review): this template repeats {0} (units) twice and has
# unbalanced parentheses — looks like a broken format string;
# confirm the intended wording before changing the output text.
units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)
else:
units = '(vertical integral of quantity with unspecified units)'
data.attrs['units'] = units
data.attrs['description'] = description
return data | Add metadata attributes to DataArray |
def __normalize_list(self, msg):
    """Split a message into a list on commas, trimming whitespace.

    A list input is first joined into a single string.
    """
    text = "".join(msg) if isinstance(msg, list) else msg
    return [part.strip() for part in text.split(",")]
# Refresh the SQL Lab datasources cache: force-refetch table and view names
# for every database that allows multi-schema metadata fetch, with a 24h TTL.
def update_datasources_cache():
from superset.models.core import Database
for database in db.session.query(Database).all():
if database.allow_multi_schema_metadata_fetch:
print('Fetching {} datasources ...'.format(database.name))
try:
database.all_table_names_in_database(
force=True, cache=True, cache_timeout=24 * 60 * 60)
database.all_view_names_in_database(
force=True, cache=True, cache_timeout=24 * 60 * 60)
except Exception as e:
# Best-effort per database: report and continue with the rest.
print('{}'.format(str(e))) | Refresh sqllab datasources cache |
# Evaluate a storm query and yield packed nodes (async generator delegating
# to the cell's pode iterator under this user's permissions).
async def eval(self, text, opts=None):
async for pode in self.cell.iterStormPodes(text, opts=opts, user=self.user):
yield pode | Evalute a storm query and yield packed nodes. |
# Pass-through: switch to the federated repository view locally and on every
# provider session that supports it; others are skipped.
def use_federated_repository_view(self):
self._repository_view = FEDERATED
for session in self._get_provider_sessions():
try:
session.use_federated_repository_view()
except AttributeError:
pass | Pass through to provider AssetLookupSession.use_federated_repository_view |
# Add a vector to the current actor position; returns self for chaining.
# dp_x may be a full displacement vector, or the x component with dy/dz.
def addPos(self, dp_x=None, dy=None, dz=None):
p = np.array(self.GetPosition())
if dz is None:
self.SetPosition(p + dp_x)
else:
self.SetPosition(p + [dp_x, dy, dz])
if self.trail:
self.updateTrail()
return self | Add vector to current actor position. |
# Get a unique ID for each object, stamping a new one (from the NEXT_ID
# counter) onto objects that don't carry the ID_KEY attribute yet.
def statsId(obj):
if hasattr(obj, ID_KEY):
return getattr(obj, ID_KEY)
newId = next(NEXT_ID)
setattr(obj, ID_KEY, newId)
return newId | Gets a unique ID for each object. |
# Get all forum topics with a specific tag, optionally filtered by author.
# Returns None when no tag is given or the request fails.
def forum_topic_get_by_tag_for_user(self, tag=None, author=None):
if not tag:
return None
if author:
r = self._request('ebuio/forum/search/bytag/' + tag + '?u=' + author)
else:
r = self._request('ebuio/forum/search/bytag/' + tag)
if not r:
return None
retour = []
for data in r.json().get('data', []):
retour.append(data)
return retour | Get all forum topics with a specific tag |
# Convert a reaction entry to an ordered YAML dict: id first, then known
# properties in a fixed order, then the rest alphabetically. Flux bounds and
# reversibility are excluded; empty/invalid equations are dropped.
def convert_reaction_entry(self, reaction):
d = OrderedDict()
d['id'] = reaction.id
def is_equation_valid(equation):
return (equation is not None and (
not isinstance(equation, Reaction) or
len(equation.compounds) > 0))
# Known keys sort first by this order; unknown keys get rank 1000.
order = {
key: i for i, key in enumerate(
['name', 'genes', 'equation', 'subsystem', 'ec'])}
prop_keys = (set(reaction.properties) -
{'lower_flux', 'upper_flux', 'reversible'})
for prop in sorted(prop_keys, key=lambda x: (order.get(x, 1000), x)):
if reaction.properties[prop] is None:
continue
d[prop] = reaction.properties[prop]
if prop == 'equation' and not is_equation_valid(d[prop]):
del d[prop]
return d | Convert reaction entry to YAML dict. |
# Check whether we should send new HIL state to the APM, rate-limited to at
# most once every 20 ms.
def check_apm_out(self):
now = time.time()
if now - self.last_apm_send_time < 0.02:
return
self.last_apm_send_time = now
if self.hil_state_msg is not None:
self.master.mav.send(self.hil_state_msg) | check if we should send new data to the APM |
# Attempt to resolve a PLoS DOI into the URL of its article XML by following
# the dx.doi.org redirect and reshaping the resolved article path into the
# fetchObjectAttachment endpoint.
def plos_doi_to_xmlurl(doi_string):
doi_url = 'http://dx.doi.org/{0}'.format(doi_string)
log.debug('DOI URL: {0}'.format(doi_url))
try:
resolved_page = urllib.request.urlopen(doi_url)
except urllib.error.URLError as err:
print('Unable to resolve DOI URL, or could not connect')
raise err
else:
resolved_address = resolved_page.geturl()
log.debug('DOI resolved to {0}'.format(resolved_address))
parsed = urllib.parse.urlparse(resolved_address)
xml_url = '{0}://{1}'.format(parsed.scheme, parsed.netloc)
xml_url += '/article/fetchObjectAttachment.action?uri='
# Manually percent-encode ':' and '/' in the article path for the uri=
# query parameter, then keep only the part after 'article/'.
xml_path = parsed.path.replace(':', '%3A').replace('/', '%2F')
xml_path = xml_path.split('article%2F')[1]
xml_url += '{0}{1}'.format(xml_path, '&representation=XML')
log.debug('Shaped PLoS request for XML {0}'.format(xml_url))
return xml_url | Attempts to resolve a PLoS DOI into a URL path to the XML file. |
def in_query(expression):
    """Match any of the values that exist in an array specified in query.

    Returns a query function of ``index``; ``expression`` may be an iterable
    of values or a callable producing one.
    """
    def _in(index, expression=expression):
        candidates = expression() if callable(expression) else expression
        try:
            iter(candidates)
        except TypeError:
            raise AttributeError('$in argument must be an iterable!')
        matched = set()
        for hashed in (index.get_hash_for(v) for v in candidates):
            matched.update(index.get_keys_for(hashed))
        return list(matched)
    return _in
def add_vertex_buffer(self, material, vertex_format, byte_offset, byte_length):
    """Register a vertex buffer slice with its material and layout."""
    entry = {
        "material": material,
        "vertex_format": vertex_format,
        "byte_offset": byte_offset,
        "byte_length": byte_length,
    }
    self._vertex_buffers.append(entry)
# Assign primary and secondary stress to a syllabified simplex word:
# primary stress (') on the first syllable, secondary stress (`) on
# alternating medial syllables, shifted when a light syllable precedes a
# heavy one; the final syllable is never stressed.
def stress(syllabified_simplex_word):
syllables = syllabified_simplex_word.split('.')
stressed = '\'' + syllables[0]
try:
# n tracks parity shifts introduced by light-before-heavy syllables.
n = 0
medial = syllables[1:-1]
for i, syll in enumerate(medial):
if (i + n) % 2 == 0:
stressed += '.' + syll
else:
try:
if is_light(syll) and is_heavy(medial[i + 1]):
stressed += '.' + syll
n += 1
continue
except IndexError:
pass
stressed += '.`' + syll
except IndexError:
pass
if len(syllables) > 1:
stressed += '.' + syllables[-1]
return stressed | Assign primary and secondary stress to 'syllabified_simplex_word'. |
# Return a preferences manager bound to this registry and its model, used
# to retrieve preference values.
def manager(self, **kwargs):
return PreferencesManager(registry=self, model=self.preference_model, **kwargs) | Return a preference manager that can be used to retrieve preference values |
# Populate the `export` table with library exported function data,
# recreating the table and silently skipping duplicate function names.
def make_export(self, exports):
sql = 'drop table if exists export'
logging.debug(sql)
self.cursor.execute(sql)
sql = 'create table if not exists export ' \
'(func text unique, module text)'
logging.debug(sql)
self.cursor.execute(sql)
for module in exports:
logging.debug(_('insering exports from %s'), module)
sql = 'insert into export values (?, ?)'
for func in exports[module]:
if func:
try:
self.cursor.execute(sql, (func, module))
except sqlite3.IntegrityError:
# func is UNIQUE — keep the first module seen for it.
pass
self.con.commit() | Populate library exported function data. |
# Parser callback: append the current token string as a path component to
# the current expression's attribute path.
def AddAttributePath(self, **_):
attribute_path = self.current_expression.attribute
if not attribute_path:
attribute_path = []
attribute_path.append(self.string)
self.current_expression.SetAttribute(attribute_path) | Adds a path component to the current attribute. |
def branch_inlet_outlet(data, commdct, branchname):
    """Return the [inlet, outlet] node names of the named BRANCH object.

    ``commdct`` is unused but kept for interface compatibility.
    """
    branches = data.dt['Branch'.upper()]
    matching = [record for record in branches if record[1] == branchname]
    branch = matching[0]
    inlet = branch[6]
    outlet = branch[len(branch) - 2]
    return [inlet, outlet]
def remove_empty_dir(path):
    """Recursively remove ``path`` and any subdirectories that are empty.

    Directories containing files are left in place. A directory that becomes
    empty once its empty children are pruned is removed too (the original
    only removed already-empty leaves, leaving empty parent chains behind).
    OSError is tolerated, matching the original best-effort behavior.
    """
    try:
        if not os.path.isdir(path):
            return
        # Prune children first so the parent can be re-evaluated below.
        for entry in os.listdir(path):
            child = os.path.join(path, entry)
            if os.path.isdir(child):
                remove_empty_dir(child)
        if not os.listdir(path):
            os.rmdir(path)
    except OSError as e:
        # Races (a file appearing between listdir and rmdir) are tolerated.
        if e.errno == errno.ENOTEMPTY:
            pass
# Fetch the twitter ids of each screen_name, querying in batches of 100 and
# sleeping off rate limits.
def lookup_ids(handles):
ids = set()
# NOTE(review): range(len(handles)) produces many empty trailing slices
# (guarded by the len > 0 check); range over ceil(len/100) was likely
# intended — harmless but wasteful.
for handle_list in [handles[100 * i:100 * i + 100] for i in range(len(handles))]:
if len(handle_list) > 0:
while True:
r = twapi.request('users/lookup', {'screen_name': ','.join(handle_list)})
# NOTE(review): 88/130/420/429 mix Twitter error codes with HTTP
# status codes — confirm what r.status_code actually carries.
if r.status_code in [88, 130, 420, 429]:
sys.stderr.write('Sleeping off rate limit for %s: %s\n' % (str(handle_list), r.text))
time.sleep(300)
elif r.status_code == 200:
for item in r.get_iterator():
ids.add(item['id_str'])
break
else:
sys.stderr.write('Error: %s\nSkipping %s...\n' % (str(handle_list), r.text))
break
return ids | Fetch the twitter ids of each screen_name. |
# Get a steady, persistent PyGreSQL connection, cached per thread (EAFP:
# create and stash one on first access).
def connection(self):
try:
con = self.thread.connection
except AttributeError:
con = self.steady_connection()
self.thread.connection = con
return con | Get a steady, persistent PyGreSQL connection. |
def _authenticate():
    """Retrieve CSRF and API tickets for the Proxmox API.

    Populates the module-level ``url``, ``port``, ``ticket``, ``csrf`` and
    ``verify_ssl`` globals from the cloud provider configuration, then logs
    in against ``/api2/json/access/ticket``.
    """
    global url, port, ticket, csrf, verify_ssl
    url = config.get_cloud_config_value(
        'url', get_configured_provider(), __opts__, search_global=False
    )
    port = config.get_cloud_config_value(
        'port', get_configured_provider(), __opts__,
        default=8006, search_global=False
    )
    # BUG FIX: the original assignment ended with a stray trailing comma,
    # which made ``username`` a one-element tuple, so the login payload sent
    # ``('user',)`` instead of the username string.
    username = config.get_cloud_config_value(
        'user', get_configured_provider(), __opts__, search_global=False
    )
    passwd = config.get_cloud_config_value(
        'password', get_configured_provider(), __opts__, search_global=False
    )
    verify_ssl = config.get_cloud_config_value(
        'verify_ssl', get_configured_provider(), __opts__,
        default=True, search_global=False
    )
    connect_data = {'username': username, 'password': passwd}
    full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port)
    returned_data = requests.post(
        full_url, verify=verify_ssl, data=connect_data).json()
    ticket = {'PVEAuthCookie': returned_data['data']['ticket']}
    csrf = six.text_type(returned_data['data']['CSRFPreventionToken'])
# Abstract stub: updating a Riak Datatype is not supported by this
# transport/backend; concrete subclasses must override.
def update_datatype(self, datatype, w=None, dw=None, pw=None,
return_body=None, timeout=None, include_context=None):
raise NotImplementedError | Updates a Riak Datatype by sending local operations to the server. |
# Associate a toggle-view QAction with this plugin, wiring it to
# toggle_view and binding the plugin shortcut when one is configured.
def create_toggle_view_action(self):
title = self.get_plugin_title()
if self.CONF_SECTION == 'editor':
title = _('Editor')
if self.shortcut is not None:
action = create_action(self, title,
toggled=lambda checked: self.toggle_view(checked),
shortcut=QKeySequence(self.shortcut),
context=Qt.WidgetShortcut)
else:
action = create_action(self, title, toggled=lambda checked:
self.toggle_view(checked))
self.toggle_view_action = action | Associate a toggle view action with each plugin |
# Return the set of node names in a SPAdes fastg file that are circular:
# nodes with a self-edge in BOTH orientations (plain and trailing-quote
# reverse form). Names match those in the SPAdes fasta file.
def _get_spades_circular_nodes(self, fastg):
seq_reader = pyfastaq.sequences.file_reader(fastg)
# Only ids containing ':' carry edge information.
names = set([x.id.rstrip(';') for x in seq_reader if ':' in x.id])
found_fwd = set()
found_rev = set()
for name in names:
l = name.split(':')
if len(l) != 2:
continue
if l[0] == l[1]:
# Trailing apostrophe marks the reverse-complement orientation.
if l[0][-1] == "'":
found_rev.add(l[0][:-1])
else:
found_fwd.add(l[0])
return found_fwd.intersection(found_rev) | Returns set of names of nodes in SPAdes fastg file that are circular. Names will match those in spades fasta file |
# Classification accuracy for the multi-categorical case, weighted by the
# per-sample weights from _cat_sample_weights (Keras backend ops).
def cat_acc(y, z):
weights = _cat_sample_weights(y)
_acc = K.cast(K.equal(K.argmax(y, axis=-1),
K.argmax(z, axis=-1)),
K.floatx())
_acc = K.sum(_acc * weights) / K.sum(weights)
return _acc | Classification accuracy for multi-categorical case |
def markdown_2_rst(lines):
    """Convert markdown lines to reStructuredText lines.

    Triple-backtick fences become ``.. code-block:: none`` directives (the
    fence's indentation is preserved), fenced lines get one extra leading
    space, and backslashes outside code are escaped.
    """
    converted = []
    in_code = False
    for raw in lines:
        if raw.strip() == "```":
            in_code = not in_code
            indent = " " * (len(raw.rstrip()) - 3)
            if in_code:
                converted.append("\n\n%s.. code-block:: none\n\n" % indent)
            else:
                converted.append("\n")
        elif in_code and raw.strip():
            converted.append(" " + raw)
        else:
            converted.append(raw.replace("\\", "\\\\"))
    return converted
# Construct the weighted design matrix X and normal matrix M = X^T X for the
# problem, optionally regularizing M's diagonal in place.
def _construct_X_M(self, omega, **kwargs):
X = self._construct_X(omega, weighted=True, **kwargs)
M = np.dot(X.T, X)
if getattr(self, 'regularization', None) is not None:
# View onto M's diagonal (stride n+1 over the flattened array), so
# the additions below modify M directly.
diag = M.ravel(order='K')[::M.shape[0] + 1]
if self.regularize_by_trace:
diag += diag.sum() * np.asarray(self.regularization)
else:
diag += np.asarray(self.regularization)
return X, M | Construct the weighted normal matrix of the problem |
def bounds_overlap(bound1, bound2):
    """Return True if two (x, y, w, h) bounding boxes overlap.

    Boxes that merely touch (edge coordinate equal) count as overlapping,
    matching the strict ``<`` separation tests.
    """
    ax, ay, aw, ah = bound1
    bx, by, bw, bh = bound2
    if ax + aw < bx or bx + bw < ax:
        return False
    if ay + ah < by or by + bh < ay:
        return False
    return True
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor.

    Outside a TPU context the conversion is delegated to the primary
    variable. Inside one, the variable handle is returned for
    reference conversions and a fresh read otherwise; a dtype mismatch
    yields NotImplemented.
    """
    if _enclosing_tpu_context() is None:
        primary = self._primary_var
        if hasattr(primary, '_dense_var_to_tensor'):
            return primary._dense_var_to_tensor(dtype, name, as_ref)
        return ops.convert_to_tensor(primary)
    if dtype is not None and dtype != self.dtype:
        return NotImplemented
    return self.handle if as_ref else self.read_value()
def flip(f):
    """Flip the order of positonal arguments of given function.

    Returns a wrapper that invokes ``f`` with its positional arguments
    reversed; keyword arguments pass through unchanged. The wrapper
    inherits ``__name__`` and ``__module__`` from ``f``.
    """
    ensure_callable(f)

    def flipped(*args, **kwargs):
        return f(*args[::-1], **kwargs)

    functools.update_wrapper(flipped, f, ('__name__', '__module__'))
    return flipped
def from_git_rev_read(path):
    """Retrieve given file path contents of certain Git revision.

    :param path: identifier in ``<revision>:<file path>`` form, as
        accepted by ``git show``.
    :return: the file contents at that revision, right-stripped and
        decoded as UTF-8.
    :raises ValueError: if ``path`` lacks a revision prefix, or git
        cannot show the given revision/path pair.
    """
    if ":" not in path:
        raise ValueError("Path identifier must start with a revision hash.")
    cmd = "git", "show", "-t", path
    try:
        return subprocess.check_output(cmd).rstrip().decode("utf-8")
    except subprocess.CalledProcessError as err:
        # Chain the git failure and say what could not be read instead
        # of raising a bare, message-less ValueError.
        raise ValueError(
            "Unable to read {0!r} from git revision.".format(path)
        ) from err
def _make_function(instr, queue, stack, body, context):
    """Set a make_function_context, then push onto the stack.

    The instruction below us on the stack must be the LOAD_CONST of
    the function's qualified name; lambdas get no context recorded.
    """
    assert stack, "Empty stack before MAKE_FUNCTION."
    name_load = stack[-1]
    expect(name_load, instrs.LOAD_CONST, "before MAKE_FUNCTION")
    stack.append(instr)
    if not is_lambda_name(name_load.arg):
        return context.update(
            make_function_context=MakeFunctionContext(
                closure=isinstance(instr, instrs.MAKE_CLOSURE),
            )
        )
def to_dict(self):
    """Save this condition into a dictionary.

    Properties with falsy values are stored as name-only entries; the
    properties list is only added when at least one property exists.
    """
    result = {
        'sequence': self.sequence,
        'targetComponent': self.target_component.to_dict(),
    }
    prop_list = [
        {'name': name, 'value': str(value)} if value else {'name': name}
        for name, value in self.properties.items()
    ]
    if prop_list:
        result[RTS_EXT_NS_YAML + 'properties'] = prop_list
    return result
def update_record(self, zeroconf, now, record):
    """Updates service information from a DNS record.

    Expired or missing records are ignored. A records accumulate
    addresses for this service name, SRV records update host/port and
    re-resolve addresses, TXT records replace the service text.
    """
    if record is not None and not record.is_expired(now):
        if record.type == _TYPE_A:
            if record.name == self.name:
                # Collect every distinct address advertised for us.
                if not record.address in self.address:
                    self.address.append(record.address)
        elif record.type == _TYPE_SRV:
            if record.name == self.name:
                self.server = record.server
                self.port = record.port
                self.weight = record.weight
                self.priority = record.priority
                # The SRV record may name a different server, so drop
                # the stale addresses and re-resolve from the cached
                # A record for the new server.
                self.address = []
                self.update_record(zeroconf, now,
                    zeroconf.cache.get_by_details(self.server,
                        _TYPE_A, _CLASS_IN))
        elif record.type == _TYPE_TXT:
            if record.name == self.name:
                self.set_text(record.text)
def package_version(self):
    """Returns the well formed PEP-440 version.

    Builds ahead of the last tag get a ``.dev<ncommits>+<sha>`` local
    suffix appended to the base version.
    """
    version = self.base_version
    if self.ncommits:
        version = '{0}.dev{1}+{2}'.format(version, self.ncommits, self.sha)
    return version
def convert(self, value, view):
    """Check that the value is a string and matches the pattern.

    Non-strings are reported as a fatal failure; strings failing the
    configured pattern are reported as a non-fatal failure. Returns
    the value when it is a string.
    """
    if not isinstance(value, BASESTRING):
        self.fail(u'must be a string', view, True)
        return
    if self.pattern and not self.regex.match(value):
        self.fail(
            u"must match the pattern {0}".format(self.pattern),
            view
        )
    return value
def authenticate(self):
    """Aquire authorization token for using thetvdb apis.

    Refreshes the held token when one exists, falling back to a fresh
    login if the refresh is rejected with HTTP 401; with no token held
    it logs in directly. The new token and its acquisition time are
    stored for later expiry checks.
    """
    if self.__token:
        try:
            resp = self._refresh_token()
        except exceptions.TVDBRequestException as err:
            # 401 means the old token can no longer be refreshed, so
            # log in from scratch; any other failure propagates.
            if getattr(err.response, 'status_code', 0) == 401:
                resp = self._login()
            else:
                raise
    else:
        resp = self._login()
    self.__token = resp.get('token')
    self._token_timer = timeutil.utcnow()
def init_sources(path):
    """initializes array of groups and their associated js files

    Scans ``path`` for files with the configured source extension,
    wraps each in a Script and registers it in ``config.sources``
    before parsing its dependencies. (Python 2 syntax.)
    """
    for f in dir_list(path):
        if(os.path.splitext(f)[1][1:] == config.source_ext):
            print "Source file discovered: %s" % (f)
            script = Script(f)
            # NOTE(review): membership is tested against
            # script.filename but the entry is stored under
            # script.path -- if those two can differ, the same script
            # may be re-registered; confirm which key is intended.
            if (script.filename not in config.sources.keys()):
                config.sources[script.path] = script
                parse.parse_dependencies(script,script)
def to_datetime(value):
    """Converts a string to a datetime.

    ``None`` passes through unchanged; other values are parsed as
    ISO-8601 strings via ``parser.isoparse``.
    """
    if value is None:
        return None
    # NOTE(review): integers are routed to the lenient parser.parse,
    # which expects a string -- confirm whether epoch timestamps were
    # intended here and whether this branch is reachable.
    if isinstance(value, six.integer_types):
        return parser.parse(value)
    return parser.isoparse(value)
def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max):
    """Register segmentation id pool with DCNM.

    Creates the segment-id range on DCNM when none exists for this
    orchestrator, or updates it when the bounds reported by DCNM (as
    "min-max") differ from the requested ones. A failed DCNM request
    is logged and aborts the process.
    """
    orch_id = cfg.dcnm.orchestrator_id
    try:
        existing = self.dcnm_client.get_segmentid_range(orch_id)
        if existing is None:
            self.dcnm_client.set_segmentid_range(orch_id, seg_id_min,
                                                 seg_id_max)
            return
        cur_min, _, cur_max = existing["segmentIdRanges"].partition("-")
        if int(cur_min) != seg_id_min or int(cur_max) != seg_id_max:
            self.dcnm_client.update_segmentid_range(orch_id,
                                                    seg_id_min,
                                                    seg_id_max)
    except dexc.DfaClientRequestFailed as exc:
        LOG.error("Segment ID range could not be created/updated"
                  " on DCNM: %s", exc)
        raise SystemExit(exc)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.