code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def hitail(E: np.ndarray, diffnumflux: np.ndarray, isimE0: np.ndarray, E0: np.ndarray,
Bhf: np.ndarray, bh: float, verbose: int = 0):
Bh = np.empty_like(E0)
for iE0 in np.arange(E0.size):
Bh[iE0] = Bhf[iE0]*diffnumflux[isimE0[iE0], iE0]
het = Bh*(E[:, None] / E0)**-bh
het[E[:, None] < E0] = 0.
if verbose > 0:
print('Bh: ' + (' '.join('{:0.1f}'.format(b) for b in Bh)))
return het | strickland 1993 said 0.2, but 0.145 gives better match to peak flux at 2500 = E0 |
def update_vrf_table_links(self, vrf_table, new_imp_rts,
removed_imp_rts):
assert vrf_table
if new_imp_rts:
self._link_vrf_table(vrf_table, new_imp_rts)
if removed_imp_rts:
self._remove_links_to_vrf_table_for_rts(vrf_table,
removed_imp_rts) | Update mapping from RT to VRF table. |
def dtdEntity(self, name):
ret = libxml2mod.xmlGetDtdEntity(self._o, name)
if ret is None:raise treeError('xmlGetDtdEntity() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp | Do an entity lookup in the DTD entity hash table and |
def get_average_voltage(self, min_voltage=None, max_voltage=None):
pairs_in_range = self._select_in_voltage_range(min_voltage,
max_voltage)
if len(pairs_in_range) == 0:
return 0
total_cap_in_range = sum([p.mAh for p in pairs_in_range])
total_edens_in_range = sum([p.mAh * p.voltage for p in pairs_in_range])
return total_edens_in_range / total_cap_in_range | Average voltage for path satisfying between a min and max voltage.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
max_voltage (float): The maximum allowable voltage allowable for a
given step.
Returns:
Average voltage in V across the insertion path (a subset of the
path can be chosen by the optional arguments) |
def mediated_transfer_async(
self,
token_network_identifier: TokenNetworkID,
amount: PaymentAmount,
target: TargetAddress,
identifier: PaymentID,
fee: FeeAmount = MEDIATION_FEE,
secret: Secret = None,
secret_hash: SecretHash = None,
) -> PaymentStatus:
if secret is None:
if secret_hash is None:
secret = random_secret()
else:
secret = EMPTY_SECRET
payment_status = self.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=amount,
fee=fee,
target=target,
identifier=identifier,
secret=secret,
secret_hash=secret_hash,
)
return payment_status | Transfer `amount` between this node and `target`.
This method will start an asynchronous transfer, the transfer might fail
or succeed depending on a couple of factors:
- Existence of a path that can be used, through the usage of direct
or intermediary channels.
- Network speed, making the transfer sufficiently fast so it doesn't
expire. |
def write_events(self, outname):
self.make_output_dir(outname)
if '.hdf' in outname:
self.write_to_hdf(outname)
else:
raise ValueError('Cannot write to this format') | Write the found events to a sngl inspiral table |
def declare_string(self, value):
byte_s = BytesIO(str(value).encode(ENCODING))
data_file = self.research_object.add_data_file(byte_s, content_type=TEXT_PLAIN)
checksum = posixpath.basename(data_file)
data_id = "data:%s" % posixpath.split(data_file)[1]
entity = self.document.entity(
data_id, {provM.PROV_TYPE: WFPROV["Artifact"],
provM.PROV_VALUE: str(value)})
return entity, checksum | Save as string in UTF-8. |
def match_path(rule, path):
split_rule = split_by_slash(rule)
split_path = split_by_slash(path)
url_vars = {}
if len(split_rule) != len(split_path):
return False, {}
for r, p in zip(split_rule, split_path):
if r.startswith('{') and r.endswith('}'):
url_vars[r[1:-1]] = p
continue
if r != p:
return False, {}
return True, url_vars | Match path.
>>> match_path('/foo', '/foo')
(True, {})
>>> match_path('/foo', '/bar')
(False, {})
>>> match_path('/users/{user_id}', '/users/1')
(True, {'user_id': '1'})
>>> match_path('/users/{user_id}', '/users/not-integer')
(True, {'user_id': 'not-integer'}) |
def decode_nibbles(value):
nibbles_with_flag = bytes_to_nibbles(value)
flag = nibbles_with_flag[0]
needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1}
is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1}
if is_odd_length:
raw_nibbles = nibbles_with_flag[1:]
else:
raw_nibbles = nibbles_with_flag[2:]
if needs_terminator:
nibbles = add_nibbles_terminator(raw_nibbles)
else:
nibbles = raw_nibbles
return nibbles | The inverse of the Hex Prefix function |
def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc):
if not (emin_mev >= 0):
raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,))
if not (emax_mev >= emin_mev):
raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev))
if not (delta >= 0):
raise ValueError('must have delta >= 0; got %r, %r' % (delta,))
if not (ne_cc >= 0):
raise ValueError('must have ne_cc >= 0; got %r, %r' % (ne_cc,))
self.in_vals[IN_VAL_EDIST] = EDIST_PLW
self.in_vals[IN_VAL_EMIN] = emin_mev
self.in_vals[IN_VAL_EMAX] = emax_mev
self.in_vals[IN_VAL_DELTA1] = delta
self.in_vals[IN_VAL_NB] = ne_cc
return self | Set the energy distribution function to a power law.
**Call signature**
*emin_mev*
The minimum energy of the distribution, in MeV
*emax_mev*
The maximum energy of the distribution, in MeV
*delta*
The power-law index of the distribution
*ne_cc*
The number density of energetic electrons, in cm^-3.
Returns
*self* for convenience in chaining. |
def get_ts_stats_significance(self, x, ts, stat_ts_func, null_ts_func, B=1000, permute_fast=False, label_ts=''):
stats_ts, pvals, nums = ts_stats_significance(
ts, stat_ts_func, null_ts_func, B=B, permute_fast=permute_fast)
return stats_ts, pvals, nums | Returns the statistics, pvalues and the actual number of bootstrap
samples. |
def _wrapper(func, *vect_args, **vect_kwargs):
if not hasattr(func, '__name__'):
func.__name__ = '{}.__call__'.format(func.__class__.__name__)
return wraps(func)(_NumpyVectorizeWrapper(func, *vect_args,
**vect_kwargs)) | Return the vectorized wrapper function. |
def _format_title_string(self, title_string):
if "StreamTitle='" in title_string:
tmp = title_string[title_string.find("StreamTitle='"):].replace("StreamTitle='", self.icy_title_prefix)
ret_string = tmp[:tmp.find("';")]
else:
ret_string = title_string
if '"artist":"' in ret_string:
ret_string = self.icy_title_prefix + ret_string[ret_string.find('"artist":')+10:].replace('","title":"', ' - ').replace('"}\';', '')
return self._title_string_format_text_tag(ret_string) | format mplayer's title |
def register(listener):
if not isinstance(listener, _EventListener):
raise TypeError("Listeners for %s must be either a "
"CommandListener, ServerHeartbeatListener, "
"ServerListener, or TopologyListener." % (listener,))
if isinstance(listener, CommandListener):
_LISTENERS.command_listeners.append(listener)
if isinstance(listener, ServerHeartbeatListener):
_LISTENERS.server_heartbeat_listeners.append(listener)
if isinstance(listener, ServerListener):
_LISTENERS.server_listeners.append(listener)
if isinstance(listener, TopologyListener):
_LISTENERS.topology_listeners.append(listener) | Register a global event listener.
:Parameters:
- `listener`: A subclasses of :class:`CommandListener`,
:class:`ServerHeartbeatListener`, :class:`ServerListener`, or
:class:`TopologyListener`. |
def _batch_entry(self):
try:
while True:
self._batch_entry_run()
except:
self.exc_info = sys.exc_info()
os.kill(self.pid, signal.SIGUSR1) | Entry point for the batcher thread. |
def _untag_sentence(tagged_sentence):
untagged_sentence = TAG_PATT.sub('\\2', tagged_sentence)
clean_sentence = JUNK_PATT.sub('', untagged_sentence)
return clean_sentence.strip() | Removes all tags in the sentence, returning the original sentence
without Medscan annotations.
Parameters
----------
tagged_sentence : str
The tagged sentence
Returns
-------
untagged_sentence : str
Sentence with tags and annotations stripped out |
def srfrec(body, longitude, latitude):
body = ctypes.c_int(body)
longitude = ctypes.c_double(longitude)
latitude = ctypes.c_double(latitude)
rectan = stypes.emptyDoubleVector(3)
libspice.srfrec_c(body, longitude, latitude, rectan)
return stypes.cVectorToPython(rectan) | Convert planetocentric latitude and longitude of a surface
point on a specified body to rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfrec_c.html
:param body: NAIF integer code of an extended body.
:type body: int
:param longitude: Longitude of point in radians.
:type longitude: float
:param latitude: Latitude of point in radians.
:type latitude: float
:return: Rectangular coordinates of the point.
:rtype: 3-Element Array of floats |
def create_box(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=0):
h_box = Gtk.Box(orientation=orientation, spacing=spacing)
h_box.set_homogeneous(False)
return h_box | Function creates box. Based on orientation
it can be either HORIZONTAL or VERTICAL |
def GetCoinAssets(self):
assets = set()
for coin in self.GetCoins():
assets.add(coin.Output.AssetId)
return list(assets) | Get asset ids of all coins present in the wallet.
Returns:
list: of UInt256 asset id's. |
def find_project_by_short_name(short_name, pbclient, all=None):
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise | Return project by short_name. |
def import_dashboards(path, recursive):
p = Path(path)
files = []
if p.is_file():
files.append(p)
elif p.exists() and not recursive:
files.extend(p.glob('*.json'))
elif p.exists() and recursive:
files.extend(p.rglob('*.json'))
for f in files:
logging.info('Importing dashboard from file %s', f)
try:
with f.open() as data_stream:
dashboard_import_export.import_dashboards(
db.session, data_stream)
except Exception as e:
logging.error('Error when importing dashboard from file %s', f)
logging.error(e) | Import dashboards from JSON |
def commit_or_abort(self, ctx, timeout=None, metadata=None,
credentials=None):
return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
credentials=credentials) | Runs commit or abort operation. |
def hook_scope(self, name=""):
assert not self.revision
self.cursor.execute(
'insert into hooks (hook, date) values (?, ?)',
(name or sys.argv[0],
datetime.datetime.utcnow().isoformat()))
self.revision = self.cursor.lastrowid
try:
yield self.revision
self.revision = None
except Exception:
self.flush(False)
self.revision = None
raise
else:
self.flush() | Scope all future interactions to the current hook execution
revision. |
async def _wait(self):
for buid in self.otherbldgbuids:
nodeevnt = self.allbldgbuids.get(buid)
if nodeevnt is None:
continue
await nodeevnt[1].wait() | Wait on the other editatoms who are constructing nodes my new nodes refer to |
def get_service_packages(self):
api = self._get_api(billing.DefaultApi)
package_response = api.get_service_packages()
packages = []
for state in PACKAGE_STATES:
items = getattr(package_response, state) or []
for item in ensure_listable(items):
params = item.to_dict()
params['state'] = state
packages.append(ServicePackage(params))
return packages | Get all service packages |
def database_current_migration(self):
if not self.migration_table.exists(self.session.bind):
return None
if self.migration_data is None:
return None
return self.migration_data.version | Return the current migration in the database. |
def _gcd(a, b):
while b:
a, b = b, (a % b)
return a | Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive). |
def _cast_repr(self, caster, *args, **kwargs):
if self.__repr_content is None:
self.__repr_content = hash_and_truncate(self)
assert self.__uses_default_repr
return caster(self.__repr_content, *args, **kwargs) | Will cast this constant with the provided caster, passing args and kwargs.
If there is no registered representation, will hash the name using sha512 and use the first 8 bytes
of the digest. |
def convertLatLngToPixelXY(self, lat, lng, level):
mapSize = self.getMapDimensionsByZoomLevel(level)
lat = self.clipValue(lat, self.min_lat, self.max_lat)
lng = self.clipValue(lng, self.min_lng, self.max_lng)
x = (lng + 180) / 360
sinlat = math.sin(lat * math.pi / 180)
y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
pixelX = int(self.clipValue(x * mapSize + 0.5, 0, mapSize - 1))
pixelY = int(self.clipValue(y * mapSize + 0.5, 0, mapSize - 1))
return (pixelX, pixelY) | returns the x and y values of the pixel corresponding to a latitude
and longitude. |
def indent(text, amount, ch=' '):
padding = amount * ch
return ''.join(padding+line for line in text.splitlines(True)) | Indents a string by the given amount of characters. |
def _get_svc_path(name='*', status=None):
if not SERVICE_DIR:
raise CommandExecutionError('Could not find service directory.')
ena = set()
for el in glob.glob(os.path.join(SERVICE_DIR, name)):
if _is_svc(el):
ena.add(os.readlink(el))
log.trace('found enabled service path: %s', el)
if status == 'ENABLED':
return sorted(ena)
ava = set()
for d in AVAIL_SVR_DIRS:
for el in glob.glob(os.path.join(d, name)):
if _is_svc(el):
ava.add(el)
log.trace('found available service path: %s', el)
if status == 'DISABLED':
ret = ava.difference(ena)
else:
ret = ava.union(ena)
return sorted(ret) | Return a list of paths to services with ``name`` that have the specified ``status``
name
a glob for service name. default is '*'
status
None : all services (no filter, default choice)
'DISABLED' : available service(s) that is not enabled
'ENABLED' : enabled service (whether started on boot or not) |
def inner(self, x1, x2):
if x1 not in self:
raise LinearSpaceTypeError('`x1` {!r} is not an element of '
'{!r}'.format(x1, self))
if x2 not in self:
raise LinearSpaceTypeError('`x2` {!r} is not an element of '
'{!r}'.format(x2, self))
inner = self._inner(x1, x2)
if self.field is None:
return inner
else:
return self.field.element(self._inner(x1, x2)) | Return the inner product of ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `LinearSpaceElement`
Elements whose inner product to compute.
Returns
-------
inner : `LinearSpace.field` element
Inner product of ``x1`` and ``x2``. |
def delete_from_all_link_group(self, group):
msg = StandardSend(self._address,
COMMAND_DELETE_FROM_ALL_LINK_GROUP_0X02_NONE,
cmd2=group)
self._send_msg(msg) | Delete a device to an All-Link Group. |
def _join_domain(domain,
username=None,
password=None,
account_ou=None,
account_exists=False):
NETSETUP_JOIN_DOMAIN = 0x1
NETSETUP_ACCOUNT_CREATE = 0x2
NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20
NETSETUP_JOIN_WITH_NEW_NAME = 0x400
join_options = 0x0
join_options |= NETSETUP_JOIN_DOMAIN
join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
join_options |= NETSETUP_JOIN_WITH_NEW_NAME
if not account_exists:
join_options |= NETSETUP_ACCOUNT_CREATE
with salt.utils.winapi.Com():
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
return comp.JoinDomainOrWorkgroup(
Name=domain, Password=password, UserName=username, AccountOU=account_ou,
FJoinOptions=join_options)[0] | Helper function to join the domain.
Args:
domain (str): The domain to which the computer should be joined, e.g.
``example.com``
username (str): Username of an account which is authorized to join
computers to the specified domain. Need to be either fully qualified
like ``user@domain.tld`` or simply ``user``
password (str): Password of the specified user
account_ou (str): The DN of the OU below which the account for this
computer should be created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool): If set to ``True`` the computer will only join
the domain if the account already exists. If set to ``False`` the
computer account will be created if it does not exist, otherwise it
will use the existing account. Default is False.
Returns:
int:
:param domain:
:param username:
:param password:
:param account_ou:
:param account_exists:
:return: |
def close(self):
if not self._closed:
try:
self._cursor.close()
except Exception:
pass
self._closed = True | Close the tough cursor.
It will not complain if you close it more than once. |
def reload_module(self, module_name):
module = self.loaded_modules.get(module_name)
if module:
module.stop(reloading=True)
else:
_log.info("Reload loading new module module '%s'",
module_name)
success = self.load_module(module_name)
if success:
_log.info("Successfully (re)loaded module '%s'.", module_name)
elif module:
_log.error("Unable to reload module '%s', reusing existing.",
module_name)
else:
_log.error("Failed to load module '%s'.", module_name)
return False
self.loaded_modules[module_name].start(reloading=True)
return success | Reloads the specified module without changing its ordering.
1. Calls stop(reloading=True) on the module
2. Reloads the Module object into .loaded_modules
3. Calls start(reloading=True) on the new object
If called with a module name that is not currently loaded, it will load it.
Returns True if the module was successfully reloaded, otherwise False. |
def seek_to_end(self, *partitions):
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST) | Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned. |
def get_gene_count_tab(infile,
bc_getter=None):
gene = None
counts = collections.Counter()
for line in infile:
values = line.strip().split("\t")
assert len(values) == 2, "line: %s does not contain 2 columns" % line
read_id, assigned_gene = values
if assigned_gene != gene:
if gene:
yield gene, counts
gene = assigned_gene
counts = collections.defaultdict(collections.Counter)
cell, umi = bc_getter(read_id)
counts[cell][umi] += 1
yield gene, counts | Yields the counts per umi for each gene
bc_getter: method to get umi (plus optionally, cell barcode) from
read, e.g get_umi_read_id or get_umi_tag
TODO: ADD FOLLOWING OPTION
skip_regex: skip genes matching this regex. Useful to ignore
unassigned reads (as per get_bundles class above) |
def sprinkler_reaches_cell(x, y, sx, sy, r):
dx = sx - x
dy = sy - y
return math.sqrt(dx ** 2 + dy ** 2) <= r | Return whether a cell is within the radius of the sprinkler.
x: column index of cell
y: row index of cell
sx: column index of sprinkler
sy: row index of sprinkler
r: sprinkler radius |
def selected_purpose(self):
item = self.lstCategories.currentItem()
try:
return definition(item.data(QtCore.Qt.UserRole))
except (AttributeError, NameError):
return None | Obtain the layer purpose selected by user.
:returns: Metadata of the selected layer purpose.
:rtype: dict, None |
def get_current_user(with_domain=True):
try:
user_name = win32api.GetUserNameEx(win32api.NameSamCompatible)
if user_name[-1] == '$':
test_user = win32api.GetUserName()
if test_user == 'SYSTEM':
user_name = 'SYSTEM'
elif get_sid_from_name(test_user) == 'S-1-5-18':
user_name = 'SYSTEM'
elif not with_domain:
user_name = win32api.GetUserName()
except pywintypes.error as exc:
raise CommandExecutionError(
'Failed to get current user: {0}'.format(exc))
if not user_name:
return False
return user_name | Gets the user executing the process
Args:
with_domain (bool):
``True`` will prepend the user name with the machine name or domain
separated by a backslash
Returns:
str: The user name |
def _checkAndConvertIndex(self, index):
if index < 0:
index = len(self) + index
if index < 0 or index >= self._doc.blockCount():
raise IndexError('Invalid block index', index)
return index | Check integer index, convert from less than zero notation |
def audio_open(path, backends=None):
if backends is None:
backends = available_backends()
for BackendClass in backends:
try:
return BackendClass(path)
except DecodeError:
pass
raise NoBackendError() | Open an audio file using a library that is available on this
system.
The optional `backends` parameter can be a list of audio file
classes to try opening the file with. If it is not provided,
`audio_open` tries all available backends. If you call this function
many times, you can avoid the cost of checking for available
backends every time by calling `available_backends` once and passing
the result to each `audio_open` call.
If all backends fail to read the file, a NoBackendError exception is
raised. |
def kind(self):
with self._mutex:
kind = self._obj.get_kind()
if kind == RTC.PERIODIC:
return self.PERIODIC
elif kind == RTC.EVENT_DRIVEN:
return self.EVENT_DRIVEN
else:
return self.OTHER | The kind of this execution context. |
def get_m2m_widget(cls, field):
return functools.partial(
widgets.ManyToManyWidget,
model=get_related_model(field)) | Prepare widget for m2m field |
def _product_filter(products) -> str:
_filter = 0
for product in {PRODUCTS[p] for p in products}:
_filter += product
return format(_filter, "b")[::-1] | Calculate the product filter. |
def collect_results(self) -> Optional[Tuple[int, Dict[str, float]]]:
self.wait_to_finish()
if self.decoder_metric_queue.empty():
if self._results_pending:
self._any_process_died = True
self._results_pending = False
return None
decoded_checkpoint, decoder_metrics = self.decoder_metric_queue.get()
assert self.decoder_metric_queue.empty()
self._results_pending = False
logger.info("Decoder-%d finished: %s", decoded_checkpoint, decoder_metrics)
return decoded_checkpoint, decoder_metrics | Returns the decoded checkpoint and the decoder metrics or None if the queue is empty. |
def generate_gap_bed(fname, outname):
f = Fasta(fname)
with open(outname, "w") as bed:
for chrom in f.keys():
for m in re.finditer(r'N+', f[chrom][:].seq):
bed.write("{}\t{}\t{}\n".format(chrom, m.start(0), m.end(0))) | Generate a BED file with gap locations.
Parameters
----------
fname : str
Filename of input FASTA file.
outname : str
Filename of output BED file. |
def zoom_fit(self):
zoom = self.grid.grid_renderer.zoom
grid_width, grid_height = self.grid.GetSize()
rows_height = self._get_rows_height() + \
(float(self.grid.GetColLabelSize()) / zoom)
cols_width = self._get_cols_width() + \
(float(self.grid.GetRowLabelSize()) / zoom)
zoom_height = float(grid_height) / rows_height
zoom_width = float(grid_width) / cols_width
target_zoom = min(zoom_height, zoom_width)
if config["minimum_zoom"] < target_zoom < config["maximum_zoom"]:
self.zoom(target_zoom) | Zooms the rid to fit the window.
Only has an effect if the resulting zoom level is between
minimum and maximum zoom level. |
def total_power(self):
power = self.average_current * self.voltage
return round(power, self.sr) | Total power used. |
def get_group_id(name, vpc_id=None, vpc_name=None, region=None, key=None,
keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name.startswith('sg-'):
log.debug('group %s is a group id. get_group_id not called.', name)
return name
group = _get_group(conn=conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
region=region, key=key, keyid=keyid, profile=profile)
return getattr(group, 'id', None) | Get a Group ID given a Group Name or Group Name and VPC ID
CLI example::
salt myminion boto_secgroup.get_group_id mysecgroup |
def _rewrite_f(self, q):
if isinstance(q, models.F):
q.name = rewrite_lookup_key(self.model, q.name)
return q
if isinstance(q, Node):
q.children = list(map(self._rewrite_f, q.children))
if hasattr(q, 'lhs'):
q.lhs = self._rewrite_f(q.lhs)
if hasattr(q, 'rhs'):
q.rhs = self._rewrite_f(q.rhs)
return q | Rewrite field names inside F call. |
def squared_error(eval_data, predictions, scores='ignored', learner='ignored'):
return [np.sum((np.array(pred) - np.array(inst.output)) ** 2)
for inst, pred in zip(eval_data, predictions)] | Return the squared error of each prediction in `predictions` with respect
to the correct output in `eval_data`.
>>> data = [Instance('input', (0., 0., 1.)),
... Instance('input', (0., 1., 1.)),
... Instance('input', (1., 0., 0.))]
>>> squared_error(data, [(0., 1., 1.), (0., 1., 1.), (-1., 1., 0.)])
[1.0, 0.0, 5.0] |
def update(self, report: str = None) -> bool:
if report is not None:
self.raw = report
else:
raw = self.service.fetch(self.station)
if raw == self.raw:
return False
self.raw = raw
self.data, self.units = metar.parse(self.station, self.raw)
self.translations = translate.metar(self.data, self.units)
self.last_updated = datetime.utcnow()
return True | Updates raw, data, and translations by fetching and parsing the METAR report
Returns True is a new report is available, else False |
def btc_tx_script_to_asm( script_hex ):
if len(script_hex) == 0:
return ""
try:
script_array = btc_script_deserialize(script_hex)
except:
log.error("Failed to convert '%s' to assembler" % script_hex)
raise
script_tokens = []
for token in script_array:
if token is None:
token = 0
token_name = None
if type(token) in [int,long]:
token_name = OPCODE_NAMES.get(token, None)
if token_name is None:
token_name = str(token)
else:
token_name = token
script_tokens.append(token_name)
return " ".join(script_tokens) | Decode a script into assembler |
def not_empty(value,
allow_empty = False,
**kwargs):
if not value and allow_empty:
return None
elif not value:
raise errors.EmptyValueError('value was empty')
return value | Validate that ``value`` is not empty.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` |
def _get_pgtiou(pgt):
pgtIou = None
retries_left = 5
if not settings.CAS_PGT_FETCH_WAIT:
retries_left = 1
while not pgtIou and retries_left:
try:
return PgtIOU.objects.get(tgt=pgt)
except PgtIOU.DoesNotExist:
if settings.CAS_PGT_FETCH_WAIT:
time.sleep(1)
retries_left -= 1
logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(
tries=retries_left
))
raise CasTicketException("Could not find pgtIou for pgt %s" % pgt) | Returns a PgtIOU object given a pgt.
The PgtIOU (tgt) is set by the CAS server in a different request
that has completed before this call, however, it may not be found in
the database by this calling thread, hence the attempt to get the
ticket is retried for up to 5 seconds. This should be handled some
better way.
Users can opt out of this waiting period by setting CAS_PGT_FETCH_WAIT = False
:param: pgt |
def FindProxies():
sc = objc.SystemConfiguration()
settings = sc.dll.SCDynamicStoreCopyProxies(None)
if not settings:
return []
try:
cf_http_enabled = sc.CFDictRetrieve(settings, "kSCPropNetProxiesHTTPEnable")
if cf_http_enabled and bool(sc.CFNumToInt32(cf_http_enabled)):
cfproxy = sc.CFDictRetrieve(settings, "kSCPropNetProxiesHTTPProxy")
cfport = sc.CFDictRetrieve(settings, "kSCPropNetProxiesHTTPPort")
if cfproxy and cfport:
proxy = sc.CFStringToPystring(cfproxy)
port = sc.CFNumToInt32(cfport)
return ["http://%s:%d/" % (proxy, port)]
cf_auto_enabled = sc.CFDictRetrieve(
settings, "kSCPropNetProxiesProxyAutoConfigEnable")
if cf_auto_enabled and bool(sc.CFNumToInt32(cf_auto_enabled)):
cfurl = sc.CFDictRetrieve(settings,
"kSCPropNetProxiesProxyAutoConfigURLString")
if cfurl:
unused_url = sc.CFStringToPystring(cfurl)
return []
finally:
sc.dll.CFRelease(settings)
return [] | This reads the OSX system configuration and gets the proxies. |
def passphrase_file(passphrase=None):
cmd = []
pass_file = None
if not passphrase and 'CRYPTORITO_PASSPHRASE_FILE' in os.environ:
pass_file = os.environ['CRYPTORITO_PASSPHRASE_FILE']
if not os.path.isfile(pass_file):
raise CryptoritoError('CRYPTORITO_PASSPHRASE_FILE is invalid')
elif passphrase:
tmpdir = ensure_tmpdir()
pass_file = "%s/p_pass" % tmpdir
p_handle = open(pass_file, 'w')
p_handle.write(passphrase)
p_handle.close()
if pass_file:
cmd = cmd + ["--batch", "--passphrase-file", pass_file]
vsn = gpg_version()
if vsn[0] >= 2 and vsn[1] >= 1:
cmd = cmd + ["--pinentry-mode", "loopback"]
return cmd | Read passphrase from a file. This should only ever be
used by our built in integration tests. At this time,
during normal operation, only pinentry is supported for
entry of passwords. |
def associate_psds_to_single_ifo_segments(opt, fd_segments, gwstrain, flen,
delta_f, flow, ifo,
dyn_range_factor=1., precision=None):
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
associate_psds_to_segments(single_det_opt, fd_segments, gwstrain, flen,
delta_f, flow, dyn_range_factor=dyn_range_factor,
precision=precision) | Associate PSDs to segments for a single ifo when using the multi-detector
CLI |
def make_gating_node(workflow, datafind_files, outdir=None, tags=None):
cp = workflow.cp
if tags is None:
tags = []
condition_strain_class = select_generic_executable(workflow,
"condition_strain")
condition_strain_nodes = []
condition_strain_outs = FileList([])
for ifo in workflow.ifos:
input_files = FileList([datafind_file for datafind_file in \
datafind_files if datafind_file.ifo == ifo])
condition_strain_jobs = condition_strain_class(cp, "condition_strain",
ifo=ifo, out_dir=outdir, tags=tags)
condition_strain_node, condition_strain_out = \
condition_strain_jobs.create_node(input_files, tags=tags)
condition_strain_nodes.append(condition_strain_node)
condition_strain_outs.extend(FileList([condition_strain_out]))
return condition_strain_nodes, condition_strain_outs | Generate jobs for autogating the data for PyGRB runs.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
datafind_files : pycbc.workflow.core.FileList
A FileList containing the frame files to be gated.
outdir : string
Path of the output directory
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
condition_strain_nodes : list
List containing the pycbc.workflow.core.Node objects representing the
autogating jobs.
condition_strain_outs : pycbc.workflow.core.FileList
FileList containing the pycbc.workflow.core.File objects representing
the gated frame files. |
def _load_version(cls, state, version):
assert(version == cls._PYTHON_NN_CLASSIFIER_MODEL_VERSION)
knn_model = _tc.nearest_neighbors.NearestNeighborsModel(state['knn_model'])
del state['knn_model']
state['_target_type'] = eval(state['_target_type'])
return cls(knn_model, state) | A function to load a previously saved NearestNeighborClassifier model.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer. |
def ModuleLogger(globs):
if not globs.has_key('_debug'):
raise RuntimeError("define _debug before creating a module logger")
logger_name = globs['__name__']
logger = logging.getLogger(logger_name)
logger.globs = globs
if '.' not in logger_name:
hdlr = logging.StreamHandler()
hdlr.setLevel(logging.WARNING)
hdlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))
logger.addHandler(hdlr)
return logger | Create a module level logger.
To debug a module, create a _debug variable in the module, then use the
ModuleLogger function to create a "module level" logger. When a handler
is added to this logger or a child of this logger, the _debug variable will
be incremented.
All of the calls within functions or class methods within the module should
first check to see if _debug is set to prevent calls to formatter objects
that aren't necessary. |
def _merge_inplace(self, other):
    """Context-manager helper for in-place binary arithmetic on coordinates."""
    if other is None:
        yield
        return
    # Non-dimension coordinates take priority during the merge.
    priority_vars = OrderedDict(
        (name, var) for name, var in self.variables.items()
        if name not in self.dims)
    variables = merge_coords_for_inplace_math(
        [self.variables, other.variables], priority_vars=priority_vars)
    yield
    # Commit merged coordinates only after the arithmetic body succeeded.
    self._update_coords(variables)
def validate(opts):
    """Client-facing validation entry point.

    Accepts either a list of extension strings or a namespace object with an
    ``extensions`` attribute, and runs the extension validations on it.

    :param opts: a list of strings, or a namespace with attribute 'extensions'
    :raises ValueError: if ``opts`` is neither of the accepted forms
    :raises ValidationException: if the extensions fail validations
    :return: True if the extensions pass the validations
    """
    if hasattr(opts, 'extensions'):
        return _validate(opts.extensions)
    if isinstance(opts, list):
        return _validate(opts)
    raise ValueError("Value passed into extension validation must either "
                     "be a list of strings or a namespace with an "
                     "attribute of 'extensions'")
def __populate_sections(self):
    """Populate self._sections with a BfdSection for each section in the BFD.

    :raises BfdException: if the underlying BFD pointer is not initialized.
    """
    if not self._ptr:
        raise BfdException("BFD not initialized")
    for section in _bfd.get_sections_list(self._ptr):
        try:
            bfd_section = BfdSection(self._ptr, section)
            self._sections[bfd_section.name] = bfd_section
        # Python 2 except syntax; sections that fail to wrap are skipped silently.
        except BfdSectionException, err:
            pass
def as_euler_angles(q):
    """Convert quaternion(s) to z-y-z Euler angles (alpha, beta, gamma).

    Assumes the angles correspond to the quaternion R via
    ``R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)``; the angles are
    in radians.  Euler angles are numerically ill-conditioned (gimbal lock);
    prefer working with quaternions directly where possible.  See
    <https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>.

    Parameters
    ----------
    q : quaternion or array of quaternions
        The quaternion(s) need not be normalized, but must all be nonzero.

    Returns
    -------
    alpha_beta_gamma : float array
        Output shape is q.shape+(3,), holding (alpha, beta, gamma) in radians.
    """
    # np.float was an alias for the builtin and was removed in NumPy 1.24;
    # the builtin float gives the identical float64 dtype.
    alpha_beta_gamma = np.empty(q.shape + (3,), dtype=float)
    # NOTE(review): np.norm is provided by the quaternion package's NumPy
    # extensions, not by stock NumPy -- confirm the package is imported.
    n = np.norm(q)
    q = as_float_array(q)
    alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2])
    alpha_beta_gamma[..., 1] = 2*np.arccos(np.sqrt((q[..., 0]**2 + q[..., 3]**2)/n))
    alpha_beta_gamma[..., 2] = np.arctan2(q[..., 3], q[..., 0]) - np.arctan2(-q[..., 1], q[..., 2])
    return alpha_beta_gamma
def chunks(event_list, chunk_size):
    """Yield successive chunk_size-sized slices of the event list."""
    start = 0
    while start < len(event_list):
        yield event_list[start:start + chunk_size]
        start += chunk_size
def move_notes(self, noteids, folderid):
    """Move notes to a folder.

    :param noteids: The noteids to move
    :param folderid: The folderid to move notes to
    :raises DeviantartError: if the client is not authenticated via the
        Authorization Code grant type, which this endpoint requires.
    """
    # BUG FIX: the original compared strings with 'is not', which tests
    # object identity, not equality; '!=' is the correct comparison.
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
    response = self._req('/notes/move', post_data={
        'noteids[]' : noteids,
        'folderid' : folderid
    })
    return response
def make_python_name(s, default=None, number_prefix='N',encoding="utf-8"):
    """Return a unicode string that can be used as a legal Python identifier.

    :Arguments:
       *s*
          string to sanitize
       *default*
          used if *s* is ``None`` or empty
       *number_prefix*
          string prepended when *s* starts with a digit
       *encoding*
          codec used when decoding the result to unicode

    NOTE(review): Python 2 only -- relies on the ``unicode`` builtin.
    """
    if s in ('', None):
        s = default
    s = str(s)
    # Replace every character that is not legal inside an identifier.
    s = re.sub("[^a-zA-Z0-9_]", "_", s)
    # Identifiers may not start with a digit; prefix when the match succeeds.
    if not re.match('\d', s) is None:
        s = number_prefix+s
    return unicode(s, encoding)
def is_datetime_arraylike(arr):
    """Check whether an array-like is a datetime array-like or DatetimeIndex.

    Parameters
    ----------
    arr : array-like
        The array-like to check.

    Returns
    -------
    boolean
        Whether or not the array-like is a datetime array-like or
        DatetimeIndex.

    Examples
    --------
    >>> is_datetime_arraylike([1, 2, 3])
    False
    >>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
    True
    """
    if isinstance(arr, ABCDatetimeIndex):
        return True
    if isinstance(arr, (np.ndarray, ABCSeries)):
        # Object-dtype containers qualify only if every element is a datetime.
        return (is_object_dtype(arr.dtype)
                and lib.infer_dtype(arr, skipna=False) == 'datetime')
    return getattr(arr, 'inferred_type', None) == 'datetime'
def describe_connection(self):
    """Return a human-readable description of the device, including the
    connection state.
    """
    # BUG FIX: use the identity test 'is None' rather than '== None', which
    # invokes __eq__ and can misbehave for objects overriding equality.
    if self.device is None:
        return "%s [disconnected]" % (self.name)
    return "%s connected to %s %s version: %s [serial: %s]" % (
        self.name, self.vendor_name, self.product_name,
        self.version_number, self.serial_number)
def run(items):
    """Normalization, log2 ratio calculation and CNV calling for a cohort.

    - Combine per-region coverage across samples
    - Prepare read counts for each sample
    - Normalize coverages by gene and sample; compute log2 ratios
    - Call amplifications and deletions
    """
    items = [utils.to_single_data(x) for x in items]
    work_dir = _sv_workdir(items[0])
    # Optional precomputed seq2c backgrounds shared across the cohort.
    input_backs = list({back for back in (dd.get_background_cnv_reference(d, "seq2c")
                                          for d in items)
                        if back is not None})
    coverage_file = _combine_coverages(items, work_dir, input_backs)
    read_mapping_file = _calculate_mapping_reads(items, work_dir, input_backs)
    normal_names = []
    if input_backs:
        with open(input_backs[0]) as in_handle:
            for line in in_handle:
                fields = line.split()
                if len(fields) == 2:
                    normal_names.append(fields[0])
    # Unaffected samples in the current batch also count as normals.
    normal_names += [dd.get_sample_name(x) for x in items
                     if population.get_affected_status(x) == 1]
    seq2c_calls_file = _call_cnv(items, work_dir, read_mapping_file, coverage_file, normal_names)
    return _split_cnv(items, seq2c_calls_file, read_mapping_file, coverage_file)
def computeISI(spikeTrains):
    """Estimate inter-spike intervals from a spike-train matrix.

    Scans each row (cell) of the matrix and records the number of
    consecutive zero time-steps between spikes.  Zeros before the first
    spike of a row are counted as one interval; trailing zeros at the end
    of a row (no closing spike) are discarded.

    @param spikeTrains (array) matrix of spike trains (cells x time-steps)
    @return isi (list) inter-spike intervals in scan order
    """
    isi = []
    numCells, numSteps = np.shape(spikeTrains)
    for cell in range(numCells):
        # Progress report every 250 cells.  Parenthesized single-argument
        # print is valid on both Python 2 and 3 (the original used the
        # Python-2-only print statement).
        if cell > 0 and cell % 250 == 0:
            print(str(cell) + " cells processed")
        zeroCount = 0
        for t in range(numSteps):
            if spikeTrains[cell][t] == 0:
                zeroCount += 1
            elif zeroCount > 0:
                isi.append(zeroCount)
                zeroCount = 0
    print("**All cells processed**")
    return isi
def color(colors, export_type, output_file=None):
    """Export a single template file."""
    all_colors = flatten_colors(colors)
    template_name = get_export_type(export_type)
    template_file = os.path.join(MODULE_DIR, "templates", template_name)
    # Any falsy output_file falls back to the cache location.
    output_file = output_file or os.path.join(CACHE_DIR, template_name)
    if not os.path.isfile(template_file):
        logging.warning("Template '%s' doesn't exist.", export_type)
        return
    template(all_colors, template_file, output_file)
    logging.info("Exported %s.", export_type)
def export(self, remote_function):
    """Export a remote function.

    Args:
        remote_function: the RemoteFunction object.
    """
    mode = self._worker.mode
    # Before the worker is connected, queue the function for later export.
    if mode is None:
        self._functions_to_export.append(remote_function)
        return
    # Only driver scripts export functions immediately.
    if mode != ray.worker.SCRIPT_MODE:
        return
    self._do_export(remote_function)
def serialize(self, value):
    """Convert a Python value into a form suitable for storing in a
    Mutagen file object, honoring the field's target type and suffix.
    """
    wants_text = self.as_type is six.text_type
    if wants_text and isinstance(value, float):
        # Fixed-point formatting before the final text conversion.
        value = self.as_type(u'{0:.{1}f}'.format(value, self.float_places))
    elif wants_text:
        if isinstance(value, bool):
            # Booleans are stored as '0'/'1'.
            value = six.text_type(int(bool(value)))
        elif isinstance(value, bytes):
            value = value.decode('utf-8', 'ignore')
        else:
            value = six.text_type(value)
    else:
        value = self.as_type(value)
    if self.suffix:
        value += self.suffix
    return value
def listen_to_node(self, id_):
    """Attach a callback on the job pubsub channel if the job exists."""
    if r_client.get(id_) is None:
        # Unknown job: nothing to subscribe to (implicitly returns None).
        return
    channel = _pubsub_key(id_)
    self.toredis.subscribe(channel, callback=self.callback)
    self._listening_to[channel] = id_
    return id_
def _validate(self):
    """Check that the db-file is OK.

    Returns:
        True if OK, False if not.
    """
    sheet = self.table
    id_col = sheet.loc[:, self.db_sheet_cols.id]
    # The srno (id) column must be unique for the database to be usable.
    if not any(id_col.duplicated()):
        return True
    warnings.warn(
        "your database is corrupt: duplicates"
        " encountered in the srno-column")
    logger.debug("srno duplicates:\n" + str(
        id_col.duplicated()))
    return False
def chunkWidgets(self, group):
    """Chunk the widgets up into row groups based on their sizing hints."""
    ui_groups = []
    row = []
    columns = getin(group, ['options', 'columns'], 2)
    for item in group['items']:
        if getin(item, ['options', 'full_width'], False):
            # A full-width widget flushes the current row and sits alone.
            ui_groups.append(row)
            ui_groups.append([item])
            row = []
            continue
        row.append(item)
        # Flush when the row is full or we just consumed the final item.
        if len(row) == columns or item == group['items'][-1]:
            ui_groups.append(row)
            row = []
    return ui_groups
def get_buckets(self, bucket_type=None, timeout=None):
    """Fetch a list of all buckets."""
    bucket_type = self._get_bucket_type(bucket_type)
    url = self.bucket_list_path(bucket_type=bucket_type,
                                timeout=timeout)
    status, headers, body = self._request('GET', url)
    if status != 200:
        raise RiakError('Error getting buckets.')
    return json.loads(bytes_to_str(body))['buckets']
def display(port=None, height=None):
    """Display a TensorBoard instance already running on this machine.

    Args:
        port: The port on which the TensorBoard server is listening, as an
            `int`, or `None` to automatically select the most recently
            launched TensorBoard.
        height: The height of the frame into which to render the TensorBoard
            UI, as an `int` number of pixels, or `None` to use a default
            value (currently 800).
    """
    # Thin wrapper: always prints the informational message and retains no
    # display handle.
    _display(port=port, height=height, print_message=True, display_handle=None)
def get_interpolated_value(self, energy):
    """Return the interpolated density at a particular energy, per spin.

    Args:
        energy: Energy to return the density for.
    """
    return {spin: get_linear_interpolated_value(self.energies,
                                                self.densities[spin],
                                                energy)
            for spin in self.densities.keys()}
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None,
                         log=True, wave_unit=u.AA):
    """Generate a wavelength array to be used for spectrum sampling.

    The result covers ``minwave <= lambda < maxwave``, either with ``num``
    points or with a fixed spacing ``delta`` (interpreted in log space when
    ``log`` is True).  ``delta`` takes precedence over ``num``.

    Parameters
    ----------
    minwave, maxwave : float
        Lower and upper wavelength limits, in linear space regardless of
        ``log``.
    num : int
        Number of wavelength values; only used when ``delta=None``.
    delta : float or `None`
        Spacing between wavelength values (log-space spacing if ``log``).
    log : bool
        Evenly space values in log scale if `True`, else linearly.
    wave_unit : str or `~astropy.units.core.Unit`
        Wavelength unit.  Default is Angstrom.

    Returns
    -------
    waveset : `~astropy.units.quantity.Quantity`
        Generated wavelength set.
    waveset_str : str
        Info string associated with the result.
    """
    wave_unit = units.validate_unit(wave_unit)
    if delta is not None:
        num = None  # explicit spacing overrides the point count
    waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format(
        minwave, maxwave, num, delta, log)
    if log:
        logmin = np.log10(minwave)
        logmax = np.log10(maxwave)
        waveset = (np.logspace(logmin, logmax, num, endpoint=False)
                   if delta is None
                   else 10 ** np.arange(logmin, logmax, delta))
    else:
        waveset = (np.linspace(minwave, maxwave, num, endpoint=False)
                   if delta is None
                   else np.arange(minwave, maxwave, delta))
    return waveset.astype(np.float64) * wave_unit, waveset_str
def statcast(start_dt=None, end_dt=None, team=None, verbose=True):
    """Pull statcast play-level data from Baseball Savant for a date range.

    INPUTS:
    start_dt: YYYY-MM-DD : the first date for which you want statcast data
    end_dt: YYYY-MM-DD : the last date for which you want statcast data
    team: optional (defaults to None) : city abbreviation of the team you
        want data for (e.g. SEA or BOS)
    verbose: print progress for chunked (multi-day) requests
    """
    start_dt, end_dt = sanitize_input(start_dt, end_dt)
    # Queries spanning more than this many days are fetched in chunks.
    small_query_threshold = 5
    if start_dt and end_dt:
        date_format = "%Y-%m-%d"
        d1 = datetime.datetime.strptime(start_dt, date_format)
        d2 = datetime.datetime.strptime(end_dt, date_format)
        days_in_query = (d2 - d1).days
        if days_in_query <= small_query_threshold:
            data = small_request(start_dt,end_dt)
        else:
            data = large_request(start_dt,end_dt,d1,d2,step=small_query_threshold,verbose=verbose)
    # NOTE(review): if sanitize_input ever returns a falsy start or end date,
    # `data` is unbound here and the next line raises NameError -- confirm
    # sanitize_input always yields both dates.
    data = postprocessing(data, team)
    return data
def zdiffstore(self, dest, keys, withscores=False):
    """Compute the difference of the sorted sets named in ``keys`` and
    store the result in a new sorted set at ``dest``.
    """
    script_keys = (dest,) + tuple(keys)
    score_flag = 'withscores' if withscores else ''
    return self.execute_script('zdiffstore', script_keys, score_flag,
                               withscores=withscores)
def get_data_len(self):
    """Compute the length of the data field of an HTTP/2 frame.

    Subtracts the encoded length of the padlen field and the padding itself
    from the total length (``s_len``) captured during pre-dissection.

    @return int: length of the data part of the HTTP/2 frame
    @raise AssertionError: if the computed length would be negative
    """
    padding_len = self.getfieldval('padlen')
    fld, fval = self.getfield_and_val('padlen')
    data_len = self.s_len - fld.i2len(self, fval) - padding_len
    assert(data_len >= 0)
    return data_len
def RawBytesToScriptHash(raw):
    """Hash the provided raw hex bytes with ripemd160 (via Hash160).

    Args:
        raw (bytes): hex-encoded byte string, e.g. b'aabbcc'
    Returns:
        UInt160: the resulting script hash
    """
    unhexed = binascii.unhexlify(raw)
    hash_hex = bytes(Crypto.Hash160(unhexed), encoding='utf-8')
    return UInt160(data=binascii.unhexlify(hash_hex))
def dumplist(args):
    """Dump lists of files based on the given criteria."""
    from .query import Database
    db = Database()
    objs = db.objects(
        protocol=args.protocol,
        purposes=args.purpose,
        model_ids=(args.client,),
        groups=args.group,
        classes=args.sclass
    )
    if args.selftest:
        # Self-test mode discards all output.
        from bob.db.utils import null
        output = null()
    else:
        output = sys.stdout
    for f in objs:
        output.write('%s\n' % (f.make_path(args.directory, args.extension),))
    return 0
def flg(self, name, help, abbrev=None):
    """Describe a boolean flag option.

    When no abbreviation is given, '-' plus the first character of ``name``
    is used; underscores in the long name are rendered as dashes.
    """
    short_opt = abbrev or '-' + name[0]
    long_opt = '--' + name.replace('_', '-')
    self._add(name, short_opt, long_opt, action='store_true', help=help)
def GetValue(
        self,
        Channel,
        Parameter):
    """Retrieve a PCAN Channel value.

    Remarks:
        Parameters can be present or not depending on the kind of hardware
        (PCAN Channel) being used.  If a parameter is not available, a
        PCAN_ERROR_ILLPARAMTYPE error will be returned.

    Parameters:
        Channel : A TPCANHandle representing a PCAN Channel
        Parameter : The TPCANParameter parameter to get

    Returns:
        A 2-tuple: (TPCANStatus result, retrieved value)
    """
    try:
        # String-valued parameters need a text buffer; everything else is
        # read into a plain int.  Tuple membership replaces the original
        # unreadable chain of '==' comparisons (identical semantics).
        if Parameter in (PCAN_API_VERSION, PCAN_HARDWARE_NAME,
                         PCAN_CHANNEL_VERSION, PCAN_LOG_LOCATION,
                         PCAN_TRACE_LOCATION, PCAN_BITRATE_INFO_FD,
                         PCAN_IP_ADDRESS):
            mybuffer = create_string_buffer(256)
        else:
            mybuffer = c_int(0)
        res = self.__m_dllBasic.CAN_GetValue(Channel, Parameter,
                                             byref(mybuffer), sizeof(mybuffer))
        return TPCANStatus(res), mybuffer.value
    except:
        logger.error("Exception on PCANBasic.GetValue")
        raise
def run_missing_simulations(self, param_list, runs=None):
    """Run simulations from the parameter list that are not yet in the
    database, ensuring at least ``runs`` replications per combination.

    Args:
        param_list (list, dict): either a list of parameter combinations or
            a dictionary expanded into a list via list_param_combinations.
        runs (int): the number of runs to perform for each parameter
            combination; only allowed if param_list doesn't already feature
            an 'RngRun' key.
    """
    if isinstance(param_list, dict):
        # Expand {param: [values]} into explicit combinations.
        param_list = list_param_combinations(param_list)
    missing = self.get_missing_simulations(param_list, runs)
    self.run_simulations(missing)
def sync_experiments_from_spec(filename):
    """Read a JSON experiment spec file and sync the experiments stored in
    redis to match it.

    A spec maps experiment names to lists of choices, e.g.:
        {"experiment 1": ["choice 1", "choice 2", "choice 3"]}
    """
    redis = oz.redis.create_connection()
    with open(filename, "r") as spec_file:
        schema = escape.json_decode(spec_file.read())
    oz.bandit.sync_from_spec(redis, schema)
def sup_of_layouts(layout1, layout2):
    """Return the least layout compatible with layout1 and layout2.

    The result is the elementwise maximum of the two layouts, with the
    shorter one implicitly padded with zeros.  Neither input is mutated.
    """
    # BUG FIX: the old code extended the caller's (shorter) list in place
    # via '+=' after swapping; pad a copy instead.  'zip' also replaces the
    # Python-2-only 'xrange' index loop.
    if len(layout1) < len(layout2):
        layout1 = list(layout1) + [0] * (len(layout2) - len(layout1))
    elif len(layout2) < len(layout1):
        layout2 = list(layout2) + [0] * (len(layout1) - len(layout2))
    return [max(a, b) for a, b in zip(layout1, layout2)]
def autoescape(context, nodelist, setting):
    """Force autoescape behaviour for this block, restoring the previous
    setting afterwards.
    """
    saved_setting = context.autoescape
    context.autoescape = setting
    rendered = nodelist.render(context)
    context.autoescape = saved_setting
    # Output rendered under autoescape is safe to emit without re-escaping.
    return mark_safe(rendered) if setting else rendered
def _fetch_chunker(self, uri, chunk_size, size, obj_size):
    """Generator that yields an object's content in byte chunks.

    Issues successive HTTP Range requests of up to ``chunk_size`` bytes
    against ``uri`` until the object is exhausted (empty body) or until
    ``size`` bytes (capped at ``obj_size``) have been yielded.
    """
    pos = 0
    total_bytes = 0
    # A falsy size means "no explicit limit": fetch the whole object.
    size = size or obj_size
    max_size = min(size, obj_size)
    while True:
        # Range header endpoints are inclusive, hence the -1.
        endpos = min(obj_size, pos + chunk_size - 1)
        headers = {"Range": "bytes=%s-%s" % (pos, endpos)}
        resp, resp_body = self.api.method_get(uri, headers=headers,
                raw_content=True)
        pos = endpos + 1
        # An empty body means the server has no more data to send.
        if not resp_body:
            return
        yield resp_body
        total_bytes += len(resp_body)
        if total_bytes >= max_size:
            return
def incrementSub(self, amount=1):
    """Increment the sub-progress bar by ``amount``.

    :param amount: number of progress units to add (default 1)
    """
    self._subProgressBar.setValue(self.subValue() + amount)
    # Keep the UI responsive while progress updates are emitted.
    QApplication.instance().processEvents()
def terminate_jobflows(self, jobflow_ids):
    """Terminate Elastic MapReduce job flows.

    :type jobflow_ids: list
    :param jobflow_ids: A list of job flow IDs
    """
    request_params = {}
    # Expand the id list into 'JobFlowIds.member.N' request parameters.
    self.build_list_params(request_params, jobflow_ids, 'JobFlowIds.member')
    return self.get_status('TerminateJobFlows', request_params, verb='POST')
def sessions_info(self, hosts):
    """Return a ClientInfo per session.

    :param hosts: comma separated list of members of the ZK ensemble.
    :returns: a dictionary of (session_id, ClientInfo).
    """
    info_by_id = {}
    for (server_ip, server_port), dump in self.dump_by_server(hosts).items():
        for line in dump.split("\n"):
            match = self.IP_PORT_REGEX.match(line)
            if not match:
                continue
            ip, port, sid = match.groups()
            info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)
    return info_by_id
def transform_flask_bare_import(node):
    """Translate a flask.ext bare import into a non-magical import.

    Translates ``import flask.ext.admin as admin`` into
    ``import flask_admin as admin``.
    """
    rewritten_names = []
    for name, as_name in node.names:
        match = re.match(r'flask\.ext\.(.*)', name)
        extension = match.group(1)
        rewritten_names.append(('flask_{}'.format(extension), as_name))
    new_node = nodes.Import()
    copy_node_info(node, new_node)
    new_node.names = rewritten_names
    mark_transformed(new_node)
    return new_node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.