code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def check_error(res, error_enum):
    """Raise if the result has an error, otherwise return the result."""
    if not res.HasField("error"):
        return res
    # Build a readable "<enum>.<name>: '<details>'" message for the failure.
    enum_name = error_enum.DESCRIPTOR.full_name
    error_name = error_enum.Name(res.error)
    details = getattr(res, "error_details", "<none>")
    raise RequestError("%s.%s: '%s'" % (enum_name, error_name, details), res)
|
Raise if the result has an error, otherwise return the result.
|
def get_schema_model():
    """
    Returns the schema model that is active in this project.
    """
    model_path = settings.POSTGRES_SCHEMA_MODEL
    try:
        return django_apps.get_model(model_path, require_ready=False)
    except ValueError:
        # Setting was not a dotted "app_label.model_name" string.
        raise ImproperlyConfigured("POSTGRES_SCHEMA_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        # Setting parsed fine but no such model is registered.
        raise ImproperlyConfigured(
            "POSTGRES_SCHEMA_MODEL refers to model '%s' that has not been installed" % model_path
        )
|
Returns the schema model that is active in this project.
|
def create(cls, api, run_id=None, project=None, username=None):
    """Create a run for the given project.

    Args:
        api: client wrapper whose ``client`` executes GraphQL mutations and
            whose ``settings`` supplies the default project.
        run_id: optional run identifier; a fresh id is generated when omitted.
        project: optional project name; falls back to ``api.settings["project"]``.
        username: optional entity (user/team) name to create the run under.

    Returns:
        Run: a handle for the upserted run, seeded with empty JSON
        config/metrics blobs, no tags, and state "running".
    """
    run_id = run_id or util.generate_id()
    project = project or api.settings.get("project")
    # upsertBucket creates the run ("bucket") server-side, or returns the
    # existing one for this (entity, project, name) triple.
    mutation = gql('''
    mutation upsertRun($project: String, $entity: String, $name: String!) {
        upsertBucket(input: {modelName: $project, entityName: $entity, name: $name}) {
            bucket {
                project {
                    name
                    entity { name }
                }
                id
                name
            }
            inserted
        }
    }
    ''')
    variables = {'entity': username,
                 'project': project, 'name': run_id}
    res = api.client.execute(mutation, variable_values=variables)
    res = res['upsertBucket']['bucket']
    # Entity/project names come back from the server response, so the Run is
    # built against what was actually created rather than the local inputs.
    return Run(api.client, res["project"]["entity"]["name"], res["project"]["name"], res["name"], {
        "id": res["id"],
        "config": "{}",
        "systemMetrics": "{}",
        "summaryMetrics": "{}",
        "tags": [],
        "description": None,
        "state": "running"
    })
|
Create a run for the given project
|
def distinct_seeds(k):
    """Return k distinct seeds for random number generation.

    Args:
        k (int): number of distinct seeds to produce.

    Returns:
        list[int]: k unique integers in [0, 2**32 - 1].
    """
    seeds = []
    seen = set()  # O(1) membership instead of scanning the list each draw
    while len(seeds) < k:
        # Bug fix: random.randint requires both bounds; the original called
        # randint(2**32 - 1) which raises TypeError.
        s = random.randint(0, 2**32 - 1)
        if s not in seen:
            seen.add(s)
            seeds.append(s)
    return seeds
|
returns k distinct seeds for random number generation
|
def delta(self, local=False):
    """Return the number of days of difference between start and end."""
    start, end = self.get(local)
    return end - start
|
Returns the number of days of difference
|
def read_user_data(self, user_data_path):
    """Reads and parses a user_data file.
    Args:
        user_data_path (str):
            path to the userdata file
    Returns:
        str: the parsed user data file
    """
    raw = read_value_from_path(user_data_path)
    # Render the raw content with this blueprint's variables.
    return parse_user_data(self.get_variables(), raw, self.name)
|
Reads and parses a user_data file.
Args:
user_data_path (str):
path to the userdata file
Returns:
str: the parsed user data file
|
def shrink(self, shrink):
    """
    Remove unnecessary parts.
    :param shrink: Object to shrink
    :type shrink: dict | list
    :return: Shrunk object
    :rtype: dict | list
    """
    if isinstance(shrink, list):
        return self._shrink_list(shrink)
    elif isinstance(shrink, dict):
        return self._shrink_dict(shrink)
    else:
        # Scalars and anything else pass through untouched.
        return shrink
|
Remove unnecessary parts
:param shrink: Object to shrink
:type shrink: dict | list
:return: Shrunk object
:rtype: dict | list
|
def path_components(path):
    """Convert a path into group and channel name components"""
    def yield_components(path):
        # Index-based scan over "/'name'/'name'" where a literal quote
        # inside a name is written as two quotes ('').
        i = 0
        length = len(path)
        while True:
            if i >= length:
                return
            if path[i] != '/':
                raise ValueError("Invalid path, expected \"/\"")
            if i + 1 < length and path[i + 1] != "'":
                raise ValueError("Invalid path, expected \"'\"")
            if i + 1 >= length:
                # Trailing "/" with nothing after it: nothing more to parse.
                return
            i += 2  # step over the "/'" pair
            chars = []
            while True:
                if i >= length:
                    # Unterminated component is silently dropped, matching
                    # the original iterator's StopIteration behaviour.
                    return
                c = path[i]
                nxt = path[i + 1] if i + 1 < length else None
                if c == "'" and nxt == "'":
                    chars.append("'")  # escaped quote
                    i += 2
                elif c == "'":
                    yield "".join(chars)
                    i += 1
                    break
                else:
                    chars.append(c)
                    i += 1
    return list(yield_components(path))
|
Convert a path into group and channel name components
|
def get_newest_app_version() -> Version:
    """
    Download the version tag from remote.
    :return: version from remote
    :rtype: ~packaging.version.Version
    """
    with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as manager:
        payload = manager.urlopen('GET', static_data.PYPI_JSON_URL).data.decode('utf-8')
    releases = json.loads(payload).get('releases', [])
    # Only final releases count; fall back to 0.0.0 when none exist.
    stable = [v for v in (Version(tag) for tag in releases) if not v.is_prerelease]
    return max(stable, default=Version('0.0.0'))
|
Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version
|
def normalizeInterpolationFactor(value):
    """
    Normalizes interpolation factor.
    * **value** must be an :ref:`type-int-float`, ``tuple`` or ``list``.
    * If **value** is a ``tuple`` or ``list``, it must have exactly two items.
      These items must be instances of :ref:`type-int-float`.
    * Returned value is a ``tuple`` of two ``float``.

    Raises:
        TypeError: if value (or an item of it) is not an int/float.
        ValueError: if a sequence value does not have exactly two items.
    """
    if not isinstance(value, (int, float, list, tuple)):
        raise TypeError("Interpolation factor must be an int, float, or tuple "
                        "instances, not %s." % type(value).__name__)
    if isinstance(value, (int, float)):
        # A scalar factor applies equally to both axes.
        return (float(value), float(value))
    if len(value) != 2:
        raise ValueError("Interpolation factor tuple must contain two "
                         "values, not %d." % len(value))
    for v in value:
        if not isinstance(v, (int, float)):
            # Bug fix: report the offending item's type; the original used
            # type(value).__name__, which named the container instead.
            raise TypeError("Interpolation factor tuple values must be an "
                            ":ref:`type-int-float`, not %s."
                            % type(v).__name__)
    return tuple(float(v) for v in value)
|
Normalizes interpolation factor.
* **value** must be an :ref:`type-int-float`, ``tuple`` or ``list``.
* If **value** is a ``tuple`` or ``list``, it must have exactly two items.
These items must be instances of :ref:`type-int-float`.
* Returned value is a ``tuple`` of two ``float``.
|
def gen_modules(self, initial_load=False):
    '''
    Tell the minion to reload the execution modules
    CLI Example:
    .. code-block:: bash
        salt '*' sys.reload_modules
    '''
    # Loader order matters: utils feed the execution modules, and the
    # serializers must exist before states can be loaded.
    self.utils = salt.loader.utils(self.opts)
    self.functions = salt.loader.minion_mods(
        self.opts,
        utils=self.utils,
        whitelist=self.whitelist,
        initial_load=initial_load)
    self.serializers = salt.loader.serializers(self.opts)
    # Optional subsystems are only loaded when the corresponding mk_* flag
    # is set (presumably by the constructor -- confirm against __init__).
    if self.mk_returners:
        self.returners = salt.loader.returners(self.opts, self.functions)
    if self.mk_states:
        self.states = salt.loader.states(self.opts,
                                         self.functions,
                                         self.utils,
                                         self.serializers)
    if self.mk_rend:
        self.rend = salt.loader.render(self.opts, self.functions)
    if self.mk_matcher:
        self.matchers = salt.loader.matchers(self.opts)
    # Expose this method itself so a minion can trigger a reload at runtime.
    self.functions['sys.reload_modules'] = self.gen_modules
|
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
|
def echo_via_pager(*args, **kwargs):
    """Display pager only if it does not fit in one terminal screen.
    NOTE: The feature is available only on ``less``-based pager.
    """
    # -i: smart case search, -X: keep output on screen, -F: quit if one
    # screen, -R: pass through colors. Only set LESS if the user hasn't.
    had_less = 'LESS' in os.environ
    try:
        if not had_less:
            os.environ['LESS'] = '-iXFR'
        click.echo_via_pager(*args, **kwargs)
    finally:
        # Restore the environment only when we were the ones who set it.
        if not had_less:
            os.environ.pop('LESS', None)
|
Display pager only if it does not fit in one terminal screen.
NOTE: The feature is available only on ``less``-based pager.
|
def delete_and_rm_options(*args, **kwargs):
    """
    Options which apply both to `globus delete` and `globus rm`.

    Usable both as a bare decorator and with keyword arguments
    (``supports_batch``, ``default_enable_globs``) -- presumably
    detect_and_decorate dispatches between the two forms; confirm there.
    """
    def inner_decorator(f, supports_batch=True, default_enable_globs=False):
        # Options are applied innermost-first; click composes them in order.
        f = click.option(
            "--recursive", "-r", is_flag=True, help="Recursively delete dirs"
        )(f)
        f = click.option(
            "--ignore-missing",
            "-f",
            is_flag=True,
            help="Don't throw errors if the file or dir is absent",
        )(f)
        f = click.option(
            "--star-silent",
            "--unsafe",
            "star_silent",
            is_flag=True,
            help=(
                'Don\'t prompt when the trailing character is a "*".'
                + (" Implicit in --batch" if supports_batch else "")
            ),
        )(f)
        f = click.option(
            "--enable-globs/--no-enable-globs",
            is_flag=True,
            default=default_enable_globs,
            show_default=True,
            help=(
                "Enable expansion of *, ?, and [ ] characters in the last "
                "component of file paths, unless they are escaped with "
                "a preceeding backslash, \\"
            ),
        )(f)
        # --batch only makes sense for commands that can read paths on stdin.
        if supports_batch:
            f = click.option(
                "--batch",
                is_flag=True,
                help=(
                    "Accept a batch of paths on stdin (i.e. run in "
                    "batchmode). Uses ENDPOINT_ID as passed on the "
                    "commandline. Any commandline PATH given will be used "
                    "as a prefix to all paths given"
                ),
            )(f)
        return f
    return detect_and_decorate(inner_decorator, args, kwargs)
|
Options which apply both to `globus delete` and `globus rm`
|
def gen_file_lines(path, mode='r', strip_eol=True, ascii=True, eol='\n'):
    """Generate a sequence of "documents" from the lines in a file.

    Arguments:
        path (file or str): path to a file or an open file_obj ready to be read
        mode (str): file mode to open a file in. Bug fix: the old default
            'rUb' was invalid ('U' cannot be combined with 'b', and 'U' was
            removed in Python 3.11), so opening by path always raised.
        strip_eol (bool): whether to strip the EOL char from lines as they are
            read/generated/yielded
        ascii (bool): whether to coerce each line with str()
        eol (str): line-terminator character stripped when strip_eol is True
    TODO:
        Use `eol` to split lines (the file object still splits on its own
        newline handling).
    """
    if isinstance(path, str):
        path = open(path, mode)
    with path:
        for line in path:
            if ascii:
                # NOTE(review): str() on bytes yields "b'...'"; presumably
                # callers pass text-mode files -- confirm before relying on
                # binary mode here.
                line = str(line)
            if strip_eol:
                line = line.rstrip(eol)
            yield line
|
Generate a sequence of "documents" from the lines in a file
Arguments:
path (file or str): path to a file or an open file_obj ready to be read
mode (str): file mode to open a file in
strip_eol (bool): whether to strip the EOL char from lines as they are read/generated/yielded
ascii (bool): whether to use the stringify and to_ascii functions on each line
eol (str): UNUSED character delimitting lines in the file
TODO:
Use `eol` to split lines (currently ignored because use `file.readline` doesn't have EOL arg)
|
def _match_member(self, i, column):
"""Looks at line 'i' to see if the line matches a module member def."""
self.col_match = self.RE_MEMBERS.match(self._source[i])
if self.col_match is not None:
if column < self._source[i].index(":"):
self.el_call = "name"
else:
self.el_call = "assign"
return True
else:
return False
|
Looks at line 'i' to see if the line matches a module member def.
|
async def connect_controller(self, controller_name=None):
    """Connect to a controller by name. If the name is empty, it
    connects to the current controller.
    """
    controller_name = controller_name or self.jujudata.current_controller()
    if not controller_name:
        raise JujuConnectionError('No current controller')
    controller = self.jujudata.controllers()[controller_name]
    # TODO change Connection so we can pass all the endpoints
    # instead of just the first.
    endpoint = controller['api-endpoints'][0]
    accounts = self.jujudata.accounts().get(controller_name, {})
    await self.connect(
        endpoint=endpoint,
        uuid=None,
        username=accounts.get('user'),
        password=accounts.get('password'),
        cacert=controller.get('ca-cert'),
        bakery_client=self.bakery_client_for_controller(controller_name),
    )
    self.controller_name = controller_name
|
Connect to a controller by name. If the name is empty, it
connects to the current controller.
|
def GetPeaksExons(bed,parsedGTF):
    """
    Annotates a bedtool, BED narrow peak
    :param bed: a pandas dataframe in bed format
    :param parsedGTF: a parsed GTF file as outputed by parseGTF() with the following columns
    :returns: a Pandas dataframe
    """
    # Convert the peaks dataframe to a BedTool so it can be intersected.
    bedtool_AB=dfTObedtool(bed)
    # Keep only exon features from the GTF and turn them into a BED table.
    exonsGTF=parsedGTF[parsedGTF["feature"]=="exon"]
    exonsGTF.reset_index(inplace=True, drop=True)
    exonsBED=GTFtoBED(exonsGTF, "exon_id")
    exonsBED.columns=['chrom', 'chromStart', 'chromEnd', 'exon_id', 'score', 'strand']
    exonsBEDcols=exonsBED.columns.tolist()
    bedcols=bed.columns.tolist()
    # Suffix exon columns that collide with peak columns so the merged
    # intersection table keeps unique names.
    exonsBEDcols_=[]
    for c in exonsBEDcols:
        if c in bedcols:
            exonsBEDcols_.append(c+"_exon")
        else:
            exonsBEDcols_.append(c)
    # Flatten [peak cols, exon cols, overlap] into one header list.
    cols=[bedcols,exonsBEDcols_,["overlap"] ]
    cols=[item for sublist in cols for item in sublist]
    bedtool_exons=dfTObedtool(exonsBED)
    # Strand-aware intersection; wo=True appends the overlap size column.
    bedtool_target_exons=bedtool_AB.intersect(bedtool_exons, wo=True, s=True)
    dfTargetE=pd.read_table(bedtool_target_exons.fn, names=cols)
    # Attach transcript and gene ids for every overlapped exon.
    ExonsTransGenes=parsedGTF[["exon_id","transcript_id","gene_id"]].drop_duplicates()
    dfTargets=pd.merge(dfTargetE,ExonsTransGenes,on=["exon_id"],how="left")
    # Helper column so per-field occurrence counts can be summed.
    dfTargets["count"]=1
    def getCounts(df,field):
        """
        For each field in a bed narrow peak returns the number or times that field is present,\
        the normalized mean of the '-log10(pValue)' and normalized mean of the signal value.
        :param df: a Pandas dataframe of a bed narrow peak
        :param field: field to analyse, ie. exons or transcripts
        :returns: a Pandas dataframe
        """
        # How many distinct peaks ('name') hit each value of `field`.
        tmp=df[[field,'name',"count"]].drop_duplicates()
        tmp=tmp.drop(["name"],axis=1)
        tmp["count"]=tmp["count"].astype(int)
        tmp.columns=[field,"%s_count" %str(field)]
        tmp=tmp.groupby(field, as_index=False).sum()
        df=pd.merge(df,tmp,on=field,how="left")
        # NOTE(review): despite the docstring's "normalized mean", this
        # aggregates per-peak -log10(pValue) by multiplying them (reduce
        # with *) -- confirm the intended statistic.
        tmp=df[[field,'name',"-log10(pValue)"]].drop_duplicates()
        tmp=tmp.drop(["name"],axis=1)
        tmp["-log10(pValue)"]=tmp["-log10(pValue)"].astype(float)
        tmp=tmp.groupby(field).apply(lambda l: reduce(lambda x, y: x*y, l["-log10(pValue)"]) )
        tmp=pd.DataFrame(tmp)
        tmp.reset_index(inplace=True,drop=False)
        tmp.columns=[field,"%s norm. mean -log10(pValue)" %str(field)]
        df=pd.merge(df,tmp,on=field,how="left")
        # Same product aggregation for the signal value.
        tmp=df[[field,'name',"signalValue"]].drop_duplicates()
        tmp=tmp.drop(["name"],axis=1)
        tmp["signalValue"]=tmp["signalValue"].astype(float)
        tmp=tmp.groupby(field).apply(lambda l: reduce(lambda x, y: x*y, l["signalValue"]) )
        tmp=pd.DataFrame(tmp)
        tmp.reset_index(inplace=True,drop=False)
        tmp.columns=[field,"%s signalValue" %str(field)]
        df=pd.merge(df,tmp,on=field,how="left")
        return df
    for f in ["exon_id","transcript_id"]:
        dfTargets=getCounts(dfTargets,f)
    def getCounts_GeneIDs(df):
        """
        For each gene id in a bed narrow peak returns the number or times that field is present,\
        the normalized mean of the '-log10(pValue)' and normalized mean of the signal value.
        :param df: a Pandas dataframe of a bed narrow peak
        :returns: a Pandas dataframe
        """
        field="gene_id"
        # Gene-level stats are rolled up by summing the per-transcript values.
        tmp=df[[field,"transcript_id","transcript_id_count"]].drop_duplicates()
        tmp=tmp.drop(["transcript_id"],axis=1)
        tmp["transcript_id_count"]=tmp["transcript_id_count"].astype(int)
        tmp.columns=[field,"%s_count" %str(field)]
        tmp=tmp.groupby(field, as_index=False).sum()
        df=pd.merge(df,tmp,on=field,how="left")
        tmp=df[[field,'transcript_id',"transcript_id norm. mean -log10(pValue)"]].drop_duplicates()
        tmp=tmp.drop(["transcript_id"],axis=1)
        tmp["transcript_id norm. mean -log10(pValue)"]=tmp["transcript_id norm. mean -log10(pValue)"].astype(float)
        tmp.columns=[field,"%s norm. mean -log10(pValue)" %str(field)]
        tmp=tmp.groupby(field, as_index=False).sum()
        df=pd.merge(df,tmp,on=field,how="left")
        tmp=df[[field,'transcript_id',"transcript_id signalValue"]].drop_duplicates()
        tmp=tmp.drop(["transcript_id"],axis=1)
        tmp["transcript_id signalValue"]=tmp["transcript_id signalValue"].astype(float)
        tmp.columns=[field,"%s signalValue" %str(field)]
        tmp=tmp.groupby(field, as_index=False).sum()
        df=pd.merge(df,tmp,on=field,how="left")
        return df
    dfTargets=getCounts_GeneIDs(dfTargets)
    # Drop the helper column before returning the annotated table.
    dfTargets=dfTargets.drop(["count"],axis=1)
    return dfTargets
|
Annotates a bedtool, BED narrow peak
:param bed: a pandas dataframe in bed format
:param parsedGTF: a parsed GTF file as output by parseGTF() with the following columns
:returns: a Pandas dataframe
|
def get_pmag_dir():
    """
    Returns directory in which PmagPy is installed.

    Tries, in order: the py2app resource path, the local git checkout,
    a PyInstaller bundle, and finally the pip-installed package location.
    Returns '.' with a warning if none of these can be found.
    """
    # this is correct for py2exe (DEPRECATED)
    #win_frozen = is_frozen()
    #if win_frozen:
    #    path = os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding()))
    #    path = os.path.split(path)[0]
    #    return path
    # this is correct for py2app
    try:
        return os.environ['RESOURCEPATH']
    # this works for everything else
    except KeyError: pass
    # new way:
    # if we're in the local PmagPy directory:
    if os.path.isfile(os.path.join(os.getcwd(), 'pmagpy', 'pmag.py')):
        lib_dir = os.path.join(os.getcwd(), 'pmagpy')
    # if we're anywhere else:
    elif getattr(sys, 'frozen', False): #pyinstaller datafile directory
        return sys._MEIPASS
    else:
        # horrible, hack-y fix
        # (prevents namespace issue between
        # local github PmagPy and pip-installed PmagPy).
        # must reload because we may have
        # changed directories since importing
        temp = os.getcwd()
        os.chdir('..')
        reload(locator)
        # Locate the installed package via its bundled resource file, then
        # walk up to the package root and back down into pmagpy/.
        lib_file = resource_filename('locator', 'resource.py')
        full_dir = os.path.split(lib_file)[0]
        ind = full_dir.rfind(os.sep)
        lib_dir = full_dir[:ind+1]
        lib_dir = os.path.realpath(os.path.join(lib_dir, 'pmagpy'))
        os.chdir(temp)
        # end fix
    # old way:
    #lib_dir = os.path.dirname(os.path.realpath(__file__))
    # Sanity-check the candidate; fall back to cwd, then to the directory
    # containing this module, before giving up.
    if not os.path.isfile(os.path.join(lib_dir, 'pmag.py')):
        lib_dir = os.getcwd()
    fname = os.path.join(lib_dir, 'pmag.py')
    if not os.path.isfile(fname):
        pmag_dir = os.path.split(os.path.split(__file__)[0])[0]
        if os.path.isfile(os.path.join(pmag_dir,'pmagpy','pmag.py')):
            return pmag_dir
        else:
            print('-W- Can\'t find the data model! Make sure you have installed pmagpy using pip: "pip install pmagpy --upgrade"')
            return '.'
    # strip "/" or "\" and "pmagpy" to return proper PmagPy directory
    if lib_dir.endswith(os.sep):
        lib_dir = lib_dir[:-1]
    if lib_dir.endswith('pmagpy'):
        pmag_dir = os.path.split(lib_dir)[0]
    else:
        pmag_dir = lib_dir
    return pmag_dir
|
Returns directory in which PmagPy is installed
|
def __initialize_instance(self):
    """
    Take any predefined methods/handlers and insert them into Sanic JWT
    """
    config = self.config
    # Instantiate the configured Authentication class for this app.
    self.instance.auth = self.authentication_class(self.app, config=config)
    if config.auth_mode():
        init_handlers = handlers
    else:
        init_handlers = auth_mode_agnostic_handlers
    # Verify each required handler exists on the auth object; handlers with
    # config keys are only checked when all of those keys are set.
    for handler in init_handlers:
        required_keys = handler.keys
        if required_keys is None or all(map(config.get, required_keys)):
            self.__check_method_in_auth(handler.name, handler.exception)
    # Caller-supplied overrides replace the default handler implementations.
    for handler in init_handlers:
        if handler.name in self.kwargs:
            setattr(self.instance.auth, handler.name, self.kwargs.pop(handler.name))
|
Take any predefined methods/handlers and insert them into Sanic JWT
|
def get_attributes(path):
    '''
    Return a dictionary object with the Windows
    file attributes for a file.
    Args:
        path (str): The path to the file or directory
    Returns:
        dict: A dictionary of file attributes
    Raises:
        CommandExecutionError: if the path does not exist
    CLI Example:
    .. code-block:: bash
        salt '*' file.get_attributes c:\\temp\\a.txt
    '''
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))
    # Cumulative FILE_ATTRIBUTE_* bitmask for the path.
    intAttributes = win32file.GetFileAttributes(path)
    # Decode the individual attribute bits.
    attributes = {
        'archive': (intAttributes & 32) == 32,
        'reparsePoint': (intAttributes & 1024) == 1024,
        'compressed': (intAttributes & 2048) == 2048,
        'directory': (intAttributes & 16) == 16,
        'encrypted': (intAttributes & 16384) == 16384,
        'hidden': (intAttributes & 2) == 2,
        'normal': (intAttributes & 128) == 128,
        'notIndexed': (intAttributes & 8192) == 8192,
        'offline': (intAttributes & 4096) == 4096,
        'readonly': (intAttributes & 1) == 1,
        'system': (intAttributes & 4) == 4,
        'temporary': (intAttributes & 256) == 256,
    }
    attributes['mountedVolume'] = False
    attributes['symbolicLink'] = False
    if attributes['reparsePoint']:
        # Fetch the reparse tag once instead of iterating the directory
        # twice as the original did (same result, half the syscalls).
        findDataTuple = next(win32file.FindFilesIterator(path))
        reparse_tag = findDataTuple[6]
        # IO_REPARSE_TAG_MOUNT_POINT on a directory marks a mounted volume.
        if attributes['directory'] and reparse_tag == 0xA0000003:
            attributes['mountedVolume'] = True
        # IO_REPARSE_TAG_SYMLINK: os.path.islink() historically missed NTFS
        # symlinks on Python 2.7, so the tag is checked directly.
        if reparse_tag == 0xA000000C:
            attributes['symbolicLink'] = True
    return attributes
|
Return a dictionary object with the Windows
file attributes for a file.
Args:
path (str): The path to the file or directory
Returns:
dict: A dictionary of file attributes
CLI Example:
.. code-block:: bash
salt '*' file.get_attributes c:\\temp\\a.txt
|
def draw_commands(self, surf):
    """Draw the list of available commands."""
    used_abilities = {act.ability for act in self._past_actions if act.ability}
    commands = sorted(self._abilities(lambda c: c.name != "Smart"),
                      key=lambda c: c.name)
    for row, cmd in enumerate(commands, start=2):
        # Color encodes state: queued action/hotkey prefix in greens,
        # previously used abilities in red, everything else yellow.
        if self._queued_action and cmd == self._queued_action:
            color = colors.green
        elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):
            color = colors.green * 0.75
        elif cmd.ability_id in used_abilities:
            color = colors.red
        else:
            color = colors.yellow
        hotkey = cmd.hotkey[:3]  # truncate "escape" -> "esc"
        surf.write_screen(self._font_large, color, (0.2, row), hotkey)
        surf.write_screen(self._font_large, color, (3, row), cmd.name)
|
Draw the list of available commands.
|
def position_target_global_int_send(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
    '''
    Reports the current commanded vehicle position, velocity, and
    acceleration as specified by the autopilot. This
    should match the commands sent in
    SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
    controlled this way.
    time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
    coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
    type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
    lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
    lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
    alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
    vx : X velocity in NED frame in meter / s (float)
    vy : Y velocity in NED frame in meter / s (float)
    vz : Z velocity in NED frame in meter / s (float)
    afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    yaw : yaw setpoint in rad (float)
    yaw_rate : yaw rate setpoint in rad/s (float)
    '''
    # Thin generated wrapper: encode the message, then transmit it via the
    # shared send() path (force_mavlink1 forces protocol-v1 framing).
    return self.send(self.position_target_global_int_encode(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1)
|
Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
|
def _getMostActiveCells(self):
"""
Gets the most active cells in the Union SDR having at least non-zero
activation in sorted order.
@return: a list of cell indices
"""
poolingActivation = self._poolingActivation
nonZeroCells = numpy.argwhere(poolingActivation > 0)[:,0]
# include a tie-breaker before sorting
poolingActivationSubset = poolingActivation[nonZeroCells] + \
self._poolingActivation_tieBreaker[nonZeroCells]
potentialUnionSDR = nonZeroCells[numpy.argsort(poolingActivationSubset)[::-1]]
topCells = potentialUnionSDR[0: self._maxUnionCells]
if max(self._poolingTimer) > self._minHistory:
self._unionSDR = numpy.sort(topCells).astype(UINT_DTYPE)
else:
self._unionSDR = []
return self._unionSDR
|
Gets the most active cells in the Union SDR having at least non-zero
activation in sorted order.
@return: a list of cell indices
|
def clean_proc_dir(opts):
    '''
    Clean out old tracked jobs running on the master
    Generally, anything tracking a job should remove the job
    once the job has finished. However, this will remove any
    jobs that for some reason were not properly removed
    when finished or errored.
    '''
    # NOTE(review): `serial` is never used below -- possibly kept for its
    # side effects or leftover from a refactor; confirm before removing.
    serial = salt.payload.Serial(opts)
    proc_dir = os.path.join(opts['cachedir'], 'proc')
    for fn_ in os.listdir(proc_dir):
        proc_file = os.path.join(*[proc_dir, fn_])
        data = salt.utils.master.read_proc_file(proc_file, opts)
        if not data:
            # Unreadable/empty proc file: drop it and move on.
            try:
                log.warning(
                    "Found proc file %s without proper data. Removing from tracked proc files.",
                    proc_file
                )
                os.remove(proc_file)
            except (OSError, IOError) as err:
                log.error('Unable to remove proc file: %s.', err)
            continue
        # File parsed, but the tracked PID is dead or not a salt process.
        if not salt.utils.master.is_pid_healthy(data['pid']):
            try:
                log.warning(
                    "PID %s not owned by salt or no longer running. Removing tracked proc file %s",
                    data['pid'],
                    proc_file
                )
                os.remove(proc_file)
            except (OSError, IOError) as err:
                log.error('Unable to remove proc file: %s.', err)
|
Clean out old tracked jobs running on the master
Generally, anything tracking a job should remove the job
once the job has finished. However, this will remove any
jobs that for some reason were not properly removed
when finished or errored.
|
def extensions():
    """How do we handle cython:
    1. when on git, require cython during setup time (do not distribute
       generated .c files via git)
       a) cython present -> fine
       b) no cython present -> install it on the fly. Extensions have to have .pyx suffix
    This is solved via a lazy evaluation of the extension list. This is needed,
    because build_ext is being called before cython will be available.
    https://bitbucket.org/pypa/setuptools/issue/288/cannot-specify-cython-under-setup_requires
    2. src dist install (have pre-converted c files and pyx files)
       a) cython present -> fine
       b) no cython -> use .c files
    """
    USE_CYTHON = False
    try:
        from Cython.Build import cythonize
        USE_CYTHON = True
    except ImportError:
        warnings.warn('Cython not found. Using pre cythonized files.')
    import mdtraj
    # Note, that we add numpy include to every extension after declaration.
    from numpy import get_include as _np_inc
    np_inc = _np_inc()
    pybind_inc = get_pybind_include()
    lib_prefix = 'lib' if sys.platform.startswith('win') else ''
    common_cflags = ['-O3', ]
    clustering_module = Extension(
        'pyemma.coordinates.clustering._ext',
        sources=['pyemma/coordinates/clustering/src/clustering_module.cpp'],
        include_dirs=[
            mdtraj.capi()['include_dir'],
            pybind_inc,
            'pyemma/coordinates/clustering/include',
        ],
        language='c++',
        libraries=[lib_prefix + 'theobald'],
        library_dirs=[mdtraj.capi()['lib_dir']],
        extra_compile_args=common_cflags)
    covar_module = Extension(
        'pyemma._ext.variational.estimators.covar_c._covartools',
        sources=['pyemma/_ext/variational/estimators/covar_c/covartools.cpp'],
        include_dirs=['pyemma/_ext/variational/estimators/covar_c/',
                      pybind_inc,
                      ],
        language='c++',
        extra_compile_args=common_cflags)
    eig_qr_module = Extension(
        'pyemma._ext.variational.solvers.eig_qr.eig_qr',
        sources=['pyemma/_ext/variational/solvers/eig_qr/eig_qr.pyx'],
        include_dirs=['pyemma/_ext/variational/solvers/eig_qr/', np_inc],
        extra_compile_args=['-std=c99'] + common_cflags)
    orderedset = Extension(
        'pyemma._ext.orderedset._orderedset',
        sources=['pyemma/_ext/orderedset/_orderedset.pyx'],
        extra_compile_args=['-std=c99'] + common_cflags)
    extra_compile_args = ["-O3", "-std=c99"]

    def _thermo_ext(name, sources, extra_flags=()):
        # Helper: all thermo extensions share the module prefix and C flags;
        # this removes ten near-identical Extension declarations.
        return Extension(
            "pyemma.thermo.extensions." + name,
            sources=sources,
            extra_compile_args=extra_compile_args + list(extra_flags))

    _util_c = "pyemma/thermo/extensions/util/_util.c"
    exts_thermo = [
        _thermo_ext("bar", ["pyemma/thermo/extensions/bar/bar.pyx",
                            "pyemma/thermo/extensions/bar/_bar.c", _util_c]),
        _thermo_ext("wham", ["pyemma/thermo/extensions/wham/wham.pyx",
                             "pyemma/thermo/extensions/wham/_wham.c", _util_c]),
        _thermo_ext("mbar", ["pyemma/thermo/extensions/mbar/mbar.pyx",
                             "pyemma/thermo/extensions/mbar/_mbar.c", _util_c]),
        _thermo_ext("tram", ["pyemma/thermo/extensions/tram/tram.pyx",
                             "pyemma/thermo/extensions/tram/_tram.c", _util_c]),
        _thermo_ext("dtram", ["pyemma/thermo/extensions/dtram/dtram.pyx",
                              "pyemma/thermo/extensions/dtram/_dtram.c", _util_c]),
        # trammbar re-uses the tram C sources, compiled with -DTRAMMBAR.
        _thermo_ext("trammbar", ["pyemma/thermo/extensions/trammbar/trammbar.pyx",
                                 "pyemma/thermo/extensions/tram/_tram.c", _util_c],
                    extra_flags=["-DTRAMMBAR"]),
        _thermo_ext("mbar_direct", ["pyemma/thermo/extensions/mbar_direct/mbar_direct.pyx",
                                    "pyemma/thermo/extensions/mbar_direct/_mbar_direct.c", _util_c]),
        _thermo_ext("tram_direct", ["pyemma/thermo/extensions/tram_direct/tram_direct.pyx",
                                    "pyemma/thermo/extensions/tram_direct/_tram_direct.c", _util_c]),
        _thermo_ext("trammbar_direct", ["pyemma/thermo/extensions/trammbar_direct/trammbar_direct.pyx",
                                        "pyemma/thermo/extensions/tram_direct/_tram_direct.c", _util_c],
                    extra_flags=["-DTRAMMBAR"]),
        _thermo_ext("util", ["pyemma/thermo/extensions/util/util.pyx", _util_c]),
    ]
    exts = [clustering_module,
            covar_module,
            eig_qr_module,
            orderedset
            ]
    exts += exts_thermo
    for e in exts:
        e.include_dirs.append(np_inc)
    if not USE_CYTHON:
        # replace pyx files by their pre generated c code.
        for e in exts:
            e.sources = [s.replace('.pyx', '.c') for s in e.sources]
    else:
        exts = cythonize(exts, language_level=sys.version_info[0])
    return exts
|
How do we handle cython:
1. when on git, require cython during setup time (do not distribute
generated .c files via git)
a) cython present -> fine
b) no cython present -> install it on the fly. Extensions have to have .pyx suffix
This is solved via a lazy evaluation of the extension list. This is needed,
because build_ext is being called before cython will be available.
https://bitbucket.org/pypa/setuptools/issue/288/cannot-specify-cython-under-setup_requires
2. src dist install (have pre-converted c files and pyx files)
a) cython present -> fine
b) no cython -> use .c files
|
def build_chain(self, source, chain):
    """
    Build markov chain from source on top of existing chain

    Args:
        source: iterable which will be used to build chain
        chain: MarkovChain in currently loaded shelve file that
            will be extended by source
    """
    # Each window of (order + 1) items contributes one transition:
    # the first `order` items form the state, the last is the successor.
    for window in WalkByGroup(source, chain.order + 1):
        prefix = window[:-1]
        successor = window[-1]
        counts = chain.content.setdefault(prefix, {})
        counts[successor] = counts.get(successor, 0) + 1
    # Flush the updated chain back to its backing store.
    chain.decache()
|
Build markov chain from source on top of existing chain
Args:
source: iterable which will be used to build chain
chain: MarkovChain in currently loaded shelve file that
will be extended by source
|
def remote_delete_user(model, request):
    """Remove user via remote service.

    Returns a JSON-serializable dict containing success state and a message
    indicating what happened::

        {
            success: true,   // respective false
            message: 'message'
        }

    Expected request parameters:

    id
        Id of user to delete.
    """
    uid = request.params.get('id')
    # Reject calls that carry no user id at all.
    if not uid:
        return {
            'success': False,
            'message': u"No user ID given.",
        }
    users = model.backend
    if uid not in users:
        return {
            'success': False,
            'message': u"User with given ID not exists.",
        }
    try:
        del users[uid]
        users.parent()
        return {
            'success': True,
            'message': u"Deleted user with ID '%s'." % uid,
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e),
        }
    finally:
        # Drop cached state regardless of whether the deletion succeeded.
        model.invalidate()
|
Remove user via remote service.
Returns a JSON response containing success state and a message indicating
what happened::
{
success: true, // respective false
message: 'message'
}
Expected request parameters:
id
Id of user to delete.
|
def render_template(template_name: str, **kwargs):
    """
    Renders the template file with the given filename from within Cauldron's
    template environment folder.

    :param template_name:
        The filename of the template to render. Any path elements should be
        relative to Cauldron's root template folder.
    :param kwargs:
        Any elements passed to Jinja2 for rendering the template
    :return:
        The rendered string
    """
    template = get_environment().get_template(template_name)
    # Every render gets a fresh template uid injected alongside the
    # caller-supplied context.
    return template.render(cauldron_template_uid=make_template_uid(), **kwargs)
|
Renders the template file with the given filename from within Cauldron's
template environment folder.
:param template_name:
The filename of the template to render. Any path elements should be
relative to Cauldron's root template folder.
:param kwargs:
Any elements passed to Jinja2 for rendering the template
:return:
The rendered string
|
def _setup_dmtf_schema(self):
    """
    Install the DMTF CIM schema from the DMTF web site if it is not already
    installed. This includes downloading the DMTF CIM schema zip file from
    the DMTF web site and expanding that file into a subdirectory defined
    by `schema_mof_dir`.

    Once the schema zip file is downloaded into `schema_root_dir`, it is
    not re-downloaded if this function is recalled since DMTF CIM Schema
    releases are never modified; new update versions are released for minor
    changes. If the `schema_zip_file` is in the `schema_root_dir`
    directory, but no 'schema_mof_dir' subdirectory exists, the schema is
    unzipped.

    This allows the DMTF CIM schema zip file to be downloaded once and
    reused, and lets the user choose whether to retain the extracted MOF
    files or remove them with :meth:`~pywbem_mock.DMTFCIMSchema.clean` when
    not being used.

    If the schema is to be committed to a source repository such as git,
    it is logical to commit only the DMTF CIM schema zip file. The
    `schema_mof_dir` subdirectory will be created when the
    :class:`pywbem_mock.DMTFCIMSchema` object is created.

    Raises:
        ValueError: If the schema cannot be retrieved from the DMTF web
          site.
        TypeError: If the `schema_version` is not a valid tuple with 3
          integer components
    """
    def print_verbose(msg):
        """
        Inner method prints msg if self.verbose is `True`.
        """
        if self.verbose:
            print(msg)
    # Step 1: ensure the root directory for the downloaded archive exists.
    if not os.path.isdir(self.schema_root_dir):
        print_verbose(
            _format("Creating directory for CIM Schema archive: {0}",
                    self.schema_root_dir))
        os.mkdir(self.schema_root_dir)
    # Step 2: download the schema zip exactly once; later calls reuse it.
    if not os.path.isfile(self.schema_zip_file):
        print_verbose(
            _format("Downloading CIM Schema archive from: {0}",
                    self.schema_zip_url))
        try:
            ufo = urlopen(self.schema_zip_url)
        except IOError as ie:
            # Undo the directory creation on failure.
            # NOTE(review): os.rmdir only succeeds when the directory is
            # empty — assumed freshly created above; confirm for the case
            # where the directory pre-existed with other content.
            os.rmdir(self.schema_root_dir)
            raise ValueError(
                _format("DMTF Schema archive not found at url {0}: {1}",
                        self.schema_zip_url, ie))
        # Stream the archive to disk chunk by chunk.
        with open(self.schema_zip_file, 'wb') as fp:
            for data in ufo:
                fp.write(data)
    # Step 3: ensure the extraction target directory exists.
    if not os.path.isdir(self.schema_mof_dir):
        print_verbose(
            _format("Creating directory for CIM Schema MOF files: {0}",
                    self.schema_mof_dir))
        os.mkdir(self.schema_mof_dir)
    # Step 4: unzip only if the top-level schema MOF file is missing,
    # i.e. a previous extraction did not complete (or was cleaned).
    if not os.path.isfile(self._schema_mof_file):
        print_verbose(
            _format("Unpacking CIM Schema archive: {0}",
                    self.schema_zip_file))
        zfp = None
        try:
            zfp = ZipFile(self.schema_zip_file, 'r')
            nlist = zfp.namelist()
            for file_ in nlist:
                dfile = os.path.join(self.schema_mof_dir, file_)
                # Archive entries ending in '/' are directories.
                if dfile[-1] == '/':
                    if not os.path.exists(dfile):
                        os.mkdir(dfile)
                else:
                    with open(dfile, 'w+b') as dfp:
                        dfp.write(zfp.read(file_))
        finally:
            if zfp:
                zfp.close()
|
Install the DMTF CIM schema from the DMTF web site if it is not already
installed. This includes downloading the DMTF CIM schema zip file from
the DMTF web site and expanding that file into a subdirectory defined
by `schema_mof_dir`.
Once the schema zip file is downloaded into `schema_root_dir`, it is
not re-downloaded if this function is recalled since DMTF CIM Schema
releases are never modified; new update versions are released for minor
changes. If the `schema_zip_file` is in the `schema_root_dir`
directory, but no 'schema_mof_dir' subdirectory exists, the schema is
unzipped.
This allows the DMTF CIM schema zip file to be downloaded once and
reused, and lets the user choose whether to retain the extracted MOF
files or remove them with :meth:`~pywbem_mock.DMTFCIMSchema.clean` when
not being used.
If the schema is to be committed to a source repository such as git,
it is logical to commit only the DMTF CIM schema zip file. The
`schema_mof_dir` subdirectory will be created when the
:class:`pywbem_mock.DMTFCIMSchema` object is created.
Raises:
ValueError: If the schema cannot be retrieved from the DMTF web
site.
TypeError: If the `schema_version` is not a valid tuple with 3
integer components
|
def get_bytes(self, n):
    """
    Return the next ``n`` bytes of the message (as a `str`), without
    decomposing into an int, decoded string, etc. Just the raw bytes are
    returned. Returns a string of ``n`` zero bytes if there weren't ``n``
    bytes remaining in the message.
    """
    data = self.packet.read(n)
    max_pad_size = 1 << 20  # cap zero-padding at 1 MB
    shortfall = n - len(data)
    # Pad short reads with zero bytes, but only for sane request sizes;
    # an enormous ``n`` is returned as-is rather than padded.
    if shortfall > 0 and n < max_pad_size:
        return data + zero_byte * shortfall
    return data
|
Return the next ``n`` bytes of the message (as a `str`), without
decomposing into an int, decoded string, etc. Just the raw bytes are
returned. Returns a string of ``n`` zero bytes if there weren't ``n``
bytes remaining in the message.
|
def remove_replica(self, partition_name, osr_broker_ids, count=1):
    """Removing a replica is done by trying to remove a replica from every
    broker and choosing the resulting state with the highest fitness score.

    Out-of-sync replicas will always be removed before in-sync replicas.

    :param partition_name: (topic_id, partition_id) of the partition to remove replicas of.
    :param osr_broker_ids: A list of the partition's out-of-sync broker ids.
    :param count: The number of replicas to remove.
    :raises InvalidPartitionError: if ``partition_name`` is unknown.
    :raises InvalidReplicationFactorError: if removal would drop the
        replication factor below 1.
    """
    try:
        partition = self.cluster_topology.partitions[partition_name]
    except KeyError:
        raise InvalidPartitionError(
            "Partition name {name} not found.".format(name=partition_name),
        )
    # Refuse to go below a replication factor of 1.
    # NOTE(review): the two concatenated message strings join without a
    # space ("...{new_rf}.Replication factor...").
    if partition.replication_factor - count < 1:
        raise InvalidReplicationFactorError(
            "Cannot decrease replication factor from {rf} to {new_rf}."
            "Replication factor must be at least 1."
            .format(
                rf=partition.replication_factor,
                new_rf=partition.replication_factor - count,
            )
        )
    # Set of this partition's replica brokers that are out of sync.
    osr = {
        broker for broker in partition.replicas
        if broker.id in osr_broker_ids
    }
    # Create state from current cluster topology.
    state = _State(self.cluster_topology)
    partition_index = state.partitions.index(partition)
    # Remove one replica per pass, re-scoring candidates each time.
    for _ in range(count):
        # Find eligible replication groups.
        non_empty_rgs = [
            rg for rg in six.itervalues(self.cluster_topology.rgs)
            if rg.count_replica(partition) > 0
        ]
        # Prefer groups that still hold an out-of-sync replica.
        rgs_with_osr = [
            rg for rg in non_empty_rgs
            if any(b in osr for b in rg.brokers)
        ]
        candidate_rgs = rgs_with_osr or non_empty_rgs
        # Since replicas will only be removed from the candidate rgs, only
        # count replicas on those rgs when determining which rgs are
        # over-replicated.
        replica_count = sum(
            rg.count_replica(partition)
            for rg in candidate_rgs
        )
        opt_replicas, _ = compute_optimum(
            len(candidate_rgs),
            replica_count,
        )
        over_replicated_rgs = [
            rg for rg in candidate_rgs
            if rg.count_replica(partition) > opt_replicas
        ] or candidate_rgs
        # NOTE(review): over_replicated_rgs is always truthy because of the
        # `or candidate_rgs` fallback above, so this line is a no-op.
        candidate_rgs = over_replicated_rgs or candidate_rgs
        # Remove the replica from every eligible broker.
        new_states = []
        for rg in candidate_rgs:
            # Within a group, out-of-sync brokers are tried first.
            osr_brokers = {
                broker for broker in rg.brokers
                if broker in osr
            }
            candidate_brokers = osr_brokers or rg.brokers
            for broker in candidate_brokers:
                if broker in partition.replicas:
                    broker_index = state.brokers.index(broker)
                    new_states.append(
                        state.remove_replica(partition_index, broker_index)
                    )
        # Update cluster topology with highest scoring state.
        state = sorted(new_states, key=self._score, reverse=True)[0]
        self.cluster_topology.update_cluster_topology(state.assignment)
        # Drop OSR entries whose broker no longer holds a replica.
        osr = {b for b in osr if b in partition.replicas}
|
Removing a replica is done by trying to remove a replica from every
broker and choosing the resulting state with the highest fitness score.
Out-of-sync replicas will always be removed before in-sync replicas.
:param partition_name: (topic_id, partition_id) of the partition to remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove.
|
def _validate_features(self, data):
    """
    Validate Booster and data's feature_names are identical.
    Set feature_names and feature_types from DMatrix
    """
    if self.feature_names is None:
        # First data seen: adopt the DMatrix's feature metadata.
        self.feature_names = data.feature_names
        self.feature_types = data.feature_types
        return
    # Booster can't accept data with different feature names
    if self.feature_names != data.feature_names:
        raise ValueError('feature_names mismatch: {0} {1}'.format(
            self.feature_names, data.feature_names))
|
Validate Booster and data's feature_names are identical.
Set feature_names and feature_types from DMatrix
|
def nt_counts(bam, positions, stranded=False, vcf=False, bed=False):
    """
    Find the number of nucleotides covered at all positions in a bed or vcf
    file.

    Parameters
    ----------
    bam : str or pysam.calignmentfile.AlignmentFile
        Bam file opened with pysam or path to bam file (must
        be sorted and indexed).
    positions : str or pybedtools.BedTool
        Path to bed or vcf file or pybedtools.BedTool object. The extension is
        used to determine whether the file is a bed or vcf (.bed vs .vcf).
    stranded : boolean
        Boolean indicating whether read data is stranded and stranded nucleotide
        counts should be returned. Assumes R1 read on reverse strand implies +
        strand coverage etc.
    vcf : boolean
        Set to True if you are providing a vcf file that doesn't have a .vcf
        suffix.
    bed : boolean
        Set to True if you are providing a bed file that doesn't have a .bed
        suffix.

    Returns
    -------
    counts : pandas.DataFrame
        Data frame with the counts for each base in the region. The index of
        this data frame is one-based for compatibility with VCF files.

    Raises
    ------
    ValueError
        If ``positions`` is neither a BedTool, a .bed path, nor a .vcf path
        and neither ``bed`` nor ``vcf`` is set.
    """
    if not bed and not vcf:
        if type(positions) == pbt.bedtool.BedTool:
            df = positions.to_dataframe()
        elif positions[-4:] == '.bed':
            bed = True
        elif positions[-4:] == '.vcf':
            vcf = True
        else:
            # BUGFIX: previously this wrote a warning to stderr and then
            # fell through to a NameError on the undefined `df`; raise a
            # clear error instead.
            raise ValueError('Positions must be BedTool, bed file, or vcf '
                             'file.')
    if bed:
        df = pbt.BedTool(positions).to_dataframe()
    elif vcf:
        from variants import vcf_as_df
        tdf = vcf_as_df(positions)
        # Convert one-based VCF POS into zero-based half-open intervals.
        df = pd.DataFrame(index=tdf.index)
        df['chrom'] = tdf.CHROM
        df['start'] = tdf.POS - 1
        df['end'] = tdf.POS
    res = []
    for i in df.index:
        # .loc replaces the long-deprecated (and removed) DataFrame.ix;
        # indexing here is label-based on the frame's own index.
        region = [df.loc[i, 'chrom'], df.loc[i, 'start'], df.loc[i, 'end']]
        res.append(get_region_nt_counts(region, bam, stranded))
    res = pd.concat(res)
    return res
|
Find the number of nucleotides covered at all positions in a bed or vcf
file.
Parameters
----------
bam : str or pysam.calignmentfile.AlignmentFile
Bam file opened with pysam or path to bam file (must
be sorted and indexed).
positions : str or pybedtools.BedTool
Path to bed or vcf file or pybedtools.BedTool object. The extension is
used to determine whether the file is a bed or vcf (.bed vs .vcf).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
vcf : boolean
Set to True if you are providing a vcf file that doesn't have a .vcf
suffix.
bed : boolean
Set to True if you are providing a bed file that doesn't have a .bed
suffix.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
|
def process_bind_param(self, value: Optional[List[str]],
                       dialect: Dialect) -> str:
    """Convert things on the way from Python to the database."""
    # Delegate serialization of the string list to the shared helper.
    return self._strlist_to_dbstr(value)
|
Convert things on the way from Python to the database.
|
def parse_get_list_response(content):
    """Parse response content XML from a WebDAV server and extract file and
    directory names.

    :param content: the XML content of the HTTP response from the WebDAV
                    server for getting the list of files at a remote path.
    :return: list of extracted file or directory names as ``Urn`` objects.
    """
    try:
        tree = etree.fromstring(content)
        # Each DAV href holds a percent-encoded resource path.
        paths = [
            Urn.separate + unquote(urlsplit(node.text).path)
            for node in tree.findall('.//{DAV:}href')
        ]
        return [Urn(path) for path in paths]
    except etree.XMLSyntaxError:
        # Unparseable responses yield an empty listing.
        return []
|
Parses the response content XML from a WebDAV server and extracts file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names.
|
def compute_y(self, coefficients, num_x):
    """ Return calculated y-values for the domain of x-values in [1, num_x]. """
    def evaluate(x):
        # Coefficients are ordered highest degree first, so reversing
        # pairs each coefficient with its power of x.
        return sum(c * x ** power
                   for power, c in enumerate(reversed(coefficients)))
    return [evaluate(x) for x in range(1, num_x + 1)]
|
Return calculated y-values for the domain of x-values in [1, num_x].
|
def committees_legislators(self, *args, **kwargs):
    '''Return an iterable of committees with all the
    legislators cached for reference in the Committee model.

    So do a "select_related" operation on committee members.
    '''
    committees = list(self.committees(*args, **kwargs))
    legislators = self.legislators({'active': True},
                                   fields=['full_name',
                                           settings.LEVEL_FIELD])
    # Cache of legislator objects used in the committees.html template,
    # keyed by every id a legislator is known under (_all_ids if present,
    # otherwise the primary _id).
    _legislators = {}
    for obj in legislators:
        # BUGFIX: the membership test previously checked 'all_ids' while
        # the loop read obj['_all_ids'], so the multi-id branch either
        # never ran or raised KeyError. Both now use '_all_ids'.
        if '_all_ids' in obj:
            for _id in obj['_all_ids']:
                _legislators[_id] = obj
        else:
            _legislators[obj['_id']] = obj
    del legislators
    # Attach the shared cache to every committee for template lookups.
    for com in committees:
        com._legislators = _legislators
    return committees
|
Return an iterable of committees with all the
legislators cached for reference in the Committee model.
So do a "select_related" operation on committee members.
|
def nic_remove(self, nic):
    """
    Detach a nic from a bridge

    :param nic: nic name to detach
    """
    payload = {'nic': nic}
    # Validate the arguments before dispatching the command.
    self._nic_remove_chk.check(payload)
    return self._client.json('bridge.nic-remove', payload)
|
Detach a nic from a bridge
:param nic: nic name to detach
|
def cov_trob(x, wt=None, cor=False, center=True, nu=5, maxit=25,
             tol=0.01):
    """
    Covariance Estimation for Multivariate t Distribution

    Estimates a covariance or correlation matrix assuming the
    data came from a multivariate t distribution: this provides
    some degree of robustness to outliers without giving a high
    breakdown point.

    **credit**: This function is a port of the R function
    ``MASS::cov.trob``.

    Parameters
    ----------
    x : array
        data matrix. Missing values (NaNs) are not allowed.
    wt : array
        A vector of weights for each case: these are treated as
        if the case i actually occurred ``wt[i]`` times.
    cor : bool
        Flag to choose between returning the correlation
        (``cor=True``) or covariance (``cor=False``) matrix.
    center : array or bool
        A logical value or a numeric vector providing the location
        about which the covariance is to be taken.
        If ``center=False``, no centering is done; if
        ``center=True`` the MLE of the location vector is used;
        if a vector of length ``p`` is given, it is used as the
        fixed location.
    nu : int
        'degrees of freedom' for the multivariate t distribution.
        Must exceed 2 (so that the covariance matrix is finite).
    maxit : int
        Maximum number of iterations in fitting.
    tol : float
        Convergence tolerance for fitting.

    Returns
    -------
    out : dict
        A dictionary with the following key-value pairs

        - ``cov`` : the fitted covariance matrix.
        - ``center`` : the estimated or specified location vector.
        - ``wt`` : the specified weights: only returned if the
          wt argument was given.
        - ``n_obs`` : the number of cases used in the fitting.
        - ``cor`` : the fitted correlation matrix: only returned
          if ``cor=True``.
        - ``iter`` : The number of iterations used.

    References
    ----------
    - J. T. Kent, D. E. Tyler and Y. Vardi (1994) A curious likelihood
      identity for the multivariate t-distribution. *Communications in
      Statistics-Simulation and Computation* **23**, 441-453.
    - Venables, W. N. and Ripley, B. D. (1999) *Modern Applied
      Statistics with S-PLUS*. Third Edition. Springer.
    """
    def test_values(x):
        # Reject missing or infinite data up front.
        if pd.isnull(x).any() or np.isinf(x).any():
            raise ValueError("Missing or infinite values in 'x'")

    def scale_simp(x, center, n, p):
        # Subtract the location vector from every row of x.
        return x - np.repeat([center], n, axis=0)

    x = np.asarray(x)
    n, p = x.shape
    test_values(x)

    # --- weights -----------------------------------------------------
    miss_wt = wt is None
    if not miss_wt:
        wt = np.asarray(wt)
        wt0 = wt  # keep the user-supplied weights for the return value
        if len(wt) != n:
            raise ValueError(
                "length of 'wt' must equal number of observations.")
        if np.any(wt < 0):
            raise ValueError("Negative weights not allowed.")
        if not np.sum(wt):
            raise ValueError("No positive weights.")
        # Zero-weight observations are dropped entirely.
        x = x[wt > 0, :]
        wt = wt[wt > 0]
        n, _ = x.shape
    else:
        wt = np.ones(n)

    wt = wt[:, np.newaxis]

    # --- location ----------------------------------------------------
    # Start from the weighted sample mean.
    loc = np.sum(wt*x, axis=0) / wt.sum()
    try:
        _len = len(center)
    except TypeError:
        # center is a scalar; only center=False means "no centering".
        if isinstance(center, bool) and not center:
            loc = np.zeros(p)
    else:
        if _len != p:
            raise ValueError("'center' is not the right length")
        # BUGFIX: this previously set `loc = p` (the dimension), discarding
        # the user-supplied location vector. MASS::cov.trob uses the given
        # vector as the fixed location.
        loc = np.asarray(center, dtype=float)

    # Re-estimate the location each iteration only when center is True.
    use_loc = isinstance(center, bool) and center

    # --- EM-style iteration (Kent, Tyler & Vardi, 1994) --------------
    w = wt * (1 + p/nu)
    for iteration in range(maxit):
        w0 = w
        X = scale_simp(x, loc, n, p)
        _, s, v = linalg.svd(np.sqrt(w/np.sum(w)) * X)
        # wX = X @ v.T @ np.diag(np.full(p, 1/s))
        wX = np.dot(np.dot(X, v.T), np.diag(np.full(p, 1/s)))
        # Q = np.squeeze((wX**2) @ np.ones(p))  -- squared Mahalanobis-type
        # distances used to down-weight outlying rows.
        Q = np.squeeze(np.dot(wX**2, np.ones(p)))
        w = (wt * (nu + p)) / (nu + Q)[:, np.newaxis]
        if use_loc:
            loc = np.sum(w*x, axis=0) / w.sum()
        if np.all(np.abs(w-w0) < tol):
            break
    else:
        # Loop exhausted maxit without converging.
        if ((np.mean(w) - np.mean(wt) > tol) or
                (np.abs(np.mean(w * Q)/p - 1) > tol)):
            warn("Probable convergence failure.", PlotnineWarning)

    _a = np.sqrt(w) * X
    # cov = (_a.T @ _a) / np.sum(wt)
    cov = np.dot(_a.T, _a) / np.sum(wt)

    if miss_wt:
        ans = dict(cov=cov, center=loc, n_obs=n)
    else:
        ans = dict(cov=cov, center=loc, wt=wt0, n_obs=n)

    if cor:
        sd = np.sqrt(np.diag(cov))
        cor = (cov/sd)/np.repeat([sd], p, axis=0).T
        ans['cor'] = cor

    ans['iter'] = iteration
    return ans
|
Covariance Estimation for Multivariate t Distribution
Estimates a covariance or correlation matrix assuming the
data came from a multivariate t distribution: this provides
some degree of robustness to outlier without giving a high
breakdown point.
**credit**: This function a port of the R function
``MASS::cov.trob``.
Parameters
----------
x : array
data matrix. Missing values (NaNs) are not allowed.
wt : array
A vector of weights for each case: these are treated as
if the case i actually occurred ``wt[i]`` times.
cor : bool
Flag to choose between returning the correlation
(``cor=True``) or covariance (``cor=False``) matrix.
center : array or bool
A logical value or a numeric vector providing the location
about which the covariance is to be taken.
If ``center=False``, no centering is done; if
``center=True`` the MLE of the location vector is used.
nu : int
'degrees of freedom' for the multivariate t distribution.
Must exceed 2 (so that the covariance matrix is finite).
maxit : int
Maximum number of iterations in fitting.
tol : float
Convergence tolerance for fitting.
Returns
-------
out : dict
A dictionary with the following key-value pairs
- ``cov`` : the fitted covariance matrix.
- ``center`` : the estimated or specified location vector.
- ``wt`` : the specified weights: only returned if the
wt argument was given.
- ``n_obs`` : the number of cases used in the fitting.
- ``cor`` : the fitted correlation matrix: only returned
if ``cor=True``.
- ``call`` : The matched call.
- ``iter`` : The number of iterations used.
References
----------
- J. T. Kent, D. E. Tyler and Y. Vardi (1994) A curious likelihood
identity for the multivariate t-distribution. *Communications in
Statistics-Simulation and Computation* **23**, 441-453.
- Venables, W. N. and Ripley, B. D. (1999) *Modern Applied
Statistics with S-PLUS*. Third Edition. Springer.
|
def newComment(content):
    """Creation of a new node containing a comment. """
    node = libxml2mod.xmlNewComment(content)
    if node is None:
        raise treeError('xmlNewComment() failed')
    # Wrap the raw libxml2 pointer in the Python node type.
    return xmlNode(_obj=node)
|
Creation of a new node containing a comment.
|
def l2traceroute_input_protocolType_IP_l4_dest_port(self, **kwargs):
    """Auto Generated Code

    Builds the ``l2traceroute`` RPC element with
    ``input/protocolType/IP/l4-dest-port`` taken from
    ``kwargs['l4_dest_port']`` and hands it to the callback
    (``kwargs['callback']`` or ``self._callback``).
    """
    root = ET.Element("l2traceroute")
    input_el = ET.SubElement(root, "input")
    proto_el = ET.SubElement(input_el, "protocolType")
    ip_el = ET.SubElement(proto_el, "IP")
    port_el = ET.SubElement(ip_el, "l4-dest-port")
    port_el.text = kwargs.pop('l4_dest_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
|
Auto Generated Code
|
def _update_data(self, name, value, timestamp, interval, config, conn):
    '''Support function for insert. Should be called within a transaction'''
    # Bucket the timestamp at interval resolution; the finer "resolution"
    # bucket only applies when the series is not coarse.
    i_time = config['i_calc'].to_bucket(timestamp)
    r_time = None if config['coarse'] else \
        config['r_calc'].to_bucket(timestamp)
    predicate = and_(
        self._table.c.name==name,
        self._table.c.interval==interval,
        self._table.c.i_time==i_time,
        self._table.c.r_time==r_time)
    stmt = self._table.update().where(predicate).values(
        {self._table.c.value: value})
    result = conn.execute( stmt )
    # Row count tells the caller whether an existing row was updated.
    return result.rowcount
|
Support function for insert. Should be called within a transaction
|
def _write_stream(self, src, dst, size=None, size_limit=None,
                  chunk_size=None, progress_callback=None):
    """Get helper to save stream from src to dest + compute checksum.

    :param src: Source stream.
    :param dst: Destination stream.
    :param size: If provided, this exact amount of bytes will be
        written to the destination file.
    :param size_limit: ``FileSizeLimit`` instance to limit number of bytes
        to write.
    :param chunk_size: Read/write chunk size (defaults applied by helper).
    :param progress_callback: Optional callable receiving
        ``(total, bytes_written)``; ``total`` is ``None`` while streaming
        and equals ``bytes_written`` on the final call.
    :returns: Tuple ``(bytes_written, checksum_or_None)``.
    """
    chunk_size = chunk_size_or_default(chunk_size)
    algo, m = self._init_hash()
    bytes_written = 0
    while True:
        # Enforce the size limit before every read.
        check_sizelimit(size_limit, bytes_written, size)
        chunk = src.read(chunk_size)
        if not chunk:
            break
        dst.write(chunk)
        bytes_written += len(chunk)
        if m:
            m.update(chunk)
        if progress_callback:
            progress_callback(None, bytes_written)
    # Final progress report once the stream is exhausted.
    if progress_callback:
        progress_callback(bytes_written, bytes_written)
    check_size(bytes_written, size)
    checksum = '{0}:{1}'.format(algo, m.hexdigest()) if m else None
    return bytes_written, checksum
|
Get helper to save stream from src to dest + compute checksum.
:param src: Source stream.
:param dst: Destination stream.
:param size: If provided, this exact amount of bytes will be
written to the destination file.
:param size_limit: ``FileSizeLimit`` instance to limit number of bytes
to write.
|
def __Logout(si):
    """
    Disconnect (logout) service instance
    @param si: Service instance (returned from Connect)
    """
    # Best-effort logout: any failure (expired session, dead connection,
    # missing instance) is deliberately ignored.
    try:
        if si:
            si.RetrieveContent().sessionManager.Logout()
    except Exception:
        pass
|
Disconnect (logout) service instance
@param si: Service instance (returned from Connect)
|
def lagrange_polynomial(abscissas, sort="GR"):
    """
    Create Lagrange polynomials.

    Args:
        abscissas (numpy.ndarray):
            Sample points where the Lagrange polynomials shall be defined.
        sort (str):
            Polynomial term ordering passed through to the Bertran helpers.

    Raises:
        numpy.linalg.LinAlgError:
            If the Vandermonde-like matrix built from the abscissas is
            singular (points do not determine a unique interpolant).

    Example:
        >>> print(chaospy.around(lagrange_polynomial([-10, 10]), 4))
        [-0.05q0+0.5, 0.05q0+0.5]
        >>> print(chaospy.around(lagrange_polynomial([-1, 0, 1]), 4))
        [0.5q0^2-0.5q0, -q0^2+1.0, 0.5q0^2+0.5q0]
        >>> poly = lagrange_polynomial([[1, 0, 1], [0, 1, 2]])
        >>> print(chaospy.around(poly, 4))
        [0.5q0-0.5q1+0.5, -q0+1.0, 0.5q0+0.5q1-0.5]
        >>> print(numpy.around(poly([1, 0, 1], [0, 1, 2]), 4))
        [[1. 0. 0.]
         [0. 1. 0.]
         [0. 0. 1.]]
    """
    abscissas = numpy.asfarray(abscissas)
    # Normalize 1-D input to a (dim, size) layout: one row per dimension.
    if len(abscissas.shape) == 1:
        abscissas = abscissas.reshape(1, abscissas.size)
    dim, size = abscissas.shape

    # Smallest polynomial order whose term count covers all sample points.
    order = 1
    while chaospy.bertran.terms(order, dim) <= size:
        order += 1

    # Multi-indices of the monomials and the generalized Vandermonde
    # matrix evaluated at the abscissas.
    indices = numpy.array(chaospy.bertran.bindex(0, order-1, dim, sort)[:size])
    idx, idy = numpy.mgrid[:size, :size]
    matrix = numpy.prod(abscissas.T[idx]**indices[idy], -1)

    det = numpy.linalg.det(matrix)
    if det == 0:
        raise numpy.linalg.LinAlgError("invertible matrix required")

    # Monomial basis the coefficients below are expressed in.
    vec = chaospy.poly.basis(0, order-1, dim, sort)[:size]

    coeffs = numpy.zeros((size, size))

    if size == 1:
        # NOTE(review): single-point case multiplies the constant basis by
        # the abscissa value — a degenerate Lagrange basis would usually be
        # the constant 1; confirm against chaospy's intended semantics.
        out = chaospy.poly.basis(0, 0, dim, sort)*abscissas.item()

    elif size == 2:
        coeffs = numpy.linalg.inv(matrix)
        out = chaospy.poly.sum(vec*(coeffs.T), 1)

    else:
        # General case: accumulate minors by cyclically rolling rows and
        # columns, then divide by the determinant — effectively building
        # the matrix inverse column by column without calling inv().
        for i in range(size):
            for j in range(size):
                coeffs[i, j] += numpy.linalg.det(matrix[1:, 1:])
                matrix = numpy.roll(matrix, -1, axis=0)
            matrix = numpy.roll(matrix, -1, axis=1)
        coeffs /= det
        out = chaospy.poly.sum(vec*(coeffs.T), 1)

    return out
|
Create Lagrange polynomials.
Args:
abscissas (numpy.ndarray):
Sample points where the Lagrange polynomials shall be defined.
Example:
>>> print(chaospy.around(lagrange_polynomial([-10, 10]), 4))
[-0.05q0+0.5, 0.05q0+0.5]
>>> print(chaospy.around(lagrange_polynomial([-1, 0, 1]), 4))
[0.5q0^2-0.5q0, -q0^2+1.0, 0.5q0^2+0.5q0]
>>> poly = lagrange_polynomial([[1, 0, 1], [0, 1, 2]])
>>> print(chaospy.around(poly, 4))
[0.5q0-0.5q1+0.5, -q0+1.0, 0.5q0+0.5q1-0.5]
>>> print(numpy.around(poly([1, 0, 1], [0, 1, 2]), 4))
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]]
|
def second_order_diff(arr, x):
    """Compute second order difference of an array.

    A 2nd order forward difference is used for the first point, 2nd order
    central difference for interior, and 2nd order backward difference for last
    point, returning an array the same length as the input array.

    Parameters
    ----------
    arr : array_like
        Values to differentiate. Converted with ``np.asarray``, so lists
        and pandas Series are accepted.
    x : array_like
        Sample locations corresponding to ``arr``; need not be uniformly
        spaced. Converted with ``np.asarray`` as well.

    Returns
    -------
    numpy.ndarray
        First-derivative estimates, same length as ``arr``.
    """
    # Convert both inputs to arrays so this works with lists and pandas
    # Series. BUGFIX: previously only `arr` was converted, so list input
    # crashed on `x[2:] - x[:-2]` and Series input used (mis)aligned
    # label-based arithmetic for `x`.
    arr = np.asarray(arr)
    x = np.asarray(x)
    # Calculate dx for forward diff point
    dxf = (x[2] - x[0])/2
    # Calculate dx for backward diff point
    dxb = (x[-1] - x[-3])/2
    # Calculate dx array for central difference
    dx = (x[2:] - x[:-2])/2
    # For first data point, use 2nd order forward difference
    first = (-3*arr[0] + 4*arr[1] - arr[2])/(2*dxf)
    # For last point, use 2nd order backward difference
    last = (3*arr[-1] - 4*arr[-2] + arr[-3])/(2*dxb)
    # For all interior points, use 2nd order central difference
    interior = (arr[2:] - arr[:-2])/(2*dx)
    # Create entire array
    darr = np.concatenate(([first], interior, [last]))
    return darr
|
Compute second order difference of an array.
A 2nd order forward difference is used for the first point, 2nd order
central difference for interior, and 2nd order backward difference for last
point, returning an array the same length as the input array.
|
def project(self, projection):
    '''
    Return coordinates transformed to a given projection

    Projection should be a basemap or pyproj projection object or similar
    '''
    easting, northing = projection(self.lon.decimal_degree,
                                   self.lat.decimal_degree)
    # Unpack/repack so the result is always a plain 2-tuple.
    return (easting, northing)
|
Return coordinates transformed to a given projection
Projection should be a basemap or pyproj projection object or similar
|
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function

    Parameters
    ----------
    name : str
        Name of the thing being documented
    thing : class or function
        Class or function to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code
    """
    # Enums have their own rendering path.
    if type(thing) is enum.EnumMeta:
        return enum_doc(name, thing, header_level, source_location)

    marks = '#' * header_level
    if inspect.isclass(thing):
        header = f"{marks} Class **{name}**\n\n"
    else:
        header = f"{marks} {name}\n\n"

    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        parsed = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(parsed)
        lines += attributes_section(thing, parsed, header_level)
        lines += params_section(thing, parsed, header_level)
        lines += returns_section(thing, parsed, header_level)
        lines += examples_section(parsed, header_level)
        lines += notes_section(parsed)
        lines += refs_section(parsed)
    except Exception:
        # Things without a parseable numpydoc docstring keep only the
        # header/signature/source lines accumulated so far.
        pass
    return lines
|
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
|
def paddedInt(i):
    '''
    return a string that contains `i`, left-padded with 0's up to PAD_LEN digits
    '''
    # rjust leaves strings already at or beyond PAD_LEN untouched, matching
    # the manual pad computation (negative pad * "0" == "").
    return str(i).rjust(PAD_LEN, "0")
|
return a string that contains `i`, left-padded with 0's up to PAD_LEN digits
|
def constant_compare(a, b):
    """
    Compares two byte strings in constant time to see if they are equal

    :param a:
        The first byte string

    :param b:
        The second byte string

    :return:
        A boolean if the two byte strings are equal
    """
    if not isinstance(a, byte_cls):
        raise TypeError(pretty_message(
            '''
            a must be a byte string, not %s
            ''',
            type_name(a)
        ))
    if not isinstance(b, byte_cls):
        raise TypeError(pretty_message(
            '''
            b must be a byte string, not %s
            ''',
            type_name(b)
        ))
    # Length mismatch can return early: the length itself is not secret.
    if len(a) != len(b):
        return False
    # Python 2 yields str characters when iterating bytes; convert to ints
    # so the XOR below works identically on both major versions.
    if sys.version_info < (3,):
        a = [ord(char) for char in a]
        b = [ord(char) for char in b]
    # Accumulate differences with XOR/OR over every byte pair rather than
    # short-circuiting, so the comparison takes the same time whether the
    # strings differ at the first byte or not at all (timing-attack safe).
    result = 0
    for x, y in zip(a, b):
        result |= x ^ y
    return result == 0
|
Compares two byte strings in constant time to see if they are equal
:param a:
The first byte string
:param b:
The second byte string
:return:
A boolean if the two byte strings are equal
|
def get_repos(self):
    """
    Gets the repos for the organization and builds the URL/headers for
    getting timestamps of stargazers.
    """
    # BUGFIX: this block used Python 2 print statements, which are a
    # SyntaxError in Python 3 (the rest of this file uses f-strings and
    # therefore requires Python 3). Converted to print() calls.
    print('Getting repos.')
    # Uses the developer API. Note this could change.
    headers = {
        'Accept': 'application/vnd.github.v3.star+json',
        'Authorization': 'token ' + self.token,
    }
    repo_count = 0
    for repo in self.org_retrieved.iter_repos():
        repo_count += 1
        url = ('https://api.github.com/repos/' + self.organization_name +
               '/' + repo.name)
        self.repos[repo.name] = self.get_stargazers(url=url, headers=headers)
    self.calc_stargazers(start_count=650)
    print('total count: \t' + str(self.total_count))
    print(str(repo_count) + ' repos')
|
Gets the repos for the organization and builds the URL/headers for
getting timestamps of stargazers.
|
def download(url, save_to_file=True, save_dir=".", filename=None,
             block_size=64000, overwrite=False, quiet=False):
    """
    Download a given URL to either file or memory

    :param url: Full url (with protocol) of path to download
    :param save_to_file: boolean if it should be saved to file or not
    :param save_dir: location of saved file, default is current working dir
    :param filename: filename to save as
    :param block_size: download chunk size
    :param overwrite: overwrite file if it already exists
    :param quiet: boolean to turn off logging for function
    :return: save location (or content if not saved to file); False when the
        target file already exists and ``overwrite`` is not set
    """
    if save_to_file:
        if not filename:
            filename = safe_filename(url.split('/')[-1])
            if not filename:
                filename = "downloaded_at_{}.file".format(time.time())
        save_location = os.path.abspath(os.path.join(save_dir, filename))
        if os.path.exists(save_location) and not overwrite:
            logger.error("File {0} already exists".format(save_location))
            return False
    else:
        save_location = "memory"
    try:
        request = urlopen(url)
    except ValueError as err:
        if not quiet and "unknown url type" in str(err):
            logger.error("Please make sure URL is formatted correctly and"
                         " starts with http:// or other protocol")
        raise err
    except Exception as err:
        if not quiet:
            logger.error("Could not download {0} - {1}".format(url, err))
        raise err
    try:
        kb_size = int(request.headers["Content-Length"]) / 1024
    except Exception as err:
        if not quiet:
            logger.debug("Could not determine file size - {0}".format(err))
        file_size = "(unknown size)"
    else:
        file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999
                                           else (kb_size / 1024, "MB"))
    if not quiet:
        logger.info("Downloading {0} {1} to {2}".format(url, file_size,
                                                        save_location))
    # Fix: always close the urlopen response so the underlying socket is
    # released even if reading or writing fails.
    try:
        if save_to_file:
            with open(save_location, "wb") as f:
                while True:
                    # 'chunk' instead of 'buffer' to avoid shadowing the builtin.
                    chunk = request.read(block_size)
                    if not chunk:
                        break
                    f.write(chunk)
            return save_location
        else:
            return request.read()
    finally:
        request.close()
|
Download a given URL to either file or memory
:param url: Full url (with protocol) of path to download
:param save_to_file: boolean if it should be saved to file or not
:param save_dir: location of saved file, default is current working dir
:param filename: filename to save as
:param block_size: download chunk size
:param overwrite: overwrite file if it already exists
:param quiet: boolean to turn off logging for function
:return: save location (or content if not saved to file)
|
def update_probes(self, progress):
    """
    update the probe tree

    Refreshes the probe tree widget with the latest values from
    ``self.read_probes``, redraws the plotted probe (if one is selected),
    and appends a CSV line to the probe log file when logging is enabled.

    Args:
        progress: slot payload from the progress signal (not used here).
    """
    new_values = self.read_probes.probes_values
    probe_count = len(self.read_probes.probes)
    if probe_count > self.tree_probes.topLevelItemCount():
        # when run for the first time, there are no probes in the tree, so we have to fill it first
        self.fill_treewidget(self.tree_probes, new_values)
    else:
        # Update existing tree items in place: column 1 shows the value.
        for x in range(probe_count):
            topLvlItem = self.tree_probes.topLevelItem(x)
            for child_id in range(topLvlItem.childCount()):
                child = topLvlItem.child(child_id)
                child.value = new_values[topLvlItem.name][child.name]
                child.setText(1, str(child.value))
    if self.probe_to_plot is not None:
        self.probe_to_plot.plot(self.matplotlibwidget_1.axes)
        self.matplotlibwidget_1.draw()
    if self.chk_probe_log.isChecked():
        # Flatten every probe value of every instrument into one CSV row.
        data = ','.join(list(np.array([[str(p) for p in list(p_dict.values())] for instr, p_dict in new_values.items()]).flatten()))
        self.probe_file.write('{:s}\n'.format(data))
|
update the probe tree
|
def process(*args, **kwargs):
    """Runs the decorated function in a concurrent process,
    taking care of the result and error management.

    Decorated functions will return a concurrent.futures.Future object
    once called.

    The timeout parameter will set a maximum execution time
    for the decorated function. If the execution exceeds the timeout,
    the process will be stopped and the Future will raise TimeoutError.
    """
    timeout = kwargs.get('timeout')

    # Bare usage: @process directly on the function, no arguments.
    if len(args) == 1 and not kwargs and callable(args[0]):
        return _process_wrapper(args[0], timeout)

    # Parametrised usage: @process(timeout=...).
    if timeout is not None and not isinstance(timeout, (int, float)):
        raise TypeError('Timeout expected to be None or integer or float')

    def decorating_function(function):
        return _process_wrapper(function, timeout)

    return decorating_function
|
Runs the decorated function in a concurrent process,
taking care of the result and error management.
Decorated functions will return a concurrent.futures.Future object
once called.
The timeout parameter will set a maximum execution time
for the decorated function. If the execution exceeds the timeout,
the process will be stopped and the Future will raise TimeoutError.
|
def check_url(url):
    """Check whether the given URL is dead or alive.

    Returns a dict with four keys:

    "url": The URL that was checked (string)
    "alive": Whether the URL was working, True or False
    "status": The HTTP status code of the response from the URL,
        e.g. 200, 401, 500 (int)
    "reason": The reason for the success or failure of the check,
        e.g. "OK", "Unauthorized", "Internal Server Error" (string)

    The "status" may be None if we did not get a valid HTTP response,
    e.g. in the event of a timeout, DNS failure or invalid HTTP response.

    The "reason" will always be a string, but may be a requests library
    exception string rather than an HTTP reason string if we did not get a valid
    HTTP response.
    """
    result = {"url": url}
    try:
        response = requests.get(url)
        result["status"] = response.status_code
        result["reason"] = response.reason
        response.raise_for_status()  # Raise if status_code is not OK.
        result["alive"] = True
    except AttributeError as err:
        # Fix: AttributeError has no ``.message`` attribute on Python 3;
        # str(err) yields the same message text on both Python 2 and 3.
        if str(err) == "'NoneType' object has no attribute 'encode'":
            # requests seems to throw these for some invalid URLs.
            result["alive"] = False
            result["reason"] = "Invalid URL"
            result["status"] = None
        else:
            raise
    except requests.exceptions.RequestException as err:
        result["alive"] = False
        if "reason" not in result:
            result["reason"] = str(err)
        if "status" not in result:
            # This can happen if the response is invalid HTTP, if we get a DNS
            # failure, or a timeout, etc.
            result["status"] = None
    # We should always have these four fields in the result.
    assert "url" in result
    assert result.get("alive") in (True, False)
    assert "status" in result
    assert "reason" in result
    return result
|
Check whether the given URL is dead or alive.
Returns a dict with four keys:
"url": The URL that was checked (string)
"alive": Whether the URL was working, True or False
"status": The HTTP status code of the response from the URL,
e.g. 200, 401, 500 (int)
"reason": The reason for the success or failure of the check,
e.g. "OK", "Unauthorized", "Internal Server Error" (string)
The "status" may be None if we did not get a valid HTTP response,
e.g. in the event of a timeout, DNS failure or invalid HTTP response.
The "reason" will always be a string, but may be a requests library
exception string rather than an HTTP reason string if we did not get a valid
HTTP response.
|
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    errata
        /repositories/<id>/errata
    files
        /repositories/<id>/files
    packages
        /repositories/<id>/packages
    module_streams
        /repositories/<id>/module_streams
    puppet_modules
        /repositories/<id>/puppet_modules
    remove_content
        /repositories/<id>/remove_content
    sync
        /repositories/<id>/sync
    upload_content
        /repositories/<id>/upload_content
    import_uploads
        /repositories/<id>/import_uploads

    ``super`` is called otherwise.
    """
    sub_resources = (
        'errata',
        'files',
        'packages',
        'module_streams',
        'puppet_modules',
        'remove_content',
        'sync',
        'import_uploads',
        'upload_content',
    )
    if which in sub_resources:
        base = super(Repository, self).path(which='self')
        return '{0}/{1}'.format(base, which)
    return super(Repository, self).path(which)
|
Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
errata
/repositories/<id>/errata
files
/repositories/<id>/files
packages
/repositories/<id>/packages
module_streams
/repositories/<id>/module_streams
puppet_modules
/repositories/<id>/puppet_modules
remove_content
/repositories/<id>/remove_content
sync
/repositories/<id>/sync
upload_content
/repositories/<id>/upload_content
import_uploads
/repositories/<id>/import_uploads
``super`` is called otherwise.
|
async def _loadNodeValu(self, full, valu):
    '''
    Load a node from storage into the tree.

    Walks (and lazily creates) the path from the root to ``full``, then
    stores ``valu`` on the final node.
    ( used by initialization routines to build the tree)
    '''
    node = self.root
    for path in iterpath(full):
        kid = node.kids.get(path[-1])
        if kid is None:
            # Missing intermediate node: create it before descending.
            kid = await self._initNodePath(node, path, None)
        node = kid
    node.valu = valu
    return node
|
Load a node from storage into the tree.
( used by initialization routines to build the tree)
|
def initialize_request(self, request, *args, **kwargs):
    """
    Returns the initial request object.

    Wraps the raw HTTP request in a DRF-style ``Request`` configured with
    this view's parsers, authenticators and content negotiator.
    """
    context = self.get_parser_context(request)
    return Request(
        request,
        parsers=self.get_parsers(),
        authenticators=self.get_authenticators(),
        negotiator=self.get_content_negotiator(),
        parser_context=context,
    )
|
Returns the initial request object.
|
def transform(self, X, y=None):
    '''
    Map each sample onto grouped Fisher p-values.

    :X: list of dict
    :y: labels (unused)
    '''
    transformed = []
    for x in X:
        sample_keys = set(x.keys())
        row = {}
        for new_feature, old_features in self.feature_groups.items():
            # Only emit a group p-value when the sample has at least one
            # of the group's original features.
            if sample_keys & set(old_features):
                row[new_feature] = self._fisher_pval(x, old_features)
        transformed.append(row)
    return transformed
|
:X: list of dict
:y: labels
|
def handle_internal_commands(command):
    """Run repl-internal commands.

    Repl-internal commands are all commands starting with ":".
    Returns the command's result, or None when the input is not an
    internal command or no matching target is registered.
    """
    if not command.startswith(":"):
        return None
    target = _get_registered_target(command[1:], default=None)
    return target() if target else None
|
Run repl-internal commands.
Repl-internal commands are all commands starting with ":".
|
def fetch_file(self, in_path, out_path):
    ''' save a remote file to the specified path

    Sends an encrypted "fetch" request over the accelerate socket,
    base64-decodes the returned payload and writes it to ``out_path``.
    '''
    vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
    data = dict(mode='fetch', in_path=in_path)
    data = utils.jsonify(data)
    data = utils.encrypt(self.key, data)
    self.socket.send(data)
    response = self.socket.recv()
    response = utils.decrypt(self.key, response)
    response = utils.parse_json(response)
    response = response['data']
    response = base64.b64decode(response)
    # Fix: write in binary mode ("wb", not "w") so the base64-decoded
    # payload is not mangled by newline translation, and use a context
    # manager so the handle is closed even if the write fails.
    with open(out_path, "wb") as fh:
        fh.write(response)
|
save a remote file to the specified path
|
def sections(self):
    """List with tuples of section names and positions.

    Positions of section names are measured by cumulative word count.
    Also caches the result on ``self._sections``.
    """
    found = []
    for match in texutils.section_pattern.finditer(self.text):
        # Word count of everything preceding this section heading.
        preceding = self.text[0:match.start()]
        word_count = len(nlputils.wordify(preceding))
        found.append((word_count, match.group(1)))
    self._sections = found
    return found
|
List with tuples of section names and positions.
Positions of section names are measured by cumulative word count.
|
def cancel_download_task(self, task_id, expires=None, **kwargs):
    """Cancel an offline download task.

    :param task_id: ID of the task to cancel.
    :type task_id: str
    :param expires: Optional request expiry timestamp; validated by the
        server when provided.
    :type expires: int
    :return: Response object
    """
    payload = {
        'task_id': task_id,
        'expires': expires,
    }
    # NOTE: 'cancle_task' (sic) is the spelling the remote API expects.
    return self._request('services/cloud_dl', 'cancle_task',
                         data=payload, **kwargs)
|
取消离线下载任务.
:param task_id: 要取消的任务ID号。
:type task_id: str
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
|
def bibtex(self):
    """Represent the source in BibTeX format.

    :return: string encoding the source in BibTeX syntax.
    """
    # Width of the longest field name, used to align the '=' signs
    # (the [0] guards max() against an empty source).
    width = max(itertools.chain(map(len, self), [0]))
    body = ",\n".join(
        " %s = {%s}" % (key.ljust(width), self[key]) for key in self)
    genre = getattr(self.genre, 'value', self.genre)
    return "@%s{%s,\n%s\n}" % (genre, self.id, body)
|
Represent the source in BibTeX format.
:return: string encoding the source in BibTeX syntax.
|
def validate(cpf_number):
    """This function validates a CPF number.

    Both check digits are recomputed via the calculation package and
    compared against the digits present in the input.

    :param cpf_number: a CPF number to be validated. Only numbers.
    :type cpf_number: string
    :return: Bool -- True for a valid number, False otherwise.
    """
    digits = compat.clear_punctuation(cpf_number)
    # A CPF has exactly 11 digits; all-equal sequences are invalid too.
    if len(digits) != 11 or len(set(digits)) == 1:
        return False
    return (digits[9] == calc.calculate_first_digit(digits[:9]) and
            digits[10] == calc.calculate_second_digit(digits[:10]))
|
This function validates a CPF number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cpf_number: a CPF number to be validated. Only numbers.
:type cpf_number: string
:return: Bool -- True for a valid number, False otherwise.
|
def _return_retry_timer(self):
    '''
    Based on the minion configuration, either return a randomized timer or
    just return the value of the return_retry_timer.
    '''
    msg = 'Minion return retry timer set to %s seconds'
    # Only randomize when a maximum bound is configured (and truthy).
    if self.opts.get('return_retry_timer_max'):
        try:
            random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
            # (removed unused local 'retry_msg' that duplicated this format)
            log.debug('%s (randomized)', msg % random_retry)
            return random_retry
        except ValueError:
            # Catch wiseguys using negative integers here
            log.error(
                'Invalid value (return_retry_timer: %s or '
                'return_retry_timer_max: %s). Both must be positive '
                'integers.',
                self.opts['return_retry_timer'],
                self.opts['return_retry_timer_max'],
            )
            log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer'])
            return DEFAULT_MINION_OPTS['return_retry_timer']
    else:
        log.debug(msg, self.opts.get('return_retry_timer'))
        return self.opts.get('return_retry_timer')
|
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
|
def _add_case(self, case_obj):
"""Add a case to the database
If the case already exists exception is raised
Args:
case_obj(Case)
"""
if self.case(case_obj['_id']):
raise IntegrityError("Case %s already exists in database" % case_obj['_id'])
return self.case_collection.insert_one(case_obj)
|
Add a case to the database
If the case already exists exception is raised
Args:
case_obj(Case)
|
def set_npn_advertise_callback(self, callback):
    """
    Specify a callback function that will be called when offering `Next
    Protocol Negotiation
    <https://technotes.googlecode.com/git/nextprotoneg.html>`_ as a server.

    :param callback: The callback function.  It will be invoked with one
        argument, the :class:`Connection` instance.  It should return a
        list of bytestrings representing the advertised protocols, like
        ``[b'http/1.1', b'spdy/2']``.

    .. versionadded:: 0.15
    """
    _warn_npn()
    # Keep a reference to the helper on self so the FFI callback object is
    # not garbage-collected while OpenSSL still holds a pointer to it.
    self._npn_advertise_helper = _NpnAdvertiseHelper(callback)
    self._npn_advertise_callback = self._npn_advertise_helper.callback
    _lib.SSL_CTX_set_next_protos_advertised_cb(
        self._context, self._npn_advertise_callback, _ffi.NULL)
|
Specify a callback function that will be called when offering `Next
Protocol Negotiation
<https://technotes.googlecode.com/git/nextprotoneg.html>`_ as a server.
:param callback: The callback function. It will be invoked with one
argument, the :class:`Connection` instance. It should return a
list of bytestrings representing the advertised protocols, like
``[b'http/1.1', b'spdy/2']``.
.. versionadded:: 0.15
|
def _load_json(self, filename):
    """Load sensors from json file.

    Decoded entries are merged into ``self._sensors``.
    """
    with open(filename, 'r') as file_handle:
        decoded = json.load(file_handle, cls=MySensorsJSONDecoder)
    self._sensors.update(decoded)
|
Load sensors from json file.
|
def slaveof(master_host=None, master_port=None, host=None, port=None, db=None,
            password=None):
    '''
    Make the server a slave of another instance, or promote it as master

    CLI Example:

    .. code-block:: bash

        # Become slave of redis-n01.example.com:6379
        salt '*' redis.slaveof redis-n01.example.com 6379
        salt '*' redis.slaveof redis-n01.example.com

        # Become master
        salt '*' redis.slaveof
    '''
    # Default to the standard redis port when only a master host was given.
    if master_host and not master_port:
        master_port = 6379
    server = _connect(host, port, db, password)
    return server.slaveof(master_host, master_port)
|
Make the server a slave of another instance, or promote it as master
CLI Example:
.. code-block:: bash
# Become slave of redis-n01.example.com:6379
salt '*' redis.slaveof redis-n01.example.com 6379
salt '*' redis.slaveof redis-n01.example.com
# Become master
salt '*' redis.slaveof
|
def ungroup_state(self, state_id):
    """ Ungroup state with state id state_id into its parent and remain internal linkage in parent.

    Interconnecting transitions and data flows to parent and other child states are preserved except:
    - a transition that is going from income to outcome directly and
    - a data-flow that is linking input and output directly.

    :param state_id: State that is to be ungrouped.
    :return: the removed (and destroyed) state object
    """
    state = self.states[state_id]
    assert isinstance(state, ContainerState)
    from rafcon.core.states.barrier_concurrency_state import BarrierConcurrencyState, UNIQUE_DECIDER_STATE_ID
    # The decider state of a barrier concurrency state must not be lifted
    # into the parent; drop it before collecting the children.
    if isinstance(state, BarrierConcurrencyState):
        state.remove_state(state_id=UNIQUE_DECIDER_STATE_ID, force=True)
    [related_transitions, related_data_flows] = self.related_linkage_state(state_id)
    # ingoing logical linkage to rebuild -> related_transitions['external']['ingoing']
    # outgoing logical linkage to rebuild -> related_transitions['external']['outgoing']
    # ingoing data linkage to rebuild
    # Map (from_state, from_key) of each internal ingoing data flow to the
    # internal flows it feeds and the external flows that feed it.
    ingoing_data_linkage_for_port = {}
    for df in related_data_flows['internal']['ingoing']:
        if (df.from_state, df.from_key) in ingoing_data_linkage_for_port:
            ingoing_data_linkage_for_port[(df.from_state, df.from_key)]['internal'].append(df)
        else:
            ingoing_data_linkage_for_port[(df.from_state, df.from_key)] = {'external': [], 'internal': [df]}
        if not ingoing_data_linkage_for_port[(df.from_state, df.from_key)]['external']:
            for ext_df in self.data_flows.values():
                if (ext_df.to_state, ext_df.to_key) == (df.from_state, df.from_key):
                    ingoing_data_linkage_for_port[(df.from_state, df.from_key)]['external'].append(ext_df)
    # outgoing data linkage to rebuild
    # Same bookkeeping for the (to_state, to_key) side of outgoing flows.
    outgoing_data_linkage_for_port = {}
    for df in related_data_flows['internal']['outgoing']:
        if (df.to_state, df.to_key) in outgoing_data_linkage_for_port:
            outgoing_data_linkage_for_port[(df.to_state, df.to_key)]['internal'].append(df)
        else:
            outgoing_data_linkage_for_port[(df.to_state, df.to_key)] = {'external': [], 'internal': [df]}
        if not outgoing_data_linkage_for_port[(df.to_state, df.to_key)]['external']:
            for ext_df in self.data_flows.values():
                if (ext_df.from_state, ext_df.from_key) == (df.to_state, df.to_key):
                    outgoing_data_linkage_for_port[(df.to_state, df.to_key)]['external'].append(ext_df)
    # hold states and scoped variables to rebuild
    child_states = [state.remove_state(s_id, recursive=False, destroy=False) for s_id in list(state.states.keys())]
    child_scoped_variables = [sv for sv_id, sv in list(state.scoped_variables.items())]
    # remove state that should be ungrouped
    old_state = self.remove_state(state_id, recursive=False, destroy=False)
    # fill elements into parent state and remember id mapping from child to parent state to map other properties
    state_id_dict = {}
    sv_id_dict = {}
    enclosed_df_id_dict = {}
    enclosed_t_id_dict = {}
    # re-create states
    old_state_ids = [state.state_id for state in child_states]
    for child_state in child_states:
        old_state_id = child_state.state_id
        # needed to change state id here because not handled in add state and to avoid old state ids
        new_id = None
        if child_state.state_id in list(self.states.keys()):
            new_id = state_id_generator(used_state_ids=list(self.states.keys()) + old_state_ids + [self.state_id])
            child_state.change_state_id(new_id)
        new_state_id = self.add_state(child_state)
        if new_id is not None and not new_id == new_state_id:
            logger.error("In ungroup state the changed state id should not be changed again by add_state because it"
                         " could become a old_state_id again and screw data flows and transitions.")
        # remember new and old state id relations
        state_id_dict[old_state_id] = new_state_id
    # re-create scoped variables
    for sv in child_scoped_variables:
        name = sv.name
        # Avoid name clashes with scoped variables already in the parent.
        if name in [parent_sv.name for parent_sv in self.scoped_variables.values()]:
            name = state_id + name
        new_sv_id = self.add_scoped_variable(name, sv.data_type, sv.default_value)
        sv_id_dict[sv.data_port_id] = new_sv_id
    # re-create transitions
    for t in related_transitions['internal']['enclosed']:
        new_t_id = self.add_transition(state_id_dict[t.from_state], t.from_outcome,
                                       state_id_dict[t.to_state], t.to_outcome)
        enclosed_t_id_dict[t.transition_id] = new_t_id
    assert len(related_transitions['internal']['ingoing']) <= 1
    if related_transitions['internal']['ingoing']:
        ingoing_t = related_transitions['internal']['ingoing'][0]
        for t in related_transitions['external']['ingoing']:
            self.add_transition(t.from_state, t.from_outcome, state_id_dict[ingoing_t.to_state],
                                ingoing_t.to_outcome)
    # Stitch each external outgoing transition to the internal transition
    # that previously reached the same outcome of the removed state.
    for ext_t in related_transitions['external']['outgoing']:
        for t in related_transitions['internal']['outgoing']:
            if (t.to_state, t.to_outcome) == (ext_t.from_state, ext_t.from_outcome):
                try:
                    self.add_transition(state_id_dict[t.from_state], t.from_outcome,
                                        ext_t.to_state, ext_t.to_outcome)
                except ValueError:
                    from rafcon.core.states.barrier_concurrency_state import BarrierConcurrencyState
                    if not isinstance(self, BarrierConcurrencyState):
                        logger.exception("Error while recreation of logical linkage.")
    # re-create data flow linkage
    for df in related_data_flows['internal']['enclosed']:
        # print("enclosed: ", df)
        new_df_id = self.add_data_flow(self.state_id if state_id == df.from_state else state_id_dict[df.from_state],
                                       sv_id_dict[df.from_key] if state_id == df.from_state else df.from_key,
                                       self.state_id if state_id == df.to_state else state_id_dict[df.to_state],
                                       sv_id_dict[df.to_key] if state_id == df.to_state else df.to_key)
        enclosed_df_id_dict[df.data_flow_id] = new_df_id
    for data_port_linkage in ingoing_data_linkage_for_port.values():
        for ext_df in data_port_linkage['external']:
            for df in data_port_linkage['internal']:
                # print("ingoing: ", ext_df, df)
                if df.to_state not in state_id_dict and df.to_state == state_id:
                    self.add_data_flow(ext_df.from_state, ext_df.from_key, self.state_id, sv_id_dict[df.to_key])
                else:
                    self.add_data_flow(ext_df.from_state, ext_df.from_key, state_id_dict[df.to_state], df.to_key)
    for data_port_linkage in outgoing_data_linkage_for_port.values():
        for ext_df in data_port_linkage['external']:
            for df in data_port_linkage['internal']:
                # print("outgoing: ", ext_df, df)
                if df.from_state not in state_id_dict and df.from_state == state_id:
                    self.add_data_flow(self.state_id, sv_id_dict[df.from_key], ext_df.to_state, ext_df.to_key)
                else:
                    self.add_data_flow(state_id_dict[df.from_state], df.from_key, ext_df.to_state, ext_df.to_key)
    # Expose the id mappings on the bound function object -- presumably for
    # undo/observer machinery; TODO confirm consumers of these attributes.
    self.ungroup_state.__func__.state_id_dict = state_id_dict
    self.ungroup_state.__func__.sv_id_dict = sv_id_dict
    self.ungroup_state.__func__.enclosed_df_id_dict = enclosed_df_id_dict
    self.ungroup_state.__func__.enclosed_t_id_dict = enclosed_t_id_dict
    old_state.destroy(recursive=True)
    return old_state
|
Ungroup state with state id state_id into its parent and remain internal linkage in parent.
Interconnecting transitions and data flows to parent and other child states are preserved except:
- a transition that is going from income to outcome directly and
- a data-flow that is linking input and output directly.
:param state_id: State that is to be ungrouped.
:return:
|
def index():
    """Basic test view.

    Renders the open or limited template depending on whether the current
    user is anonymous, passing the identity and a mapping of action name
    to whether the identity is allowed to perform it.
    """
    identity = g.identity
    actions = {
        action.value: DynamicPermission(action).allows(identity)
        for action in access.actions.values()
    }
    if current_user.is_anonymous:
        return render_template("invenio_access/open.html",
                               actions=actions,
                               identity=identity)
    return render_template("invenio_access/limited.html",
                           message='',
                           actions=actions,
                           identity=identity)
|
Basic test view.
|
def apply_operation_to(self, path):
    """Add `a:lnTo` element to *path* for this line segment.

    Returns the `a:lnTo` element newly added to the path.
    """
    # Coordinates are stored absolute; convert to shape-relative offsets.
    builder = self._freeform_builder
    rel_x = self._x - builder.shape_offset_x
    rel_y = self._y - builder.shape_offset_y
    return path.add_lnTo(rel_x, rel_y)
|
Add `a:lnTo` element to *path* for this line segment.
Returns the `a:lnTo` element newly added to the path.
|
def handle(self):
    """The actual service to which the user has connected.

    Runs the telnet session loop: optional issue banner, authentication,
    welcome message, then reads commands until RUNSHELL goes false.
    """
    if self.TELNET_ISSUE:
        self.writeline(self.TELNET_ISSUE)
    if not self.authentication_ok():
        return
    if self.DOECHO:
        self.writeline(self.WELCOME)
    self.session_start()
    while self.RUNSHELL:
        # 'line' instead of 'raw_input' to avoid shadowing the builtin.
        line = self.readline(prompt=self.PROMPT).strip()
        self.input = self.input_reader(self, line)
        self.raw_input = self.input.raw
        if self.input.cmd:
            cmd = self.input.cmd.upper()
            params = self.input.params
            # Fix: dict.has_key() was removed in Python 3; 'in' works on both.
            if cmd in self.COMMANDS:
                try:
                    self.COMMANDS[cmd](params)
                except Exception:
                    # Narrowed from a bare 'except:' so KeyboardInterrupt and
                    # SystemExit still propagate.
                    log.exception('Error calling %s.' % cmd)
                    (t, p, tb) = sys.exc_info()
                    if self.handleException(t, p, tb):
                        break
            else:
                self.writeerror("Unknown command '%s'" % cmd)
    log.debug("Exiting handler")
|
The actual service to which the user has connected.
|
def pexpireat(self, name, when):
    """
    Set an expire flag on key ``name``. ``when`` can be represented
    as an integer representing unix time in milliseconds (unix time * 1000)
    or a Python datetime object.
    """
    with self.pipe as pipe:
        target_key = self.redis_key(name)
        return pipe.pexpireat(target_key, when)
|
Set an expire flag on key ``name``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime object.
|
def to_representation(self, value):
    """Project outgoing native value.

    Applies the sub-field projection (deep=True, i.e. including nested
    fields) before delegating serialization to the parent class.
    """
    value = apply_subfield_projection(self, value, deep=True)
    return super().to_representation(value)
|
Project outgoing native value.
|
def get_html_output(self):
    """ Return line generator.

    Splits the highlighted HTML output into a list of lines while keeping
    the markup per line well-formed: tags left open at a line break are
    closed at the end of the line and re-opened at the start of the next.
    On error, the escaped raw source lines are returned instead.
    """
    def html_splitlines(lines):
        # this cool function was taken from trac.
        # http://projects.edgewall.com/trac/
        open_tag_re = re.compile(r'<(\w+)(\s.*)?[^/]?>')
        close_tag_re = re.compile(r'</(\w+)>')
        open_tags = []
        for line in lines:
            # Re-open the tags carried over from the previous line.
            for tag in open_tags:
                line = tag.group(0) + line
            open_tags = []
            for tag in open_tag_re.finditer(line):
                open_tags.append(tag)
            open_tags.reverse()
            # Remove tags that are closed within this line.
            for ctag in close_tag_re.finditer(line):
                for otag in open_tags:
                    if otag.group(1) == ctag.group(1):
                        open_tags.remove(otag)
                        break
            # Close any still-open tags so the emitted line is well-formed.
            for tag in open_tags:
                line += '</%s>' % tag.group(1)
            yield line
    if self.error:
        return escape(self.raw).splitlines()
    return list(html_splitlines(self.out.getvalue().splitlines()))
|
Return line generator.
|
def lambda_handler(event, context=None, settings_name="zappa_settings"):  # NoQA
    """
    An AWS Lambda function which parses specific API Gateway input into a WSGI request.

    The request get fed it to Django, processes the Django response, and returns that
    back to the API Gateway.

    Three event shapes are handled: an HTTP request (``event['method']``),
    a management command (``event['command']``), and a dotted
    "module.function" invocation (``event['detail']``).
    """
    time_start = datetime.datetime.now()
    # If in DEBUG mode, log all raw incoming events.
    if settings.DEBUG:
        logger.info('Zappa Event: {}'.format(event))
    # This is a normal HTTP request
    if event.get('method', None):
        # Create the environment for WSGI and handle the request
        environ = create_wsgi_request(event, script_name=settings.SCRIPT_NAME)
        # We are always on https on Lambda, so tell our wsgi app that.
        environ['HTTPS'] = 'on'
        environ['wsgi.url_scheme'] = 'https'
        wrap_me = get_wsgi_application()
        app = ZappaWSGIMiddleware(wrap_me)
        # Execute the application
        response = Response.from_app(app, environ)
        response.content = response.data
        # Prepare the special dictionary which will be returned to the API GW.
        returnme = {'Content': response.data}
        # Pack the WSGI response into our special dictionary.
        for (header_name, header_value) in response.headers:
            returnme[header_name] = header_value
        returnme['Status'] = response.status_code
        # To ensure correct status codes, we need to
        # pack the response as a deterministic B64 string and raise it
        # as an error to match our APIGW regex.
        # The DOCTYPE ensures that the page still renders in the browser.
        exception = None
        if response.status_code in ERROR_CODES:
            content = u"<!DOCTYPE html>" + unicode(response.status_code) + unicode('<meta charset="utf-8" />') + response.data.encode('utf-8')
            b64_content = base64.b64encode(content)
            exception = (b64_content)
        # Internal are changed to become relative redirects
        # so they still work for apps on raw APIGW and on a domain.
        elif 300 <= response.status_code < 400 and response.has_header('Location'):
            location = returnme['Location']
            location = '/' + location.replace("http://zappa/", "")
            exception = location
        # Calculate the total response time,
        # and log it in the Common Log format.
        time_end = datetime.datetime.now()
        delta = time_end - time_start
        response_time_ms = delta.total_seconds() * 1000
        common_log(environ, response, response_time=response_time_ms)
        # Finally, return the response to API Gateway.
        if exception:
            raise Exception(exception)
        else:
            return returnme
    # This is a management command invocation.
    elif event.get('command', None):
        from django.core import management
        # Couldn't figure out how to get the value into stdout with StringIO..
        # Read the log for now. :[]
        management.call_command(*event['command'].split(' '))
        return {}
    elif event.get('detail'):
        # 'detail' carries a dotted "module.function" path to invoke.
        module, function = event['detail'].rsplit('.', 1)
        app_module = importlib.import_module(module)
        app_function = getattr(app_module, function)
        # Execute the function!
        app_function()
        return
    else:
        logger.error('Unhandled event: {}'.format(json.dumps(event)))
|
An AWS Lambda function which parses specific API Gateway input into a WSGI request.
The request get fed it to Django, processes the Django response, and returns that
back to the API Gateway.
|
def new_signal(celf, path, iface, name) :
    "creates a new DBUS.MESSAGE_TYPE_SIGNAL message."
    # libdbus expects C strings, hence the .encode() calls.
    result = dbus.dbus_message_new_signal(path.encode(), iface.encode(), name.encode())
    # NOTE(review): '== None' (rather than 'is None') may be intentional for
    # the FFI return value -- confirm before "fixing".
    if result == None :
        raise CallFailed("dbus_message_new_signal")
    #end if
    # Wrap the raw message pointer in a new instance of this class.
    return \
        celf(result)
|
creates a new DBUS.MESSAGE_TYPE_SIGNAL message.
|
def build_data_table(
    energy,
    flux,
    flux_error=None,
    flux_error_lo=None,
    flux_error_hi=None,
    energy_width=None,
    energy_lo=None,
    energy_hi=None,
    ul=None,
    cl=None,
):
    """
    Read data into data dict.

    Parameters
    ----------
    energy : :class:`~astropy.units.Quantity` array instance
        Observed photon energy array [physical type ``energy``]

    flux : :class:`~astropy.units.Quantity` array instance
        Observed flux array [physical type ``flux`` or ``differential flux``]

    flux_error, flux_error_hi, flux_error_lo : :class:`~astropy.units.Quantity` array instance
        68% CL gaussian uncertainty of the flux [physical type ``flux`` or
        ``differential flux``]. Either ``flux_error`` (symmetrical uncertainty)
        or ``flux_error_hi`` and ``flux_error_lo`` (asymmetrical uncertainties)
        must be provided.

    energy_width, energy_lo, energy_hi : :class:`~astropy.units.Quantity` array instance, optional
        Width of the energy bins [physical type ``energy``]. Either
        ``energy_width`` (bin width) or ``energy_lo`` and ``energy_hi``
        (Energies of the lower and upper bin edges) can be provided. If none
        are provided, ``generate_energy_edges`` will be used.

    ul : boolean or int array, optional
        Boolean array indicating which of the flux values given in ``flux``
        correspond to upper limits.

    cl : float, optional
        Confidence level of the flux upper limits given by ``ul``.

    Returns
    -------
    data : :class:`astropy.table.QTable`
        Data stored in an astropy Table.
    """
    table = QTable()
    if cl is not None:
        cl = validate_scalar("cl", cl)
        table.meta["keywords"] = {"cl": {"value": cl}}
    table["energy"] = energy
    if energy_width is not None:
        table["energy_width"] = energy_width
    elif energy_lo is not None and energy_hi is not None:
        table["energy_lo"] = energy_lo
        table["energy_hi"] = energy_hi
    table["flux"] = flux
    if flux_error is not None:
        table["flux_error"] = flux_error
    elif flux_error_lo is not None and flux_error_hi is not None:
        table["flux_error_lo"] = flux_error_lo
        table["flux_error_hi"] = flux_error_hi
    else:
        raise TypeError("Flux error not provided!")
    if ul is not None:
        # Fix: np.int was a deprecated alias removed in NumPy 1.24; the
        # documented replacement is the builtin int.
        ul = np.array(ul, dtype=int)
        table["ul"] = ul
    table.meta["comments"] = ["Table generated with naima.build_data_table"]
    # test table units, format, etc
    validate_data_table(table)
    return table
|
Read data into data dict.
Parameters
----------
energy : :class:`~astropy.units.Quantity` array instance
Observed photon energy array [physical type ``energy``]
flux : :class:`~astropy.units.Quantity` array instance
Observed flux array [physical type ``flux`` or ``differential flux``]
flux_error, flux_error_hi, flux_error_lo : :class:`~astropy.units.Quantity` array instance
68% CL gaussian uncertainty of the flux [physical type ``flux`` or
``differential flux``]. Either ``flux_error`` (symmetrical uncertainty)
or ``flux_error_hi`` and ``flux_error_lo`` (asymmetrical uncertainties)
must be provided.
energy_width, energy_lo, energy_hi : :class:`~astropy.units.Quantity` array instance, optional
Width of the energy bins [physical type ``energy``]. Either
``energy_width`` (bin width) or ``energy_lo`` and ``energy_hi``
(Energies of the lower and upper bin edges) can be provided. If none
are provided, ``generate_energy_edges`` will be used.
ul : boolean or int array, optional
Boolean array indicating which of the flux values given in ``flux``
correspond to upper limits.
cl : float, optional
Confidence level of the flux upper limits given by ``ul``.
Returns
-------
data : :class:`astropy.table.QTable`
Data stored in an astropy Table.
|
def send_and_require(self,
                     send,
                     regexps,
                     not_there=False,
                     shutit_pexpect_child=None,
                     echo=None,
                     note=None,
                     loglevel=logging.INFO):
    """Send a string and require the given pattern(s) in the output
    (or require their absence when ``not_there`` is set).

    See send_until for details of the matching semantics.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Fall back to the current session's pexpect child when none was given.
    child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    session = self.get_shutit_pexpect_session_from_child(child)
    return session.send_and_require(send,
                                    regexps,
                                    not_there=not_there,
                                    echo=echo,
                                    note=note,
                                    loglevel=loglevel)
|
Send string and require the item in the output.
See send_until
|
def createPortForm(self, req, tag):
    """
    Create and return a L{LiveForm} for adding a new L{TCPPort} or
    L{SSLPort} to the site store.
    """
    def port(s):
        # Coerce to int and reject anything outside the valid TCP port range.
        value = int(s)
        if not (0 <= value <= 65535):
            raise ValueError(s)
        return value

    factories = [
        (factory.__class__.__name__.decode('ascii'), factory, False)
        for factory in self.store.parent.powerupsFor(IProtocolFactoryFactory)]

    parameters = [
        Parameter('portNumber', TEXT_INPUT, port, 'Port Number',
                  'Integer 0 <= n <= 65535 giving the TCP port to bind.'),
        Parameter('interface', TEXT_INPUT, unicode, 'Interface',
                  'Hostname to bind to, or blank for all interfaces.'),
        Parameter('ssl', CHECKBOX_INPUT, bool, 'SSL',
                  'Select to indicate port should use SSL.'),
        # Text area? File upload? What?
        Parameter('certPath', TEXT_INPUT, unicode, 'Certificate Path',
                  'Path to a certificate file on the server, if SSL is to be used.'),
        ChoiceParameter('factory', factories, 'Protocol Factory',
                        'Which pre-existing protocol factory to associate with this port.')]

    form = LiveForm(self.portConf.createPort, parameters)
    form.setFragmentParent(self)
    # form.docFactory = webtheme.getLoader(form.fragmentName)
    return tag[form]
|
Create and return a L{LiveForm} for adding a new L{TCPPort} or
L{SSLPort} to the site store.
|
def is_equivalent(self, other, ignore=False):
    """
    Return ``True`` if the IPA string is equivalent to the ``other`` object.

    The ``other`` object can be:

    1. a Unicode string,
    2. a list of IPAChar objects, and
    3. another IPAString.

    :param variant other: the object to be compared against
    :param bool ignore: if other is a Unicode string, ignore Unicode characters not IPA valid
    :rtype: bool
    """
    def is_equivalent_to_list_of_ipachars(other):
        """
        Return ``True`` if the list of IPAChar objects
        in the canonical representation of the string
        is the same as the given list.

        :param list other: list of IPAChar objects
        :rtype: bool
        """
        my_ipa_chars = self.canonical_representation.ipa_chars
        if len(my_ipa_chars) != len(other):
            return False
        # Pairwise comparison: every IPAChar must match its counterpart.
        return all(mine.is_equivalent(theirs)
                   for mine, theirs in zip(my_ipa_chars, other))

    if is_unicode_string(other):
        try:
            return is_equivalent_to_list_of_ipachars(IPAString(unicode_string=other, ignore=ignore).ipa_chars)
        except Exception:
            # Parsing ``other`` may fail (e.g. invalid IPA characters):
            # treat unparsable strings as not equivalent.
            # (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return False
    if is_list_of_ipachars(other):
        try:
            return is_equivalent_to_list_of_ipachars(other)
        except Exception:
            # Comparison against malformed IPAChar lists yields False
            # rather than propagating the error.
            return False
    if isinstance(other, IPAString):
        return is_equivalent_to_list_of_ipachars(other.canonical_representation.ipa_chars)
    return False
|
Return ``True`` if the IPA string is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string,
2. a list of IPAChar objects, and
3. another IPAString.
:param variant other: the object to be compared against
:param bool ignore: if other is a Unicode string, ignore Unicode characters not IPA valid
:rtype: bool
|
def _prepare_request(reddit_session, url, params, data, auth, files,
                     method=None):
    """Return a requests Request object that can be "prepared"."""
    # Requests using OAuth for authorization must switch to using the oauth
    # domain.
    if getattr(reddit_session, '_use_oauth', False):
        headers = {'Authorization':
                   'bearer {0}'.format(reddit_session.access_token)}
        config = reddit_session.config
        for prefix in (config.api_url, config.permalink_url):
            if not url.startswith(prefix):
                continue
            if config.log_requests >= 1:
                sys.stderr.write('substituting {0} for {1} in url\n'.format(
                    config.oauth_url, prefix))
            url = config.oauth_url + url[len(prefix):]
            break
    else:
        headers = {}
    headers.update(reddit_session.http.headers)

    # Infer the HTTP method when the caller did not force one.
    if not method:
        method = 'POST' if (data or files) else 'GET'

    # Log the request if logging is enabled
    log_level = reddit_session.config.log_requests
    if log_level >= 1:
        sys.stderr.write('{0}: {1}\n'.format(method, url))
    if log_level >= 2:
        for label, value in (('params', params), ('data', data),
                             ('auth', auth)):
            if value:
                sys.stderr.write('{0}: {1}\n'.format(label, value))

    # Prepare request
    request = Request(method=method, url=url, headers=headers, params=params,
                      auth=auth, cookies=reddit_session.http.cookies)
    if method == 'GET':
        return request
    # Most POST requests require adding `api_type` and `uh` to the data.
    if data is True:
        data = {}
    if isinstance(data, dict):
        if not auth:
            data.setdefault('api_type', 'json')
            if reddit_session.modhash:
                data.setdefault('uh', reddit_session.modhash)
    else:
        # Non-dict payloads are sent as-is and flagged as JSON.
        request.headers.setdefault('Content-Type', 'application/json')
    request.data = data
    request.files = files
    return request
|
Return a requests Request object that can be "prepared".
|
def generate_protocol(self,sweep=None):
    """
    Create (x,y) points necessary to graph protocol for the current sweep.

    Populates four instance attributes from the ABF header's epoch table
    (``dictEpochInfoPerDAC``):

      - ``self.protoX`` / ``self.protoY``: step-plottable command waveform
        (numpy arrays; X in sample points, Y in command units).
      - ``self.protoSeqX`` / ``self.protoSeqY``: the same waveform reduced
        to its level-change points only.

    :param sweep: sweep number to generate the protocol for; defaults to
        ``self.currentSweep`` (or 0 if that is also None).
    """
    #TODO: make a line protocol that's plottable
    if sweep is None:
        sweep = self.currentSweep
    if sweep is None:
        sweep = 0
    # No epoch table for this channel: protocol is just the holding level
    # for the entire sweep.
    if not self.channel in self.header['dictEpochInfoPerDAC'].keys():
        self.protoX=[0,self.sweepSize]
        self.protoY=[self.holding,self.holding]
        self.protoSeqX=self.protoX
        self.protoSeqY=self.protoY
        return
    proto=self.header['dictEpochInfoPerDAC'][self.channel]
    self.protoX=[] #plottable Xs
    self.protoY=[] #plottable Ys
    self.protoX.append(0)
    self.protoY.append(self.holding)
    for step in proto:
        dX = proto[step]['lEpochInitDuration']  # epoch duration in points
        # Epoch level for this sweep: initial level plus per-sweep increment.
        Y = proto[step]['fEpochInitLevel']+proto[step]['fEpochLevelInc']*sweep
        self.protoX.append(self.protoX[-1])
        self.protoY.append(Y) #go to new Y
        self.protoX.append(self.protoX[-1]+dX) #take it to the new X
        self.protoY.append(Y) #update the new Y #TODO: fix for ramps
    # After the last epoch either hold the final level between episodes,
    # or fall back to the holding level.
    if self.header['listDACInfo'][0]['nInterEpisodeLevel']: #nInterEpisodeLevel
        finalVal=self.protoY[-1] #last holding
    else:
        finalVal=self.holding #regular holding
    self.protoX.append(self.protoX[-1])
    self.protoY.append(finalVal)
    self.protoX.append(self.sweepSize)
    self.protoY.append(finalVal)
    for i in range(1,len(self.protoX)-1): #correct for weird ABF offset issue.
        self.protoX[i]=self.protoX[i]+self.offsetX
    # Build the reduced "sequence" form: keep only points where the level
    # actually changes.
    self.protoSeqY=[self.protoY[0]]
    self.protoSeqX=[self.protoX[0]]
    for i in range(1,len(self.protoY)):
        if not self.protoY[i]==self.protoY[i-1]:
            self.protoSeqY.append(self.protoY[i])
            self.protoSeqX.append(self.protoX[i])
    # Insert extra points so the initial holding level is drawn correctly
    # when the first epoch starts at a different level.
    if self.protoY[0]!=self.protoY[1]:
        self.protoY.insert(1,self.protoY[0])
        self.protoX.insert(1,self.protoX[1])
        self.protoY.insert(1,self.protoY[0])
        self.protoX.insert(1,self.protoX[0]+self.offsetX/2)
    self.protoSeqY.append(finalVal)
    self.protoSeqX.append(self.sweepSize)
    self.protoX=np.array(self.protoX)
    self.protoY=np.array(self.protoY)
|
Create (x,y) points necessary to graph protocol for the current sweep.
|
def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides,
                              padding, dimension_numbers):
    """Generalized computation of conv shape."""
    # Permute both operand shapes into the canonical layout, compute the
    # output shape there, then invert the output permutation.
    lhs_perm, rhs_perm, out_perm = self._conv_general_permutations(
        dimension_numbers)
    canonical_lhs = onp.take(lhs_shape, lhs_perm)
    canonical_rhs = onp.take(rhs_shape, rhs_perm)
    canonical_out = self._conv_shape_tuple(
        canonical_lhs, canonical_rhs, window_strides, padding)
    return tuple(onp.take(canonical_out, onp.argsort(out_perm)))
|
Generalized computation of conv shape.
|
def _set_index(self, schema, name, fields, **index_options):
    """
    Create an index on the given fields (optionally UNIQUE via
    ``index_options['unique']``).

    https://www.sqlite.org/lang_createindex.html
    """
    unique = 'UNIQUE ' if index_options.get('unique', False) else ''
    columns = ', '.join(self._normalize_name(field) for field in fields)
    query_str = "CREATE {}INDEX IF NOT EXISTS '{}_{}' ON {} ({})".format(
        unique,
        schema,
        name,
        self._normalize_table_name(schema),
        columns
    )
    return self._query(query_str, ignore_result=True, **index_options)
|
https://www.sqlite.org/lang_createindex.html
|
def _new_object(self, objtype, name=None):
    r"""
    Instantiate a new object of the requested kind (matched by the prefix
    of ``objtype``) attached to this project; unknown kinds fall back to
    the plain Base class.
    """
    if objtype.startswith('net'):
        return openpnm.network.GenericNetwork(project=self, name=name)
    if objtype.startswith('geo'):
        return openpnm.geometry.GenericGeometry(project=self, name=name)
    if objtype.startswith('pha'):
        return openpnm.phases.GenericPhase(project=self, name=name)
    if objtype.startswith('phy'):
        return openpnm.physics.GenericPhysics(project=self, name=name)
    if objtype.startswith('alg'):
        return openpnm.algorithm.GenericAlgorithm(project=self, name=name)
    return openpnm.core.Base(project=self, name=name)
|
r"""
|
def _value_is_dynamic(self,obj,objtype=None):
    """
    Return True if the parameter is actually dynamic (i.e. the
    value is being generated).
    """
    # Dynamically generated values carry a ``_Dynamic_last`` attribute;
    # its presence is the test.
    current_value = super(Dynamic,self).__get__(obj,objtype)
    return hasattr(current_value,'_Dynamic_last')
|
Return True if the parameter is actually dynamic (i.e. the
value is being generated).
|
def load_stock_prices(self):
    """Load the latest prices for all securities in the model.

    For each stock item the most recent price is looked up via the price
    database. When no price is found, a dummy price of 1 in the default
    currency is used, effectively keeping the original amount. Currency is
    only assigned to Stock items, not to Cash balance records.
    """
    from pricedb import SecuritySymbol

    info = StocksInfo(self.config)
    try:
        for item in self.model.stocks:
            symbol = SecuritySymbol("", "")
            symbol.parse(item.symbol)
            price: PriceModel = info.load_latest_price(symbol)
            if not price:
                # Use a dummy price of 1, effectively keeping the original amount.
                price = PriceModel()
                price.currency = self.config.get(ConfigKeys.default_currency)
                price.value = Decimal(1)
            item.price = price.value
            if isinstance(item, Stock):
                item.currency = price.currency
            # Do not set currency for Cash balance records.
    finally:
        # Always release the price database handles, even if a lookup
        # raised (previously a failure would leak the open databases).
        info.close_databases()
|
Load latest prices for securities
|
def export(self, path, session):
    """Export this module, with variable values taken from `session`, to `path`.

    The module definition written out is the one from the ModuleSpec used
    to create this module; the session only supplies variable values.

    Args:
      path: path where to export the module to.
      session: session where to export the variables from.

    Raises:
      RuntimeError: if there is an issue during the export.
    """
    # Both the default graph and the session's graph must be the graph in
    # which this module was instantiated.
    default_graph = tf_v1.get_default_graph()
    if self._graph is not default_graph:
        raise RuntimeError("default graph differs from the graph where the "
                           "module was instantiated.")
    if self._graph is not session.graph:
        raise RuntimeError("session graph differs from the graph where the "
                           "module was instantiated.")
    self._impl.export(path, session)
|
Exports the module with the variables from the session in `path`.
Note that it is the module definition in the ModuleSpec used to create this
module that gets exported. The session is only used to provide the value
of variables.
Args:
path: path where to export the module to.
session: session where to export the variables from.
Raises:
RuntimeError: if there is an issue during the export.
|
def pixel_to_icrs_coords(x, y, wcs):
    """
    Convert pixel coordinates to ICRS Right Ascension and Declination.

    This is merely a convenience function to extract RA and Dec. from a
    `~astropy.coordinates.SkyCoord` instance so they can be put in
    separate columns in a `~astropy.table.Table`.

    Parameters
    ----------
    x : float or array-like
        The x pixel coordinate.
    y : float or array-like
        The y pixel coordinate.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use to convert from pixel coordinates
        to ICRS world coordinates.

    Returns
    -------
    ra : `~astropy.units.Quantity`
        The ICRS Right Ascension in degrees.
    dec : `~astropy.units.Quantity`
        The ICRS Declination in degrees.
    """
    # Transform to a SkyCoord in the ICRS frame, then pull the angles out
    # as plain degree Quantities.
    sky_coords = pixel_to_skycoord(x, y, wcs).icrs
    ra = sky_coords.ra.degree * u.deg
    dec = sky_coords.dec.degree * u.deg
    return ra, dec
|
Convert pixel coordinates to ICRS Right Ascension and Declination.
This is merely a convenience function to extract RA and Dec. from a
`~astropy.coordinates.SkyCoord` instance so they can be put in
separate columns in a `~astropy.table.Table`.
Parameters
----------
x : float or array-like
The x pixel coordinate.
y : float or array-like
The y pixel coordinate.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use to convert from pixel coordinates
to ICRS world coordinates.
Returns
-------
ra : `~astropy.units.Quantity`
The ICRS Right Ascension in degrees.
dec : `~astropy.units.Quantity`
The ICRS Declination in degrees.
|
def residual_block(x, hparams):
    """A stack of convolution blocks with residual connection."""
    kernel = (hparams.kernel_height, hparams.kernel_width)
    # Three undilated blocks with the same kernel.
    dilations_and_kernels = [((1, 1), kernel)] * 3
    conv_out = common_layers.subseparable_conv_block(
        x,
        hparams.hidden_size,
        dilations_and_kernels,
        padding="SAME",
        separability=0,
        name="residual_block")
    # Residual add followed by layer norm, then dropout.
    normed = common_layers.layer_norm(x + conv_out, hparams.hidden_size,
                                      name="lnorm")
    return tf.nn.dropout(normed, 1.0 - hparams.dropout)
|
A stack of convolution blocks with residual connection.
|
def mark_backward(output_tensor, used_node_names):
    """Recursively mark every node reachable backwards from a tensor as used.

    Walks from ``output_tensor`` to the op that produces it, then to the
    input tensors feeding that op, recording each visited op name.
    Control-dependency ops are also marked, and their data inputs followed,
    but their own control inputs are not traversed here.

    Args:
      output_tensor: A Tensor at which the propagation starts.
      used_node_names: A mutable set of strings; names of ops marked as
        visited are added to it in place.
    """
    producer = output_tensor.op
    if producer.name in used_node_names:
        return  # Already visited; avoid re-walking shared subgraphs.
    used_node_names.add(producer.name)
    for tensor in producer.inputs:
        mark_backward(tensor, used_node_names)
    for control_op in producer.control_inputs:
        used_node_names.add(control_op.name)
        for tensor in control_op.inputs:
            mark_backward(tensor, used_node_names)
|
Function to propagate backwards in the graph and mark nodes as used.
Traverses recursively through the graph from the end tensor, through the op
that generates the tensor, and then to the input tensors that feed the op.
Nodes encountered are stored in used_node_names.
Args:
output_tensor: A Tensor which we start the propagation.
used_node_names: A set of strings that stores the names of nodes we've marked as
visited.
|
def convert_pattern_to_pil(pattern, version=1):
    """Convert Pattern to PIL Image."""
    from PIL import Image
    mode = get_pil_mode(pattern.image_mode.name, False)
    # Note: width/height come from the rectangle in reversed index order.
    size = pattern.data.rectangle[3], pattern.data.rectangle[2]
    channels = []
    for channel in pattern.data.channels:
        if not channel.is_written:
            continue
        raw = _create_channel(size, channel.get_data(version),
                              channel.pixel_depth)
        channels.append(raw.convert('L'))
    if len(channels) == len(mode) + 1:
        mode += 'A'  # TODO: Perhaps doesn't work for some modes.
    if mode == 'P':
        image = channels[0]
        image.putpalette([value for rgb in pattern.color_table for value in rgb])
    else:
        image = Image.merge(mode, channels)
    if mode == 'CMYK':
        # Invert channel values for CMYK output.
        image = image.point(lambda value: 255 - value)
    return image
|
Convert Pattern to PIL Image.
|
def concatenate_fields(fields, dim):
    """Create an InstanceAttribute from a list of InstanceFields.

    All fields must share the same name, shape and dtype; their values are
    stacked into the attribute's value array along ``dim``.
    """
    if not fields:
        raise ValueError('fields cannot be an empty list')
    signatures = {(field.name, field.shape, field.dtype) for field in fields}
    if len(signatures) != 1:
        raise ValueError('fields should have homogeneous name, shape and dtype')
    template = fields[0]
    attr = InstanceAttribute(template.name, shape=template.shape,
                             dtype=template.dtype, dim=dim, alias=None)
    attr.value = np.array([field.value for field in fields],
                          dtype=template.dtype)
    return attr
|
Create an InstanceAttribute from a list of InstanceFields
|
def update_environment(self, environment, environment_ids):
    """
    Method to update environment

    :param environment: Environment data to update
    :param environment_ids: Ids of Environment
    """
    uri = 'api/v3/environment/%s/' % environment_ids
    payload = {'environments': [environment]}
    return super(ApiEnvironment, self).put(uri, payload)
|
Method to update environment
:param environment_ids: Ids of Environment
|
def _AddEvents(cls, Class):
"""Adds events based on the attributes of the given ``...Events`` class.
:Parameters:
Class : class
An `...Events` class whose methods define events that may occur in the
instances of the current class.
"""
def make_event(event):
return property(lambda self: self._GetDefaultEventHandler(event),
lambda self, Value: self._SetDefaultEventHandler(event, Value))
for event in dir(Class):
if not event.startswith('_'):
setattr(cls, 'On%s' % event, make_event(event))
cls._EventNames.append(event)
|
Adds events based on the attributes of the given ``...Events`` class.
:Parameters:
Class : class
An `...Events` class whose methods define events that may occur in the
instances of the current class.
|
def parse_unstruct(unstruct):
    """
    Convert an unstructured event JSON string into a list containing one
    Elasticsearch-compatible key-value pair.

    For example, the JSON

        {
            "data": {
                "data": {"key": "value"},
                "schema": "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1"
            },
            "schema": "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0"
        }

    would become

        [("unstruct_com_snowplowanalytics_snowplow_link_click_1", {"key": "value"})]

    :raises SnowplowEventTransformationException: if the inner ``data``
        field cannot be extracted.
    """
    outer = json.loads(unstruct)
    data = outer['data']
    schema = data['schema']
    if 'data' not in data:
        raise SnowplowEventTransformationException(["Could not extract inner data field from unstructured event"])
    inner_data = data['data']
    fixed_schema = fix_schema("unstruct_event", schema)
    return [(fixed_schema, inner_data)]
|
Convert an unstructured event JSON to a list containing one Elasticsearch-compatible key-value pair
For example, the JSON
{
"data": {
"data": {
"key": "value"
},
"schema": "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1"
},
"schema": "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0"
}
would become
[
(
"unstruct_com_snowplowanalytics_snowplow_link_click_1", {
"key": "value"
}
)
]
|
def get_settings_from_interface(iface):
    """Get the configuration settings associated to a schema interface

    :param iface: The schema interface from which we want to get its
        fields
    :return: Dictionary with iface name as key and as value a dictionary
        with the setting names (keys) linked to that schema and its
        values.
    """
    schema_id = iface.getName()
    adapter = getAdapter(api.get_portal(), iface)
    values = {}
    for field_name in getFieldNames(iface):
        field_value = getattr(adapter, field_name, None)
        # Only keep values that can be serialized to JSON.
        if is_json_serializable(field_value):
            values[field_name] = field_value
    return {schema_id: values}
|
Get the configuration settings associated to a list of schema
interfaces
:param iface: The schema interface from which we want to get its
fields
:return: Dictionary with iface name as key and as value a dictionary
with the setting names (keys) linked to that schema and its
values.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.