code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def add_job(self, queue_name, job, timeout=200, replicate=None, delay=None,
            retry=None, ttl=None, maxlen=None, asynchronous=None):
    """
    Add a job to a queue.

    ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>]
           [RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC]

    :param queue_name: the name of the queue, any string, basically.
    :param job: a string representing the job.
    :param timeout: the command timeout in milliseconds.
    :param replicate: count of the nodes the job should be replicated to.
    :param delay: number of seconds that should elapse before the job is
        queued by any server.
    :param retry: period in seconds after which, if no ACK is received,
        the job is put again into the queue for delivery. If RETRY is 0,
        the job has an at-most-once delivery semantics.
    :param ttl: max job life in seconds. After this time, the job is
        deleted even if it was not successfully delivered.
    :param maxlen: specifies that if there are already this many messages
        queued for the specified queue name, the message is refused and
        an error reported to the client.
    :param asynchronous: asks the server to let the command return ASAP
        and replicate the job to other nodes in the background. The job
        gets queued ASAP, while normally the job is put into the queue
        only when the client gets a positive reply. Named `asynchronous`
        because `async` is a reserved keyword in Python 3.7.
    :returns: job_id
    """
    command = ['ADDJOB', queue_name, job, timeout]
    if replicate:
        command += ['REPLICATE', replicate]
    if delay:
        command += ['DELAY', delay]
    # RETRY is compared against None explicitly: RETRY 0 is a meaningful
    # value (at-most-once delivery), unlike the other optional arguments.
    if retry is not None:
        command += ['RETRY', retry]
    if ttl:
        command += ['TTL', ttl]
    if maxlen:
        command += ['MAXLEN', maxlen]
    if asynchronous:
        command += ['ASYNC']
    # TODO(canardleteer): we need to handle "-PAUSE" messages more
    # appropriately, for now it's up to the person using the library
    # to handle a generic ResponseError on their own.
    logger.debug("sending job - %s", command)
    job_id = self.execute_command(*command)
    logger.debug("sent job - %s", command)
    # Bug fix: use lazy %-style logger args instead of eager "%" formatting,
    # consistent with the other debug calls above.
    logger.debug("job_id: %s ", job_id)
    return job_id
|
Add a job to a queue.
ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>]
[RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC]
:param queue_name: is the name of the queue, any string, basically.
:param job: is a string representing the job.
:param timeout: is the command timeout in milliseconds.
:param replicate: count is the number of nodes the job should be
replicated to.
:param delay: sec is the number of seconds that should elapse
before the job is queued by any server.
:param retry: sec period after which, if no ACK is received, the
job is put again into the queue for delivery. If RETRY is 0,
the job has an at-most-once delivery semantics.
:param ttl: sec is the max job life in seconds. After this time,
the job is deleted even if it was not successfully delivered.
:param maxlen: count specifies that if there are already count
messages queued for the specified queue name, the message is
refused and an error reported to the client.
:param asynchronous: asks the server to let the command return ASAP and
replicate the job to other nodes in the background. The job
gets queued ASAP, while normally the job is put into the queue
only when the client gets a positive reply. Changing the name of this
argument because async is a reserved keyword in Python 3.7.
:returns: job_id
|
def check_lines(self, lines, i):
    """check lines have less than a maximum number of characters

    `lines` is a chunk of raw source text and `i` is the 1-based line
    number of its first line; messages are reported against the running
    line counter.
    """
    max_chars = self.config.max_line_length
    ignore_long_line = self.config.ignore_long_lines

    def check_line(line, i):
        # Check one physical line. Returns the next line number, or None
        # when an inline pragma disables line-too-long for this chunk.
        if not line.endswith("\n"):
            self.add_message("missing-final-newline", line=i)
        else:
            # exclude \f (formfeed) from the rstrip
            stripped_line = line.rstrip("\t\n\r\v ")
            if not stripped_line and _EMPTY_LINE in self.config.no_space_check:
                # allow empty lines
                pass
            elif line[len(stripped_line) :] not in ("\n", "\r\n"):
                self.add_message(
                    "trailing-whitespace", line=i, col_offset=len(stripped_line)
                )
            # Don't count excess whitespace in the line length.
            line = stripped_line
        # Honour an inline option comment such as
        # "# pylint: disable=line-too-long".
        mobj = OPTION_RGX.search(line)
        if mobj and "=" in line:
            front_of_equal, _, back_of_equal = mobj.group(1).partition("=")
            if front_of_equal.strip() == "disable":
                if "line-too-long" in {
                    _msg_id.strip() for _msg_id in back_of_equal.split(",")
                }:
                    return None
            # Strip the trailing comment before measuring line length.
            line = line.rsplit("#", 1)[0].rstrip()
        if len(line) > max_chars and not ignore_long_line.search(line):
            self.add_message("line-too-long", line=i, args=(len(line), max_chars))
        return i + 1

    # Characters str.splitlines splits on that do not end a physical
    # source line; fragments ending with one of these are glued back
    # together before being checked.
    unsplit_ends = {
        "\v",
        "\x0b",
        "\f",
        "\x0c",
        "\x1c",
        "\x1d",
        "\x1e",
        "\x85",
        "\u2028",
        "\u2029",
    }
    unsplit = []
    for line in lines.splitlines(True):
        if line[-1] in unsplit_ends:
            unsplit.append(line)
            continue
        if unsplit:
            unsplit.append(line)
            line = "".join(unsplit)
            unsplit = []
        i = check_line(line, i)
        if i is None:
            break
    if unsplit:
        # Flush any trailing glued-together fragment.
        check_line("".join(unsplit), i)
|
check lines have less than a maximum number of characters
|
def serialize(self) -> dict:
    """Return a copy of the message ready for sending to the Slack API.

    The ``attachments`` entry, when present, is JSON-encoded because the
    Slack API expects it as a serialized string.

    Returns:
        serialized message
    """
    payload = dict(self)
    if "attachments" in payload:
        payload["attachments"] = json.dumps(payload["attachments"])
    return payload
|
Serialize the message for sending to slack API
Returns:
serialized message
|
def _construct_request(self):
    """
    Utility for constructing the request header and connection.

    Returns a (connection, headers) pair where the connection class is
    chosen from the endpoint's URL scheme.
    """
    is_secure = self.parsed_endpoint.scheme == 'https'
    conn_cls = httplib.HTTPSConnection if is_secure else httplib.HTTPConnection
    conn = conn_cls(self.parsed_endpoint.netloc)
    head = {
        "Accept": "application/json",
        "User-Agent": USER_AGENT,
        API_TOKEN_HEADER_NAME: self.api_token,
    }
    # Older API versions require an explicit version header.
    if self.api_version in ('0.1', '0.01a'):
        head[API_VERSION_HEADER_NAME] = self.api_version
    return conn, head
|
Utility for constructing the request header and connection
|
def readAsync(self, fileName, callback, **kwargs):
    """
    Interprets the specified file asynchronously, interpreting it as a
    model or a script file. As a side effect, it invalidates all entities
    (as the passed file can contain any arbitrary command); the lists of
    entities will be re-populated lazily (at first access).

    Args:
        fileName: Path to the file (Relative to the current working
            directory or absolute).
        callback: Callback to be executed when the file has been
            interpreted.
    """
    if self._langext is not None:
        # Run the source through the language extension first and
        # interpret the translated copy instead of the original file.
        with open(fileName, 'r') as fin:
            translated = self._langext.translate(fin.read(), **kwargs)
        with open(fileName + '.translated', 'w') as fout:
            fout.write(translated)
        fileName += '.translated'

    def worker():
        self._lock.acquire()
        try:
            self._impl.read(fileName)
            self._errorhandler_wrapper.check()
        finally:
            # Released on success and on failure alike; an exception
            # propagates after release and skips the callback.
            self._lock.release()
        callback.run()

    Thread(target=worker).start()
|
Interprets the specified file asynchronously, interpreting it as a
model or a script file. As a side effect, it invalidates all entities
(as the passed file can contain any arbitrary command); the lists of
entities will be re-populated lazily (at first access).
Args:
fileName: Path to the file (Relative to the current working
directory or absolute).
callback: Callback to be executed when the file has been
interpreted.
|
def top_stories(self, limit=5, first=None, last=None, json=False):
    """
    Yield the top story objects.

    :param limit: (default 5) number of story objects needed.
    :param first: optional start index for slicing the story list.
    :param last: optional end index for slicing the story list.
    :param json: (default False) when True, yield the raw response list
        instead of parsed story objects.

    The method uses asynchronous fetching via gevent.
    """
    story_ids = requests.get(TOP_STORIES_URL).json()
    story_urls = [API_BASE + "item/" + str(story_id) + '.json'
                  for story_id in story_ids]
    if first and last:
        story_urls = story_urls[first:last]
        if limit != 5:
            # Bug fix: the slice result was previously computed and
            # discarded (bare `story_urls[:limit]`), so a non-default
            # limit had no effect when first/last were given.
            story_urls = story_urls[:limit]
    else:
        story_urls = story_urls[:limit]
    response_list = fetch_event(story_urls)
    if json:
        yield response_list
    else:
        for response in response_list:
            yield story_parser(response)
|
Get the top story objects list
params :
limit = (default | 5) number of story objects needed
json = (default | False)
The method uses asynchronous grequest form gevent
|
def _replay_index(replay_dir):
    """Output information for a directory of replays.

    Prints one CSV row per replay (build, map, duration, per-player
    race/APM, P1 outcome) and finally lists replays that failed to parse.
    """
    run_config = run_configs.get()
    replay_dir = run_config.abs_replay_path(replay_dir)
    print("Checking: ", replay_dir)
    with run_config.start(want_rgb=False) as controller:
        print("-" * 60)
        # CSV header line.
        print(",".join((
            "filename",
            "build",
            "map_name",
            "game_duration_loops",
            "players",
            "P1-outcome",
            "P1-race",
            "P1-apm",
            "P2-race",
            "P2-apm",
        )))
        try:
            bad_replays = []
            for file_path in run_config.replay_paths(replay_dir):
                file_name = os.path.basename(file_path)
                try:
                    info = controller.replay_info(run_config.replay_data(file_path))
                except remote_controller.RequestError as e:
                    # The controller rejected the replay outright.
                    bad_replays.append("%s: %s" % (file_name, e))
                    continue
                if info.HasField("error"):
                    print("failed:", file_name, info.error, info.error_details)
                    bad_replays.append(file_name)
                else:
                    out = [
                        file_name,
                        info.base_build,
                        info.map_name,
                        info.game_duration_loops,
                        len(info.player_info),
                        sc_pb.Result.Name(info.player_info[0].player_result.result),
                        sc_common.Race.Name(info.player_info[0].player_info.race_actual),
                        info.player_info[0].player_apm,
                    ]
                    # Second player columns are optional (single-player replays).
                    if len(info.player_info) >= 2:
                        out += [
                            sc_common.Race.Name(
                                info.player_info[1].player_info.race_actual),
                            info.player_info[1].player_apm,
                        ]
                    print(u",".join(str(s) for s in out))
        except KeyboardInterrupt:
            # Allow interrupting a long scan; errors are still reported below.
            pass
        finally:
            if bad_replays:
                print("\n")
                print("Replays with errors:")
                print("\n".join(bad_replays))
|
Output information for a directory of replays.
|
def run(self):
"""Called when a file is changed to re-run the tests with nose."""
if self.auto_clear:
os.system('cls' if os.name == 'nt' else 'auto_clear')
else:
print
print 'Running unit tests...'
if self.auto_clear:
print
subprocess.call('nosetests', cwd=self.directory)
|
Called when a file is changed to re-run the tests with nose.
|
def get_meta(cls):
    """
    Collect all members of any contained :code:`Meta` class declarations
    from the given class or any of its base classes.
    (Sub class values take precedence.)

    :type cls: class
    :rtype: Struct
    """
    merged = Struct()
    # Walk the MRO from the most generic base down to the class itself so
    # that subclass values overwrite inherited ones.
    for klass in reversed(cls.mro()):
        if hasattr(klass, 'Meta'):
            for attr_name, attr_value in klass.Meta.__dict__.items():
                merged[attr_name] = attr_value
    return merged
|
Collect all members of any contained :code:`Meta` class declarations from the given class or any of its base classes.
(Sub class values take precedence.)
:type cls: class
:rtype: Struct
|
def initialize(self):
    """Instantiates the cache area to be ready for updates.

    Creates the Splunk KV collection named ``self.collname`` if it does
    not already exist, pushes ``self.schema`` to it, and finally verifies
    the collection is visible.

    :raises EnvironmentError: if the collection is still missing after
        the creation attempt.
    """
    if self.collname not in self.current_kv_names():
        # NOTE(review): assumes self.request returns a requests-style
        # response object (has raise_for_status) — confirm.
        r = self.request('post',
                         self.url+"storage/collections/config",
                         headers={'content-type': 'application/json'},
                         data={'name': self.collname})
        r.raise_for_status()
        # initialize schema
        re = self.request('post',
                          self.url+"storage/collections/config/"+self.collname,
                          headers = {'content-type': 'application/json'},
                          data=self.schema)
        re.raise_for_status()
        logger.info("initialized Splunk Key Value Collection %s with schema %s"\
                    % (self.collname, str(self.schema)))
    # Re-check so a silent failure to create surfaces as an error here.
    if self.collname not in self.current_kv_names():
        raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names())))
|
Instantiates the cache area to be ready for updates
|
def UploadFile(self, fd, offset=0, amount=None):
    """Uploads chunks of a given file descriptor to the transfer store flow.

    Args:
      fd: A file descriptor to upload.
      offset: An integer offset at which the file upload should start on.
      amount: An upper bound on number of bytes to stream. If it is `None`
        then the whole file is uploaded.

    Returns:
      A `BlobImageDescriptor` object.
    """
    # Build the chunk stream first, then hand it off for uploading.
    chunk_stream = self._streamer.StreamFile(fd, offset=offset, amount=amount)
    return self._UploadChunkStream(chunk_stream)
|
Uploads chunks of a given file descriptor to the transfer store flow.
Args:
fd: A file descriptor to upload.
offset: An integer offset at which the file upload should start on.
amount: An upper bound on number of bytes to stream. If it is `None` then
the whole file is uploaded.
Returns:
A `BlobImageDescriptor` object.
|
def wrap_value(value, include_empty=False):
    """
    :return: the value wrapped in a list unless it is already iterable (and not a dict);
        if so, empty values will be filtered out by default, and an empty list is returned.
    """
    # None is "empty": wrap it only when empty values were requested.
    if value is None:
        return [None] if include_empty else []
    # Sized and empty (e.g. "", [], {}): same treatment as None.
    if hasattr(value, '__len__') and len(value) == 0:
        return [value] if include_empty else []
    # Types that should always be wrapped as a single element.
    if isinstance(value, _wrap_types):
        return [value]
    # Non-iterable scalars get wrapped too.
    if not hasattr(value, '__iter__'):
        return [value]
    # Already a non-empty iterable: pass it through, optionally dropping
    # its empty elements.
    return value if include_empty else filter_empty(value, [])
|
:return: the value wrapped in a list unless it is already iterable (and not a dict);
if so, empty values will be filtered out by default, and an empty list is returned.
|
def recv_result_from_workers(self):
    """ Receives a results from the MPI worker pool and send it out via 0mq

    Blocks until any MPI worker sends a message tagged RESULT_TAG.

    Returns:
    --------
        result: task result from the workers
    """
    info = MPI.Status()
    # Accept a result from whichever worker finishes first.
    result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)
    logger.debug("Received result from workers: {}".format(result))
    return result
|
Receives a results from the MPI worker pool and send it out via 0mq
Returns:
--------
result: task result from the workers
|
def register_reference(self, dispatcher, node):
    """
    Register this identifier to the current scope, and mark it as
    referenced in the current scope.
    """
    # Map the identifier node itself to the current scope so the resolve
    # step can find it later.
    # This should probably WARN about the node object being already
    # assigned to an existing scope that isn't current_scope.
    scope = self.current_scope
    self.identifiers[node] = scope
    scope.reference(node.value)
|
Register this identifier to the current scope, and mark it as
referenced in the current scope.
|
def imagetransformer_base_10l_16h_big_dr01_imgnet():
    """big 1d model for conditional image generation."""
    # Start from the 14-layer base config and override for this variant.
    hparams = imagetransformer_base_14l_8h_big_dr01()
    hparams.num_decoder_layers = 10  # num_hidden_layers
    hparams.num_heads = 16
    hparams.hidden_size = 1024
    hparams.filter_size = 4096
    hparams.batch_size = 1
    hparams.unconditional = False  # conditional generation
    hparams.layer_prepostprocess_dropout = 0.1
    return hparams
|
big 1d model for conditional image generation.
|
def spin_px(self):
    """Returns the x-component of the spin of the primary mass."""
    # NOTE(review): delegates to conversions.primary_spin, which
    # presumably selects the x-spin of the more massive of mass1/mass2
    # — confirm against the conversions module.
    return conversions.primary_spin(self.mass1, self.mass2, self.spin1x,
                                    self.spin2x)
|
Returns the x-component of the spin of the primary mass.
|
def update(self, event):
    """
    All messages from the Protocol get passed through this method. This
    allows the client to have an up-to-date state for the client.

    However, this method doesn't actually update right away. Instead, the
    actual update happens in another thread, potentially later, in order
    to allow user code to handle the event faster.
    """
    # Queue a private copy of the event data so later mutation by user
    # code cannot mangle what we hand to the update thread.
    snapshot = event.copy()
    reactor.callInThread(self._update_deferred, snapshot)
|
All messages from the Protocol get passed through this method. This
allows the client to have an up-to-date state for the client.
However, this method doesn't actually update right away. Instead, the
actual update happens in another thread, potentially later, in order to
allow user code to handle the event faster.
|
def detect_xid_devices(self):
    """
    For all of the com ports connected to the computer, send an
    XID command '_c1'. If the device responds with '_xid', it is
    an xid device.

    Detected connections are collected in ``self.__xid_cons``.
    """
    self.__xid_cons = []
    for c in self.__com_ports:
        device_found = False
        # Probe each supported baud rate until a device answers.
        for b in [115200, 19200, 9600, 57600, 38400]:
            con = XidConnection(c, b)
            try:
                con.open()
            except SerialException:
                # Port unavailable at this rate; try the next one.
                continue
            con.flush_input()
            con.flush_output()
            returnval = con.send_xid_command("_c1", 5).decode('ASCII')
            if returnval.startswith('_xid'):
                device_found = True
                self.__xid_cons.append(con)
                if(returnval != '_xid0'):
                    # set the device into XID mode
                    con.send_xid_command('c10')
                    con.flush_input()
                    con.flush_output()
                # be sure to reset the timer to avoid the 4.66 hours
                # problem. (refer to XidConnection.xid_input_found to
                # read about the 4.66 hours)
                con.send_xid_command('e1')
                con.send_xid_command('e5')
            # NOTE(review): the connection is closed even for detected
            # devices; presumably it is reopened when used — confirm.
            con.close()
            if device_found:
                break
|
For all of the com ports connected to the computer, send an
XID command '_c1'. If the device responds with '_xid', it is
an xid device.
|
def parse(filename_url_or_file, guess_charset=True, parser=None):
    """Parse a filename, URL, or file-like object into an HTML document
    tree. Note: this returns a tree, not an element. Use
    ``parse(...).getroot()`` to get the document root.
    """
    if parser is None:
        parser = html_parser
    if isinstance(filename_url_or_file, _strings):
        # A string: either a URL to fetch or a local path to open.
        if _looks_like_url(filename_url_or_file):
            fp = urlopen(filename_url_or_file)
        else:
            fp = open(filename_url_or_file, 'rb')
    else:
        # Already a file-like object.
        fp = filename_url_or_file
    return parser.parse(fp, useChardet=guess_charset)
|
Parse a filename, URL, or file-like object into an HTML document
tree. Note: this returns a tree, not an element. Use
``parse(...).getroot()`` to get the document root.
|
def get_packet(self, generation_time, sequence_number):
    """
    Gets a single packet by its identifying key (gentime, seqNum).

    :param ~datetime.datetime generation_time: When the packet was generated (packet time)
    :param int sequence_number: Sequence number of the packet
    :rtype: .Packet
    """
    # Packets are addressed by instance, ISO generation time, and
    # sequence number.
    path = '/archive/{}/packets/{}/{}'.format(
        self._instance, to_isostring(generation_time), sequence_number)
    proto_response = self._client.get_proto(path)
    packet_data = yamcs_pb2.TmPacketData()
    packet_data.ParseFromString(proto_response.content)
    return Packet(packet_data)
|
Gets a single packet by its identifying key (gentime, seqNum).
:param ~datetime.datetime generation_time: When the packet was generated (packet time)
:param int sequence_number: Sequence number of the packet
:rtype: .Packet
|
def create_long_form_weights(model_obj, wide_weights, rows_to_obs=None):
    """
    Converts an array of weights with one element per observation (wide-format)
    to an array of weights with one element per observation per available
    alternative (long-format).

    Parameters
    ----------
    model_obj : an instance or sublcass of the MNDC class.
        Should be the model object that corresponds to the model we are
        constructing the bootstrap confidence intervals for.
    wide_weights : 1D or 2D ndarray.
        Should contain one element or one column per observation in
        `model_obj.data`, depending on whether `wide_weights` is 1D or 2D
        respectively. These elements should be the weights for optimizing the
        model's objective function for estimation.
    rows_to_obs : 2D scipy sparse array.
        A mapping matrix of zeros and ones, were `rows_to_obs[i, j]` is one if
        row `i` of the long-format data belongs to observation `j` and zero
        otherwise.

    Returns
    -------
    long_weights : 1D or 2D ndarray.
        The weights from `wide_weights`, with each observation's weight
        mapped to the corresponding row(s) in the long-format data.
    """
    # Ensure argument validity.
    check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs)
    # Fall back to the model's own rows-to-observations mapping.
    if rows_to_obs is None:
        rows_to_obs = model_obj.get_mappings_for_fit()['rows_to_obs']
    # Work with a 2D view so the sparse dot product is well defined,
    # remembering whether the caller passed a 1D array.
    was_1d = wide_weights.ndim == 1
    weights_2d = wide_weights[:, None] if was_1d else wide_weights
    long_weights = rows_to_obs.dot(weights_2d)
    if was_1d:
        # Collapse back to 1D to mirror the input's shape.
        long_weights = long_weights.sum(axis=1)
    return long_weights
|
Converts an array of weights with one element per observation (wide-format)
to an array of weights with one element per observation per available
alternative (long-format).
Parameters
----------
model_obj : an instance or sublcass of the MNDC class.
Should be the model object that corresponds to the model we are
constructing the bootstrap confidence intervals for.
wide_weights : 1D or 2D ndarray.
Should contain one element or one column per observation in
`model_obj.data`, depending on whether `wide_weights` is 1D or 2D
respectively. These elements should be the weights for optimizing the
model's objective function for estimation.
rows_to_obs : 2D scipy sparse array.
A mapping matrix of zeros and ones, were `rows_to_obs[i, j]` is one if
row `i` of the long-format data belongs to observation `j` and zero
otherwise.
Returns
-------
long_weights : 1D or 2D ndarray.
Should contain one element or one column per observation in
`model_obj.data`, depending on whether `wide_weights` is 1D or 2D
respectively. These elements should be the weights from `wide_weights`,
simply mapping each observation's weight to the corresponding row in
the long-format data.
|
def remove_extra_packages(self, packages, dry_run=False):
    """ Remove all packages missing from list """
    removal_list = self.determine_extra_packages(packages)
    # Guard clauses: nothing to do, or only report what would happen.
    if not removal_list:
        print("No packages to be removed")
        return
    if dry_run:
        print("The following packages would be removed:\n %s\n" %
              "\n ".join(removal_list))
        return
    print("Removing packages\n")
    command = ["pip", "uninstall", "-y"] + list(removal_list)
    subprocess.check_call(command)
|
Remove all packages missing from list
|
def login_required(fn):
    """Requires login before proceeding, but does not prompt the user to login. Decorator should
    be used only on Click CLI commands.

    Notes
    -----
    Different means of authentication will be attempted in this order:
    1. An API key present in the Click context object from a previous successful authentication.
    2. A bearer token (ONE_CODEX_BEARER_TOKEN) in the environment.
    3. An API key (ONE_CODEX_API_KEY) in the environment.
    4. An API key in the credentials file (~/.onecodex).
    """
    @wraps(fn)
    def login_wrapper(ctx, *args, **kwargs):
        base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com")
        api_kwargs = {"telemetry": ctx.obj["TELEMETRY"]}
        # Gather every candidate credential up front...
        api_key_prior_login = ctx.obj.get("API_KEY")
        bearer_token_env = os.environ.get("ONE_CODEX_BEARER_TOKEN")
        api_key_env = os.environ.get("ONE_CODEX_API_KEY")
        api_key_creds_file = _login(base_url, silent=True)
        # ...then pick the first available one in the documented priority order.
        if api_key_prior_login is not None:
            api_kwargs["api_key"] = api_key_prior_login
        elif bearer_token_env is not None:
            api_kwargs["bearer_token"] = bearer_token_env
        elif api_key_env is not None:
            api_kwargs["api_key"] = api_key_env
        elif api_key_creds_file is not None:
            api_kwargs["api_key"] = api_key_creds_file
        else:
            # No credentials found anywhere: tell the user and abort.
            click.echo(
                "The command you specified requires authentication. Please login first.\n", err=True
            )
            ctx.exit()
        ctx.obj["API"] = Api(**api_kwargs)
        return fn(ctx, *args, **kwargs)
    return login_wrapper
|
Requires login before proceeding, but does not prompt the user to login. Decorator should
be used only on Click CLI commands.
Notes
-----
Different means of authentication will be attempted in this order:
1. An API key present in the Click context object from a previous successful authentication.
2. A bearer token (ONE_CODEX_BEARER_TOKEN) in the environment.
3. An API key (ONE_CODEX_API_KEY) in the environment.
4. An API key in the credentials file (~/.onecodex).
|
def isoformat(self, sep='T'):
    """Return the time formatted according to ISO.

    This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
    self.microsecond == 0.

    If self.tzinfo is not None, the UTC offset is also attached, giving
    'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.

    Optional argument sep specifies the separator between date and
    time, default 'T'.
    """
    # Date part plus separator, then delegate the time part.
    s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
                               sep) +
         _format_time(self._hour, self._minute, self._second,
                      self._microsecond))
    off = self.utcoffset()
    if off is not None:
        # Render the UTC offset as [+-]HH:MM, normalizing negative offsets.
        if off.days < 0:
            sign = "-"
            off = -off
        else:
            sign = "+"
        hh, mm = divmod(off, timedelta(hours=1))
        # Offsets are expected to be a whole number of minutes.
        assert not mm % timedelta(minutes=1), "whole minute"
        mm //= timedelta(minutes=1)
        s += "%s%02d:%02d" % (sign, hh, mm)
    return s
|
Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
|
def put_skeleton_files_on_disk(metadata_type, where, github_template=None, params=None):
    """
    Generates file based on jinja2 templates.

    :param metadata_type: name of the metadata type to generate files for.
    :param where: directory in which the generated files are created.
    :param github_template: dict describing the template; its ``file_name``
        entry selects the template file.
    :param params: template parameters; must contain ``api_name``.
    """
    # Fix: avoid a mutable default argument; behavior for callers is unchanged.
    if params is None:
        params = {}
    api_name = params["api_name"]
    file_name = github_template["file_name"]
    template_source = config.connection.get_plugin_client_setting('mm_template_source', 'joeferraro/MavensMate-Templates/master')
    template_location = config.connection.get_plugin_client_setting('mm_template_location', 'remote')
    try:
        if template_location == 'remote':
            if 'linux' in sys.platform:
                template_body = os.popen("wget https://raw.githubusercontent.com/{0}/{1}/{2} -q -O -".format(template_source, metadata_type, file_name)).read()
            else:
                template_body = urllib2.urlopen("https://raw.githubusercontent.com/{0}/{1}/{2}".format(template_source, metadata_type, file_name)).read()
        else:
            template_body = get_file_as_string(os.path.join(template_source,metadata_type,file_name))
    except Exception:
        # Fix: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. On any fetch failure, fall back to the
        # bundled local template.
        template_body = get_file_as_string(os.path.join(config.base_path,config.support_dir,"templates","github-local",metadata_type,file_name))
    template = env.from_string(template_body)
    file_body = template.render(params)
    metadata_type = get_meta_type_by_name(metadata_type)
    os.makedirs("{0}/{1}".format(where, metadata_type['directoryName']))
    source_path = "{0}/{1}/{2}".format(where, metadata_type['directoryName'], api_name+"."+metadata_type['suffix'])
    # Fix: use context managers so handles are closed even if a write fails.
    with open(source_path, 'w') as f:
        f.write(file_body)
    template = env.get_template('meta.html')
    file_body = template.render(api_name=api_name, sfdc_api_version=SFDC_API_VERSION,meta_type=metadata_type['xmlName'])
    with open(source_path+"-meta.xml", 'w') as f:
        f.write(file_body)
|
Generates file based on jinja2 templates
|
def _input_as_lines(self, data):
    """Write sequence of lines to temp file, return filename

    data: a sequence to be written to a file, each element of the
    sequence will compose a line in the file
    * Note: '\n' will be stripped off the end of each sequence
    element before writing to a file in order to avoid
    multiple new lines accidentally be written to a file
    """
    self._input_filename = self.getTmpFilename(
        self.WorkingDir, suffix='.fasta')
    # Iterate lazily rather than materializing the whole sequence so a
    # large input is never held in memory at once.
    with open(self._input_filename, 'w') as out_handle:
        for element in data:
            # NOTE(review): str.strip('\n') removes newlines from BOTH
            # ends, not only the end as the docstring says — preserved
            # as-is to keep behavior identical.
            out_handle.write(str(element).strip('\n'))
            out_handle.write('\n')
    return self._input_filename
|
Write sequence of lines to temp file, return filename
data: a sequence to be written to a file, each element of the
sequence will compose a line in the file
* Note: '\n' will be stripped off the end of each sequence
element before writing to a file in order to avoid
multiple new lines accidentally be written to a file
|
def xml(self, text=TEXT):
    """ Generate an XML output from the report data. """
    def row_to_xml(row):
        # One <item> element per report row, one <field> per column.
        parts = [" <item>\n"]
        for field_name in row.index:
            parts.append(" <field name=\"%s\">%s</field>\n" % (field_name, row[field_name]))
        parts.append(" </item>\n")
        return "".join(parts)
    items = '\n'.join(self._data.apply(row_to_xml, axis=1))
    return "<items>\n" + items + "</items>"
|
Generate an XML output from the report data.
|
def update(self, scopes=None, add_scopes=None, rm_scopes=None, note='',
           note_url=''):
    """Update this authorization.

    :param list scopes: (optional), replaces the authorization scopes with
        these
    :param list add_scopes: (optional), scopes to be added
    :param list rm_scopes: (optional), scopes to be removed
    :param str note: (optional), new note about authorization
    :param str note_url: (optional), new note URL about this authorization
    :returns: bool
    """
    # Fix: defaults were mutable lists ([]); None behaves identically in
    # the truthiness checks below and is safe.
    success = False
    json = None
    # Each provided option issues its own POST; the last response wins.
    if scopes:
        d = {'scopes': scopes}
        json = self._json(self._post(self._api, data=d), 200)
    if add_scopes:
        d = {'add_scopes': add_scopes}
        json = self._json(self._post(self._api, data=d), 200)
    if rm_scopes:
        d = {'remove_scopes': rm_scopes}
        json = self._json(self._post(self._api, data=d), 200)
    if note or note_url:
        d = {'note': note, 'note_url': note_url}
        json = self._json(self._post(self._api, data=d), 200)
    if json:
        self._update_(json)
        success = True
    return success
|
Update this authorization.
:param list scopes: (optional), replaces the authorization scopes with
these
:param list add_scopes: (optional), scopes to be added
:param list rm_scopes: (optional), scopes to be removed
:param str note: (optional), new note about authorization
:param str note_url: (optional), new note URL about this authorization
:returns: bool
|
def _state(self):
    """The internal state of the object.

    The api responses are not consistent so a retry is performed on every
    call with information updating the internally saved state refreshing
    the data. The info is cached for STATE_CACHING_SECONDS.

    :return: The current state of the toons' information state.
    """
    required_keys = ('deviceStatusInfo',
                     'gasUsage',
                     'powerUsage',
                     'thermostatInfo',
                     'thermostatStates')
    fresh = {}
    try:
        # Merge several responses: each retry may fill in different keys.
        for _ in range(self._state_retries):
            fresh.update(self._get_data('/client/auth/retrieveToonState'))
    except TypeError:
        self._logger.exception('Could not get answer from service.')
    message = ('Updating internal state with retrieved '
               'state:{state}').format(state=fresh)
    self._logger.debug(message)
    self._state_.update(fresh)
    if not all(key in self._state_.keys() for key in required_keys):
        raise IncompleteResponse(fresh)
    return self._state_
|
The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
|
def dl_file(url, dest, chunk_size=6553):
    """Download `url` to `dest`.

    :param url: source URL to fetch.
    :param dest: destination with an ``open`` method (e.g. a pathlib.Path).
    :param chunk_size: number of bytes read per iteration.
    """
    import urllib3
    http = urllib3.PoolManager()
    r = http.request('GET', url, preload_content=False)
    try:
        with dest.open('wb') as out:
            while True:
                data = r.read(chunk_size)
                # urllib3 returns b'' at end of stream.
                if not data:
                    break
                out.write(data)
    finally:
        # Bug fix: the connection was leaked if reading or writing raised;
        # release it unconditionally.
        r.release_conn()
|
Download `url` to `dest`
|
def read_source_models(fnames, converter, monitor):
    """
    :param fnames:
        list of source model files
    :param converter:
        a SourceConverter instance
    :param monitor:
        a :class:`openquake.performance.Monitor` instance
    :yields:
        SourceModel instances
    """
    for path in fnames:
        # Dispatch on the file extension.
        if path.endswith(('.xml', '.nrml')):
            model = to_python(path, converter)
        elif path.endswith('.hdf5'):
            model = sourceconverter.to_python(path, converter)
        else:
            raise ValueError('Unrecognized extension in %s' % path)
        # Remember where the model came from before yielding it.
        model.fname = path
        yield model
|
:param fnames:
list of source model files
:param converter:
a SourceConverter instance
:param monitor:
a :class:`openquake.performance.Monitor` instance
:yields:
SourceModel instances
|
def make_config_data(*, guided):
    """
    Makes the data necessary to construct a functional config file.

    :param guided: forwarded to the directory helpers to control whether
        the user is guided interactively.
    :return: dict with the include-dirs, runtime-dirs and runtime entries.
    """
    # Evaluation order matches the original assignments.
    return {
        INCLUDE_DIRS_KEY: _make_include_dirs(guided=guided),
        RUNTIME_DIRS_KEY: _make_runtime_dirs(guided=guided),
        RUNTIME_KEY: _make_runtime(),
    }
|
Makes the data necessary to construct a functional config file
|
def _open_connection(self):
    """Open a connection to the easyfire unit.

    Depending on the configured mode this opens a serial port, a TCP
    socket, or a plain file.
    """
    if self._mode == PROP_MODE_SERIAL:
        self._serial = serial.Serial(self._serial_device, self._serial_speed)
    elif self._mode == PROP_MODE_TCP:
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((self._ip, self._port))
    elif self._mode == PROP_MODE_FILE:
        self._file = open(self._file_path, "r")
|
Open a connection to the easyfire unit.
|
def add_task_status(self, name, **attrs):
    """
    Add a Task status to the project and returns a
    :class:`TaskStatus` object.
    :param name: name of the :class:`TaskStatus`
    :param attrs: optional attributes for :class:`TaskStatus`
    """
    statuses = TaskStatuses(self.requester)
    return statuses.create(self.id, name, **attrs)
|
Add a Task status to the project and returns a
:class:`TaskStatus` object.
:param name: name of the :class:`TaskStatus`
:param attrs: optional attributes for :class:`TaskStatus`
|
def decorator_with_args(func, return_original=False, target_pos=0):
    """Enable a function to work with a decorator with arguments
    Args:
        func (callable): The input function.
        return_original (bool): Whether the resultant decorator returns
            the decorating target unchanged. If True, will return the
            target unchanged. Otherwise, return the returned value from
            *func*. Default to False. This is useful for converting a
            non-decorator function to a decorator. See examples below.
    Return:
        callable: a decorator with arguments.
    Examples:
        >>> @decorator_with_args
        ... def register_plugin(plugin, arg1=1):
        ...     print('Registering '+plugin.__name__+' with arg1='+str(arg1))
        ...     return plugin  # note register_plugin is an ordinary decorator
        >>> @register_plugin(arg1=10)
        ... def plugin1(): pass
        Registering plugin1 with arg1=10
        >>> @decorator_with_args(return_original=True)
        ... def register_plugin_xx(plugin, arg1=1):
        ...     print('Registering '+plugin.__name__+' with arg1='+str(arg1))
        ...     # Note register_plugin_xxx does not return plugin, so it cannot
        ...     # be used as a decorator directly before applying
        ...     # decorator_with_args.
        >>> @register_plugin_xx(arg1=10)
        ... def plugin1(): pass
        Registering plugin1 with arg1=10
        >>> plugin1()
        >>> @decorator_with_args(return_original=True)
        ... def register_plugin_xxx(plugin, arg1=1): pass
        >>> # use result decorator as a function
        >>> register_plugin_xxx(plugin=plugin1, arg1=10)
        <function plugin1...>
        >>> @decorator_with_args(return_original=True, target_pos=1)
        ... def register_plugin_xxxx(arg1, plugin, arg2=10):
        ...     print('Registering '+plugin.__name__+' with arg1='+str(arg1))
        >>> @register_plugin_xxxx(100)
        ... def plugin2(): pass
        Registering plugin2 with arg1=100
    """
    # Resolve the *name* of the decorated-target parameter so it can be
    # recognized among keyword arguments and injected when currying.
    if sys.version_info[0] >= 3:
        target_name = inspect.getfullargspec(func).args[target_pos]
    else:
        # Python 2 fallback (getargspec does not exist on modern Python 3).
        target_name = inspect.getargspec(func).args[target_pos]
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Case 1: the target was supplied positionally -> direct invocation.
        if len(args) > target_pos:
            res = func(*args, **kwargs)
            return args[target_pos] if return_original else res
        # Case 2: no positional args and the target supplied by keyword ->
        # also a direct invocation.
        elif len(args) <= 0 and target_name in kwargs:
            res = func(*args, **kwargs)
            return kwargs[target_name] if return_original else res
        # Case 3: called with decorator arguments only -> curry the
        # arguments and return a decorator awaiting the target.
        else:
            return wrap_with_args(*args, **kwargs)
    def wrap_with_args(*args, **kwargs):
        # Build the actual decorator; it injects the decorated `target`
        # under the resolved parameter name before calling *func*.
        def wrapped_with_args(target):
            kwargs2 = dict()
            kwargs2[target_name] = target
            # NOTE(review): an explicit `target_name` in kwargs overrides
            # the injected target here — presumably intentional; confirm.
            kwargs2.update(kwargs)
            res = func(*args, **kwargs2)
            return target if return_original else res
        return wrapped_with_args
    return wrapper
|
Enable a function to work with a decorator with arguments
Args:
func (callable): The input function.
return_original (bool): Whether the resultant decorator returns
the decorating target unchanged. If True, will return the
target unchanged. Otherwise, return the returned value from
*func*. Default to False. This is useful for converting a
non-decorator function to a decorator. See examples below.
Return:
callable: a decorator with arguments.
Examples:
>>> @decorator_with_args
... def register_plugin(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
... return plugin # note register_plugin is an ordinary decorator
>>> @register_plugin(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> @decorator_with_args(return_original=True)
... def register_plugin_xx(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
... # Note register_plugin_xxx does not return plugin, so it cannot
... # be used as a decorator directly before applying
... # decorator_with_args.
>>> @register_plugin_xx(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> plugin1()
>>> @decorator_with_args(return_original=True)
... def register_plugin_xxx(plugin, arg1=1): pass
>>> # use result decorator as a function
>>> register_plugin_xxx(plugin=plugin1, arg1=10)
<function plugin1...>
>>> @decorator_with_args(return_original=True, target_pos=1)
... def register_plugin_xxxx(arg1, plugin, arg2=10):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
>>> @register_plugin_xxxx(100)
... def plugin2(): pass
Registering plugin2 with arg1=100
|
def initialize(self, length=None):
    """Precompute lower/upper bound arrays; see ``__init__``.

    :param length: dimension of the resulting arrays; defaults to
        ``len(self.bounds)``.  If larger, the last bound pair is
        repeated for the remaining coordinates.
    """
    if length is None:
        length = len(self.bounds)
    # Index of the last available bound pair; coordinates beyond it
    # reuse that pair.
    max_i = min((len(self.bounds) - 1, length - 1))
    # Lower bounds; a `None` entry means unbounded below (-inf).
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.Inf
                      for i in xrange(length)], copy=False)
    # Upper bounds; a `None` entry means unbounded above (+inf).
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.Inf
                      for i in xrange(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    # (presumably a per-coordinate margin used near finite bounds;
    # defaults to 1 where the respective bound is infinite — TODO confirm)
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
|
see ``__init__``
|
def GetFormatStringAttributeNames(self):
    """Retrieves the attribute names in the format string.
    Returns:
      set(str): attribute names.
    """
    # Lazily build and cache the list on first use.
    if self._format_string_attribute_names is None:
        names = []
        for piece in self.FORMAT_STRING_PIECES:
            # Collect every attribute referenced by this format piece.
            names.extend(self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(piece))
        self._format_string_attribute_names = names
    return set(self._format_string_attribute_names)
|
Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
|
def generate_address(self):
    """
    Creates a Bitcoin address from the public key.
    Details of the steps for creating the address are outlined in this link:
    https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses
    The last step is Base58Check encoding, which is similar to Base64 encoding but
    slightly different to create a more human-readable string where '1' and 'l' won't
    get confused. More on Base58Check encoding here:
    https://en.bitcoin.it/wiki/Base58Check_encoding

    :return: the mainnet P2PKH address as a Base58Check string.
    """
    binary_pubkey = binascii.unhexlify(self.public_key)
    binary_digest_sha256 = hashlib.sha256(binary_pubkey).digest()
    binary_digest_ripemd160 = hashlib.new('ripemd160', binary_digest_sha256).digest()
    # Version byte 0x00 marks a mainnet pay-to-pubkey-hash address.
    payload = bytes([0]) + binary_digest_ripemd160
    # Checksum is the first 4 bytes of a double SHA-256 of the
    # version+hash payload (previously the version byte was dropped and
    # the checksum was appended twice before encoding).
    checksum_intermed = hashlib.sha256(payload).digest()
    checksum_intermed = hashlib.sha256(checksum_intermed).digest()
    checksum = checksum_intermed[:4]
    binary_address = payload + checksum
    # Each *leading* zero byte is encoded as a literal '1' in Base58Check;
    # stop at the first non-zero byte (the old loop counted all zeros).
    leading_zero_bytes = 0
    for byte in binary_address:
        if byte != 0:
            break
        leading_zero_bytes += 1
    # Base58-encode the remaining big-endian integer.
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    num = int.from_bytes(binary_address, 'big')
    encoded = ''
    while num > 0:
        num, remainder = divmod(num, 58)
        encoded = alphabet[remainder] + encoded
    return '1' * leading_zero_bytes + encoded
|
Creates a Bitcoin address from the public key.
Details of the steps for creating the address are outlined in this link:
https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses
The last step is Base58Check encoding, which is similar to Base64 encoding but
slightly different to create a more human-readable string where '1' and 'l' won't
get confused. More on Base64Check encoding here:
https://en.bitcoin.it/wiki/Base58Check_encoding
|
def cli(env, zone):
    """Delete zone."""
    manager = SoftLayer.DNSManager(env.client)
    zone_id = helpers.resolve_id(manager.resolve_ids, zone, name='zone')
    # Ask for confirmation unless the user opted out of prompts.
    confirmed = env.skip_confirmations or formatting.no_going_back(zone)
    if not confirmed:
        raise exceptions.CLIAbort("Aborted.")
    manager.delete_zone(zone_id)
|
Delete zone.
|
def structure(cls):
    # type: () -> Text
    """Get the part structure, as a DNA regex pattern.
    The structure of most parts can be obtained automatically from the
    part signature and the restriction enzyme used in the Golden Gate
    assembly.
    Warning:
        If overloading this method, the returned pattern must include 3
        capture groups to capture the following features:
        1. The upstream (5') overhang sequence
        2. The vector placeholder sequence
        3. The downstream (3') overhang sequence
    """
    if cls.signature is NotImplemented:
        raise NotImplementedError("no signature defined")
    # Elucidated recognition/cut site on the top strand, and its reverse
    # complement for the bottom-strand (downstream) site.
    up = cls.cutter.elucidate()
    down = str(Seq(up).reverse_complement())
    ovhg = cls.cutter.ovhgseq
    upsig, downsig = cls.signature
    # Build the cut-site markers ("^" and "_" mark the strand cuts);
    # their ordering around the overhang depends on the overhang side.
    if cls.cutter.is_5overhang():
        upsite = "^{}_".format(ovhg)
        downsite = "_{}^".format(Seq(ovhg).reverse_complement())
    else:
        upsite = "_{}^".format(ovhg)
        downsite = "^{}_".format(Seq(ovhg).reverse_complement())
    # Modules and vectors place the signature capture groups in opposite
    # orientation around the "N*" placeholder.
    if issubclass(cls, AbstractModule):
        return "".join(
            [
                up.replace(upsite, "({})(".format(upsig)),
                "N*",
                down.replace(downsite, ")({})".format(downsig)),
            ]
        )
    elif issubclass(cls, AbstractVector):
        return "".join(
            [
                down.replace(downsite, "({})(".format(downsig)),
                "N*",
                up.replace(upsite, ")({})".format(upsig)),
            ]
        )
    else:
        raise RuntimeError("Part must be either a module or a vector!")
|
Get the part structure, as a DNA regex pattern.
The structure of most parts can be obtained automatically from the
part signature and the restriction enzyme used in the Golden Gate
assembly.
Warning:
If overloading this method, the returned pattern must include 3
capture groups to capture the following features:
1. The upstream (5') overhang sequence
2. The vector placeholder sequence
3. The downstream (3') overhang sequence
|
def map_address(self, session, map_space, map_base, map_size,
                access=False, suggested=None):
    """Maps the specified memory space into the process's address space.
    Corresponds to viMapAddress function of the VISA library.
    :param session: Unique logical identifier to a session.
    :param map_space: Specifies the address space to map. (Constants.*SPACE*)
    :param map_base: Offset (in bytes) of the memory to be mapped.
    :param map_size: Amount of memory to map (in bytes).
    :param access:
    :param suggested: If not Constants.VI_NULL (0), the operating system attempts to map the memory to the address
                      specified in suggested. There is no guarantee, however, that the memory will be mapped to
                      that address. This operation may map the memory into an address region different from
                      suggested.
    :return: address in your process space where the memory was mapped, return value of the library call.
    :rtype: address, :class:`pyvisa.constants.StatusCode`
    """
    # Backends must override this; the base class provides no mapping.
    raise NotImplementedError
|
Maps the specified memory space into the process's address space.
Corresponds to viMapAddress function of the VISA library.
:param session: Unique logical identifier to a session.
:param map_space: Specifies the address space to map. (Constants.*SPACE*)
:param map_base: Offset (in bytes) of the memory to be mapped.
:param map_size: Amount of memory to map (in bytes).
:param access:
:param suggested: If not Constants.VI_NULL (0), the operating system attempts to map the memory to the address
specified in suggested. There is no guarantee, however, that the memory will be mapped to
that address. This operation may map the memory into an address region different from
suggested.
:return: address in your process space where the memory was mapped, return value of the library call.
:rtype: address, :class:`pyvisa.constants.StatusCode`
|
def save(self, fname):
    """Write this grid to ``fname`` as ASCII text (errors are printed, not raised)."""
    try:
        with open(fname, "w") as handle:
            handle.write(str(self))
    except Exception as ex:
        # Best-effort: report the failure instead of propagating it.
        print('ERROR = cant save grid results to ' + fname + str(ex))
|
saves a grid to file as ASCII text
|
def main(args=None, vc=None, cwd=None, apply_config=False):
    """PEP8 clean only the parts of the files touched since the last commit, a
    previous commit or branch.

    :param args: CLI argument list, or an already-parsed Namespace (testing).
    :param vc: version-control backend override.
    :param cwd: working directory for the repository lookup.
    :param apply_config: forwarded to ``parse_args``.
    :return: process exit status (0 on success, 1 on error/interrupt).
    """
    import signal
    try:  # pragma: no cover
        # Exit on broken pipe.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:  # pragma: no cover
        # SIGPIPE is not available on Windows.
        pass
    try:
        if args is None:
            args = []
        try:
            # Note: argparse on py 2.6 you can't pass a set
            # TODO neater solution for this!
            args_set = set(args)
        except TypeError:
            args_set = args  # args is a Namespace
        # Handle informational flags early, before touching the repository.
        if '--version' in args_set or getattr(args_set, 'version', 0):
            print(version)
            return 0
        if '--list-fixes' in args_set or getattr(args_set, 'list_fixes', 0):
            from autopep8 import supported_fixes
            for code, description in sorted(supported_fixes()):
                print('{code} - {description}'.format(
                    code=code, description=description))
            return 0
        try:
            try:
                args = parse_args(args, apply_config=apply_config)
            except TypeError:
                pass  # args is already a Namespace (testing)
            # Build the fixer either from an explicit diff or from the
            # version-control revision.
            if args.from_diff:  # pragma: no cover
                r = Radius.from_diff(args.from_diff.read(),
                                     options=args, cwd=cwd)
            else:
                r = Radius(rev=args.rev, options=args, vc=vc, cwd=cwd)
        except NotImplementedError as e:  # pragma: no cover
            print(e)
            return 1
        except CalledProcessError as c:  # pragma: no cover
            # cut off usage and exit
            output = c.output.splitlines()[0]
            print(output)
            return c.returncode
        # Non-zero exit only when requested via --error-status and
        # something actually changed.
        any_changes = r.fix()
        if any_changes and args.error_status:
            return 1
        return 0
    except KeyboardInterrupt:  # pragma: no cover
        return 1
|
PEP8 clean only the parts of the files touched since the last commit, a
previous commit or branch.
|
def _precedence_parens(self, node, child, is_left=True):
"""Wrap child in parens only if required to keep same semantics"""
if self._should_wrap(node, child, is_left):
return "(%s)" % child.accept(self)
return child.accept(self)
|
Wrap child in parens only if required to keep same semantics
|
def _PrintWarningsDetails(self, storage):
    """Prints the details of the warnings.
    Args:
      storage (BaseStore): storage.
    """
    if not storage.HasWarnings():
        self._output_writer.Write('No warnings stored.\n\n')
        return
    for index, warning in enumerate(storage.GetWarnings()):
        table_view = views.ViewsFactory.GetTableView(
            self._views_format_type, title='Warning: {0:d}'.format(index))
        table_view.AddRow(['Message', warning.message])
        table_view.AddRow(['Parser chain', warning.parser_chain])
        # The comparable path specification spans multiple lines; label
        # the first line and leave the label empty on continuations.
        for path_index, line in enumerate(
                warning.path_spec.comparable.split('\n')):
            if not line:
                continue
            label = 'Path specification' if path_index == 0 else ''
            table_view.AddRow([label, line])
        table_view.Write(self._output_writer)
|
Prints the details of the warnings.
Args:
storage (BaseStore): storage.
|
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    """
    Produce similar fs, hs and ss interface and endpoints descriptors.
    Should be useful for devices desiring to work in all 3 speeds with maximum
    endpoint wMaxPacketSize. Reduces data duplication from descriptor
    declarations.
    Not intended to cover fancy combinations.
    interface (dict):
        Keyword arguments for
            getDescriptor(USBInterfaceDescriptor, ...)
        in all speeds.
        bNumEndpoints must not be provided.
    endpoint_list (list of dicts)
        Each dict represents an endpoint, and may contain the following items:
        - "endpoint": required, contains keyword arguments for
            getDescriptor(USBEndpointDescriptorNoAudio, ...)
          or
            getDescriptor(USBEndpointDescriptor, ...)
          The with-audio variant is picked when its extra fields are assigned a
          value.
          wMaxPacketSize may be missing, in which case it will be set to the
          maximum size for given speed and endpoint type; when present it is
          clamped to that maximum.
          bmAttributes must be provided.
          If bEndpointAddress is zero (excluding direction bit) on the first
          endpoint, endpoints will be assigned their rank in this list,
          starting at 1. Their direction bit is preserved.
          If bInterval is present on a INT or ISO endpoint, it must be in
          millisecond units (but may not be an integer), and will be converted
          to the nearest integer millisecond for full-speed descriptor, and
          nearest possible interval for high- and super-speed descriptors.
          If bInterval is present on a BULK endpoint, it is set to zero on
          full-speed descriptor and used as provided on high- and super-speed
          descriptors.
        - "superspeed": optional, contains keyword arguments for
            getDescriptor(USBSSEPCompDescriptor, ...)
        - "superspeed_iso": optional, contains keyword arguments for
            getDescriptor(USBSSPIsocEndpointDescriptor, ...)
          Must be provided and non-empty only when endpoint is isochronous and
          "superspeed" dict has "bmAttributes" bit 7 set.
    class_descriptor (list of descriptors of any type)
        Descriptors to insert in all speeds between the interface descriptor and
        endpoint descriptors.
    Returns a 3-tuple of lists:
    - fs descriptors
    - hs descriptors
    - ss descriptors
    """
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    # The interface and class descriptors are shared verbatim by all speeds.
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # Auto-number endpoints when the first one has address 0 (ignoring
    # the direction bit).
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress',
            0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'
        ] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            # Assign the endpoint its 1-based rank, preserving direction.
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        # Pick the audio variant only when its extra fields are given.
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
            USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                # BULK: no polling interval at full speed; hs/ss use the
                # provided value as-is.
                fs_interval = 0
                hs_interval = interval
            else:  # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
                # Full speed: nearest millisecond, clamped to [1, 255].
                fs_interval = max(1, min(255, round(interval)))
                # 8 is the number of microframes in a millisecond
                # High/super speed: nearest 2**(n-1) microframes, n in [1, 16].
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        # Fix: use the per-speed clamped packet sizes computed above; the
        # previous code computed them and then ignored them, always using
        # the speed maxima and contradicting the documented behavior.
        fs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        # Super speed reuses the high-speed bInterval encoding.
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        # "superspeed_iso" must be given exactly when the endpoint is
        # isochronous AND the SS companion declares SSP ISO companionship.
        if bool(ssp_iso_kw) != (
            endpoint_kw.get('bmAttributes', 0) &
            ch9.USB_ENDPOINT_XFERTYPE_MASK ==
            ch9.USB_ENDPOINT_XFER_ISOC and
            bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('Inconsistent isochronous companion')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
|
Produce similar fs, hs and ss interface and endpoints descriptors.
Should be useful for devices desiring to work in all 3 speeds with maximum
endpoint wMaxPacketSize. Reduces data duplication from descriptor
declarations.
Not intended to cover fancy combinations.
interface (dict):
Keyword arguments for
getDescriptor(USBInterfaceDescriptor, ...)
in all speeds.
bNumEndpoints must not be provided.
endpoint_list (list of dicts)
Each dict represents an endpoint, and may contain the following items:
- "endpoint": required, contains keyword arguments for
getDescriptor(USBEndpointDescriptorNoAudio, ...)
or
getDescriptor(USBEndpointDescriptor, ...)
The with-audio variant is picked when its extra fields are assigned a
value.
wMaxPacketSize may be missing, in which case it will be set to the
maximum size for given speed and endpoint type.
bmAttributes must be provided.
If bEndpointAddress is zero (excluding direction bit) on the first
endpoint, endpoints will be assigned their rank in this list,
starting at 1. Their direction bit is preserved.
If bInterval is present on a INT or ISO endpoint, it must be in
millisecond units (but may not be an integer), and will be converted
to the nearest integer millisecond for full-speed descriptor, and
nearest possible interval for high- and super-speed descriptors.
If bInterval is present on a BULK endpoint, it is set to zero on
full-speed descriptor and used as provided on high- and super-speed
descriptors.
- "superspeed": optional, contains keyword arguments for
getDescriptor(USBSSEPCompDescriptor, ...)
- "superspeed_iso": optional, contains keyword arguments for
getDescriptor(USBSSPIsocEndpointDescriptor, ...)
Must be provided and non-empty only when endpoint is isochronous and
"superspeed" dict has "bmAttributes" bit 7 set.
class_descriptor (list of descriptors of any type)
Descriptors to insert in all speeds between the interface descriptor and
endpoint descriptors.
Returns a 3-tuple of lists:
- fs descriptors
- hs descriptors
- ss descriptors
|
def _multi_blockify(tuples, dtype=None):
    """ return an array of blocks that potentially have different dtypes """
    # Group the tuples by the dtype of their value array, then stack each
    # group into one block.
    by_dtype = itertools.groupby(tuples, lambda x: x[2].dtype)
    new_blocks = []
    for dtype, tup_block in by_dtype:
        values, placement = _stack_arrays(list(tup_block), dtype)
        new_blocks.append(make_block(values, placement=placement))
    return new_blocks
|
return an array of blocks that potentially have different dtypes
|
def __get_keys(self, name='master', passphrase=None):
    '''
    Returns a key object for a key in the pki-dir
    '''
    path = os.path.join(self.opts['pki_dir'], name + '.pem')
    if not os.path.exists(path):
        # First use: create the key pair on disk.
        log.info('Generating %s keys: %s', name, self.opts['pki_dir'])
        gen_keys(self.opts['pki_dir'],
                 name,
                 self.opts['keysize'],
                 self.opts.get('user'),
                 passphrase)
    # M2Crypto and the fallback backend raise different exceptions on a
    # failed read (typically a wrong passphrase).
    key_error = RSA.RSAError if HAS_M2 else ValueError
    try:
        key = get_rsa_key(path, passphrase)
    except key_error:
        message = 'Unable to read key: {0}; passphrase may be incorrect'.format(path)
        log.error(message)
        raise MasterExit(message)
    log.debug('Loaded %s key: %s', name, path)
    return key
|
Returns a key object for a key in the pki-dir
|
def list_provincie_adapter(obj, request):
    """
    Adapter for rendering a list of
    :class:`crabpy.gateway.crab.Provincie` to json.
    """
    gewest = obj.gewest
    rendered = {'niscode': obj.niscode, 'naam': obj.naam}
    rendered['gewest'] = {'id': gewest.id, 'naam': gewest.naam}
    return rendered
|
Adapter for rendering a list of
:class:`crabpy.gateway.crab.Provincie` to json.
|
def _get_available_placements(D, tt):
    """
    Called from: _prompt_placement()
    Get a list of possible places that we can put the new model data into.
    If no model exists yet, we'll use something like chron0model0. If other models exist,
    we'll go for the n+1 entry.
    ex: chron0model0 already exists, so we'll look to chron0model1 next.
    :param dict D: Metadata
    :param str tt: Table Type
    :return list _options: Possible placements
    """
    _options = []
    try:
        for _pc in ["paleoData", "chronData"]:
            if _pc in D:
                # for each entry in pc
                for section_name, section_data in D[_pc].items():
                    # looking for open spots for measurement tables
                    if tt == "measurement":
                        if "measurementTable" in section_data:
                            _options.append(_get_available_placements_1(
                                section_data["measurementTable"], section_name, "measurement"))
                    # looking for open spots for model tables
                    else:
                        # Is there a model? Need model data to keep going
                        if "model" in section_data:
                            # this is for adding a whole model (all 4 tables, ens/dist/sum/method)
                            if tt == "model":
                                _options.append(_get_available_placements_1(
                                    section_data["model"], section_name, "model"))
                            else:
                                # for adding individual model tables
                                # Fix: iterate key/value pairs explicitly;
                                # iterating the dict directly yields only keys
                                # and broke the tuple unpacking below.
                                for _k, _v in section_data["model"].items():
                                    # keys here are stored as "<type>Table", so add "Table" to each table type
                                    _tt_table = "{}Table".format(tt)
                                    # does this table exist?
                                    if _tt_table in _v:
                                        # Get the first available position for this section
                                        _options.append(
                                            _get_available_placements_1(_v[_tt_table], _k, tt))
                                    else:
                                        # Doesn't currently exist. Make the first option index 0.
                                        _options.append("{}{}0".format(_k, tt))
                        # no models present, so we automatically default placement options to the 0 index.
                        else:
                            if tt == "model":
                                # adding a whole model, so no need to be specific
                                _options.append("{}model0".format(section_name))
                            else:
                                # adding a specific table, so the position is more specific also
                                _options.append("{}model0{}0".format(section_name, tt))
    except Exception as e:
        sys.exit("Looking for open table positions: Unable to find placement options, {}".format(e))
    # remove empty names
    _options = [i for i in _options if i]
    # Is the whole list empty? that's not good.
    if not _options:
        sys.exit("Error: No available positions found to place new data. Something went wrong.")
    return _options
|
Called from: _prompt_placement()
Get a list of possible places that we can put the new model data into.
If no model exists yet, we'll use something like chron0model0. If other models exist,
we'll go for the n+1 entry.
ex: chron0model0 already exists, so we'll look to chron0model1 next.
:param dict D: Metadata
:param str tt: Table Type
:return list _options: Possible placements
|
def matrix_to_gl(matrix):
    """
    Convert a numpy row- major homogenous transformation matrix
    to a flat column- major GLfloat transformation.
    Parameters
    -------------
    matrix : (4,4) float
      Row- major homogenous transform
    Returns
    -------------
    glmatrix : (16,) gl.GLfloat
      Transform in pyglet format
    """
    matrix = np.asanyarray(matrix, dtype=np.float64)
    if matrix.shape != (4, 4):
        raise ValueError('matrix must be (4,4)!')
    # Transpose (row-major -> column-major), flatten to 16 values, and
    # pack them into a ctypes GLfloat array for pyglet.
    return (gl.GLfloat * 16)(*matrix.T.flatten())
|
Convert a numpy row- major homogenous transformation matrix
to a flat column- major GLfloat transformation.
Parameters
-------------
matrix : (4,4) float
Row- major homogenous transform
Returns
-------------
glmatrix : (16,) gl.GLfloat
Transform in pyglet format
|
def last(self, values, axis=0):
    """return values at last occurance of its associated key
    Parameters
    ----------
    values : array_like, [keys, ...]
        values to pick the last value of per group
    axis : int, optional
        alternative reduction axis for values
    Returns
    -------
    unique: ndarray, [groups]
        unique keys
    reduced : ndarray, [groups, ...]
        value array, reduced over groups
    """
    values = np.asarray(values)
    # index.sorter[index.stop - 1] is, per group, the position of the
    # last element belonging to that group.
    last_positions = self.index.sorter[self.index.stop - 1]
    return self.unique, np.take(values, last_positions, axis)
|
return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
|
def isLoopback(self, ifname):
    """Check whether interface is a loopback device.
    @param ifname: interface name
    @type ifname: string
    """
    # Not every system defines IFF_LOOPBACK, so test the conventional
    # interface name first.
    if ifname.startswith('lo'):
        return True
    flags = self.getFlags(ifname)
    return (flags & self.IFF_LOOPBACK) != 0
|
Check whether interface is a loopback device.
@param ifname: interface name
@type ifname: string
|
def fftconvolve(in1, in2, mode="full", axis=None):
    """ Convolve two N-dimensional arrays using FFT. See convolve.
    This is a fix of scipy.signal.fftconvolve, adding an axis argument and
    importing locally the stuff only needed for this function

    :param in1: first input array.
    :param in2: second input array.
    :param mode: "full", "same" or "valid" (as in scipy.signal.convolve).
    :param axis: if given, convolve along this single axis only; all other
        axes of the inputs must then have equal shapes.
    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    # The output is complex only if either input is complex.
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    if axis is None:
        # Full N-D convolution: output length is s1 + s2 - 1 on each axis.
        size = s1 + s2 - 1
        fslice = tuple([slice(0, int(sz)) for sz in size])
    else:
        # Single-axis convolution: shapes must match everywhere else.
        equal_shapes = s1 == s2
        # allow equal_shapes[axis] to be False
        equal_shapes[axis] = True
        assert equal_shapes.all(), 'Shape mismatch on non-convolving axes'
        size = s1[axis] + s2[axis] - 1
        fslice = [slice(l) for l in s1]
        fslice[axis] = slice(0, int(size))
        fslice = tuple(fslice)
    # Always use 2**n-sized FFT
    fsize = 2 ** int(np.ceil(np.log2(size)))
    if axis is None:
        IN1 = fftpack.fftn(in1, fsize)
        IN1 *= fftpack.fftn(in2, fsize)
        ret = fftpack.ifftn(IN1)[fslice].copy()
    else:
        IN1 = fftpack.fft(in1, fsize, axis=axis)
        IN1 *= fftpack.fft(in2, fsize, axis=axis)
        ret = fftpack.ifft(IN1, axis=axis)[fslice].copy()
    # Free the spectrum buffer before post-processing.
    del IN1
    if not complex_result:
        # Real inputs: drop the (numerically negligible) imaginary part.
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        # Central part, sized like the larger input.
        if np.product(s1, axis=0) > np.product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return signaltools._centered(ret, osize)
    elif mode == "valid":
        # Only the part computed without zero-padding.
        return signaltools._centered(ret, abs(s2 - s1) + 1)
|
Convolve two N-dimensional arrays using FFT. See convolve.
This is a fix of scipy.signal.fftconvolve, adding an axis argument and
importing locally the stuff only needed for this function
|
def add_port_profile_to_delete_table(self, profile_name, device_id):
    """Adds a port profile to the delete table."""
    # Skip insertion when an entry for this profile/device pair already
    # exists (returns None in that case, matching previous behavior).
    if self.has_port_profile_to_delete(profile_name, device_id):
        return None
    entry = ucsm_model.PortProfileDelete(
        profile_id=profile_name, device_id=device_id)
    with self.session.begin(subtransactions=True):
        self.session.add(entry)
    return entry
|
Adds a port profile to the delete table.
|
def _find_line_start_index(self, index):
"""
For the index of a character at a certain line, calculate the index of
the first character on that line.
Return (row, index) tuple.
"""
indexes = self._line_start_indexes
pos = bisect.bisect_right(indexes, index) - 1
return pos, indexes[pos]
|
For the index of a character at a certain line, calculate the index of
the first character on that line.
Return (row, index) tuple.
|
def submitted_projects(raw_df):
    """
    Return all submitted projects.
    """
    # Normalize identifier columns to strings before grouping.
    df = raw_df.astype({'PRONAC': str, 'CgcCpf': str})
    # Per submitter: the distinct PRONACs and how many there are.
    grouped = df.groupby('CgcCpf')['PRONAC'].agg(['unique', 'nunique'])
    grouped.columns = ['pronac_list', 'num_pronacs']
    return grouped
|
Return all submitted projects.
|
def _get_errors(self):
"""
Gets errors from HTTP response
"""
errors = self.json.get('data').get('failures')
if errors:
logger.error(errors)
return errors
|
Gets errors from HTTP response
|
def list(region=None, key=None, keyid=None, profile=None):
    '''
    List all trails
    Returns list of trails
    CLI Example:
    .. code-block:: yaml
        policies:
          - {...}
          - {...}
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        trails = conn.describe_trails()
        # Warn (but still return the empty result) when nothing was found.
        if not trails.get('trailList'):
            log.warning('No trails found')
        return {'trails': trails.get('trailList', [])}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
|
List all trails
Returns list of trails
CLI Example:
.. code-block:: yaml
policies:
- {...}
- {...}
|
def wrapper__unignore(self, type_):
    """
    Stop selectively ignoring certain types when wrapping attributes.
    :param class type_: The class/type definition to stop ignoring.
    :rtype list(type): The current list of ignored types
    """
    exclusions = self.__exclusion_list
    # Removing an absent type is a no-op rather than an error.
    if type_ in exclusions:
        exclusions.remove(type_)
    return exclusions
|
Stop selectively ignoring certain types when wrapping attributes.
:param class type: The class/type definition to stop ignoring.
:rtype list(type): The current list of ignored types
|
def find_substring_edge(self, substring, suffix_tree_id):
    """Returns an edge that matches the given substring.
    """
    suffix_tree = self.suffix_tree_repo[suffix_tree_id]
    started = datetime.datetime.now()
    # Delegate to the module-level search helper.
    edge, ln = find_substring_edge(
        substring=substring, suffix_tree=suffix_tree, edge_repo=self.edge_repo)
    elapsed = datetime.datetime.now() - started
    print(" - searched for edge in {} for substring: '{}'".format(elapsed, substring))
    return edge, ln
|
Returns an edge that matches the given substring.
|
def get_series(self, series):
    """
    Returns a census series API handler.

    Maps the series name ("acs1", "acs5", "sf1", "sf3") to the matching
    handler attribute on self.census; returns None for unknown names.
    """
    handler_attrs = {
        "acs1": "acs1dp",
        "acs5": "acs5",
        "sf1": "sf1",
        "sf3": "sf3",
    }
    attr = handler_attrs.get(series)
    if attr is None:
        return None
    return getattr(self.census, attr)
|
Returns a census series API handler.
|
def parts(xs, number = None, length = None):
    """
    Split a list into either the specified number of parts or
    a number of parts each of the specified length. The elements
    are distributed somewhat evenly among the parts if possible.
    >>> list(parts([1,2,3,4,5,6,7], length=1))
    [[1], [2], [3], [4], [5], [6], [7]]
    >>> list(parts([1,2,3,4,5,6,7], length=2))
    [[1, 2], [3, 4], [5, 6], [7]]
    >>> list(parts([1,2,3,4,5,6,7], length=3))
    [[1, 2, 3], [4, 5, 6], [7]]
    >>> list(parts([1,2,3,4,5,6,7], length=4))
    [[1, 2, 3, 4], [5, 6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], length=5))
    [[1, 2, 3, 4, 5], [6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], length=6))
    [[1, 2, 3, 4, 5, 6], [7]]
    >>> list(parts([1,2,3,4,5,6,7], length=7))
    [[1, 2, 3, 4, 5, 6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], 1))
    [[1, 2, 3, 4, 5, 6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], 2))
    [[1, 2, 3], [4, 5, 6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], 3))
    [[1, 2], [3, 4], [5, 6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], 4))
    [[1], [2, 3], [4, 5], [6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], 5))
    [[1], [2], [3], [4, 5], [6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], 6))
    [[1], [2], [3], [4], [5], [6, 7]]
    >>> list(parts([1,2,3,4,5,6,7], 7))
    [[1], [2], [3], [4], [5], [6], [7]]
    >>> list(parts([1,2,3,4,5,6,7], 7, [1,1,1,1,1,1,1]))
    [[1], [2], [3], [4], [5], [6], [7]]
    >>> list(parts([1,2,3,4,5,6,7], length=[1,1,1,1,1,1,1]))
    [[1], [2], [3], [4], [5], [6], [7]]
    >>> list(parts([1,2,3,4,5,6], length=[2,2,2]))
    [[1, 2], [3, 4], [5, 6]]
    >>> list(parts([1,2,3,4,5,6], length=[1,2,3]))
    [[1], [2, 3], [4, 5, 6]]
    >>> list(parts([1,2,3,4,5,6], 2, 3))
    [[1, 2, 3], [4, 5, 6]]
    >>> list(parts([1,2,3,4,5,6], number=3, length=2))
    [[1, 2], [3, 4], [5, 6]]
    >>> list(parts([1,2,3,4,5,6], 2, length=[1,2,3]))
    Traceback (most recent call last):
      ...
    PartsError: 'Number of parts does not match number of part lengths specified in input.'
    >>> list(parts([1,2,3,4,5,6,7], number=3, length=2))
    Traceback (most recent call last):
      ...
    PartsError: 'List cannot be split into 3 parts each of length 2.'
    """
    # Validate argument types first; `type(...) is int` (rather than
    # isinstance) deliberately rejects bool.
    if number is not None and type(number) is not int:
        raise PartsError("Number of parts must be an integer.")
    if length is not None:
        if type(length) is not int:
            if type(length) is not list or (not all([type(l) is int for l in length])):
                raise PartsError("Length parameter must be an integer or list of integers.")
    if number is not None and length is None:
        # Only the part count is given: derive the part lengths.
        number = max(1, min(len(xs), number)) # Number should be reasonable.
        length = len(xs) // number
        i = 0
        # Produce parts by updating length after each part to ensure
        # an even distribution.
        while number > 0 and i < len(xs):
            number -= 1
            if number == 0:
                # Last part gets whatever remains.
                yield xs[i:]
                break
            else:
                yield xs[i:i + length]
                i += length
                length = (len(xs) - i) // number
    elif number is None and length is not None:
        # Only part length(s) given: slice greedily.
        if type(length) is int:
            length = max(1, length)
            for i in range(0, len(xs), length): # Yield parts of specified length.
                yield xs[i:i + length]
        else: # Length is a list of integers.
            xs_index = 0
            len_index = 0
            while xs_index < len(xs):
                if xs_index + length[len_index] <= len(xs):
                    yield xs[xs_index:xs_index + length[len_index]]
                    xs_index += length[len_index]
                    len_index += 1
                else:
                    raise PartsError("Cannot return part of requested length; list too short.")
    elif number is not None and length is not None:
        # Both given: they must be consistent with len(xs).
        if type(length) is int:
            if length * number != len(xs):
                raise PartsError("List cannot be split into " + str(number) + " parts each of length " + str(length) + ".")
            length = max(1, length)
            for i in range(0, len(xs), length): # Yield parts of specified length.
                yield xs[i:i + length]
        else: # Length is a list of integers.
            if len(length) == number:
                xs_index = 0
                len_index = 0
                while xs_index < len(xs):
                    if xs_index + length[len_index] <= len(xs):
                        yield xs[xs_index:xs_index + length[len_index]]
                        xs_index += length[len_index]
                        len_index += 1
                    else:
                        raise PartsError("Cannot return part of requested length; list too short.")
            else:
                raise PartsError("Number of parts does not match number of part lengths specified in input.")
    else: # Neither is specified.
        raise PartsError("Must specify number of parts or length of each part.")
|
Split a list into either the specified number of parts or
a number of parts each of the specified length. The elements
are distributed somewhat evenly among the parts if possible.
>>> list(parts([1,2,3,4,5,6,7], length=1))
[[1], [2], [3], [4], [5], [6], [7]]
>>> list(parts([1,2,3,4,5,6,7], length=2))
[[1, 2], [3, 4], [5, 6], [7]]
>>> list(parts([1,2,3,4,5,6,7], length=3))
[[1, 2, 3], [4, 5, 6], [7]]
>>> list(parts([1,2,3,4,5,6,7], length=4))
[[1, 2, 3, 4], [5, 6, 7]]
>>> list(parts([1,2,3,4,5,6,7], length=5))
[[1, 2, 3, 4, 5], [6, 7]]
>>> list(parts([1,2,3,4,5,6,7], length=6))
[[1, 2, 3, 4, 5, 6], [7]]
>>> list(parts([1,2,3,4,5,6,7], length=7))
[[1, 2, 3, 4, 5, 6, 7]]
>>> list(parts([1,2,3,4,5,6,7], 1))
[[1, 2, 3, 4, 5, 6, 7]]
>>> list(parts([1,2,3,4,5,6,7], 2))
[[1, 2, 3], [4, 5, 6, 7]]
>>> list(parts([1,2,3,4,5,6,7], 3))
[[1, 2], [3, 4], [5, 6, 7]]
>>> list(parts([1,2,3,4,5,6,7], 4))
[[1], [2, 3], [4, 5], [6, 7]]
>>> list(parts([1,2,3,4,5,6,7], 5))
[[1], [2], [3], [4, 5], [6, 7]]
>>> list(parts([1,2,3,4,5,6,7], 6))
[[1], [2], [3], [4], [5], [6, 7]]
>>> list(parts([1,2,3,4,5,6,7], 7))
[[1], [2], [3], [4], [5], [6], [7]]
>>> list(parts([1,2,3,4,5,6,7], 7, [1,1,1,1,1,1,1]))
[[1], [2], [3], [4], [5], [6], [7]]
>>> list(parts([1,2,3,4,5,6,7], length=[1,1,1,1,1,1,1]))
[[1], [2], [3], [4], [5], [6], [7]]
>>> list(parts([1,2,3,4,5,6], length=[2,2,2]))
[[1, 2], [3, 4], [5, 6]]
>>> list(parts([1,2,3,4,5,6], length=[1,2,3]))
[[1], [2, 3], [4, 5, 6]]
>>> list(parts([1,2,3,4,5,6], 2, 3))
[[1, 2, 3], [4, 5, 6]]
>>> list(parts([1,2,3,4,5,6], number=3, length=2))
[[1, 2], [3, 4], [5, 6]]
>>> list(parts([1,2,3,4,5,6], 2, length=[1,2,3]))
Traceback (most recent call last):
...
PartsError: 'Number of parts does not match number of part lengths specified in input.'
>>> list(parts([1,2,3,4,5,6,7], number=3, length=2))
Traceback (most recent call last):
...
PartsError: 'List cannot be split into 3 parts each of length 2.'
|
def unfinished_objects(self):
    '''
    Keep only versions of those objects that have some version with
    `_end == None` or with `_end > right cutoff`.
    '''
    # Boolean mask of versions that are still "open".
    open_mask = self._end_isnull
    if self._rbound is not None:
        open_mask = open_mask | (self._end > self._rbound)
    # Object ids having at least one open version.
    open_oids = set(self[open_mask]._oid.tolist())
    return self[self._oid.apply(lambda oid: oid in open_oids)]
|
Leaves only versions of those objects that have some version with
`_end == None` or with `_end > right cutoff`.
|
def _extract_battery_info_from_acpi(self):
"""
Get the battery info from acpi
# Example acpi -bi raw output (Discharging):
Battery 0: Discharging, 94%, 09:23:28 remaining
Battery 0: design capacity 5703 mAh, last full capacity 5283 mAh = 92%
Battery 1: Unknown, 98%
Battery 1: design capacity 1880 mAh, last full capacity 1370 mAh = 72%
# Example Charging
Battery 0: Charging, 96%, 00:20:40 until charged
Battery 0: design capacity 5566 mAh, last full capacity 5156 mAh = 92%
Battery 1: Unknown, 98%
Battery 1: design capacity 1879 mAh, last full capacity 1370 mAh = 72%
"""
def _parse_battery_info(acpi_battery_lines):
battery = {}
battery["percent_charged"] = int(
findall("(?<= )(\d+)(?=%)", acpi_battery_lines[0])[0]
)
battery["charging"] = "Charging" in acpi_battery_lines[0]
battery["capacity"] = int(
findall("(?<= )(\d+)(?= mAh)", acpi_battery_lines[1])[1]
)
# ACPI only shows time remaining if battery is discharging or
# charging
try:
battery["time_remaining"] = "".join(
findall(
"(?<=, )(\d+:\d+:\d+)(?= remaining)|"
"(?<=, )(\d+:\d+:\d+)(?= until)",
acpi_battery_lines[0],
)[0]
)
except IndexError:
battery["time_remaining"] = FULLY_CHARGED
return battery
acpi_list = self.py3.command_output(["acpi", "-b", "-i"]).splitlines()
# Separate the output because each pair of lines corresponds to a
# single battery. Now the list index will correspond to the index of
# the battery we want to look at
acpi_list = [acpi_list[i : i + 2] for i in range(0, len(acpi_list) - 1, 2)]
return [_parse_battery_info(battery) for battery in acpi_list]
|
Get the battery info from acpi
# Example acpi -bi raw output (Discharging):
Battery 0: Discharging, 94%, 09:23:28 remaining
Battery 0: design capacity 5703 mAh, last full capacity 5283 mAh = 92%
Battery 1: Unknown, 98%
Battery 1: design capacity 1880 mAh, last full capacity 1370 mAh = 72%
# Example Charging
Battery 0: Charging, 96%, 00:20:40 until charged
Battery 0: design capacity 5566 mAh, last full capacity 5156 mAh = 92%
Battery 1: Unknown, 98%
Battery 1: design capacity 1879 mAh, last full capacity 1370 mAh = 72%
|
def assert_close(a, b, rtol=1e-07, atol=0, context=None):
    """
    Compare for equality up to a given precision two composite objects
    which may contain floats. NB: if the objects are or contain generators,
    they are exhausted.

    :param a: an object
    :param b: another object
    :param rtol: relative tolerance
    :param atol: absolute tolerance
    :param context: object mentioned in the error message for debugging
    :raises AssertionError: if the objects differ beyond the tolerance
    """
    if isinstance(a, float) or isinstance(a, numpy.ndarray) and a.shape:
        # shortcut for floats and non-scalar arrays
        numpy.testing.assert_allclose(a, b, rtol, atol)
        return
    if isinstance(a, (str, bytes, int)):
        # another shortcut: exact comparison
        assert a == b, (a, b)
        return
    if hasattr(a, '_slots_'):  # record-like objects
        assert a._slots_ == b._slots_
        for x in a._slots_:
            assert_close(getattr(a, x), getattr(b, x), rtol, atol, x)
        return
    if hasattr(a, 'keys'):  # dict-like objects
        assert a.keys() == b.keys()
        for x in a:
            if x != '__geom__':
                assert_close(a[x], b[x], rtol, atol, x)
        return
    if hasattr(a, '__dict__'):  # objects with an attribute dictionary
        assert_close(vars(a), vars(b), context=a)
        return
    if hasattr(a, '__iter__'):  # iterable objects
        xs, ys = list(a), list(b)
        # typo fix: was "lenghts"
        assert len(xs) == len(ys), ('Lists of different lengths: %d != %d'
                                    % (len(xs), len(ys)))
        for x, y in zip(xs, ys):
            assert_close(x, y, rtol, atol, x)
        return
    if a == b:  # last attempt to avoid raising the exception
        return
    ctx = '' if context is None else 'in context ' + repr(context)
    raise AssertionError('%r != %r %s' % (a, b, ctx))
|
Compare for equality up to a given precision two composite objects
which may contain floats. NB: if the objects are or contain generators,
they are exhausted.
:param a: an object
:param b: another object
:param rtol: relative tolerance
:param atol: absolute tolerance
|
def _parse_sigmak(line, lines):
"""Parse Energy, Re sigma xx, Im sigma xx, Re sigma zz, Im sigma zz"""
split_line = line.split()
energy = float(split_line[0])
re_sigma_xx = float(split_line[1])
im_sigma_xx = float(split_line[2])
re_sigma_zz = float(split_line[3])
im_sigma_zz = float(split_line[4])
return {"energy": energy, "re_sigma_xx": re_sigma_xx, "im_sigma_xx": im_sigma_xx, "re_sigma_zz": re_sigma_zz,
"im_sigma_zz": im_sigma_zz}
|
Parse Energy, Re sigma xx, Im sigma xx, Re sigma zz, Im sigma zz
|
def build_specfile_filesection(spec, files):
    """ builds the %file section of the specfile

    :param spec: dict of packaging tags; X_RPM_DEFATTR is defaulted (and
        written back into spec) when missing, as before.
    :param files: iterable of file nodes providing GetTag()
    :return: the complete %files section as a single string
    """
    # Build via a list + join instead of repeated string concatenation,
    # and avoid shadowing the builtins `str` and `file`.
    pieces = ['%files\n']
    # Deliberately mutates spec: the default may be relied on downstream.
    if 'X_RPM_DEFATTR' not in spec:
        spec['X_RPM_DEFATTR'] = '(-,root,root)'
    pieces.append('%%defattr %s\n' % spec['X_RPM_DEFATTR'])
    supported_tags = {
        'PACKAGING_CONFIG'           : '%%config %s',
        'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',
        'PACKAGING_DOC'              : '%%doc %s',
        'PACKAGING_UNIX_ATTR'        : '%%attr %s',
        'PACKAGING_LANG_'            : '%%lang(%s) %s',
        'PACKAGING_X_RPM_VERIFY'     : '%%verify %s',
        'PACKAGING_X_RPM_DIR'        : '%%dir %s',
        'PACKAGING_X_RPM_DOCDIR'     : '%%docdir %s',
        'PACKAGING_X_RPM_GHOST'      : '%%ghost %s', }
    for node in files:
        # build the tagset for this file node
        tags = {}
        for tag_name in supported_tags:
            try:
                value = node.GetTag(tag_name)
                if value:
                    tags[tag_name] = value
            except AttributeError:
                pass
        # compile the tagset, then append the install location
        pieces.append(SimpleTagCompiler(supported_tags, mandatory=0).compile( tags ))
        pieces.append(' ')
        pieces.append(node.GetTag('PACKAGING_INSTALL_LOCATION'))
        pieces.append('\n\n')
    return ''.join(pieces)
|
builds the %file section of the specfile
|
def get_content_type(self):
    """mime type of the attachment part

    Underspecified octet-stream descriptions are replaced by a better
    guess based on the attachment's data.
    """
    declared = self.part.get_content_type()
    octet_types = ('octet/stream', 'application/octet-stream',
                   'application/octetstream')
    if declared in octet_types:
        return guess_mimetype(self.get_data())
    return declared
|
mime type of the attachment part
|
def compress(func):
    """Compress result with deflate algorithm if the client ask for it.

    Decorator: the wrapped handler's result is deflate-compressed when the
    client's Accept-Encoding header contains 'deflate'; the response's
    Content-Encoding header is set accordingly.
    """
    from functools import wraps

    def deflate_compress(data, compress_level=6):
        """Compress given data using the DEFLATE algorithm"""
        # Init compression
        zobj = zlib.compressobj(compress_level,
                                zlib.DEFLATED,
                                zlib.MAX_WBITS,
                                zlib.DEF_MEM_LEVEL,
                                zlib.Z_DEFAULT_STRATEGY)
        # Return compressed object
        return zobj.compress(b(data)) + zobj.flush()

    # functools.wraps preserves the wrapped function's metadata
    # (__name__, __doc__), which the original decorator lost.
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Wrapper that take one function and return the compressed result."""
        ret = func(*args, **kwargs)
        logger.debug('Receive {} {} request with header: {}'.format(
            request.method,
            request.url,
            ['{}: {}'.format(h, request.headers.get(h)) for h in request.headers.keys()]
        ))
        if 'deflate' in request.headers.get('Accept-Encoding', ''):
            response.headers['Content-Encoding'] = 'deflate'
            ret = deflate_compress(ret)
        else:
            response.headers['Content-Encoding'] = 'identity'
        return ret
    return wrapper
|
Compress result with deflate algorithm if the client ask for it.
|
def href(*args, **kw):
    """
    Simple function for URL generation. Position arguments are used for the
    URL path and keyword arguments are used for the url parameters.
    """
    root = (request.script_root if request else "") + "/"
    pieces = [root]
    pieces.extend(("/" if idx else "") + url_quote(arg)
                  for idx, arg in enumerate(args))
    if kw:
        pieces.append("?" + url_encode(kw))
    return "".join(pieces)
|
Simple function for URL generation. Position arguments are used for the
URL path and keyword arguments are used for the url parameters.
|
def audits(self, ticket=None, include=None, **kwargs):
    """
    Retrieve TicketAudits. If ticket is passed, return the audits for that
    specific ticket. If ticket is None, a TicketAuditGenerator is returned
    to handle pagination. This generator works differently from the other
    Zenpy generators, as it is cursor based, allowing you to change the
    direction in which you consume objects via the reversed() python method.

    For example:

    .. code-block:: python

        for audit in reversed(zenpy_client.tickets.audits()):
            print(audit)

    See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for
    information on additional parameters.

    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    :param ticket: Ticket object or id
    """
    if ticket is None:
        # No ticket given: return the cursor-based audit generator.
        return self._query_zendesk(self.endpoint.audits.cursor, 'ticket_audit',
                                   include=include, **kwargs)
    return self._query_zendesk(self.endpoint.audits, 'ticket_audit',
                               id=ticket, include=include)
|
Retrieve TicketAudits. If ticket is passed, return the tickets for a specific audit.
If ticket_id is None, a TicketAuditGenerator is returned to handle pagination. The way this generator
works is different from the other Zenpy generators, as it is cursor based, allowing you to change the
direction that you are consuming objects. This is done with the reversed() python method.
For example:
.. code-block:: python
for audit in reversed(zenpy_client.tickets.audits()):
print(audit)
See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for
information on additional parameters.
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param ticket: Ticket object or id
|
def get_gpd_line(self,transcript_name=None,gene_name=None,direction=None):
    """Get the genpred format string representation of the mapping

    Thin wrapper that forwards this mapping object to the module-level
    ``transcript_to_gpd_line`` helper.

    :param transcript_name: optional transcript name to use
    :param gene_name: optional gene name to use
    :param direction: optional direction/strand to use
    :return: the genePred-format line for this mapping
    """
    return transcript_to_gpd_line(self,transcript_name=transcript_name,gene_name=gene_name,direction=direction)
|
Get the genpred format string representation of the mapping
|
def load_umatrix(self, filename):
    """Load the umatrix from a file to the Somoclu object.

    :param filename: The name of the file.
    :type filename: str.
    :raises ValueError: if the loaded U-matrix dimensions do not match
        the map's rows and columns.
    """
    # Lines starting with '%' are treated as comments by loadtxt.
    self.umatrix = np.loadtxt(filename, comments='%')
    if self.umatrix.shape != (self._n_rows, self._n_columns):
        # ValueError (a subclass of Exception) instead of the generic
        # Exception raised originally.
        raise ValueError("The dimensions of the U-matrix do not "
                         "match that of the map")
|
Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
|
def better_print(self, printer=None):
    """
    Print the value using a *printer*.

    :param printer: Callable used to print the value, by default: :func:`pprint.pprint`
    """
    (printer or pprint.pprint)(self.value)
|
Print the value using a *printer*.
:param printer: Callable used to print the value, by default: :func:`pprint.pprint`
|
def _send_reliable_message(self, msg):
    """Send msg to LightwaveRF hub.

    Prefixes the message with a fresh transaction id, sends it over UDP and
    waits for a matching "<id>,OK"/"<id>,ERR" reply, retrying up to 15
    times with a short delay between attempts.

    :param msg: command string to send (without transaction id)
    :returns: True when the hub acknowledged (or asked us to register),
        False on an error reply or socket timeout.
    """
    result = False
    max_retries = 15
    # Unique transaction id lets us match replies to this request.
    trans_id = next(LWLink.transaction_id)
    msg = "%d,%s" % (trans_id, msg)
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \
                as write_sock, \
                socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \
                as read_sock:
            write_sock.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            read_sock.setsockopt(socket.SOL_SOCKET,
                                 socket.SO_BROADCAST, 1)
            read_sock.settimeout(self.SOCKET_TIMEOUT)
            read_sock.bind(('0.0.0.0', self.RX_PORT))
            while max_retries:
                max_retries -= 1
                write_sock.sendto(msg.encode(
                    'UTF-8'), (LWLink.link_ip, self.TX_PORT))
                result = False
                # Drain replies until one matches our transaction id (or
                # the hub reports we are not registered).
                while True:
                    response, dummy = read_sock.recvfrom(1024)
                    response = response.decode('UTF-8')
                    if "Not yet registered." in response:
                        _LOGGER.error("Not yet registered")
                        self.register()
                        # Treated as success so the caller can retry after
                        # registration.
                        result = True
                        break
                    if response.startswith("%d,OK" % trans_id):
                        result = True
                        break
                    if response.startswith("%d,ERR" % trans_id):
                        _LOGGER.error(response)
                        break
                    # Unrelated traffic: log it and keep reading.
                    _LOGGER.info(response)
                if result:
                    break
                time.sleep(0.25)
    except socket.timeout:
        _LOGGER.error("LW broker timeout!")
        return result
    except Exception as ex:
        _LOGGER.error(ex)
        raise
    if result:
        _LOGGER.info("LW broker OK!")
    else:
        _LOGGER.error("LW broker fail!")
    return result
|
Send msg to LightwaveRF hub.
|
def delete_node_1ton(node_list, begin, node, end):  # type: ([],LinkedNode, LinkedNode, LinkedNode)->[]
    """
    delete the node which has 1-input and n-output

    Splices *node* out of the graph, rewiring *begin* directly to the
    *end* node(s) and removing *node* from *node_list*.

    :param node_list: list of all nodes in the graph
    :param begin: the (single) predecessor of *node*
    :param node: the node to delete (one input, n outputs)
    :param end: successor node(s) of *node*; None means use node.successor
    :return: node_list with *node* removed
    """
    if end is None:
        # BUGFIX: the original asserted `end is not None` inside this
        # branch, which always failed; fall back to the node's successors.
        end = node.successor
    elif not isinstance(end, list):
        end = [end]
    if any(e_.in_or_out for e_ in end):
        # if the end is output node, the output name will be kept to avoid the model output name updating.
        begin.out_redirect(node.single_input, node.single_output)
    else:
        for ne_ in end:
            target_var_name = node.single_input
            # since the output info never be updated, except the final.
            assert target_var_name in begin.output.values()
            ne_.in_redirect(node.single_output, target_var_name)
    begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor
    for ne_ in end:
        ne_.precedence = [begin if v_ == node else v_ for v_ in ne_.precedence]
    node_list.remove(node)
    return node_list
|
delete the node which has 1-input and n-output
|
def ticket_fields(self):
    """
    | Comment: ids of all ticket fields which are in this ticket form

    Returns the populated ticket fields via the API, or None when either
    the api handle or the id list is missing/empty.
    """
    if not (self.api and self.ticket_field_ids):
        return None
    return self.api._get_ticket_fields(self.ticket_field_ids)
|
| Comment: ids of all ticket fields which are in this ticket form
|
def delete_cookie(self, key, path='/', domain=None):
    """Delete a cookie by overwriting it with an immediately-expiring one.

    Fails silently if key doesn't exist.

    :param key: the key (name) of the cookie to be deleted.
    :param path: if the cookie that should be deleted was limited to a
        path, the path has to be defined here.
    :param domain: if the cookie that should be deleted was limited to a
        domain, that domain has to be defined here.
    """
    expired = dict(expires=0, max_age=0, path=path, domain=domain)
    self.set_cookie(key, **expired)
|
Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
|
def tf_import_demo_experience(self, states, internals, actions, terminal, reward):
    """
    Imports a single experience to memory.

    Delegates storage to ``self.demo_memory.store`` and returns its result.
    """
    experience = dict(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward,
    )
    return self.demo_memory.store(**experience)
|
Imports a single experience to memory.
|
def matrixfromDicts(dicts):
    """
    Give a list of dicts (or list of list of dicts) return a structured array.
    Headings will be sorted in alphabetical order.

    Missing cells are filled with NaN. If the input is already a numpy
    object it is returned unchanged.
    """
    if 'numpy' in str(type(dicts)):
        return dicts  # already an array?
    dicts = dictFlat(dicts)
    # Union of all keys across rows, sorted for a deterministic column order.
    names = set()
    for item in dicts:
        names.update(item.keys())
    names = sorted(names)
    # Map each column name to its index once, instead of re-scanning the
    # whole name list for every key of every dict (was O(rows*keys*cols)).
    column_index = {name: x for x, name in enumerate(names)}
    data = np.full((len(dicts), len(names)), np.nan, dtype=float)
    for y, item in enumerate(dicts):
        for key, value in item.items():
            data[y, column_index[key]] = value
    if len(dicts):
        data = np.core.records.fromarrays(data.transpose(), names=names)
    return data
|
Give a list of dicts (or list of list of dicts) return a structured array.
Headings will be sorted in alphabetical order.
|
def _schema_to_json_file_object(self, schema_list, file_obj):
"""Helper function for schema_to_json that takes a schema list and file
object and writes the schema list to the file object with json.dump
"""
json.dump(schema_list, file_obj, indent=2, sort_keys=True)
|
Helper function for schema_to_json that takes a schema list and file
object and writes the schema list to the file object with json.dump
|
def initFilter(input, filterInfo = None):
    """ Initializes internal filter variables for further processing.
    Returns a tuple (function to call,parameters for the filter call)
    The filterInfo is a dict. Here is an example structure:
      {fieldName: {'min': x,
                   'max': y,
                   'type': 'category', # or 'number'
                   'acceptValues': ['foo', 'bar'],
                   }
      }
    This returns the following:
      (filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),
                    ...)
    Where fieldIdx is the index of the field within each record
          fieldFilterFunc returns True if the value is "OK" (within min, max or
            part of acceptValues)
          fieldDict is a dict containing 'type', 'min', max', 'acceptValues'
    """
    if filterInfo is None:
        return None
    # Build an array of index/func to call on record[index]
    filterList = []
    for i, fieldName in enumerate(input.getFieldNames()):
        fieldFilter = filterInfo.get(fieldName, None)
        if fieldFilter is None:
            continue
        var = dict()
        var['acceptValues'] = None
        # Renamed from `min`/`max` to avoid shadowing the builtins; the
        # 'min'/'max' dict keys (read by the lambdas below) are unchanged.
        lower = fieldFilter.get('min', None)
        upper = fieldFilter.get('max', None)
        var['min'] = lower
        var['max'] = upper
        if fieldFilter['type'] == 'category':
            var['acceptValues'] = fieldFilter['acceptValues']
            fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                            x['value'] in x['acceptValues'])
        elif fieldFilter['type'] == 'number':
            if lower is not None and upper is not None:
                fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                                x['value'] >= x['min'] and x['value'] <= x['max'])
            elif lower is not None:
                fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                                x['value'] >= x['min'])
            else:
                fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                                x['value'] <= x['max'])
        filterList.append((i, fp, var))
    return (_filterRecord, filterList)
|
Initializes internal filter variables for further processing.
Returns a tuple (function to call,parameters for the filter call)
The filterInfo is a dict. Here is an example structure:
{fieldName: {'min': x,
'max': y,
'type': 'category', # or 'number'
'acceptValues': ['foo', 'bar'],
}
}
This returns the following:
(filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),
...)
Where fieldIdx is the index of the field within each record
fieldFilterFunc returns True if the value is "OK" (within min, max or
part of acceptValues)
fieldDict is a dict containing 'type', 'min', max', 'acceptValues'
|
def get(self, date, page_no=1, page_size=40, fields=None):
    '''taobao.taobaoke.report.get - Taobaoke report query.

    :param date: report date
    :param page_no: result page number (default 1)
    :param page_size: number of results per page (default 40)
    :param fields: fields to request; falls back to self.fields when falsy
    :return: self, populated from the 'taobaoke_report' response section
    '''
    # fields=None replaces the mutable default argument `fields=[]`;
    # any falsy value still falls back to self.fields, as before.
    if not fields:
        fields = self.fields
    # NOTE(review): the API name below ('taobao.taobaoke.items.get') does
    # not match the documented method 'taobao.taobaoke.report.get' —
    # confirm which one is intended upstream.
    request = TOPRequest('taobao.taobaoke.items.get')
    request['date'] = date
    request['page_no'] = page_no
    request['page_size'] = page_size
    request['fields'] = fields
    self.create(self.execute(request)['taobaoke_report'])
    return self
|
taobao.taobaoke.report.get 淘宝客报表查询
淘宝客报表查询
|
def orientation(point_p, point_q, point_r):
    """
    To find orientation of ordered triplet (p, q, r).

    See https://www.geeksforgeeks.org/orientation-3-ordered-points/ for
    details of the cross-product formula used below.

    :param point_p: first point (object with .x and .y)
    :type point_p: models.Point
    :param point_q: second point
    :type point_q: models.Point
    :param point_r: third point
    :type point_r: models.Point
    :return: 0: p, q and r are colinear
             1: clockwise
             2: counterclockwise
    :rtype: int
    """
    cross = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -
             (point_q.x - point_p.x) * (point_r.y - point_q.y))
    if cross > 0:
        return 1
    if cross < 0:
        return 2
    return 0
|
To find orientation of ordered triplet (p, q, r).
:param point_p:
:type point_p: models.Point
:param point_q:
:type point_q: models.Point
:param point_r:
:type point_r: models.Point
:return: 0: p, q and r are colinear
1: clockwise
2: counterclockwise
:rtype: int
|
def yield_module_imports(root, checks=string_imports()):
    """
    Gather all require and define calls from unbundled JavaScript source
    files and yield all module names. The imports can either be of the
    CommonJS or AMD syntax.

    :param root: AST root; must be an ``asttypes.Node`` instance
    :param checks: iterable of (extractor, condition) pairs; every pair
        whose condition matches a function node has its extractor's names
        yielded. NOTE: the default is evaluated once at import time.
    :raises TypeError: if root is not an asttypes.Node
    """
    if not isinstance(root, asttypes.Node):
        raise TypeError('provided root must be a node')
    for child in yield_function(root, deep_filter):
        for f, condition in checks:
            if condition(child):
                for name in f(child):
                    yield name
                # NOTE(review): this `continue` is a no-op (it is the last
                # statement in the loop body); possibly a `break` was
                # intended to stop after the first matching check — confirm.
                continue
|
Gather all require and define calls from unbundled JavaScript source
files and yield all module names. The imports can either be of the
CommonJS or AMD syntax.
|
def tree(self):
    """Tree with branch lengths in codon substitutions per site.

    The tree is a `Bio.Phylo.BaseTree.Tree` object, reflecting whatever
    optimizations have been performed so far: every non-root clade's
    branch length is self.t[...] scaled by the model's branchScale.
    """
    scale = self.model.branchScale
    for clade in self._tree.find_clades():
        if clade == self._tree.root:
            continue
        clade.branch_length = self.t[self.name_to_nodeindex[clade]] * scale
    return self._tree
|
Tree with branch lengths in codon substitutions per site.
The tree is a `Bio.Phylo.BaseTree.Tree` object.
This is the current tree after whatever optimizations have
been performed so far.
|
def expand(self, line, do_expand, force=False, vislevels=0, level=-1):
    """Multi-purpose expand method from original STC class

    Recursively shows/hides the lines of the fold section headed by
    *line*, then returns the first line index past the processed section.

    :param line: index of the fold-header line to process
    :param do_expand: when True (and not forcing), child lines are shown
    :param force: when True, visibility is forced from *vislevels*
        rather than the per-line fold-expanded state
    :param vislevels: number of nesting levels to keep visible when forcing
    :param level: fold level of the section; -1 means read it from the
        control at the first child line
    """
    lastchild = self.GetLastChild(line, level)
    line += 1
    while line <= lastchild:
        if force:
            # Forced mode: visibility depends only on remaining vislevels.
            if vislevels > 0:
                self.ShowLines(line, line)
            else:
                self.HideLines(line, line)
        elif do_expand:
            self.ShowLines(line, line)
        if level == -1:
            level = self.GetFoldLevel(line)
        if level & stc.STC_FOLDLEVELHEADERFLAG:
            # Nested fold header: recurse with one fewer visible level;
            # the recursion returns the line index past the sub-section.
            if force:
                self.SetFoldExpanded(line, vislevels - 1)
                line = self.expand(line, do_expand, force, vislevels - 1)
            else:
                expandsub = do_expand and self.GetFoldExpanded(line)
                line = self.expand(line, expandsub, force, vislevels - 1)
        else:
            line += 1
    return line
|
Multi-purpose expand method from original STC class
|
def _GetMessage(self, message_file_key, lcid, message_identifier):
"""Retrieves a specific message from a specific message table.
Args:
message_file_key (int): message file key.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)
has_table = self._database_file.HasTable(table_name)
if not has_table:
return None
column_names = ['message_string']
condition = 'message_identifier == "0x{0:08x}"'.format(message_identifier)
values = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values)
if number_of_values == 0:
return None
if number_of_values == 1:
return values[0]['message_string']
raise RuntimeError('More than one value found in database.')
|
Retrieves a specific message from a specific message table.
Args:
message_file_key (int): message file key.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
Raises:
RuntimeError: if more than one value is found in the database.
|
def police_priority_map_exceed_map_pri2_exceed(self, **kwargs):
    """Auto Generated Code

    Builds an XML "config" element setting the map-pri2-exceed leaf of a
    police-priority-map's exceed node (Brocade policer management
    namespace), then passes it to the callback for delivery.

    Required kwargs: 'name' (the police-priority-map name) and
    'map_pri2_exceed'. Optional kwarg: 'callback' (defaults to
    self._callback). KeyError is raised if a required kwarg is missing.
    """
    config = ET.Element("config")
    police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
    name_key = ET.SubElement(police_priority_map, "name")
    name_key.text = kwargs.pop('name')
    exceed = ET.SubElement(police_priority_map, "exceed")
    map_pri2_exceed = ET.SubElement(exceed, "map-pri2-exceed")
    map_pri2_exceed.text = kwargs.pop('map_pri2_exceed')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def scale_rows_by_largest_entry(S):
    """Scale each row in S by its largest in magnitude entry.

    Parameters
    ----------
    S : csr_matrix

    Returns
    -------
    S : csr_matrix
        Each row has been scaled by its largest in magnitude entry

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.utils import scale_rows_by_largest_entry
    >>> A = poisson( (4,), format='csr' )
    >>> A.data[1] = 5.0
    >>> A = scale_rows_by_largest_entry(A)
    >>> A.todense()
    matrix([[ 0.4,  1. ,  0. ,  0. ],
            [-0.5,  1. , -0.5,  0. ],
            [ 0. , -0.5,  1. , -0.5],
            [ 0. ,  0. , -0.5,  1. ]])
    """
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')
    # Scale S by the largest magnitude entry in each row
    largest_row_entry = np.zeros((S.shape[0],), dtype=S.dtype)
    pyamg.amg_core.maximum_row_value(S.shape[0], largest_row_entry,
                                     S.indptr, S.indices, S.data)
    # Invert non-zero maxima only; all-zero rows are left unscaled.
    largest_row_entry[largest_row_entry != 0] =\
        1.0 / largest_row_entry[largest_row_entry != 0]
    S = scale_rows(S, largest_row_entry, copy=True)
    return S
|
Scale each row in S by its largest in magnitude entry.
Parameters
----------
S : csr_matrix
Returns
-------
S : csr_matrix
Each row has been scaled by its largest in magnitude entry
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import scale_rows_by_largest_entry
>>> A = poisson( (4,), format='csr' )
>>> A.data[1] = 5.0
>>> A = scale_rows_by_largest_entry(A)
>>> A.todense()
matrix([[ 0.4, 1. , 0. , 0. ],
[-0.5, 1. , -0.5, 0. ],
[ 0. , -0.5, 1. , -0.5],
[ 0. , 0. , -0.5, 1. ]])
|
def group(self, base_dn, samaccountname, attributes=(), explicit_membership_only=False):
    """Produces a single, populated ADGroup object through the object factory.

    Does not populate attributes for the caller instance.

    sAMAccountName may not be present in group objects in modern AD schemas.
    Searching by common name and object class (group) may be an alternative
    approach if required in the future.

    :param str base_dn: The base DN to search within
    :param str samaccountname: The group's sAMAccountName
    :param list attributes: Object attributes to populate, defaults to all
    :return: A populated ADGroup object, or None when no match was found
    :rtype: ADGroup
    """
    matches = self.groups(base_dn, samaccountnames=[samaccountname],
                          attributes=attributes,
                          explicit_membership_only=explicit_membership_only)
    if matches:
        return matches[0]
    # No match found; log and implicitly return None.
    logging.info("%s - unable to retrieve object from AD by sAMAccountName", samaccountname)
|
Produces a single, populated ADGroup object through the object factory.
Does not populate attributes for the caller instance.
sAMAccountName may not be present in group objects in modern AD schemas.
Searching by common name and object class (group) may be an alternative
approach if required in the future.
:param str base_dn: The base DN to search within
:param str samaccountname: The group's sAMAccountName
:param list attributes: Object attributes to populate, defaults to all
:return: A populated ADGroup object
:rtype: ADGroup
|
def to_cloudformation(self, **kwargs):
    """Expand this SNS event source into CloudFormation resources.

    Produces the Lambda Permission resource that lets SNS invoke the
    target function, plus the topic subscription.

    :param dict kwargs: no existing resources need to be modified
    :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands
    :rtype: list
    """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")
    permission = self._construct_permission(function, source_arn=self.Topic)
    subscription = self._inject_subscription(function, self.Topic, self.FilterPolicy)
    return [permission, subscription]
|
Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers.
:param dict kwargs: no existing resources need to be modified
:returns: a list of vanilla CloudFormation Resources, to which this SNS event expands
:rtype: list
|
def normalize_layout(layout, min_percentile=1, max_percentile=99, relative_margin=0.1):
    """Clip outliers and rescale each column of *layout* into [0, 1].

    Per-column percentile bounds define a clip window, widened on both
    sides by ``relative_margin`` times the window span; values outside
    the window are clipped, then each column is shifted/scaled to [0, 1].

    :param layout: array-like of shape (n_points, n_dims)
    :param min_percentile: lower percentile for the clip window
    :param max_percentile: upper percentile for the clip window
    :param relative_margin: fraction of the window span added on each side
    :return: ndarray of same shape with values in [0, 1]
    """
    lo = np.percentile(layout, min_percentile, axis=0)
    hi = np.percentile(layout, max_percentile, axis=0)
    # Compute the span once so both margins are symmetric. (Previously the
    # upper margin was derived from the already-lowered `lo`, which made it
    # larger than the lower margin.)
    span = hi - lo
    lo = lo - relative_margin * span
    hi = hi + relative_margin * span
    # `clip` broadcasts the per-column bounds over the rows.
    clipped = np.clip(layout, lo, hi)
    # Embed within [0, 1] along every axis.
    clipped = clipped - clipped.min(axis=0)
    peak = clipped.max(axis=0)
    # Guard constant columns: a zero peak would otherwise produce NaN (0/0).
    peak = np.where(peak == 0, 1, peak)
    return clipped / peak
|
Removes outliers and scales layout to between [0,1].
|
def is_friend(self):
    """:class:`bool`: Whether this user is in your friends list.

    .. note::
        Meaningful only for non-bot accounts.
    """
    rel = self.relationship
    # No relationship record means not a friend; otherwise check its type.
    return rel is not None and rel.type is RelationshipType.friend
|
:class:`bool`: Checks if the user is your friend.
.. note::
This only applies to non-bot accounts.
|
def num2tamilstr_american( *args ):
    # NOTE(review): Python 2 only as written — relies on the `long` and
    # `unicode` builtins and delegates small values to the sibling
    # `num2tamilstr` converter.
    number = args[0]
    """ work till 1000 trillion - 1 - i.e = 1e12*1e3 - 1.
    turn number into a numeral, American style. Fractions upto 1e-30. """
    # Input validation: accept int/str/unicode/long/float; reject complex.
    if not any( filter( lambda T: isinstance( number, T), [int, str, unicode, long, float]) ) or isinstance(number,complex):
        raise Exception('num2tamilstr_american input has to be long or integer')
    # Upper bound: values >= 1e15 (one thousand trillion) are unsupported.
    if float(number) >= long(1e15):
        raise Exception('num2tamilstr input is too large')
    # Negative input: emit a leading minus and recurse on the magnitude.
    if float(number) < 0:
        return u"- "+num2tamilstr_american( -float(number) )
    # Tamil numeral vocabulary tables.
    units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10
    hundreds = ( u'நூறு', u'இருநூறு', u'முன்னூறு', u'நாநூறு',u'ஐநூறு', u'அறுநூறு', u'எழுநூறு', u'எண்ணூறு', u'தொள்ளாயிரம்') #100 - 900
    one_thousand_prefix = u'ஓர்'
    thousands = (u'ஆயிரம்',u'ஆயிரத்து')
    one_prefix = u'ஒரு'
    mil = u'மில்லியன்'
    million = (mil,mil)
    bil = u'பில்லியன்'
    billion = (bil,bil)
    tril = u'டிரில்லியன்'
    trillion = (tril,tril)
    # Numeric magnitude for each scale word.
    n_one = 1
    n_ten = 10
    n_hundred = 100
    n_thousand = 1000
    n_million = 1000*n_thousand
    n_billion = long(1000*n_million)
    n_trillion = long(1000*n_billion)
    # For each scale: (standalone form, form used when a remainder follows).
    suffix_base = { n_trillion: trillion,
                    n_billion : billion,
                    n_million : million,
                    n_thousand : thousands}
    # Canonical spelling of each exact power (e.g. exactly 1000).
    num_map = {n_trillion : [one_prefix,trillion[0]],
               n_billion : [one_prefix,billion[0]],
               n_million : [one_prefix,million[0]],
               n_thousand : [one_thousand_prefix, thousands[0]],
               n_hundred : [hundreds[0]], #special
               n_ten : [units[10]],
               n_one : [units[1]]}
    all_bases = [n_trillion,n_billion, n_million, n_thousand, n_hundred, n_ten,n_one]
    # Scales not exceeding the number, largest first.
    allowed_bases = list(filter( lambda base: float(number) >= base, all_bases ))
    # handle fractional parts
    # Values in (0, 1000] — including all fractions — are delegated to the
    # general Tamil converter.
    if float(number) > 0.0 and float(number) <= 1000.0:
        return num2tamilstr(number)
    # String input: split into sign / integer part / fractional part,
    # convert each piece recursively and rejoin.
    if isinstance(number,str) or isinstance(number,unicode):
        result = u""
        number = number.strip()
        assert(len(args) == 1)
        assert(len(number) > 0)
        # NOTE(review): is_negative is computed but the sign is never
        # re-applied below. Negative strings appear to be caught earlier by
        # the float(number) < 0 branch, so this looks like dead code — verify.
        is_negative = number[0] == "-"
        if is_negative:
            number = number[1:]
        frac_part = u""
        if number.find(".") >= 0:
            rat_part,frac_part = number.split(".")
            # Recurse on "0.<digits>" so the fraction is spelled on its own.
            frac_part = num2tamilstr_american(u"0."+frac_part)
        else:
            rat_part = number
        if len(rat_part) > 0:
            result = num2tamilstr_american(float(rat_part))
        result = result +u" "+ frac_part
        return result.strip()
    if len(allowed_bases) >= 1:
        # Largest scale that fits the number.
        n_base = allowed_bases[0]
        if number == n_base:
            # Exact power of a scale: use its canonical spelling.
            return u" ".join(num_map[n_base])
        quotient_number = long( number/n_base )
        residue_number = number - n_base*quotient_number
        if n_base < n_thousand:
            # Numbers <= 1000 were already delegated above, so the chosen
            # base is always at least one thousand here.
            raise Exception("This can never happen")
        else:
            if ( quotient_number == 1 ):
                # "One thousand" takes the special prefix; other scales use
                # the generic "one" prefix.
                if n_base == n_thousand:
                    numeral = one_thousand_prefix+u' '
                else:
                    numeral = one_prefix+u' '
            else:
                # Spell the multiplier (< 1000) with the general converter.
                numeral = num2tamilstr( quotient_number )
            if n_base >= n_thousand:
                # Index 1 selects the with-residue word form when a
                # remainder follows, index 0 the standalone form.
                suffix = suffix_base[n_base][long(residue_number >= 1)]
            if residue_number == 0:
                return numeral + u' ' + suffix
            numeral = numeral + u' ' + suffix
            # Recurse on the remainder and append it.
            residue_numeral = num2tamilstr_american( residue_number )
            return numeral+u' '+residue_numeral
    # number has to be zero
    return units[0]
|
work till 1000 trillion - 1 - i.e = 1e12*1e3 - 1.
turn number into a numeral, American style. Fractions upto 1e-30.
|
def _store_user_identity(request, user):
    """Persist the authenticated user's identity fields on the session."""
    request.session['user_token'] = user["token"]
    request.session['user_email'] = user["email"]
    request.session['user_permissions'] = user["permissions"]
    request.session['user_id'] = user["id"]
    request.session['user_list'] = user["user_list"]


def _store_user_dashboards(request, user_id):
    """Cache the user's dashboards on the session (they are slow to change)."""
    dashboards = ciApi.get_user_dashboards(user_id)
    dashboard_list = list(dashboards['results'])
    if len(dashboard_list) > 0:
        request.session['user_dashboards'] = dashboard_list[0]["dashboards"]
        request.session['user_default_dashboard'] = \
            dashboard_list[0]["default_dashboard"]["id"]
    else:
        request.session['user_dashboards'] = []
        request.session['user_default_dashboard'] = None


def _store_user_tokens(request, user_id):
    """Fetch the user's service tokens and store them keyed by service name."""
    tokens = ciApi.get_user_service_tokens(params={"user_id": user_id})
    user_tokens = {}
    for token in list(tokens['results']):
        user_tokens[token["service"]["name"]] = {
            "token": token["token"],
            "url": token["service"]["url"] + "/api/v1",
        }
    request.session['user_tokens'] = user_tokens


def login(request, template_name='ci/login.html',
          redirect_field_name=REDIRECT_FIELD_NAME,
          authentication_form=AuthenticationForm):
    """
    Display the login form and handle the login action.

    On a valid POST, validates the redirect target, populates the session
    with the user's identity, dashboards (unless hidden), and service
    tokens, then redirects. Otherwise renders the login template.
    """
    redirect_to = request.POST.get(redirect_field_name,
                                   request.GET.get(redirect_field_name, ''))
    if request.method == "POST":
        form = authentication_form(request, data=request.POST)
        if form.is_valid():
            # Ensure the user-originating redirection url is safe.
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
            # Security check complete; get the user object from the auth api.
            user = form.get_user()
            _store_user_identity(request, user)
            if not settings.HIDE_DASHBOARDS:
                _store_user_dashboards(request, user["id"])
            _store_user_tokens(request, user["id"])
            return HttpResponseRedirect(redirect_to)
    else:
        form = authentication_form(request)
    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    return TemplateResponse(request, template_name, context)
|
Displays the login form and handles the login action.
|
def collapse(dataframe, groupe, var):
    """Return the weighted mean of *var* computed within each group.

    (Original French docstring: "Pour une variable, fonction qui calcule
    la moyenne pondérée au sein de chaque groupe.")
    """
    by_group = dataframe.groupby([groupe])
    return by_group.apply(lambda frame: wavg(groupe=frame, var=var))
|
Pour une variable, fonction qui calcule la moyenne pondérée au sein de chaque groupe.
|
def __get_rectangle_description(block, pair):
    """!
    @brief Create rectangle description for block in specific dimension.
    @param[in] block (bang_block): BANG-block that should be displayed.
    @param[in] pair (tuple): Pair of coordinate indexes that should be displayed.
    @return (tuple) Pair of corners (max_corner, min_corner) that describes rectangle.
    """
    max_corner, min_corner = block.get_spatial_block().get_corners()
    # Project the N-dimensional corners onto the two requested axes.
    max_corner = [max_corner[pair[0]], max_corner[pair[1]]]
    min_corner = [min_corner[pair[0]], min_corner[pair[1]]]
    # Degenerate 1-D case: both requested axes are axis 0, so stretch the
    # second (display) axis to a fixed [-1.0, 1.0] band for visualization.
    if pair == (0, 0):
        max_corner[1], min_corner[1] = 1.0, -1.0
    return max_corner, min_corner
|
!
@brief Create rectangle description for block in specific dimension.
@param[in] pair (tuple): Pair of coordinate index that should be displayed.
@param[in] block (bang_block): BANG-block that should be displayed
@return (tuple) Pair of corners that describes rectangle.
|
def lookup(self, allowed_types, **kwargs):
    """Query the catalogs for each portal type in *allowed_types*.

    Keyword arguments are forwarded to the catalog search. The first
    non-empty result set wins; ``None`` is returned when nothing matches.
    """
    archetype_tool = getToolByName(self, 'archetype_tool')
    for type_name in allowed_types:
        # Resolve which catalog indexes this portal type.
        catalog_id = archetype_tool.catalog_map.get(type_name, [None])[0]
        catalog = getToolByName(self, catalog_id)
        kwargs['portal_type'] = type_name
        results = catalog(**kwargs)
        if results:
            return results
|
Lookup an object of type (allowed_types). kwargs is sent
directly to the catalog.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.