code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def stats(self) -> pd.DataFrame:
    """Summarize the flights contained in the structure.

    Returns one row per flight, indexed by flight_id, with callsign,
    origin, destination and duration, sorted by decreasing duration.
    Useful for a meaningful representation.
    """
    records = [
        {
            "flight_id": flight.flight_id,
            "callsign": flight.callsign,
            "origin": flight.origin,
            "destination": flight.destination,
            "duration": flight.stop - flight.start,  # type: ignore
        }
        for flight in self
    ]
    frame = pd.DataFrame.from_records(records).set_index("flight_id")
    return frame.sort_values("duration", ascending=False)
|
Statistics about flights contained in the structure.
Useful for a meaningful representation.
|
def get_suitable_slot_for_reference(self, reference):
    """Return the suitable position for reference analyses, taking into
    account if there is a WorksheetTemplate assigned to this worksheet.

    By default, returns a new slot at the end of the worksheet unless there
    is a slot defined for a reference of the same type (blank or control)
    in the worksheet template's layout that hasn't been used yet.

    :param reference: ReferenceSample the analyses will be created from
    :return: suitable slot position for reference analyses (-1 when the
        passed-in object is not a reference sample)
    """
    if not IReferenceSample.providedBy(reference):
        return -1

    used_slots = self.get_slot_positions(type='all') or [0]
    template = self.getWorksheetTemplate()
    if not template:
        # No worksheet template assigned: append a new slot at the end
        # of the worksheet for the reference analyses.
        return max(used_slots) + 1

    # If there is a match with the layout defined in the Worksheet
    # Template, use that slot instead of adding a new one at the end.
    wanted_type = 'b' if reference.getBlank() else 'c'
    layout = template.getLayout()
    for entry in layout:
        if entry['type'] != wanted_type:
            continue
        candidate = int(entry['pos'])
        if candidate not in used_slots:
            # Empty slot reserved for this reference type: reuse it.
            return candidate

    # No free matching slot in the layout. Append a new slot at the end,
    # making sure not to override slots reserved by the template layout.
    return max(used_slots + [len(layout)]) + 1
|
Returns the suitable position for reference analyses, taking into
account if there is a WorksheetTemplate assigned to this worksheet.
By default, returns a new slot at the end of the worksheet unless there
is a slot defined for a reference of the same type (blank or control)
in the worksheet template's layout that hasn't been used yet.
:param reference: ReferenceSample the analyses will be created from
:return: suitable slot position for reference analyses
|
def deliver_tx(self, raw_transaction):
    """Validate the transaction before mutating the state.

    Args:
        raw_transaction: a raw string (in bytes) transaction.
    """
    self.abort_if_abci_chain_is_not_synced()

    logger.debug('deliver_tx: %s', raw_transaction)
    decoded = decode_transaction(raw_transaction)
    transaction = self.bigchaindb.is_valid_transaction(
        decoded, self.block_transactions)

    if transaction:
        # Valid: stage the transaction for inclusion in the next block.
        logger.debug('storing tx')
        self.block_txn_ids.append(transaction.id)
        self.block_transactions.append(transaction)
        return ResponseDeliverTx(code=CodeTypeOk)

    logger.debug('deliver_tx: INVALID')
    return ResponseDeliverTx(code=CodeTypeError)
|
Validate the transaction before mutating the state.
Args:
raw_transaction: a raw string (in bytes) transaction.
|
def computational_form(data):
    """
    Repackage a Series of numbers, Series, or DataFrames for calculation.

    Parameters
    ----------
    data : pandas.Series
        Series of numbers, Series, or DataFrames.

    Returns
    -------
    pandas.Series, DataFrame, or Panel
        Repacked data, aligned by indices, ready for calculation.
    """
    first = data.iloc[0]
    if isinstance(first, DataFrame):
        # A Series of DataFrames becomes a Panel keyed by position.
        # range() works on both Python 2 and 3; the original xrange is
        # Python-2-only and raises NameError on Python 3.
        dslice = Panel.from_dict({i: data.iloc[i] for i in range(len(data))})
    elif isinstance(first, Series):
        # A Series of Series stacks into one DataFrame, keeping the
        # outer index.
        dslice = DataFrame(data.tolist())
        dslice.index = data.index
    else:
        # Plain numbers pass through unchanged.
        dslice = data
    return dslice
|
Input Series of numbers, Series, or DataFrames repackaged
for calculation.
Parameters
----------
data : pandas.Series
Series of numbers, Series, DataFrames
Returns
-------
pandas.Series, DataFrame, or Panel
repacked data, aligned by indices, ready for calculation
|
def _convolve3_old(data, h, dev=None):
    """Convolve 3d data with kernel h on the GPU Device dev.

    Boundary conditions are clamp-to-edge; h is converted to float32.
    If dev is None the default OpenCL device is used.
    """
    if dev is None:
        dev = get_device()
    if dev is None:
        raise ValueError("no OpenCLDevice found...")

    # Per-dtype OpenCL build flags; only these dtypes are supported.
    build_flags = {np.float32: "",
                   np.uint16: "-D SHORTTYPE"}
    dtype = data.dtype.type
    if dtype not in build_flags:
        raise TypeError("data type %s not supported yet, please convert to:" % dtype,
                        list(build_flags.keys()))

    program = OCLProgram(abspath("kernels/convolve3.cl"),
                         build_options=build_flags[dtype])

    kernel_buf = OCLArray.from_array(h.astype(np.float32))
    image = OCLImage.from_array(data)
    out = OCLArray.empty(data.shape, dtype=np.float32)

    # Kernel expects the data shape followed by the kernel shape as int32s.
    dims = [np.int32(n) for n in data.shape + h.shape]
    program.run_kernel("convolve3d", image.shape, None,
                       image, kernel_buf.data, out.data,
                       *dims)
    return out.get()
|
convolves 3d data with kernel h on the GPU Device dev
boundary conditions are clamping to edge.
h is converted to float32
if dev == None the default one is used
|
def options(self):
    """
    Reads all EMAIL_ options and set default values.
    """
    merged = {}
    # Defaults first, in this precedence order...
    merged.update(self._default_smtp_options)
    merged.update(self._default_message_options)
    merged.update(self._default_backend_options)
    # ...then EMAIL_-prefixed overrides from the config.
    merged.update(get_namespace(self._config, 'EMAIL_', valid_keys=merged.keys()))
    # Numeric options may arrive as strings; coerce them.
    merged['port'] = int(merged['port'])
    merged['timeout'] = float(merged['timeout'])
    return merged
|
Reads all EMAIL_ options and set default values.
|
def update(ctx, no_restart, no_rebuild):
    """Update a HFOS node.

    Pulls the backend and frontend git repositories, then optionally
    rebuilds the frontend and restarts the systemd service.

    :param ctx: Click context; ``ctx.obj['instance']`` names the instance
    :param no_restart: skip restarting the system service when truthy
    :param no_rebuild: skip rebuilding the frontend when truthy
    """
    # 0. (NOT YET! MAKE A BACKUP OF EVERYTHING)
    # 1. update repository
    # 2. update frontend repository
    # 3. (Not yet: update venv)
    # 4. rebuild frontend
    # 5. restart service
    instance = ctx.obj['instance']

    log('Pulling github updates')
    run_process('.', ['git', 'pull', 'origin', 'master'])
    run_process('./frontend', ['git', 'pull', 'origin', 'master'])

    if not no_rebuild:
        log('Rebuilding frontend')
        install_frontend(instance, forcerebuild=True, install=False,
                         development=True)

    if not no_restart:
        # Fixed typo in log message (was 'Restaring service').
        log('Restarting service')
        # Non-default instances run under a prefixed service name.
        if instance != 'hfos':
            instance = 'hfos-' + instance
        run_process('.', ['sudo', 'systemctl', 'restart', instance])

    log('Done')
|
Update a HFOS node
|
def token_is_valid(self,):
    """Check the validity of the token (nominal lifetime: 3600s).
    """
    age = time.time() - self.token_time
    logger.debug("ELAPSED TIME : {0}".format(age))
    # Treat the token as expired 60s early (3540s) as a safety margin.
    if age > 3540:  # 1 minute before it expires
        logger.debug("TOKEN HAS EXPIRED")
        return False
    logger.debug("TOKEN IS STILL VALID")
    return True
|
Check the validity of the token :3600s
|
def _check_len(self, pkt):
"""Check for odd packet length and pad according to Cisco spec.
This padding is only used for checksum computation. The original
packet should not be altered."""
if len(pkt) % 2:
last_chr = pkt[-1]
if last_chr <= b'\x80':
return pkt[:-1] + b'\x00' + last_chr
else:
return pkt[:-1] + b'\xff' + chb(orb(last_chr) - 1)
else:
return pkt
|
Check for odd packet length and pad according to Cisco spec.
This padding is only used for checksum computation. The original
packet should not be altered.
|
def bsrchi(value, ndim, array):
    """
    Do a binary search for a key value within an integer array,
    assumed to be in increasing order. Return the index of the
    matching array entry, or -1 if the key value is not found.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchi_c.html

    :param value: Value to find in array.
    :type value: int
    :param ndim: Dimension of array.
    :type ndim: int
    :param array: Array to be searched.
    :type array: Array of ints
    :return: index
    :rtype: int
    """
    # Marshal the Python arguments into ctypes values for the C call.
    c_value = ctypes.c_int(value)
    c_ndim = ctypes.c_int(ndim)
    c_array = stypes.toIntVector(array)
    return libspice.bsrchi_c(c_value, c_ndim, c_array)
|
Do a binary search for a key value within an integer array,
assumed to be in increasing order. Return the index of the
matching array entry, or -1 if the key value is not found.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchi_c.html
:param value: Value to find in array.
:type value: int
:param ndim: Dimension of array.
:type ndim: int
:param array: Array to be searched.
:type array: Array of ints
:return: index
:rtype: int
|
def load_config(self, filepath=None):
    """
    Load GUI configuration (paths, instruments, scripts, probes) from a
    settings file, falling back to the default configuration when the
    file is missing or invalid, then refresh the GUI trees.

    Args:
        filepath: path to the settings file; may be None, in which case
            the default configuration is used.
    """
    # load config or default if invalid

    def load_settings(filepath):
        """
        loads a old_gui settings file (a json dictionary)
        - path_to_file: path to file that contains the dictionary

        Returns:
        - instruments: depth 1 dictionary where keys are instrument names and values are instances of instruments
        - scripts: depth 1 dictionary where keys are script names and values are instances of scripts
        - probes: depth 1 dictionary where to be decided....?
        """
        instruments_loaded = {}
        probes_loaded = {}
        scripts_loaded = {}
        if filepath and os.path.isfile(filepath):
            in_data = load_b26_file(filepath)
            instruments = in_data['instruments'] if 'instruments' in in_data else {}
            scripts = in_data['scripts'] if 'scripts' in in_data else {}
            probes = in_data['probes'] if 'probes' in in_data else {}
            try:
                # Instruments load first because scripts depend on them.
                instruments_loaded, failed = Instrument.load_and_append(instruments)
                if len(failed) > 0:
                    print(('WARNING! Following instruments could not be loaded: ', failed))
                scripts_loaded, failed, instruments_loaded = Script.load_and_append(
                    script_dict=scripts,
                    instruments=instruments_loaded,
                    log_function=self.log,
                    data_path=self.gui_settings['data_folder'])
                if len(failed) > 0:
                    print(('WARNING! Following scripts could not be loaded: ', failed))
                # NOTE(review): the third return value is bound to
                # 'instruments_loadeds' (trailing 's') and discarded --
                # possibly a typo for 'instruments_loaded'; confirm intent.
                probes_loaded, failed, instruments_loadeds = Probe.load_and_append(
                    probe_dict=probes,
                    probes=probes_loaded,
                    instruments=instruments_loaded)
                self.log('Successfully loaded from previous save.')
            except ImportError:
                self.log('Could not load instruments or scripts from file.')
                self.log('Opening with blank GUI.')
        return instruments_loaded, scripts_loaded, probes_loaded

    config = None
    try:
        config = load_b26_file(filepath)
        config_settings = config['gui_settings']
        if config_settings['gui_settings'] != filepath:
            # The file records where it expects to live; warn and prefer
            # the actual location it was loaded from.
            print((
                'WARNING path to settings file ({:s}) in config file is different from path of settings file ({:s})'.format(
                    config_settings['gui_settings'], filepath)))
            config_settings['gui_settings'] = filepath
    except Exception as e:
        # Any failure (missing file, bad JSON, missing key) falls back to
        # the default configuration.
        if filepath:
            self.log('The filepath was invalid --- could not load settings. Loading blank GUI.')
        config_settings = self._DEFAULT_CONFIG

    # Validate every configured path; create it if missing, and fall back
    # to (and create) the default path when that fails.
    for x in self._DEFAULT_CONFIG.keys():
        if x in config_settings:
            if not os.path.exists(config_settings[x]):
                try:
                    os.makedirs(config_settings[x])
                except Exception:
                    config_settings[x] = self._DEFAULT_CONFIG[x]
                    os.makedirs(config_settings[x])
                    print(('WARNING: failed validating or creating path: set to default path'.format(config_settings[x])))
        else:
            config_settings[x] = self._DEFAULT_CONFIG[x]
            os.makedirs(config_settings[x])
            print(('WARNING: path {:s} not specified set to default {:s}'.format(x, config_settings[x])))

    # check if file_name is a valid filename
    if filepath is not None and os.path.exists(os.path.dirname(filepath)):
        config_settings['gui_settings'] = filepath

    self.gui_settings = config_settings
    if(config):
        self.gui_settings_hidden = config['gui_settings_hidden']
    else:
        self.gui_settings_hidden['script_source_folder'] = ''

    self.instruments, self.scripts, self.probes = load_settings(filepath)

    # Refresh the GUI trees to reflect the freshly loaded state.
    self.refresh_tree(self.tree_gui_settings, self.gui_settings)
    self.refresh_tree(self.tree_scripts, self.scripts)
    self.refresh_tree(self.tree_settings, self.instruments)
    self._hide_parameters(filepath)
|
checks if the file is a valid config file
Args:
filepath:
|
def exclude(self, target, operation, role, value):
    """Exclude a `role` of `value` at `target`

    Arguments:
        target (str): Destination proxy model; one of "result",
            "instance" or "plugin"
        operation (str): "add" or "remove" exclusion
        role (str): Role to exclude
        value (str): Value of `role` to exclude
    """
    proxies = self.data["proxies"]
    # KeyError here signals an unknown target name.
    model = {
        "result": proxies["result"],
        "instance": proxies["instance"],
        "plugin": proxies["plugin"],
    }[target]

    if operation == "add":
        model.add_exclusion(role, value)
    elif operation == "remove":
        model.remove_exclusion(role, value)
    else:
        raise TypeError("operation must be either `add` or `remove`")
|
Exclude a `role` of `value` at `target`
Arguments:
target (str): Destination proxy model
operation (str): "add" or "remove" exclusion
role (str): Role to exclude
value (str): Value of `role` to exclude
|
def reshape(self, newshape, order='C'):
    """If axis 0 is unaffected by the reshape, then returns a Timeseries,
    otherwise returns an ndarray. Preserves labels of axis j only if all
    axes<=j are unaffected by the reshape.

    See ``numpy.ndarray.reshape()`` for more information
    """
    oldshape = self.shape
    ar = np.asarray(self).reshape(newshape, order=order)
    # Use == rather than `is` for int-literal comparisons: identity of
    # small ints is a CPython implementation detail, and `x is -1` /
    # `x is 1` raises SyntaxWarning on Python >= 3.8.
    if (newshape == -1 and len(oldshape) == 1 or
            (isinstance(newshape, numbers.Integral) and
             newshape == oldshape[0]) or
            (isinstance(newshape, Sequence) and
             (newshape[0] == oldshape[0] or
              (newshape[0] == -1 and np.array(oldshape[1:]).prod() ==
               np.array(newshape[1:]).prod())))):
        # then axis 0 is unaffected by the reshape
        newlabels = [None] * ar.ndim
        # Copy labels only for leading axes whose extent is unchanged.
        i = 1
        while i < ar.ndim and i < self.ndim and ar.shape[i] == oldshape[i]:
            newlabels[i] = self.labels[i]
            i += 1
        return Timeseries(ar, self.tspan, newlabels)
    else:
        return ar
|
If axis 0 is unaffected by the reshape, then returns a Timeseries,
otherwise returns an ndarray. Preserves labels of axis j only if all
axes<=j are unaffected by the reshape.
See ``numpy.ndarray.reshape()`` for more information
|
def update_variables(X, Z, U, prox_f, step_f, prox_g, step_g, L):
    """Update the primal and dual variables of a proximal splitting step.

    Note: X, Z, U are updated inline.

    When ``prox_g`` is a sequence, each constraint i carries its own
    (Z[i], U[i], prox_g[i], step_g[i], L[i]); when it is None the update
    degenerates to a plain fixed-point step on prox_f.

    Returns: LX, R, S -- transformed primal variable(s), primal
    residual(s) and dual residual(s) (lists when prox_g is a sequence);
    see do_the_mm for their exact definitions.
    """
    if not hasattr(prox_g, '__iter__'):
        if prox_g is not None:
            # Single constraint: gradient-like step on the augmented
            # term, then the proximal step for f, then Z/U bookkeeping.
            dX = step_f/step_g * L.T.dot(L.dot(X) - Z + U)
            X[:] = prox_f(X - dX, step_f)
            LX, R, S = do_the_mm(X, step_f, Z, U, prox_g, step_g, L)
        else:
            # fall back to simple fixed-point method for f
            # see do_the_mm for normal definitions of LX,Z,R,S
            S = -X.copy()  # remember -X_old so that S ends as X_new - X_old
            X[:] = prox_f(X, step_f)
            LX = X
            Z[:] = X[:]
            R = np.zeros(X.shape, dtype=X.dtype)
            S += X
    else:
        # Multiple constraints: accumulate all gradient contributions
        # before the single proximal step for f.
        M = len(prox_g)
        dX = np.sum([step_f/step_g[i] * L[i].T.dot(L[i].dot(X) - Z[i] + U[i]) for i in range(M)], axis=0)
        X[:] = prox_f(X - dX, step_f)
        LX = [None] * M
        R = [None] * M
        S = [None] * M
        for i in range(M):
            LX[i], R[i], S[i] = do_the_mm(X, step_f, Z[i], U[i], prox_g[i], step_g[i], L[i])
    return LX, R, S
|
Update the primal and dual variables
Note: X, Z, U are updated inline
Returns: LX, R, S
|
def getclientloansurl(idclient, *args, **kwargs):
    """Request Client loans URL.

    How to use it? By default MambuLoan uses getloansurl as the urlfunc.
    Override that behaviour by sending getclientloansurl (this function)
    as the urlfunc to the constructor of MambuLoans (note the final 's')
    and voila! you get the Loans just for a certain client.

    If idclient is set, you'll get a response adequate for a
    MambuLoans object.
    If not set, you'll get a Jar Jar Binks object, or something quite
    strange and useless as JarJar. A MambuError must likely since I
    haven't needed it for anything but for loans of one and just
    one client.

    See mambuloan module and pydoc for further information.

    Currently implemented filter parameters:
    * fullDetails
    * accountState

    See Mambu official developer documentation for further details, and
    info on parameters that may be implemented here in the future.

    :param idclient: Mambu id of the client whose loans are requested
    :return: the URL string for the GET request
    """
    getparams = []
    if kwargs:
        # Catch only KeyError (parameter not supplied); the previous
        # broad `except Exception` could silently mask real errors.
        try:
            if kwargs["fullDetails"] == True:
                getparams.append("fullDetails=true")
            else:
                getparams.append("fullDetails=false")
        except KeyError:
            pass
        try:
            getparams.append("accountState=%s" % kwargs["accountState"])
        except KeyError:
            pass

    clientidparam = "/" + idclient
    url = (getmambuurl(*args, **kwargs) + "clients" + clientidparam + "/loans"
           + ("" if len(getparams) == 0 else "?" + "&".join(getparams)))
    return url
|
Request Client loans URL.
How to use it? By default MambuLoan uses getloansurl as the urlfunc.
Override that behaviour by sending getclientloansurl (this function)
as the urlfunc to the constructor of MambuLoans (note the final 's')
and voila! you get the Loans just for a certain client.
If idclient is set, you'll get a response adequate for a
MambuLoans object.
If not set, you'll get a Jar Jar Binks object, or something quite
strange and useless as JarJar. A MambuError must likely since I
haven't needed it for anything but for loans of one and just
one client.
See mambuloan module and pydoc for further information.
Currently implemented filter parameters:
* accountState
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
|
def input(msg="", default="", title="Lackey Input", hidden=False):
    """ Show an input dialog with the specified message and default text.

    When `hidden` is truthy, a password dialog is shown instead.
    Returns the value entered by the user as a string. """
    root = tk.Tk()
    entered = tk.StringVar()
    entered.set(default)
    PopupInput(root, msg, title, hidden, entered)
    root.focus_force()
    root.mainloop()  # blocks until the dialog is dismissed
    return str(entered.get())
|
Creates an input dialog with the specified message and default text.
If `hidden`, creates a password dialog instead. Returns the entered value.
|
def upload(self, timeout=None):
    """
    Call ``upload()`` after :py:func:`adding <CommandSequence.add>` or :py:func:`clearing <CommandSequence.clear>` mission commands.

    After the return from ``upload()`` any writes are guaranteed to have completed (or thrown an
    exception) and future reads will see their effects.

    :param int timeout: The timeout for uploading the mission. No timeout if not provided or set to None.
    """
    vehicle = self._vehicle
    if not vehicle._wpts_dirty:
        # Nothing changed since the last upload.
        return
    vehicle._master.waypoint_clear_all_send()
    start_time = time.time()
    count = vehicle._wploader.count()
    if count > 0:
        # Track per-waypoint acknowledgement; the protocol handler
        # flips each entry to True as the autopilot requests it.
        vehicle._wp_uploaded = [False] * count
        vehicle._master.waypoint_count_send(count)
        while False in vehicle._wp_uploaded:
            if timeout and time.time() - start_time > timeout:
                raise TimeoutError
            time.sleep(0.1)
        vehicle._wp_uploaded = None
    vehicle._wpts_dirty = False
|
Call ``upload()`` after :py:func:`adding <CommandSequence.add>` or :py:func:`clearing <CommandSequence.clear>` mission commands.
After the return from ``upload()`` any writes are guaranteed to have completed (or thrown an
exception) and future reads will see their effects.
:param int timeout: The timeout for uploading the mission. No timeout if not provided or set to None.
|
def _add_rule(self, state, rule):
"""Parse rule and add it to machine (for internal use)."""
if rule.strip() == "-":
parsed_rule = None
else:
parsed_rule = rule.split(',')
if (len(parsed_rule) != 3 or
parsed_rule[1] not in ['L', 'N', 'R'] or
len(parsed_rule[2]) > 1):
raise SyntaxError('Wrong format of rule: ' + rule)
if parsed_rule[0] == "":
parsed_rule[0] = self.alphabet[len(self.states[state])]
if parsed_rule[2] == "":
parsed_rule[2] = state
self.states[state].append(parsed_rule)
|
Parse rule and add it to machine (for internal use).
|
def clear(self):
    """Clears the internal state of the bot.

    After this, the bot can be considered "re-opened", i.e. :meth:`.is_closed`
    and :meth:`.is_ready` both return ``False`` along with the bot's internal
    cache cleared.
    """
    self._closed = False
    # Reset the ready event and the connection-state cache, then rebuild
    # the HTTP session so the bot can be started again.
    for resettable in (self._ready, self._connection):
        resettable.clear()
    self.http.recreate()
|
Clears the internal state of the bot.
After this, the bot can be considered "re-opened", i.e. :meth:`.is_closed`
and :meth:`.is_ready` both return ``False`` along with the bot's internal
cache cleared.
|
def check(self, metainfo, datapath, progress=None):
    """ Check piece hashes of a metafile against the given datapath.

    Returns True when every piece hash matches.
    """
    if datapath:
        self.datapath = datapath

    def on_piece(filename, piece):
        "Callback for new piece"
        offset = on_piece.piece_index
        expected = metainfo["info"]["pieces"][offset:offset + 20]
        if piece != expected:
            self.LOG.warn("Piece #%d: Hashes differ in file %r" % (offset // 20, filename))
        on_piece.piece_index = offset + 20
    # Function attribute tracks the running offset into the pieces blob.
    on_piece.piece_index = 0

    if "length" in metainfo["info"]:
        # Single-file torrent: the data path is the file itself.
        sources = [datapath]
    else:
        # Multi-file torrent: resolve each listed file below datapath.
        sources = (os.path.join(*([datapath] + entry["path"]))
                   for entry in metainfo["info"]["files"])

    datameta, _ = self._make_info(int(metainfo["info"]["piece length"]), progress,
                                  sources, piece_callback=on_piece)
    return datameta["pieces"] == metainfo["info"]["pieces"]
|
Check piece hashes of a metafile against the given datapath.
|
def connect(self):
    "Initiate the connection to a proxying hub"
    log.info("connecting")
    # Reconnects are deliberately disabled: when this peer goes down we
    # cycle to the next potential peer from the Client instead.
    next_addr = self._addrs.popleft()
    self._peer = connection.Peer(
        None, self._dispatcher, next_addr,
        backend.Socket(), reconnect=False)
    self._peer.start()
|
Initiate the connection to a proxying hub
|
def fill_translation_cache(instance):
    """
    Fill the translation cache using information received in the
    instance objects as extra fields.

    You can not do this in post_init because the extra fields are
    assigned by QuerySet.iterator after model initialization.
    """
    if hasattr(instance, '_translation_cache'):
        # do not refill the cache
        return

    instance._translation_cache = {}

    # unsaved instances cannot have translations
    if not instance.pk:
        return

    translation_model = instance._meta.translation_model
    for language_code in get_language_code_list():
        # A non-None marker field means this language was in the query.
        marker = get_translated_field_alias('code', language_code)
        if getattr(instance, marker, None) is None:
            continue
        # Build a translation object from the aliased fields and cache it.
        field_values = {}
        for field in translation_model._meta.fields:
            alias = get_translated_field_alias(field.attname, language_code)
            field_values[field.attname] = getattr(instance, alias)
        instance._translation_cache[language_code] = translation_model(**field_values)

    # In some situations an (existing in the DB) object is loaded
    # without using the normal QuerySet. In such case fallback to
    # loading the translations using a separate query.
    # Unfortunately, this is indistinguishable from the situation when
    # an object does not have any translations. Oh well, we'll have
    # to live with this for the time being.
    if not instance._translation_cache:
        for translation in instance.translations.all():
            instance._translation_cache[translation.language_code] = translation
|
Fill the translation cache using information received in the
instance objects as extra fields.
You can not do this in post_init because the extra fields are
assigned by QuerySet.iterator after model initialization.
|
def append_hdus(hdulist, srcmap_file, source_names, hpx_order):
    """Append HEALPix maps to a list

    Reads the named source HDUs from ``srcmap_file`` (falling back to
    the gzipped file) and appends them to ``hdulist``, optionally
    degraded to at most ``hpx_order``.

    Parameters
    ----------
    hdulist : list
        The list being appended to
    srcmap_file : str
        Path to the file containing the HDUs
    source_names : list of str
        Names of the sources to extract from srcmap_file
    hpx_order : int
        Maximum order for maps; when None, HDUs are appended unchanged
    """
    sys.stdout.write(" Extracting %i sources from %s" % (len(source_names), srcmap_file))
    # Fall back to the gzipped version of the file before giving up.
    try:
        hdulist_in = fits.open(srcmap_file)
    except IOError:
        try:
            hdulist_in = fits.open('%s.gz' % srcmap_file)
        except IOError:
            sys.stdout.write(" Missing file %s\n" % srcmap_file)
            return
    for source_name in source_names:
        # One progress dot per source.
        sys.stdout.write('.')
        sys.stdout.flush()
        if hpx_order is None:
            # No degrading requested: copy the HDU through unchanged.
            hdulist.append(hdulist_in[source_name])
        else:
            try:
                hpxmap = HpxMap.create_from_hdulist(hdulist_in, hdu=source_name)
            except IndexError:
                print(" Index error on source %s in file %s" % (source_name, srcmap_file))
                continue
            except KeyError:
                print(" Key error on source %s in file %s" % (source_name, srcmap_file))
                continue
            # Degrade to at most hpx_order while preserving total counts.
            hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
            hdulist.append(hpxmap_out.create_image_hdu(name=source_name))
    sys.stdout.write("\n")
    # NOTE(review): flush() suggests hdulist is actually an opened
    # fits.HDUList backed by a file rather than a plain Python list,
    # despite the docstring -- confirm with callers.
    hdulist.flush()
    hdulist_in.close()
|
Append HEALPix maps to a list
Parameters
----------
hdulist : list
The list being appended to
srcmap_file : str
Path to the file containing the HDUs
source_names : list of str
Names of the sources to extract from srcmap_file
hpx_order : int
Maximum order for maps
|
def _download_helper(url):
    """
    Handle the download of an URL, using the proxy currently set in \
    :mod:`socks`.

    :param url: The URL to download.
    :returns: A tuple of the raw content of the downloaded data and its \
            associated content-type. Returns None if it was \
            unable to download the document.
    """
    # Try to fetch the URL using the current proxy
    try:
        request = urllib.request.urlopen(url)
        # The Content-Length header may appear with either capitalization
        # depending on the server; 0 means unknown (no progress bar).
        try:
            size = int(dict(request.info())['content-length'].strip())
        except KeyError:
            try:
                size = int(dict(request.info())['Content-Length'].strip())
            except KeyError:
                size = 0
        # Download the document in 1 KiB chunks
        doc = b""
        doc_size = 0
        while True:
            buf = request.read(1024)
            if not buf:
                break
            doc += buf
            doc_size += len(buf)
            if size != 0:
                # Write progress bar on stdout (50-char wide bar)
                done = int(50 * doc_size / size)
                sys.stdout.write("\r[%s%s]" %
                                 ('='*done, ' '*(50-done)))
                # BUGFIX: the percentage used to be divided by 52, which
                # capped the display at ~96%; divide by the bar width.
                sys.stdout.write(" "+str(int(float(done)/50*100))+"%")
                sys.stdout.flush()
        # Fetch content type (either header capitalization)
        contenttype = None
        contenttype_req = None
        try:
            contenttype_req = dict(request.info())['content-type']
        except KeyError:
            try:
                contenttype_req = dict(request.info())['Content-Type']
            except KeyError:
                return None
        if 'pdf' in contenttype_req:
            contenttype = 'pdf'
        elif 'djvu' in contenttype_req:
            contenttype = 'djvu'
        # Check content type and status code are ok
        if request.getcode() != 200 or contenttype is None:
            # Else, try with the next available proxy
            return None
        # Return a tuple of the downloaded content and the content-type
        return (doc, contenttype)
    # If an exception occurred, continue with next available proxy
    except (urllib.error.URLError, socket.error, ValueError):
        return None
|
Handle the download of an URL, using the proxy currently set in \
:mod:`socks`.
:param url: The URL to download.
:returns: A tuple of the raw content of the downloaded data and its \
associated content-type. Returns None if it was \
unable to download the document.
|
def predict_proba(self, dataframe):
    """Predict probabilities using the model.

    :param dataframe: Dataframe against which to make predictions
    :returns: array of shape (n_rows, 2); column 0 holds 1 - mean and
        column 1 holds the model mean, repeated for every row
    """
    n_rows = dataframe.shape[0]
    probabilities = numpy.empty((n_rows, 2))
    probabilities[:, 0] = 1 - self.mean
    probabilities[:, 1] = self.mean
    return probabilities
|
Predict probabilities using the model
:param dataframe: Dataframe against which to make predictions
|
def load_data(flist, drop_duplicates=False):
    '''
    Load and column-wise concatenate the train, target and test CSV
    feature files listed in ``flist``.  (Python 2 code: uses print
    statements.)

    Usage: set train, target, and test key and feature files.
    FEATURE_LIST_stage2 = {
        'train':(
            TEMP_PATH + 'v1_stage1_all_fold.csv',
            TEMP_PATH + 'v2_stage1_all_fold.csv',
            TEMP_PATH + 'v3_stage1_all_fold.csv',
            ),#target is not in 'train'
        'target':(
            INPUT_PATH + 'target.csv',
            ),#target is in 'target'
        'test':(
            TEMP_PATH + 'v1_stage1_test.csv',
            TEMP_PATH + 'v2_stage1_test.csv',
            TEMP_PATH + 'v3_stage1_test.csv',
            ),
        }

    :param flist: dict with 'train', 'target' and 'test' path tuples
    :param drop_duplicates: when True, drop identical feature columns
    :return: (X_train, y_train, test)
    '''
    # Each of the three keys must list at least one file.
    if (len(flist['train'])==0) or (len(flist['target'])==0) or (len(flist['test'])==0):
        raise Exception('train, target, and test must be set at \
least one file, respectively.')

    X_train = pd.DataFrame()
    test = pd.DataFrame()

    print 'Reading train dataset'
    for i in flist['train']:
        # Concatenate each feature file column-wise onto the train frame.
        X_train = pd.concat([X_train, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=1)
    print 'train dataset is created'

    print 'Reading target data'
    y_train = paratext.load_csv_to_pandas(PATH+flist['target'][0], allow_quoted_newlines=True)['target']

    # NOTE(review): message says "train" but this loop reads test files.
    print 'Reading train dataset'
    for i in flist['test']:
        test = pd.concat([test, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=1)
    #del test['t_id']
    #print X_train.columns
    #print test.columns
    # Train and test must end up with identical column sets.
    assert( (False in X_train.columns == test.columns) == False)
    print 'train shape :{}'.format(X_train.shape)

    if drop_duplicates == True:
        #delete identical columns
        unique_col = X_train.T.drop_duplicates().T.columns
        X_train = X_train[unique_col]
        test = test[unique_col]
        assert( all(X_train.columns == test.columns))
        print 'train shape after concat and drop_duplicates :{}'.format(X_train.shape)

    # drop constant features
    #X_train = X_train.loc[:, (X_train != X_train.ix[0]).any()]
    #test = test.loc[:, (test != test.ix[0]).any()]

    #common_col = list(set(X_train.columns.tolist()) and set(test.columns.tolist()))
    #X_train = X_train[common_col]
    #test = test[common_col]
    #print 'shape after dropping constant features: {}'.format(X_train.shape)
    return X_train, y_train, test
|
Usage: set train, target, and test key and feature files.
FEATURE_LIST_stage2 = {
'train':(
TEMP_PATH + 'v1_stage1_all_fold.csv',
TEMP_PATH + 'v2_stage1_all_fold.csv',
TEMP_PATH + 'v3_stage1_all_fold.csv',
),#target is not in 'train'
'target':(
INPUT_PATH + 'target.csv',
),#target is in 'target'
'test':(
TEMP_PATH + 'v1_stage1_test.csv',
TEMP_PATH + 'v2_stage1_test.csv',
TEMP_PATH + 'v3_stage1_test.csv',
),
}
|
def set_cache_buster(self, path, hash):
    """Sets the cache buster value for a given file path"""
    # Delegate to the shared CDN helper using this handler's redis client.
    redis_client = self.redis()
    oz.aws_cdn.set_cache_buster(redis_client, path, hash)
|
Sets the cache buster value for a given file path
|
def register(self, request, **cleaned_data):
    """
    Given a username, email address and password, register a new
    user account, which will initially be inactive.

    Along with the new ``User`` object, a new
    ``registration.models.RegistrationProfile`` will be created,
    tied to that ``User``, containing the activation key which
    will be used for this account.

    Two emails will be sent. First one to the admin; this email should
    contain an activation link and a resume of the new user infos.
    Second one, to the user, to inform him that his request is pending.

    After the ``User`` and ``RegistrationProfile`` are created and
    the activation email is sent, the signal
    ``registration.signals.user_registered`` will be sent, with
    the new ``User`` as the keyword argument ``user`` and the
    class of this backend as the sender.
    """
    # Use the configured Site when the sites framework is installed,
    # otherwise derive one from the request.
    if Site._meta.installed:
        site = Site.objects.get_current()
    else:
        site = RequestSite(request)

    # Create the inactive user; automatic e-mailing is disabled
    # (send_email=False) because the custom e-mails below are sent
    # instead.
    create_user = RegistrationProfile.objects.create_inactive_user
    new_user = create_user(
        cleaned_data['username'],
        cleaned_data['email'],
        cleaned_data['password1'],
        site,
        send_email=False
    )
    new_user.first_name = cleaned_data['first_name']
    new_user.last_name = cleaned_data['last_name']
    new_user.save()

    # Persist the extra profile fields collected at registration time.
    user_info = UserInfo(
        user=new_user,
        company=cleaned_data['company'],
        function=cleaned_data['function'],
        address=cleaned_data['address'],
        postal_code=cleaned_data['postal_code'],
        city=cleaned_data['city'],
        country=cleaned_data['country'],
        phone=cleaned_data['phone'],
    )
    user_info.save()

    # First e-mail: to the admin (activation link + user summary).
    send_activation_email(new_user, site, user_info)
    # Second e-mail: to the user (request pending notification).
    send_activation_pending_email(new_user, site, user_info)

    signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
    return new_user
|
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, to inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
|
def compute_hardwired_weights(rho,N_E,N_I,periodic, onlyI=False):
    '''
    Return the synaptic weight matrices
    (G_I_EL, G_I_ER, G_EL_I, G_ER_I, G_I_I), based on:

     - the scale of the synaptic profiles (rho)
     - the size of the excitatory and inhibitory pops (N_E, N_I)
     - the boundary conditions of the network (periodic=1 for periodic
       b.c.s; periodic=0 for aperiodic b.c.s)

    The parameters below are arranged according to the following order of
    synaptic weights: EL->I, ER->I, I->EL, I->ER, I->I
    (see Supplementary Methods of the PNAS paper for a description of the
    params below).

    When onlyI is True, the I->I matrix is returned in all five slots.

    It was ported directly from code provided by Ila Fiete.
    '''
    # Matrix shapes (rows = receiving pop, cols = projecting pop) for
    # the five weight types, ordered EL->I, ER->I, I->EL, I->ER, I->I.
    weight_sizes = np.asarray([[N_I,N_E], [N_I,N_E], [N_E,N_I], [N_E,N_I], [N_I,N_I]])
    # Ratio used to rescale the projecting index onto the receiving ring.
    gamma_param = np.asarray([N_I/N_E, N_I/N_E, N_E/N_I, N_E/N_I, N_I/N_I])
    eta_param = np.asarray([1.5*21, 1.5*21, 8, 8, 24]) #controls overall strength of weights
    epsilon_param = np.asarray([0, 0, 0, 0, 1]) #controls contribution of negative gaussian in diff. of Gaussians weights
    sigma_param = np.asarray([2, 2, 5, 5, 3]) #controls width of weight profiles
    Delta_param = np.asarray([-2, 2, 8, -8, 3]) #controls weight asymmetries
    mu_param = np.asarray([0, 0, -1, 1, 0]) #controls weight asymmetries
    delta_param = np.asarray([0, 0, 3, 3, 3]) #controls weight asymmetries

    #the for-loop below iterates through the 5 synaptic weight types
    for k in [4,3,2,1,0]:
        #N_2 = size of projecting pop; N_1 = size of receiving pop.
        N_1 = weight_sizes[k][0]
        N_2 = weight_sizes[k][1]
        #create envelopes based on pop. sizes
        A_1 = create_envelope(periodic,N_1)[0]
        A_2 = create_envelope(periodic,N_2)[0]
        #Create synaptic weight matrix
        G = np.zeros((N_1, N_2))
        for i in range(N_1):
            for j in range(N_2):
                # x: offset between receiving unit i and the rescaled
                # position of projecting unit j.
                x = i - gamma_param[k]*j
                # Circular distances to the left-shifted, right-shifted
                # and unshifted positions on a ring of size N_1.
                c_left = min(N_1 - np.abs(np.mod(x - Delta_param[k], N_1)), np.abs(np.mod(x - Delta_param[k], N_1)))
                c_right = min(N_1 - np.abs(np.mod(x + Delta_param[k], N_1)), np.abs(np.mod(x + Delta_param[k], N_1)))
                c_0 = min(N_1 - np.abs(np.mod(x, N_1)), np.abs(np.mod(x, N_1)))
                # Boolean factors gate the Gaussian profile by distance
                # threshold and by the mu-controlled asymmetry masks.
                G[i, j] = eta_param[k]/rho*A_1[i]*A_2[j]*((c_0-delta_param[k]*rho) >= 0)*(((-mu_param[k]*x) >= 0)*((mu_param[k]*(x+mu_param[k]*N_1/2)) >= 0) +
                    ((mu_param[k]*(x-mu_param[k]*N_1/2)) >= 0))*(np.exp(-c_left**2/(2*(sigma_param[k]*rho)**2)) +
                    epsilon_param[k]*np.exp(-c_right**2/(2*(sigma_param[k]*rho)**2)))
        # Stash the matrix computed for this weight type.
        if k==0:
            G_I_EL = G
        elif k==1:
            G_I_ER = G
        elif k==2:
            G_EL_I = G
        elif k==3:
            G_ER_I = G
        else:
            G_I_I = G
    if onlyI:
        return G_I_I, G_I_I, G_I_I, G_I_I, G_I_I
    return G_I_EL, G_I_ER, G_EL_I, G_ER_I, G_I_I
|
%This function returns the synaptic weight matrices
%(G_I_EL,G_I_ER,G_EL_I,G_ER_I,G_I_I) and the suppressive envelope
%(A_env), based on:
%
% - the scale of the synaptic profiles (rho)
% - the size of the excitatory and inhibitory pops (N_E, N_I)
% - the boundary conditions of the network (periodic=1 for periodic b.c.s; periodic = 0 for aperiodic b.c.s)
%The parameters below are arranged according to the following order of
%synaptic weights: EL->I, ER->I, I->EL, I->ER, I->I
% (see Supplementary Methods of PNAS paper for description of params below)
It was ported directly from code provided by Ila Fiete.
|
def compare_names(first, second):
    """
    Compare two names in a complicated, but less error-prone way.
    The algorithm uses vector comparison.
    Example:
        >>> compare_names("Franta Putšálek", "ing. Franta Putšálek")
        100.0
        >>> compare_names("F. Putšálek", "ing. Franta Putšálek")
        50.0
    Args:
        first (str): First name as string.
        second (str): Second name as string.
    Returns:
        float: Percentage of the similarity.
    """
    first = name_to_vector(first)
    second = name_to_vector(second)
    # Bug fix: under Python 3, ``zip`` returns a lazy iterator, which is
    # always truthy (breaking the empty check) and has no ``len()``
    # (breaking the final division). Materialize it as a list, as the
    # original Python 2 code implicitly did.
    zipped = list(zip(first, second))
    if not zipped:
        return 0
    similarity_factor = 0
    for fitem, _ in zipped:
        if fitem in second:
            similarity_factor += 1
    return (float(similarity_factor) / len(zipped)) * 100
|
Compare two names in complicated, but more error prone way.
Algorithm is using vector comparison.
Example:
>>> compare_names("Franta Putšálek", "ing. Franta Putšálek")
100.0
>>> compare_names("F. Putšálek", "ing. Franta Putšálek")
50.0
Args:
first (str): First name as string.
second (str): Second name as string.
Returns:
float: Percentage of the similarity.
|
def transform(self, X=None, y=None):
    """
    Transform an image using an Affine transform with
    rotation parameters randomly generated from the user-specified
    range. Return the transform if X=None.
    Arguments
    ---------
    X : ANTsImage
        Image to transform
    y : ANTsImage (optional)
        Another image to transform
    Returns
    -------
    ANTsImage if y is None, else a tuple of ANTsImage types
    Examples
    --------
    >>> import ants
    >>> img = ants.image_read(ants.get_data('r16'))
    >>> tx = ants.contrib.RandomRotate2D(rotation_range=(-10,10))
    >>> img2 = tx.transform(img)
    """
    # random draw in rotation range
    # NOTE(review): random.gauss(a, b) treats rotation_range as
    # (mean, std-dev), not as a uniform (min, max) range as the docstring
    # suggests — confirm the intended sampling distribution.
    rotation = random.gauss(self.rotation_range[0], self.rotation_range[1])
    self.params = rotation  # record the drawn angle for reproducibility
    # Delegate the actual resampling to a deterministic Rotate2D transform.
    tx = Rotate2D(rotation,
                  reference=self.reference,
                  lazy=self.lazy)
    return tx.transform(X,y)
|
Transform an image using an Affine transform with
rotation parameters randomly generated from the user-specified
range. Return the transform if X=None.
Arguments
---------
X : ANTsImage
Image to transform
y : ANTsImage (optional)
Another image to transform
Returns
-------
ANTsImage if y is None, else a tuple of ANTsImage types
Examples
--------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> tx = ants.contrib.RandomRotate2D(rotation_range=(-10,10))
>>> img2 = tx.transform(img)
|
def setup(app):
    """Register the Everett domain and the autocomponent directive in Sphinx.

    :param app: the Sphinx application object
    :returns: Sphinx extension metadata dict
    """
    app.add_domain(EverettDomain)
    app.add_directive('autocomponent', AutoComponentDirective)
    metadata = {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True
    }
    return metadata
|
Register domain and directive in Sphinx.
|
def fcoe_fcoe_map_fcoe_map_fabric_map_fcoe_map_fabric_map_name(self, **kwargs):
    """Build and dispatch the netconf payload addressing a fabric-map name
    under an fcoe-map (auto-generated from the brocade-fcoe YANG model).

    Required kwargs: ``fcoe_map_name``, ``fcoe_map_fabric_map_name``.
    Optional kwarg ``callback`` overrides ``self._callback``.
    """
    config = ET.Element("config")
    fcoe_elem = ET.SubElement(config, "fcoe",
                              xmlns="urn:brocade.com:mgmt:brocade-fcoe")
    fcoe_map_elem = ET.SubElement(fcoe_elem, "fcoe-map")
    # List key first, then the leaf being configured.
    ET.SubElement(fcoe_map_elem, "fcoe-map-name").text = kwargs.pop('fcoe_map_name')
    fabric_map_elem = ET.SubElement(fcoe_map_elem, "fcoe-map-fabric-map")
    ET.SubElement(fabric_map_elem, "fcoe-map-fabric-map-name").text = \
        kwargs.pop('fcoe_map_fabric_map_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def host_resolution_order(ifo, env='NDSSERVER', epoch='now',
                          lookback=14*86400):
    """Generate a logical ordering of NDS (host, port) tuples for this IFO
    Parameters
    ----------
    ifo : `str`
        prefix for IFO of interest
    env : `str`, optional
        environment variable name to use for server order,
        default ``'NDSSERVER'``. The contents of this variable should
        be a comma-separated list of `host:port` strings, e.g.
        ``'nds1.server.com:80,nds2.server.com:80'``
    epoch : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
        GPS epoch of data requested
    lookback : `float`
        duration of spinning-disk cache. This value triggers defaulting to
        the CIT NDS2 server over those at the LIGO sites
    Returns
    -------
    hro : `list` of `2-tuples <tuple>`
        ordered `list` of ``(host, port)`` tuples
    """
    hosts = []
    # if given environment variable exists, it will contain a
    # comma-separated list of host:port strings giving the logical ordering
    if env and os.getenv(env):
        hosts = parse_nds_env(env)
    # If that host fails, return the server for this IFO and the backup at CIT
    # Old data (older than `lookback`) is likely only at CIT, so the
    # `None` entry (CIT default) is tried before the site-specific host.
    if to_gps('now') - to_gps(epoch) > lookback:
        ifolist = [None, ifo]
    else:
        ifolist = [ifo, None]
    for difo in ifolist:
        try:
            host, port = DEFAULT_HOSTS[difo]
        except KeyError:
            # unknown default NDS2 host for detector, if we don't have
            # hosts already defined (either by NDSSERVER or similar)
            # we should warn the user
            if not hosts:
                warnings.warn('No default host found for ifo %r' % ifo)
        else:
            # Append default hosts after any environment-provided ones,
            # skipping duplicates already present.
            if (host, port) not in hosts:
                hosts.append((host, port))
    return list(hosts)
|
Generate a logical ordering of NDS (host, port) tuples for this IFO
Parameters
----------
ifo : `str`
prefix for IFO of interest
env : `str`, optional
environment variable name to use for server order,
default ``'NDSSERVER'``. The contents of this variable should
be a comma-separated list of `host:port` strings, e.g.
``'nds1.server.com:80,nds2.server.com:80'``
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS epoch of data requested
lookback : `float`
duration of spinning-disk cache. This value triggers defaulting to
the CIT NDS2 server over those at the LIGO sites
Returns
-------
hro : `list` of `2-tuples <tuple>`
ordered `list` of ``(host, port)`` tuples
|
def to_string(xml, **kwargs):
    """
    Serialize an element to an encoded string representation of its XML tree.

    :param xml: The root node
    :type xml: str|bytes|xml.dom.minidom.Document|etree.Element
    :returns: string representation of xml
    :rtype: string
    """
    # Already-serialized text passes straight through untouched.
    if isinstance(xml, OneLogin_Saml2_XML._text_class):
        return xml
    if not isinstance(xml, OneLogin_Saml2_XML._element_class):
        raise ValueError("unsupported type %r" % type(xml))
    OneLogin_Saml2_XML.cleanup_namespaces(xml)
    return OneLogin_Saml2_XML._unparse_etree(xml, **kwargs)
|
Serialize an element to an encoded string representation of its XML tree.
:param xml: The root node
:type xml: str|bytes|xml.dom.minidom.Document|etree.Element
:returns: string representation of xml
:rtype: string
|
def interval_timer(interval, func, *args, **kwargs):
    '''Call ``func(*args, **kwargs)`` every ``interval`` seconds on a
    background thread; the first call happens after ``interval``, not
    immediately. Returns a zero-argument callable that stops the timer.
    Taken from: http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708
    '''
    stop_event = Event()

    def _run():
        # Event.wait returns False on timeout (keep ticking) and True
        # once the returned stop callable has been invoked.
        while not stop_event.wait(interval):
            func(*args, **kwargs)

    Thread(name='IntervalTimerThread', target=_run).start()
    return stop_event.set
|
Interval timer function.
Taken from: http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708
|
def _pack_with_tf_ops(dataset, keys, length):
  """Helper-function for packing a dataset which has already been batched.
  See pack_dataset()
  Uses tf.while_loop. Slow.

  Packs multiple short examples into single examples of exactly `length`
  tokens, adding "<key>_position" and "<key>_segmentation" features so the
  model can tell the packed examples apart.

  Args:
    dataset: a dataset containing padded batches of examples.
    keys: a list of strings
    length: an integer
  Returns:
    a dataset.
  """
  # Template for a fresh (empty) partially-built packed example.
  empty_example = {}
  for k in keys:
    empty_example[k] = tf.zeros([0], dtype=tf.int32)
    empty_example[k + "_position"] = tf.zeros([0], dtype=tf.int32)
  keys_etc = empty_example.keys()
  def write_packed_example(partial, outputs):
    # Flush `partial` into the TensorArrays, padding each feature up to
    # `length`, and start a new empty partial example.
    new_partial = empty_example.copy()
    new_outputs = {}
    for k in keys_etc:
      new_outputs[k] = outputs[k].write(
          outputs[k].size(),
          tf.pad(partial[k], [[0, length - tf.size(partial[k])]]))
    return new_partial, new_outputs
  def map_fn(x):
    """Internal function to flat_map over.
    Consumes a batch of input examples and produces a variable number of output
    examples.
    Args:
      x: a single example
    Returns:
      a tf.data.Dataset
    """
    partial = empty_example.copy()
    i = tf.zeros([], dtype=tf.int32)
    dynamic_batch_size = tf.shape(x[keys[0]])[0]
    outputs = {}
    for k in keys:
      outputs[k] = tf.TensorArray(
          tf.int32, size=0, dynamic_size=True, element_shape=[length])
      outputs[k + "_position"] = tf.TensorArray(
          tf.int32, size=0, dynamic_size=True, element_shape=[length])
    def cond_fn(i, partial, outputs):
      del partial, outputs
      return i < dynamic_batch_size
    def body_fn(i, partial, outputs):
      """Body function for while_loop.
      Args:
        i: integer scalar
        partial: dictionary of Tensor (partially-constructed example)
        outputs: dictionary of TensorArray
      Returns:
        A triple containing the new values of the inputs.
      """
      can_append = True
      one_example = {}
      for k in keys:
        # Strip trailing padding: keep only the leading non-zero tokens.
        val = tf.cast(x[k][i], tf.int32)
        val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
        one_example[k] = val
      for k in keys:
        # The example fits only if every feature still fits within `length`.
        can_append = tf.logical_and(
            can_append,
            tf.less_equal(
                tf.size(partial[k]) + tf.size(one_example[k]), length))
      def false_fn():
        return write_packed_example(partial, outputs)
      def true_fn():
        return partial, outputs
      # If the next example does not fit, flush the current partial first.
      partial, outputs = tf.cond(can_append, true_fn, false_fn)
      new_partial = {}
      for k in keys:
        new_seq = one_example[k][:length]
        new_seq_len = tf.size(new_seq)
        # Positions restart at 0 for each appended sub-example.
        new_partial[k] = tf.concat([partial[k], new_seq], 0)
        new_partial[k + "_position"] = tf.concat(
            [partial[k + "_position"],
             tf.range(new_seq_len, dtype=tf.int32)], 0)
      partial = new_partial
      return i+1, partial, outputs
    i, partial, outputs = tf.while_loop(
        cond_fn, body_fn, (i, partial, outputs),
        back_prop=False,
        shape_invariants=(
            tf.TensorShape([]),
            {k: tf.TensorShape([None]) for k in keys_etc},
            {k: tf.TensorShape(None) for k in keys_etc},
        ))
    # Flush whatever is left in the final partial example.
    partial, outputs = write_packed_example(partial, outputs)
    packed = {k: outputs[k].stack() for k in keys_etc}
    for k in keys:
      # Segmentation ids: 1-based index of the sub-example each token
      # belongs to (0 for padding), derived from where positions reset to 0.
      packed[k + "_segmentation"] = (
          tf.cumsum(
              tf.cast(tf.equal(packed[k + "_position"], 0), tf.int32), axis=1) *
          tf.cast(tf.not_equal(packed[k], 0), tf.int32))
    return packed
  dataset = dataset.map(map_fn,
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return dataset.flat_map(tf.data.Dataset.from_tensor_slices)
|
Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Uses tf.while_loop. Slow.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings
length: an integer
Returns:
a dataset.
|
def unpackcFunc(self):
    '''
    "Unpacks" the consumption functions into their own field for easier
    access.  After the model has been solved, the consumption functions
    reside in the attribute cFunc of each element of ConsumerType.solution;
    this method collects them into a (time varying) attribute cFunc on self.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    # Gather each period's consumption function in solution order.
    self.cFunc = [solution_t.cFunc for solution_t in self.solution]
    self.addToTimeVary('cFunc')
|
"Unpacks" the consumption functions into their own field for easier access.
After the model has been solved, the consumption functions reside in the
attribute cFunc of each element of ConsumerType.solution. This method
creates a (time varying) attribute cFunc that contains a list of consumption
functions.
Parameters
----------
none
Returns
-------
none
|
def __geo_point(lat, lon, elev):
    """
    GeoJSON standard:
    Create a geoJson Point-type dictionary.

    Coordinates are emitted as a flat list of alternating lat/lon values
    taken pairwise from the inputs, with elevation values (if any)
    appended once at the end.

    NOTE(review): the GeoJSON spec orders coordinates [lon, lat]; this
    emits [lat, lon] — confirm downstream consumers expect that.

    :param list lat: latitude values
    :param list lon: longitude values
    :param list elev: elevation values (may be falsy)
    :return dict: GeoJSON Feature with Point geometry
    """
    logger_noaa_lpd.info("enter geo_point")
    coordinates = []
    geo_dict = OrderedDict()
    geometry_dict = OrderedDict()
    for index, point in enumerate(lat):
        coordinates.append(lat[index])
        coordinates.append(lon[index])
    # Bug fix: elevation was previously appended inside the loop, so it was
    # duplicated once per coordinate pair. Append it a single time.
    if elev:
        coordinates = coordinates + elev
    geometry_dict['type'] = 'Point'
    geometry_dict['coordinates'] = coordinates
    geo_dict['type'] = 'Feature'
    geo_dict['geometry'] = geometry_dict
    return geo_dict
|
GeoJSON standard:
Create a geoJson Point-type dictionary
:param list lat:
:param list lon:
:return dict:
|
def handle_signal(self, sig, frame):
    """Log the received signal (if known), clean up, and exit with ``-sig``."""
    # SIGINT and SIGTERM are mutually exclusive, so elif is equivalent to
    # the pair of independent membership tests.
    if sig == signal.SIGINT:
        log.warning("Ctrl-C pressed, shutting down...")
    elif sig == signal.SIGTERM:
        log.warning("SIGTERM received, shutting down...")
    self.cleanup()
    sys.exit(-sig)
|
Handles signals, surprisingly.
|
def write_to_file(self, file_path='', date=str(datetime.date.today()),
                  organization='N/A', members=0, teams=0):
    """
    Writes the current organization information to file (csv).

    Emits one header row, one org-summary row, one row per repository, and
    a totals row, then refreshes the aggregate total.csv and languages.csv
    side files.

    :param str file_path: destination CSV path (parent dir created if needed)
    :param str date: report date (defaults to today)
    :param str organization: organization name
    :param int members: member count for the summary row
    :param int teams: team count for the summary row
    """
    self.checkDir(file_path)
    with open(file_path, 'w+') as output:
        # Header row followed immediately by the org-level summary row;
        # repo-level columns are left blank on the summary line.
        output.write('date,organization,members,teams,unique_contributors,'
            + 'repository,contributors,forks,stargazers,pull_requests,'
            + 'open_issues,has_readme,has_license,languages,pull_requests_open,'
            + 'pull_requests_closed,commits,closed_issues,issues\n' + date + ','
            + organization + ',' + str(members) + ',' + str(teams) + ','
            + str(len(self.unique_contributors)) + '\n')
        # One row per repository; leading commas skip the org-level columns.
        # Languages are space-separated so they don't break the CSV commas.
        for repo in self.all_repos:
            output.write(',,,,,' + repo.name + ',' + str(repo.contributors)
                + ',' + str(repo.forks) + ','
                + str(repo.stargazers) + ',' + str(repo.pull_requests) + ','
                + str(repo.open_issues) + ',' + str(repo.readme) + ','
                + str(repo.license) + ',' + ' '.join(sorted(repo.languages))
                + ',' + str(repo.pull_requests_open) + ','
                + str(repo.pull_requests_closed) + ',' + str(repo.commits)
                + ',' + str(repo.closed_issues) + ',' + str(repo.issues)
                + '\n')
        # Totals row (total repo count sits in the "repository" column).
        output.write(',,,,total,' + str(self.total_repos) + ','
            + str(self.total_contributors) + ','
            + str(self.total_forks) + ',' + str(self.total_stars) + ','
            + str(self.total_pull_reqs) + ',' + str(self.total_open_issues)
            + ',' + str(self.total_readmes) + ',' + str(self.total_licenses)
            + ',,' + str(self.total_pull_reqs_open) + ','
            + str(self.total_pull_reqs_closed) + ','
            + str(self.total_commits) + ',' + str(self.total_closed_issues)
            + ',' + str(self.total_issues))
        # Redundant: the `with` block already closes the file on exit.
        output.close()
    #Update total
    self.write_totals(file_path="../github_stats_output/total.csv", date=date,
        organization=organization, members=members, teams=teams)
    #Update language sizes
    self.write_languages(file_path='../github_stats_output/languages.csv',
        date=date)
|
Writes the current organization information to file (csv).
|
def always_fails(
        self,
        work_dict):
    """always_fails

    Simulate a task failure: log the start of the task, then always raise.

    :param work_dict: dictionary for key/values; the optional
        ``test_failure`` key supplies the exception message
    :raises Exception: always, with ``work_dict['test_failure']`` or a
        default message
    """
    label = "always_fails"
    log.info(("task - {} - start "
              "work_dict={}")
             .format(label,
                     work_dict))
    # Note: the original also logged "done" and returned True after this
    # raise; that code was unreachable and has been removed.
    raise Exception(
        work_dict.get(
            "test_failure",
            "simulating a failure"))
|
always_fails
:param work_dict: dictionary for key/values
|
def clone(local_root, new_root, remote, branch, rel_dest, exclude):
    """Clone "local_root" origin into a new directory and check out a specific branch. Optionally run "git rm".

    :raise CalledProcessError: Unhandled git command failure.
    :raise GitError: Handled git failures.

    :param str local_root: Local path to git root directory.
    :param str new_root: Local path empty directory in which branch will be cloned into.
    :param str remote: The git remote to clone from.
    :param str branch: Checkout this branch.
    :param str rel_dest: Run "git rm" on this directory if exclude is truthy.
    :param iter exclude: List of strings representing relative file paths to exclude from "git rm".
    """
    log = logging.getLogger(__name__)
    # Parse `git remote -v` output into {name: [fetch_url, push_url]}.
    output = run_command(local_root, ['git', 'remote', '-v'])
    remotes = dict()
    for match in RE_ALL_REMOTES.findall(output):
        remotes.setdefault(match[0], [None, None])
        if match[2] == 'fetch':
            remotes[match[0]][0] = match[1]
        else:
            remotes[match[0]][1] = match[1]
    if not remotes:
        raise GitError('Git repo has no remotes.', output)
    if remote not in remotes:
        raise GitError('Git repo missing remote "{}".'.format(remote), output)
    # Clone.
    try:
        run_command(new_root, ['git', 'clone', remotes[remote][0], '--depth=1', '--branch', branch, '.'])
    except CalledProcessError as exc:
        raise GitError('Failed to clone from remote repo URL.', exc.output)
    # Make sure user didn't select a tag as their DEST_BRANCH.
    # (symbolic-ref fails on a detached HEAD, which a tag checkout produces)
    try:
        run_command(new_root, ['git', 'symbolic-ref', 'HEAD'])
    except CalledProcessError as exc:
        raise GitError('Specified branch is not a real branch.', exc.output)
    # Copy all remotes from original repo.
    # 'origin' already exists in the fresh clone, so it is updated rather
    # than added; network-facing commands get 3 retries.
    for name, (fetch, push) in remotes.items():
        try:
            run_command(new_root, ['git', 'remote', 'set-url' if name == 'origin' else 'add', name, fetch], retry=3)
            run_command(new_root, ['git', 'remote', 'set-url', '--push', name, push], retry=3)
        except CalledProcessError as exc:
            raise GitError('Failed to set git remote URL.', exc.output)
    # Done if no exclude.
    if not exclude:
        return
    # Resolve exclude paths.
    # Expand globs relative to rel_dest, then re-express relative to the
    # clone root so git commands accept them.
    exclude_joined = [
        os.path.relpath(p, new_root) for e in exclude for p in glob.glob(os.path.join(new_root, rel_dest, e))
    ]
    log.debug('Expanded %s to %s', repr(exclude), repr(exclude_joined))
    # Do "git rm".
    try:
        run_command(new_root, ['git', 'rm', '-rf', rel_dest])
    except CalledProcessError as exc:
        raise GitError('"git rm" failed to remove ' + rel_dest, exc.output)
    # Restore files in exclude.
    run_command(new_root, ['git', 'reset', 'HEAD'] + exclude_joined)
    run_command(new_root, ['git', 'checkout', '--'] + exclude_joined)
|
Clone "local_root" origin into a new directory and check out a specific branch. Optionally run "git rm".
:raise CalledProcessError: Unhandled git command failure.
:raise GitError: Handled git failures.
:param str local_root: Local path to git root directory.
:param str new_root: Local path empty directory in which branch will be cloned into.
:param str remote: The git remote to clone from to.
:param str branch: Checkout this branch.
:param str rel_dest: Run "git rm" on this directory if exclude is truthy.
:param iter exclude: List of strings representing relative file paths to exclude from "git rm".
|
def default():
    """ return default options, available options:

    - max_name_length: maximum length of name for additionalProperties
    - max_prop_count: maximum count of properties (count of fixed properties + additional properties)
    - max_str_length: maximum length of string type
    - max_byte_length: maximum length of byte type
    - max_array_length: maximum length of array
    - max_file_length: maximum length of file, in byte
    - minimal_property: only generate 'required' properties
    - minimal_parameter: only generate 'required' parameter
    - files: registered file object: refer to pyswagger.primitives.File for details
    - object_template: dict of default values assigned for properties when 'name' matched
    - parameter_template: dict of default values assigned for parameters when 'name' matched
    - max_property: all properties are generated, ignore 'required'
    - max_parameter: all parameters are generated, ignore 'required'

    :return: options
    :rtype: dict
    """
    options = {
        'max_name_length': 64,
        'max_prop_count': 32,
        'max_str_length': 100,
        'max_byte_length': 100,
        'max_array_length': 100,
        'max_file_length': 200,
        'minimal_property': False,
        'minimal_parameter': False,
        'files': [],
        'object_template': {},
        'parameter_template': {},
        'max_property': False,
        'max_parameter': False,
    }
    return options
|
return default options, available options:
- max_name_length: maximum length of name for additionalProperties
- max_prop_count: maximum count of properties (count of fixed properties + additional properties)
- max_str_length: maximum length of string type
- max_byte_length: maximum length of byte type
- max_array_length: maximum length of array
- max_file_length: maximum length of file, in byte
- minimal_property: only generate 'required' properties
- minimal_parameter: only generate 'required' parameter
- files: registered file object: refer to pyswagger.primitives.File for details
- object_template: dict of default values assigned for properties when 'name' matched
- parameter_template: dict of default values assigned for parameters when 'name' matched
- max_property: all properties are generated, ignore 'required'
- max_parameter: all parameters are generated, ignore 'required'
:return: options
:rtype: dict
|
def install(path, capture_error=False):  # type: (str, bool) -> None
    """Install a Python module in the executing Python environment.

    Args:
        path (str): Real path location of the Python module.
        capture_error (bool): Default false. If True, the running process
            captures stderr and appends it to the raised exception message
            in case of errors.
    """
    cmd = '{} -m pip install -U . '.format(_process.python_executable())
    # Pull in the module's pinned dependencies when it ships them.
    if has_requirements(path):
        cmd += '-r requirements.txt'
    logger.info('Installing module with the following command:\n%s', cmd)
    _process.check_error(shlex.split(cmd), _errors.InstallModuleError,
                         cwd=path, capture_error=capture_error)
|
Install a Python module in the executing Python environment.
Args:
path (str): Real path location of the Python module.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
|
def subnet_distance(self):
    """
    Specific subnet administrative distances

    :return: list of tuple (subnet, distance)
    """
    distances = []
    for entry in self.data.get('distance_entry'):
        subnet = Element.from_href(entry.get('subnet'))
        distances.append((subnet, entry.get('distance')))
    return distances
|
Specific subnet administrative distances
:return: list of tuple (subnet, distance)
|
def is_complete(self, zmax=118):
    """
    True if the table is complete, i.e. every element with Z < zmax has at
    least one pseudopotential.
    """
    return all(self[z] for z in range(1, zmax))
|
True if table is complete i.e. all elements with Z < zmax have at least on pseudopotential
|
def schema_delete_field(cls, key):
    """Delete the field ``key`` from this class's remote schema."""
    schema_url = '/'.join([API_ROOT, 'schemas', cls.__name__])
    payload = {
        'className': cls.__name__,
        'fields': {key: {'__op': 'Delete'}},
    }
    cls.PUT(schema_url, **payload)
|
Deletes a field.
|
def _authenticate_x509(credentials, sock_info):
    """Authenticate using MONGODB-X509.

    Issues the ``authenticate`` command against the ``$external``
    database over the given socket.

    :param credentials: credentials object; ``username`` may be None,
        in which case the server derives it from the client certificate
        (supported from wire version 5 / MongoDB 3.4 onward)
    :param sock_info: an open connection to the server
    :raises ConfigurationError: username omitted against a pre-3.4 server
    """
    query = SON([('authenticate', 1),
                 ('mechanism', 'MONGODB-X509')])
    if credentials.username is not None:
        query['user'] = credentials.username
    elif sock_info.max_wire_version < 5:
        # Pre-3.4 servers cannot infer the user from the certificate.
        raise ConfigurationError(
            "A username is required for MONGODB-X509 authentication "
            "when connected to MongoDB versions older than 3.4.")
    sock_info.command('$external', query)
|
Authenticate using MONGODB-X509.
|
def write(text, path):
    """Write text to a file with utf-8 encoding.

    Usage::
        >>> from angora.dataIO import textfile
        or
        >>> from angora.dataIO import *
        >>> textfile.write("hello world!", "test.txt")
    """
    encoded = text.encode("utf-8")
    with open(path, "wb") as stream:
        stream.write(encoded)
|
Writer text to file with utf-8 encoding.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.write("hello world!", "test.txt")
|
def generate_token(self):
    """Call the API and store the full auth payload plus its token."""
    payload = self._make_request()
    self.auth = payload
    self.token = payload['token']
|
Make request in API to generate a token.
|
def from_hex_key(cls, key, network=BitcoinMainNet):
    """Load the PublicKey from a compressed or uncompressed hex key.

    This format is defined in PublicKey.get_key():
    a leading ID byte of 0x04 marks an uncompressed 65-byte key
    (x and y coordinates), while 0x02/0x03 mark a compressed 33-byte key
    (x coordinate plus y parity).

    :param key: hex string or raw bytes of the key
    :param network: network class to attach to the resulting key
    :raises KeyParseError: on bad length or unknown ID byte
    """
    if len(key) == 130 or len(key) == 66:
        # It might be a hexlified byte array
        try:
            key = unhexlify(key)
        except TypeError:
            pass
    key = ensure_bytes(key)
    compressed = False
    # First byte identifies the encoding (indexing bytes yields an int on
    # py3 but a 1-char str on py2, hence the ord() normalization).
    id_byte = key[0]
    if not isinstance(id_byte, six.integer_types):
        id_byte = ord(id_byte)
    if id_byte == 4:
        # Uncompressed public point
        # 1B ID + 32B x coord + 32B y coord = 65 B
        if len(key) != 65:
            raise KeyParseError("Invalid key length")
        public_pair = PublicPair(
            long_or_int(hexlify(key[1:33]), 16),
            long_or_int(hexlify(key[33:]), 16))
    elif id_byte in [2, 3]:
        # Compressed public point!
        compressed = True
        if len(key) != 33:
            raise KeyParseError("Invalid key length")
        y_odd = bool(id_byte & 0x01)  # 0 even, 1 odd
        x = long_or_int(hexlify(key[1:]), 16)
        # The following x-to-pair algorithm was lifted from pycoin
        # I still need to sit down an understand it. It is also described
        # in http://www.secg.org/collateral/sec1_final.pdf
        curve = SECP256k1.curve
        p = curve.p()
        # For SECP256k1, curve.a() is 0 and curve.b() is 7, so this is
        # effectively (x ** 3 + 7) % p, but the full equation is kept
        # for just-in-case-the-curve-is-broken future-proofing
        alpha = (pow(x, 3, p) + curve.a() * x + curve.b()) % p
        beta = square_root_mod_prime(alpha, p)
        # beta and p - beta are the two candidate y values; pick the one
        # whose parity matches the ID byte.
        y_even = not y_odd
        if y_even == bool(beta & 1):
            public_pair = PublicPair(x, p - beta)
        else:
            public_pair = PublicPair(x, beta)
    else:
        raise KeyParseError("The given key is not in a known format.")
    return cls.from_public_pair(public_pair, network=network,
                                compressed=compressed)
|
Load the PublicKey from a compressed or uncompressed hex key.
This format is defined in PublicKey.get_key()
|
def get_directory(request):
    """Get API directory as a nested list of lists.

    Each entry is a ``(name, url, children, active)`` tuple; ``active``
    marks whether the current request path falls under that url.
    Endpoint names starting with '_' are treated as metadata and skipped.
    """
    def get_url(url):
        # Resolve a named url to an absolute one; pass falsy values through.
        return reverse(url, request=request) if url else url
    def is_active_url(path, url):
        return path.startswith(url) if url and path else False
    path = request.path
    directory_list = []
    def sort_key(r):
        # Sort groups/endpoints alphabetically by name.
        return r[0]
    # TODO(ant): support arbitrarily nested
    # structure, for now it is capped at a single level
    # for UX reasons
    for group_name, endpoints in sorted(
        six.iteritems(directory),
        key=sort_key
    ):
        endpoints_list = []
        for endpoint_name, endpoint in sorted(
            six.iteritems(endpoints),
            key=sort_key
        ):
            # Keys prefixed with '_' (e.g. '_url') are metadata, not endpoints.
            if endpoint_name[:1] == '_':
                continue
            endpoint_url = get_url(endpoint.get('_url', None))
            active = is_active_url(path, endpoint_url)
            endpoints_list.append(
                (endpoint_name, endpoint_url, [], active)
            )
        url = get_url(endpoints.get('_url', None))
        active = is_active_url(path, url)
        directory_list.append(
            (group_name, url, endpoints_list, active)
        )
    return directory_list
|
Get API directory as a nested list of lists.
|
def compute_dkl(fsamps, prior_fsamps, **kwargs):
    """
    Compute the Kullback Leibler divergence for function samples for posterior
    and prior pre-calculated at a range of x values.
    Parameters
    ----------
    fsamps: 2D numpy.array
        Posterior function samples, as computed by
        :func:`fgivenx.compute_samples`
    prior_fsamps: 2D numpy.array
        Prior function samples, as computed by :func:`fgivenx.compute_samples`
    parallel, tqdm_kwargs: optional
        see docstring for :func:`fgivenx.parallel.parallel_apply`.
    cache: str, optional
        File root for saving previous calculations for re-use.
    Returns
    -------
    1D numpy.array:
        Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))`
    """
    parallel = kwargs.pop('parallel', False)
    cache = kwargs.pop('cache', '')
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    # Reject unknown keyword arguments explicitly rather than silently.
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    if cache:
        # Reuse a previous result when the cached inputs match; a cache
        # miss falls through to recomputation.
        cache = Cache(cache + '_dkl')
        try:
            return cache.check(fsamps, prior_fsamps)
        except CacheException as e:
            print(e)
    # Pair posterior/prior samples per x-value and apply DKL to each pair.
    zip_fsamps = list(zip(fsamps, prior_fsamps))
    dkls = parallel_apply(DKL, zip_fsamps, parallel=parallel,
                          tqdm_kwargs=tqdm_kwargs)
    dkls = numpy.array(dkls)
    if cache:
        cache.save(fsamps, prior_fsamps, dkls)
    return dkls
|
Compute the Kullback Leibler divergence for function samples for posterior
and prior pre-calculated at a range of x values.
Parameters
----------
fsamps: 2D numpy.array
Posterior function samples, as computed by
:func:`fgivenx.compute_samples`
prior_fsamps: 2D numpy.array
Prior function samples, as computed by :func:`fgivenx.compute_samples`
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`.
cache: str, optional
File root for saving previous calculations for re-use.
Returns
-------
1D numpy.array:
Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))`
|
def compute_score(markers, bonus, penalty):
    """
    Compute chain score using dynamic programming. If a marker is the same
    linkage group as a previous one, we add bonus; otherwise, we penalize the
    chain switching.

    :param markers: sequence of objects with an ``mlg`` attribute
    :param bonus: score added when consecutive chain markers share an mlg
    :param penalty: (negative) score added on a linkage-group switch
    :return: markers on the highest-scoring chain, in original order
    """
    if not markers:  # guard: max()/index() below fail on empty input
        return []
    nmarkers = len(markers)
    s = [bonus] * nmarkers  # s[i]: best score of a chain ending at marker i
    f = [-1] * nmarkers     # f[i]: predecessor of i on that chain (-1 = none)
    for i in range(1, nmarkers):  # range: py3-compatible (was xrange)
        for j in range(i):
            mi, mj = markers[i], markers[j]
            t = bonus if mi.mlg == mj.mlg else penalty + bonus
            if s[i] < s[j] + t:
                s[i] = s[j] + t
                f[i] = j
    # Recover the highest scoring chain by backtracking predecessor links.
    # Bug fix: the original stepped to f[si] *before* recording si, which
    # dropped the highest-scoring endpoint itself and added a spurious -1.
    si = s.index(max(s))
    onchain = set()
    while si >= 0:
        onchain.add(si)
        si = f[si]
    return [x for i, x in enumerate(markers) if i in onchain]
|
Compute chain score using dynamic programming. If a marker is the same
linkage group as a previous one, we add bonus; otherwise, we penalize the
chain switching.
|
def forProperty(instance,propertyName,useGetter=False):
    """
    2-way binds to an instance property.

    Parameters:
    - instance -- the object instance
    - propertyName -- the name of the property to bind to
    - useGetter: when True, calls the getter method to obtain the value. When False, the signal argument is used as input for the target setter. (default False)

    Notes:
    2-way binds to an instance property according to one of the following naming conventions:

    @property, propertyName.setter and pyqtSignal
    - getter: propertyName
    - setter: propertyName
    - changedSignal: propertyNameChanged

    getter, setter and pyqtSignal (this is used when binding to standard QWidgets like QSpinBox)
    - getter: propertyName()
    - setter: setPropertyName()
    - changedSignal: propertyNameChanged
    """
    assert isinstance(propertyName,str)
    if propertyName.startswith("get") or propertyName.startswith("set"):
        #property is a getter function or a setter function, assume a corresponding setter/getter function exists
        getterName = "get" + propertyName[3:]
        setterName = "set" + propertyName[3:]
        # Signal name is the lower-camel-cased property stem + "Changed".
        if len(propertyName[3:]) > 1:
            signalName = propertyName[3].lower() + propertyName[4:] + "Changed"
        else:
            signalName = propertyName.lower() + "Changed"
        assert hasattr(instance,getterName)
        assert hasattr(instance,setterName)
        assert hasattr(instance,signalName)
        getter = getattr(instance,getterName)
        setter = getattr(instance,setterName)
        signal = getattr(instance,signalName)
    elif hasattr(instance, propertyName) and callable(getattr(instance,propertyName)):
        #property is a getter function without the "get" prefix. Assume a corresponding setter method exists
        getterName = propertyName
        setterName = "set" + propertyName.capitalize()
        signalName = propertyName + "Changed"
        assert hasattr(instance,setterName)
        assert hasattr(instance,signalName)
        getter = getattr(instance,getterName)
        setter = getattr(instance,setterName)
        signal = getattr(instance,signalName)
    elif hasattr(instance, propertyName):
        #property is real property. Assume it is not readonly
        signalName = propertyName + "Changed"
        assert hasattr(instance,signalName)
        # Wrap plain attribute access in callables so all branches expose
        # the same getter/setter interface.
        getter = lambda: getattr(instance,propertyName)
        setter = lambda value: setattr(instance,propertyName,value)
        signal = getattr(instance,signalName)
    else:
        #property is a virtual property. There should be getPropertyName and setPropertyName methods
        if len(propertyName) > 1:
            getterName = "get" + propertyName[0].upper() + propertyName[1:]
            setterName = "set" + propertyName[0].upper() + propertyName[1:]
            signalName = propertyName + "Changed"
        else:
            getterName = "get" + propertyName.upper()
            setterName = "set" + propertyName.upper()
            signalName = propertyName.lower() + "Changed"
        assert hasattr(instance,getterName)
        assert hasattr(instance,setterName)
        assert hasattr(instance,signalName)
        getter = getattr(instance,getterName)
        setter = getattr(instance,setterName)
        signal = getattr(instance,signalName)
    # The getter is only attached when explicitly requested; otherwise the
    # endpoint relies on the signal's argument for the new value.
    return BindingEndpoint(instance, setter, signal, getter = getter if useGetter else None)
|
2-way binds to an instance property.
Parameters:
- instance -- the object instance
- propertyName -- the name of the property to bind to
- useGetter: when True, calls the getter method to obtain the value. When False, the signal argument is used as input for the target setter. (default False)
Notes:
2-way binds to an instance property according to one of the following naming conventions:
@property, propertyName.setter and pyqtSignal
- getter: propertyName
- setter: propertyName
- changedSignal: propertyNameChanged
getter, setter and pyqtSignal (this is used when binding to standard QWidgets like QSpinBox)
- getter: propertyName()
- setter: setPropertyName()
- changedSignal: propertyNameChanged
|
def set_inheritance(obj_name, enabled, obj_type='file', clear=False):
    '''
    Enable or disable an object's inheritance.

    Args:

        obj_name (str):
            The name of the object

        enabled (bool):
            True to enable inheritance, False to disable

        obj_type (Optional[str]):
            The type of object. Only three objects allow inheritance. Valid
            objects are:

            - file (default): This is a file or directory
            - registry
            - registry32 (for WOW64)

        clear (Optional[bool]):
            True to clear existing ACEs, False to keep existing ACEs.
            Default is False

    Returns:
        bool: True if successful, otherwise an Error

    Usage:

    .. code-block:: python

        salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
    '''
    if obj_type not in ['file', 'registry', 'registry32']:
        # BUG FIX: report the offending obj_type value; the original message
        # mistakenly interpolated obj_name here.
        raise SaltInvocationError(
            'obj_type called with incorrect parameter: {0}'.format(obj_type))

    if clear:
        # Start from an empty DACL so existing ACEs are discarded on save
        obj_dacl = dacl(obj_type=obj_type)
    else:
        # Load the object's current DACL so existing ACEs are preserved
        obj_dacl = dacl(obj_name, obj_type)

    # save() takes a "protected" flag (inheritance *disabled*), hence `not enabled`
    return obj_dacl.save(obj_name, not enabled)
|
Enable or disable an objects inheritance.
Args:
obj_name (str):
The name of the object
enabled (bool):
True to enable inheritance, False to disable
obj_type (Optional[str]):
The type of object. Only three objects allow inheritance. Valid
objects are:
- file (default): This is a file or directory
- registry
- registry32 (for WOW64)
clear (Optional[bool]):
True to clear existing ACEs, False to keep existing ACEs.
Default is False
Returns:
bool: True if successful, otherwise an Error
Usage:
.. code-block:: python
salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
|
def _import_public_names(module):
"Import public names from module into this module, like import *"
self = sys.modules[__name__]
for name in module.__all__:
if hasattr(self, name):
# don't overwrite existing names
continue
setattr(self, name, getattr(module, name))
|
Import public names from module into this module, like import *
|
def update(self, campaign_id, title, is_smooth, online_status, nick=None):
    '''xxxxx.xxxxx.campaign.update
    ===================================
    Update a campaign: set its title, whether daily spending is smoothed,
    and its online status.  Smoothed spending only takes effect once a
    daily budget limit has been set.

    :param campaign_id: id of the campaign to update
    :param title: new campaign title
    :param is_smooth: whether to smooth daily spending
    :param online_status: online status for the campaign
    :param nick: optional user nick; only sent when provided
    '''
    request = TOPRequest('xxxxx.xxxxx.campaign.update')
    request['campaign_id'] = campaign_id
    request['title'] = title
    request['is_smooth'] = is_smooth
    request['online_status'] = online_status
    # Identity comparison with None (`is not None`) instead of `!= None`.
    if nick is not None:
        request['nick'] = nick
    self.create(self.execute(request), fields=['success', 'result', 'success', 'result_code', 'result_message'], models={'result': Campaign})
    return self.result
|
xxxxx.xxxxx.campaign.update
===================================
更新一个推广计划,可以设置推广计划名字、是否平滑消耗,只有在设置了日限额后平滑消耗才会产生作用。
|
def _warning(code):
    """
    Return a warning message of code 'code'.

    If code = (cd, str) it returns the warning message of code 'cd' and
    appends str at the end.
    """
    # A plain string is already the message itself.
    if isinstance(code, str):
        return code
    suffix = ''
    # A (code, extra) tuple: unpack only when the first item is a string key.
    if isinstance(code, tuple) and isinstance(code[0], str):
        code, suffix = code[0], code[1]
    return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + suffix
|
Return a warning message of code 'code'.
If code = (cd, str) it returns the warning message of code 'cd' and appends
str at the end
|
def expect(
    self, re_strings='', timeout=None, output_callback=None, default_match_prefix='.*\n',
    strip_ansi=True
):
    """
    This function takes in a regular expression (or regular expressions)
    that represent the last line of output from the server. The function
    waits for one or more of the terms to be matched. The regexes are
    matched using expression \n<regex>$ so you'll need to provide an
    easygoing regex such as '.*server.*' if you wish to have a fuzzy
    match.

    :param re_strings: Either a regex string or list of regex strings
                       that we should expect; if this is not specified,
                       then EOF is expected (i.e. the shell is completely
                       closed after the exit command is issued)
    :param timeout: Timeout in seconds. If this timeout is exceeded,
                    then an exception is raised.
    :param output_callback: A function used to print ssh output. Printed to stdout
                            by default. A user-defined logger may be passed like
                            output_callback=lambda m: mylog.debug(m)
    :param default_match_prefix: A prefix to all match regexes, defaults to '.*\n',
                                 can set to '' on cases prompt is the first line,
                                 or the command has no output.
    :param strip_ansi: If True, will strip ansi control chars before regex matching
                       default to True.
    :return: An EOF returns -1, a regex match returns 0 and a match in a
             list of regexes returns the index of the matched string in
             the list.
    :raises: A socket.timeout exception is raised on timeout.
    """
    # Fall back to the instance-level callback when none was supplied.
    output_callback = output_callback if output_callback else self.output_callback

    # Set the channel timeout
    timeout = timeout if timeout else self.timeout
    self.channel.settimeout(timeout)

    # Create an empty output buffer
    self.current_output = ''

    # This function needs all regular expressions to be in the form of a
    # list, so if the user provided a string, let's convert it to a 1
    # item list.
    if isinstance(re_strings, str) and len(re_strings) != 0:
        re_strings = [re_strings]

    # Loop until one of the expressions is matched or loop forever if
    # nothing is expected (usually used for exit)
    while (
        len(re_strings) == 0 or
        not [re_string
             for re_string in re_strings
             if re.match(default_match_prefix + re_string + '$',
                         self.current_output, re.DOTALL)]
    ):
        # Read some of the output
        current_buffer = self.channel.recv(self.buffer_size)

        # If we have an empty buffer, then the SSH session has been closed
        if len(current_buffer) == 0:
            break

        # Convert the buffer to our chosen encoding
        current_buffer_decoded = current_buffer.decode(self.encoding)

        # Strip all ugly \r (Ctrl-M making) characters from the current
        # read
        current_buffer_decoded = current_buffer_decoded.replace('\r', '')

        # Display the current buffer in realtime if requested to do so
        # (good for debugging purposes)
        if self.display:
            output_callback(current_buffer_decoded)

        if strip_ansi:
            current_buffer_decoded = strip_ansi_codes(current_buffer_decoded)

        # Add the currently read buffer to the output
        self.current_output += current_buffer_decoded

    # Grab the first pattern that was matched
    if len(re_strings) != 0:
        found_pattern = [(re_index, re_string)
                         for re_index, re_string in enumerate(re_strings)
                         if re.match(default_match_prefix + re_string + '$',
                                     self.current_output, re.DOTALL)]

    # Clean the output up by removing the sent command
    self.current_output_clean = self.current_output
    if len(self.current_send_string) != 0:
        self.current_output_clean = (
            self.current_output_clean.replace(
                self.current_send_string + '\n', ''
            )
        )

    # Reset the current send string to ensure that multiple expect calls
    # don't result in bad output cleaning
    self.current_send_string = ''

    # Clean the output up by removing the expect output from the end if
    # requested and save the details of the matched pattern
    if len(re_strings) != 0 and len(found_pattern) != 0:
        self.current_output_clean = (
            re.sub(
                found_pattern[0][1] + '$', '', self.current_output_clean
            )
        )
        self.last_match = found_pattern[0][1]
        return found_pattern[0][0]
    else:
        # We would socket timeout before getting here, but for good
        # measure, let's send back a -1
        return -1
|
This function takes in a regular expression (or regular expressions)
that represent the last line of output from the server. The function
waits for one or more of the terms to be matched. The regexes are
matched using expression \n<regex>$ so you'll need to provide an
easygoing regex such as '.*server.*' if you wish to have a fuzzy
match.
:param re_strings: Either a regex string or list of regex strings
that we should expect; if this is not specified,
then EOF is expected (i.e. the shell is completely
closed after the exit command is issued)
:param timeout: Timeout in seconds. If this timeout is exceeded,
then an exception is raised.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
:param default_match_prefix: A prefix to all match regexes, defaults to '.*\n',
can set to '' on cases prompt is the first line,
or the command has no output.
:param strip_ansi: If True, will strip ansi control chars before regex matching
default to True.
:return: An EOF returns -1, a regex match returns 0 and a match in a
list of regexes returns the index of the matched string in
the list.
:raises: A socket.timeout exception is raised on timeout.
|
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
    """Launch Minecraft listening for malmoenv connections.

    Args:
        port: the TCP port to listen on.
        installdir: the install dir name. Defaults to MalmoPlatform.
            Must be same as given (or defaulted) in download call if used.
        replaceable: whether or not to automatically restart Minecraft (default is false).
    """
    launch_script = './launchClient.sh'
    if os.name == 'nt':
        launch_script = 'launchClient.bat'
    cwd = os.getcwd()
    try:
        # Change directory inside the try block so the original working
        # directory is restored even if the install layout is missing
        # (the original chdir calls ran before the try and could leave the
        # process stranded in installdir on failure).
        os.chdir(os.path.join(installdir, "Minecraft"))
        cmd = [launch_script, '-port', str(port), '-env']
        if replaceable:
            cmd.append('-replaceable')
        subprocess.check_call(cmd)
    finally:
        os.chdir(cwd)
|
Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false).
|
def alive(opts):
    '''
    Validate and return the connection status with the remote device.

    .. versionadded:: 2018.3.0
    '''
    dev = conn()
    # Refresh the cached connection flag by actively pinging the device.
    thisproxy['conn'].connected = ping()
    if not dev.connected:
        # Tell the master this junos proxy lost its connection.
        __salt__['event.fire_master'](
            {}, 'junos/proxy/{}/stop'.format(opts['proxy']['host']))
    return dev.connected
|
Validate and return the connection status with the remote device.
.. versionadded:: 2018.3.0
|
def select_catalogue(self, selector, distance, selector_type='circle',
                     distance_metric='epicentral', point_depth=None,
                     upper_eq_depth=None, lower_eq_depth=None):
    '''
    Selects the catalogue associated to the point source.

    Thin dispatcher over the two selection helpers: selection within a
    distance of the point ('circle') and selection within a cell centred
    on the point ('square').

    :param selector:
        Populated instance of :class:
        `openquake.hmtk.seismicity.selector.CatalogueSelector`
    :param float distance:
        Distance from point (km) for selection
    :param str selector_type:
        Chooses whether to select within {'circle'} or within a {'square'}.
    :param str distance_metric:
        'epicentral' or 'hypocentral' (only for 'circle' selector type)
    :param float point_depth:
        Assumed hypocentral depth of the point (only applied to 'circle'
        distance type)
    :param float upper_eq_depth:
        Upper seismogenic depth (km) (only for 'square')
    :param float lower_eq_depth:
        Lower seismogenic depth (km) (only for 'square')
    '''
    if selector.catalogue.get_number_events() < 1:
        raise ValueError('No events found in catalogue!')
    if 'square' in selector_type:
        # Cell-based selection centred on the point
        self.select_catalogue_within_cell(
            selector, distance,
            upper_depth=upper_eq_depth,
            lower_depth=lower_eq_depth)
        return
    if 'circle' in selector_type:
        # Distance-based selection from the point
        self.select_catalogue_within_distance(
            selector, distance, distance_metric, point_depth)
        return
    raise ValueError('Unrecognised selection type for point source!')
|
Selects the catalogue associated to the point source.
Effectively a wrapper to the two functions select catalogue within
a distance of the point and select catalogue within cell centred on
point
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param float distance:
Distance from point (km) for selection
:param str selector_type:
Chooses whether to select within {'circle'} or within a {'square'}.
:param str distance_metric:
'epicentral' or 'hypocentral' (only for 'circle' selector type)
:param float point_depth:
Assumed hypocentral depth of the point (only applied to 'circle'
distance type)
:param float upper_depth:
Upper seismogenic depth (km) (only for 'square')
:param float lower_depth:
Lower seismogenic depth (km) (only for 'square')
|
def writePlistToString(rootObject):
    '''Return 'rootObject' as a plist-formatted string.'''
    plistData, error = (
        NSPropertyListSerialization.
        dataFromPropertyList_format_errorDescription_(
            rootObject, NSPropertyListXMLFormat_v1_0, None))
    if plistData is not None:
        return str(plistData)
    # Serialization failed: normalise the error description and raise.
    if error:
        error = error.encode('ascii', 'ignore')
    else:
        error = "Unknown error"
    raise NSPropertyListSerializationException(error)
|
Return 'rootObject' as a plist-formatted string.
|
def template(page=None, layout=None, **kwargs):
    """
    Decorator to change the view template and layout.

    It works on both View class and view methods.

    on class
        only $layout is applied, everything else will be passed to the kwargs
        Using as first argument, it will be the layout.

        :first arg or $layout: The layout to use for that view
        :param layout: The layout to use for that view
        :param kwargs:
            get pass to the TEMPLATE_CONTEXT

    ** on method that return a dict
        page or layout are optional

        :param page: The html page
        :param layout: The layout to use for that view
        :param kwargs:
            get pass to the view as k/V

    ** on other methods that return other type, it doesn't apply

    :return:
    """
    pkey = "_template_extends__"

    def decorator(f):
        if inspect.isclass(f):
            layout_ = layout or page
            extends = kwargs.pop("extends", None)
            if extends and hasattr(extends, pkey):
                # BUG FIX: the original iterated `getattr(extends, pkey).items()`
                # (a dict_items view of (k, v) tuples), so `"layout" in items`
                # was always False and `items.pop("layout")` would have raised
                # AttributeError. Copy the dict so the inherited layout and
                # context are actually applied.
                inherited = dict(getattr(extends, pkey))
                if "layout" in inherited:
                    layout_ = inherited.pop("layout")
                for k, v in inherited.items():
                    kwargs.setdefault(k, v)
            if not layout_:
                layout_ = "layout.html"
            kwargs.setdefault("brand_name", "")
            kwargs["layout"] = layout_
            setattr(f, pkey, kwargs)
            setattr(f, "base_layout", kwargs.get("layout"))
            f.g(TEMPLATE_CONTEXT=kwargs)
            return f
        else:
            @functools.wraps(f)
            def wrap(*args2, **kwargs2):
                response = f(*args2, **kwargs2)
                # Only dict (or None) responses get template context injected;
                # any other return type passes through untouched.
                if isinstance(response, dict) or response is None:
                    response = response or {}
                    if page:
                        response.setdefault("template_", page)
                    if layout:
                        response.setdefault("layout_", layout)
                    for k, v in kwargs.items():
                        response.setdefault(k, v)
                return response
            return wrap
    return decorator
|
Decorator to change the view template and layout.
It works on both View class and view methods
on class
only $layout is applied, everything else will be passed to the kwargs
Using as first argument, it will be the layout.
:first arg or $layout: The layout to use for that view
:param layout: The layout to use for that view
:param kwargs:
get pass to the TEMPLATE_CONTEXT
** on method that return a dict
page or layout are optional
:param page: The html page
:param layout: The layout to use for that view
:param kwargs:
get pass to the view as k/V
** on other methods that return other type, it doesn't apply
:return:
|
def _new_name(method, old_name):
"""Return a method with a deprecation warning."""
# Looks suspiciously like a decorator, but isn't!
@wraps(method)
def _method(*args, **kwargs):
warnings.warn(
"method '{}' has been deprecated, please rename to '{}'".format(
old_name, method.__name__
),
DeprecationWarning,
)
return method(*args, **kwargs)
deprecated_msg = """
Note:
.. deprecated:: 2.2.0
Please use `~{}`
""".format(
method.__name__
)
if getattr(_method, "__doc__"):
_method.__doc__ += deprecated_msg
return _method
|
Return a method with a deprecation warning.
|
def on_loss_begin(self, last_output:Tuple[Tensor,Tensor,Tensor], **kwargs):
    "Keep the auxiliary outputs on the callback and forward only the real one."
    self.raw_out = last_output[1]
    self.out = last_output[2]
    return {'last_output': last_output[0]}
|
Save the extra outputs for later and only returns the true output.
|
def serialize(self, keep_readonly=False):
    """Return the JSON that would be sent to azure from this model.

    This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.

    :param bool keep_readonly: If you want to serialize the readonly attributes
    :returns: A dict JSON compatible object
    :rtype: dict
    """
    # Build a serializer over the model graph inferred from this class.
    return Serializer(self._infer_class_models())._serialize(
        self, keep_readonly=keep_readonly)
|
Return the JSON that would be sent to azure from this model.
This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
:param bool keep_readonly: If you want to serialize the readonly attributes
:returns: A dict JSON compatible object
:rtype: dict
|
def is_standard(action):
    """ actions which are general "store" instructions.
    e.g. anything which has an argument style like:
    $ script.py -f myfilename.txt
    """
    # Choice-restricted arguments are not plain "store" actions.
    if action.choices:
        return False
    # Neither are counters nor the built-in help action.
    if isinstance(action, (_CountAction, _HelpAction)):
        return False
    # Boolean-style flags take no argument, so they are excluded too.
    flag_actions = (_StoreConstAction, _StoreFalseAction, _StoreTrueAction)
    return type(action) not in flag_actions
|
actions which are general "store" instructions.
e.g. anything which has an argument style like:
$ script.py -f myfilename.txt
|
def update(self):
    """Retrieve latest state of the device."""
    power = self._device.get_power_state()
    if not power:
        # Distinguish "unreachable" (None) from "powered off" (falsy non-None).
        self._state = None if power is None else STATE_OFF
        self._volume_level = None
        self._current_input = None
        self._available_inputs = None
        return
    self._state = STATE_ON
    volume = self._device.get_current_volume()
    if volume is not None:
        self._volume_level = float(volume) / self._max_volume
    current = self._device.get_current_input()
    if current is not None:
        self._current_input = current.meta_name
    inputs = self._device.get_inputs()
    if inputs is not None:
        self._available_inputs = [entry.name for entry in inputs]
|
Retrieve latest state of the device.
|
def name(self):
    """
    Compact representation for the names
    """
    parts = self.names.split()
    if len(parts) == 1:
        return parts[0]
    if len(parts) == 2:
        return ' '.join(parts)
    # Three or more names: keep the first and last, elide the middle.
    return ' '.join([parts[0], '...', parts[-1]])
|
Compact representation for the names
|
def get_annotated_list_qs(cls, qs):
    """
    Gets an annotated list from a queryset.
    """
    result = []
    info = {}
    start_depth = None
    prev_depth = None
    for node in qs:
        depth = node.get_depth()
        if start_depth is None:
            start_depth = depth
        # A node "opens" a level when deeper than its predecessor (or when
        # it is the first node and sits at a non-zero depth).
        opens = (depth and (prev_depth is None or depth > prev_depth))
        if prev_depth is not None and depth < prev_depth:
            # Stepping back up the tree closes one level per step, recorded
            # on the *previous* node's info dict.
            info['close'] = list(range(0, prev_depth - depth))
        info = {'open': opens, 'close': [], 'level': depth - start_depth}
        result.append((node, info))
        prev_depth = depth
    if start_depth and start_depth > 0:
        # Close any levels still open at the end of the iteration.
        info['close'] = list(range(0, prev_depth - start_depth + 1))
    return result
|
Gets an annotated list from a queryset.
|
def create_track_token(request):
    """Returns ``TrackToken``.

    ``TrackToken`` contains request and user making changes.
    It can be passed to ``TrackedModel.save`` instead of ``request``.
    It is intended to be used when passing ``request`` is not possible
    e.g. when ``TrackedModel.save`` will be called from celery task.
    """
    from tracked_model.models import RequestInfo
    request_pk = RequestInfo.create_or_get_from_request(request).pk
    # Anonymous requests carry no user pk.
    user_pk = request.user.pk if request.user.is_authenticated() else None
    return TrackToken(request_pk=request_pk, user_pk=user_pk)
|
Returns ``TrackToken``.
``TrackToken`` contains request and user making changes.
It can be passed to ``TrackedModel.save`` instead of ``request``.
It is intended to be used when passing ``request`` is not possible
e.g. when ``TrackedModel.save`` will be called from celery task.
|
def simDeath(self):
    '''
    Randomly determine which consumers die, and distribute their wealth among the survivors.
    This method only works if there is only one period in the cycle.

    Parameters
    ----------
    None

    Returns
    -------
    who_dies : np.array(bool)
        Boolean array of size AgentCount indicating which agents die.
    '''
    # Choose a uniformly random subset of agents to die, sized by the
    # one-period survival probability.
    n_deaths = int(round(self.AgentCount*(1.0-self.LivPrb[0])))
    death_template = np.zeros(self.AgentCount, dtype=bool)
    death_template[0:n_deaths] = True
    who_dies = self.RNG.permutation(death_template)
    if self.T_age is not None:
        # Agents at or beyond the terminal age die with certainty.
        who_dies[self.t_age >= self.T_age] = True
    # Split the wealth of the dead proportionally among the survivors.
    who_lives = np.logical_not(who_dies)
    wealth_living = np.sum(self.aLvlNow[who_lives])
    wealth_dead = np.sum(self.aLvlNow[who_dies])
    actuarial_factor = 1.0 + wealth_dead/wealth_living
    self.aNrmNow[who_lives] = self.aNrmNow[who_lives]*actuarial_factor
    self.aLvlNow[who_lives] = self.aLvlNow[who_lives]*actuarial_factor
    return who_dies
|
Randomly determine which consumers die, and distribute their wealth among the survivors.
This method only works if there is only one period in the cycle.
Parameters
----------
None
Returns
-------
who_dies : np.array(bool)
Boolean array of size AgentCount indicating which agents die.
|
def getSubtotal(self):
    """ Compute Subtotal """
    # No line items means a zero subtotal.
    if not self.supplyorder_lineitems:
        return 0
    return sum(
        Decimal(item['Quantity']) * Decimal(item['Price'])
        for item in self.supplyorder_lineitems
    )
|
Compute Subtotal
|
def create_table(
    self,
    impala_name,
    kudu_name,
    primary_keys=None,
    obj=None,
    schema=None,
    database=None,
    external=False,
    force=False,
):
    """
    Create a Kudu-backed table in the connected Impala cluster. For
    non-external tables, this will create a Kudu table with a compatible
    storage schema.

    This function is patterned after the ImpalaClient.create_table function
    designed for physical filesystems (like HDFS).

    Parameters
    ----------
    impala_name : string
        Name of the created Impala table
    kudu_name : string
        Name of the backing Kudu table. Will be created if external=False
    primary_keys : list of column names
        Columns forming the Kudu primary key; required when external=False
        (i.e. when a new Kudu table is created)
    obj : TableExpr or pandas.DataFrame, optional
        If passed, creates table from select statement results
    schema : ibis.Schema, optional
        Mutually exclusive with expr, creates an empty table with a
        particular schema
    database : string, default None (optional)
    external : boolean, default False
        If False, a new Kudu table will be created. Otherwise, the Kudu table
        must already exist.
    force : boolean, default False
        # NOTE(review): accepted but never read in this body — presumably
        # kept for signature parity with ImpalaClient.create_table; confirm.
    """
    self._check_connected()

    if not external and (primary_keys is None or len(primary_keys) == 0):
        raise ValueError(
            'Must specify primary keys when DDL creates a '
            'new Kudu table'
        )
    if obj is not None:
        # CTAS path: build the new table from a select statement.
        if external:
            raise ValueError(
                'Cannot create an external Kudu-Impala table '
                'from an expression or DataFrame'
            )
        if isinstance(obj, pd.DataFrame):
            # Stage the DataFrame as a temporary table to select from.
            from ibis.impala.pandas_interop import write_temp_dataframe

            writer, to_insert = write_temp_dataframe(
                self.impala_client, obj
            )
        else:
            to_insert = obj
        # XXX: exposing a lot of internals
        ast = self.impala_client._build_ast(to_insert)
        select = ast.queries[0]

        stmt = CTASKudu(
            impala_name,
            kudu_name,
            self.client.master_addrs,
            select,
            primary_keys,
            database=database,
        )
    else:
        # Empty-table path: derive the schema either from the existing
        # Kudu table (external) or from the caller-provided schema.
        if external:
            ktable = self.client.table(kudu_name)
            kschema = ktable.schema
            schema = schema_kudu_to_ibis(kschema)
            primary_keys = kschema.primary_keys()
        elif schema is None:
            raise ValueError(
                'Must specify schema for new empty ' 'Kudu-backed table'
            )

        stmt = CreateTableKudu(
            impala_name,
            kudu_name,
            self.client.master_addrs,
            schema,
            primary_keys,
            external=external,
            database=database,
            can_exist=False,
        )

    self.impala_client._execute(stmt)
|
Create an Kudu-backed table in the connected Impala cluster. For
non-external tables, this will create a Kudu table with a compatible
storage schema.
This function is patterned after the ImpalaClient.create_table function
designed for physical filesystems (like HDFS).
Parameters
----------
impala_name : string
Name of the created Impala table
kudu_name : string
Name of the backing Kudu table. Will be created if external=False
primary_keys : list of column names
List of
obj : TableExpr or pandas.DataFrame, optional
If passed, creates table from select statement results
schema : ibis.Schema, optional
Mutually exclusive with expr, creates an empty table with a
particular schema
database : string, default None (optional)
external : boolean, default False
If False, a new Kudu table will be created. Otherwise, the Kudu table
must already exist.
|
def deserialize(cls, assoc_s):
    """
    Parse an association as stored by serialize().

    inverse of serialize

    @param assoc_s: Association as serialized by serialize()
    @type assoc_s: str

    @return: instance of this class
    """
    pairs = kvform.kvToSeq(assoc_s, strict=True)
    keys = []
    values = []
    for k, v in pairs:
        keys.append(k)
        values.append(v)

    if keys != cls.assoc_keys:
        # BUG FIX: the original passed `keys` as a second positional
        # argument to ValueError instead of %-interpolating it, so the
        # error message never contained the offending keys.
        raise ValueError('Unexpected key values: %r' % (keys,))

    version, handle, secret, issued, lifetime, assoc_type = values
    if version != '2':
        raise ValueError('Unknown version: %r' % version)
    issued = int(issued)
    lifetime = int(lifetime)
    secret = oidutil.fromBase64(secret)
    return cls(handle, secret, issued, lifetime, assoc_type)
|
Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: str
@return: instance of this class
|
def render(self, name, value, attrs=None, renderer=None):
    """
    Render the widget as an HTML string.

    Overridden here to support Django < 1.11.
    """
    if not self.has_template_widget_rendering:
        # Old Django: render the widget template manually from its context.
        context = self.get_context(name, value, attrs)
        return render_to_string(self.template_name, context)
    return super(ClearableFileInputWithImagePreview, self).render(
        name, value, attrs=attrs, renderer=renderer
    )
|
Render the widget as an HTML string.
Overridden here to support Django < 1.11.
|
def _validate(self, writing=False):
"""Validate internal correctness."""
if (((len(self.fragment_offset) != len(self.fragment_length)) or
(len(self.fragment_length) != len(self.data_reference)))):
msg = ("The lengths of the fragment offsets ({len_offsets}), "
"fragment lengths ({len_fragments}), and "
"data reference items ({len_drefs}) must be the same.")
msg = msg.format(len_offsets=len(self.fragment_offset),
len_fragments=len(self.fragment_length),
len_drefs=len(self.data_reference))
self._dispatch_validation_error(msg, writing=writing)
if any([x <= 0 for x in self.fragment_offset]):
msg = "Fragment offsets must all be positive."
self._dispatch_validation_error(msg, writing=writing)
if any([x <= 0 for x in self.fragment_length]):
msg = "Fragment lengths must all be positive."
self._dispatch_validation_error(msg, writing=writing)
|
Validate internal correctness.
|
def _use_framework(module):
    """
    Internal helper, to set this modules methods to a specified
    framework helper-methods.
    """
    import txaio
    # The framework-selection entry points themselves must not be replaced.
    skip = ('use_twisted', 'use_asyncio')
    for method_name in __all__:
        if method_name in skip:
            continue
        setattr(txaio, method_name, getattr(module, method_name))
|
Internal helper, to set this modules methods to a specified
framework helper-methods.
|
def _outer_values_update(self, full_values):
    """
    Here you put the values, which were collected before in the right places.
    E.g. set the gradients of parameters, etc.

    ``full_values`` is a dict that must contain either the psi-statistic
    gradients ('dL_dpsi0', 'dL_dpsi1', 'dL_dpsi2') when the inputs are
    uncertain, or the kernel gradients ('dL_dKnm', 'dL_dKdiag') otherwise.
    """
    super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
    if self.has_uncertain_inputs():
        # Uncertain (variational) inputs: back-propagate the psi-statistic
        # gradients to the variational mean and variance of X.
        meangrad_tmp, vargrad_tmp = self.kern.gradients_qX_expectations(
                                        variational_posterior=self.X,
                                        Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
                                        dL_dpsi1=full_values['dL_dpsi1'],
                                        dL_dpsi2=full_values['dL_dpsi2'],
                                        psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
        self.X.mean.gradient = meangrad_tmp
        self.X.variance.gradient = vargrad_tmp
    else:
        # Deterministic inputs: accumulate the standard kernel gradients
        # with respect to X (cross-covariance plus diagonal terms).
        self.X.gradient = self.kern.gradients_X(full_values['dL_dKnm'], self.X, self.Z)
        self.X.gradient += self.kern.gradients_X_diag(full_values['dL_dKdiag'], self.X)
|
Here you put the values, which were collected before in the right places.
E.g. set the gradients of parameters, etc.
|
def process_event(self, event_name: str, data: dict) -> None:
    """
    Process event after epoch

    Args:
        event_name: whether event is send after epoch or batch.
            Set of values: ``"after_epoch", "after_batch"``
        data: event data (dictionary)

    Returns:
        None
    """
    # Only epoch-end events update the progress counters; every other
    # event is ignored.
    if event_name != "after_epoch":
        return
    self.epochs_done = data["epochs_done"]
    self.batches_seen = data["batches_seen"]
    self.train_examples_seen = data["train_examples_seen"]
|
Process event after epoch
Args:
event_name: whether event is send after epoch or batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None
|
def _nodeSatisfiesValue(cntxt: Context, n: Node, vsv: ShExJ.valueSetValue) -> bool:
    """
    A term matches a valueSetValue if:
        * vsv is an objectValue and n = vsv.
        * vsv is a Language with langTag lt and n is a language-tagged string with a language tag l and l = lt.
        * vsv is a IriStem, LiteralStem or LanguageStem with stem st and nodeIn(n, st).
        * vsv is a IriStemRange, LiteralStemRange or LanguageStemRange with stem st and exclusions excls and
          nodeIn(n, st) and there is no x in excls such that nodeIn(n, excl).
        * vsv is a Wildcard with exclusions excls and there is no x in excls such that nodeIn(n, excl).

    Note that ObjectLiteral is *not* typed in ShExJ.jsg, so we identify it by a lack of a 'type' variable

    .. note:: Mismatch with spec
        This won't work correctly if the stem value is passed in to nodeIn, as there will be no way to know whether
        we're matching an IRI or other type

    ... note:: Language issue
        The stem range spec shouldn't have the first element in the exclusions
    """
    # Untyped ObjectLiteral entries are first normalised to a typed value.
    vsv = map_object_literal(vsv)
    if isinstance_(vsv, ShExJ.objectValue):
        return objectValueMatches(n, vsv)

    if isinstance(vsv, ShExJ.Language):
        # Only literal nodes carrying a language tag can match a Language entry.
        if vsv.languageTag is not None and isinstance(n, Literal) and n.language is not None:
            return n.language == vsv.languageTag
        else:
            return False

    if isinstance(vsv, ShExJ.IriStem):
        return nodeInIriStem(cntxt, n, vsv.stem)

    if isinstance(vsv, ShExJ.IriStemRange):
        # Stem must match AND the node must not fall under any exclusion
        # (an exclusion is either an exact IRI or another stem).
        exclusions = vsv.exclusions if vsv.exclusions is not None else []
        return nodeInIriStem(cntxt, n, vsv.stem) and not any(
            (uriref_matches_iriref(n, excl) if isinstance(excl, ShExJ.IRIREF) else
             uriref_startswith_iriref(n, excl.stem)) for excl in exclusions)

    if isinstance(vsv, ShExJ.LiteralStem):
        return nodeInLiteralStem(cntxt, n, vsv.stem)

    if isinstance(vsv, ShExJ.LiteralStemRange):
        # Literal exclusions are compared by string value.
        exclusions = vsv.exclusions if vsv.exclusions is not None else []
        return nodeInLiteralStem(cntxt, n, vsv.stem) and not any(str(n) == excl for excl in exclusions)

    if isinstance(vsv, ShExJ.LanguageStem):
        return nodeInLanguageStem(cntxt, n, vsv.stem)

    if isinstance(vsv, ShExJ.LanguageStemRange):
        exclusions = vsv.exclusions if vsv.exclusions is not None else []
        return nodeInLanguageStem(cntxt, n, vsv.stem) and not any(str(n) == str(excl) for excl in exclusions)

    # Unrecognised valueSetValue kinds never match.
    return False
|
A term matches a valueSetValue if:
* vsv is an objectValue and n = vsv.
* vsv is a Language with langTag lt and n is a language-tagged string with a language tag l and l = lt.
* vsv is a IriStem, LiteralStem or LanguageStem with stem st and nodeIn(n, st).
* vsv is a IriStemRange, LiteralStemRange or LanguageStemRange with stem st and exclusions excls and
nodeIn(n, st) and there is no x in excls such that nodeIn(n, excl).
* vsv is a Wildcard with exclusions excls and there is no x in excls such that nodeIn(n, excl).
Note that ObjectLiteral is *not* typed in ShExJ.jsg, so we identify it by a lack of a 'type' variable
.. note:: Mismatch with spec
This won't work correctly if the stem value is passed in to nodeIn, as there will be no way to know whether
we're matching an IRI or other type
... note:: Language issue
The stem range spec shouldn't have the first element in the exclusions
|
def labels(self, hs_dims=None, prune=False):
    """Return per-dimension label lists for this cube slice.

    :param hs_dims: dimensions for which header/subtotal transforms are
        included when fetching labels from the underlying cube.
    :param prune: when True, drop labels whose matching prune index is set.
    """
    all_labels = self._cube.labels(include_transforms_for_dims=hs_dims)
    # A CA-as-0th slice drops the first dimension's labels; otherwise the
    # slice covers the last two dimensions of the cube.
    sliced = all_labels[1:] if self.ca_as_0th else all_labels[-2:]
    if not prune:
        return sliced

    def _keep_unpruned(dim_labels, prune_inds):
        """Drop labels flagged for pruning within a single dimension."""
        return [lbl for lbl, flagged in zip(dim_labels, prune_inds)
                if not flagged]

    return [
        _keep_unpruned(dim_labels, inds)
        for dim_labels, inds in zip(sliced, self._prune_indices(hs_dims))
    ]
|
Get labels for the cube slice, and perform pruning by slice.
|
def nextProperty(self, propuri):
    """Return the property following ``propuri`` in ``self.properties``.

    Wraps around: if ``propuri`` is the URI of the last property, the
    first one is returned. Returns ``None`` when ``propuri`` is unknown.
    """
    props = self.properties
    # Wrap-around case: the last property cycles back to the first.
    if propuri == props[-1].uri:
        return props[0]
    for position, prop in enumerate(props):
        if prop.uri == propuri:
            return props[position + 1]
    return None
|
Returns the next property in the list of properties. If it's the last one, returns the first one.
|
def sync_firmware(self):
    """Syncs the emulator's firmware version and the DLL's firmware.

    This method is useful for ensuring that the firmware running on the
    J-Link matches the firmware supported by the DLL.

    Args:
      self (JLink): the ``JLink`` instance

    Returns:
      ``None``
    """
    serial_no = self.serial_number

    if self.firmware_newer():
        # Device firmware is ahead of what the DLL supports (despite
        # promises of backwards compatibility), so downgrade it.
        try:
            # Older J-Link software may time out after a firmware change
            # and raise spuriously -- swallow and re-check below.
            self.invalidate_firmware()
            self.update_firmware()
        except errors.JLinkException:
            pass

        res = self.open(serial_no=serial_no)
        if self.firmware_newer():
            raise errors.JLinkException('Failed to sync firmware version.')
        return res

    if self.firmware_outdated():
        # Device firmware lags behind the DLL, so upgrade it.
        try:
            # Same timeout caveat as the downgrade path above.
            self.update_firmware()
        except errors.JLinkException:
            pass

        if self.firmware_outdated():
            raise errors.JLinkException('Failed to sync firmware version.')
        return self.open(serial_no=serial_no)

    return None
|
Syncs the emulator's firmware version and the DLL's firmware.
This method is useful for ensuring that the firmware running on the
J-Link matches the firmware supported by the DLL.
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
|
def wrap_text_in_a_box(body='', title='', style='double_star', **args):
    r"""Return a nicely formatted text box.
    e.g.
    ******************
    ** title **
    **--------------**
    ** body **
    ******************
    Indentation and newline are respected.
    :param body: the main text
    :param title: an optional title
    :param style: the name of one of the style in CFG_WRAP_STYLES. By default
    the double_star style is used.
    You can further tune the desired style by setting various optional
    parameters:
    :param horiz_sep: a string that is repeated in order to produce a
    separator row between the title and the body (if needed)
    or a tuple of three characters in the form (l, c, r)
    :param max_col: the maximum number of columns used by the box
    (including indentation)
    :param min_col: the symmetrical minimum number of columns
    :param tab_str: a string to represent indentation
    :param tab_num: the number of levels of indentation
    :param border: a tuple of 8 elements in the form
    (tl, t, tr, l, r, bl, b, br) of strings that represent the
    different corners and sides of the box
    :param prefix: a prefix string added before the box
    :param suffix: a suffix string added after the box
    :param break_long: whether to break long words in order to respect
    max_col
    :param force_horiz: True in order to print the horizontal line even when
    there is no title
    e.g.:
    print wrap_text_in_a_box(title='prova',
    body=' 123 prova.\n Vediamo come si indenta',
    horiz_sep='-', style='no_border', max_col=20, tab_num=1)
    prova
    ----------------
    123 prova.
    Vediamo come
    si indenta
    """
    def _wrap_row(row, max_col, break_long):
        """Wrap a single row, preserving its leading indentation."""
        spaces = _RE_BEGINNING_SPACES.match(row).group()
        row = row[len(spaces):]
        spaces = spaces.expandtabs()
        return textwrap.wrap(row, initial_indent=spaces,
                             subsequent_indent=spaces, width=max_col,
                             break_long_words=break_long)
    def _clean_newlines(text):
        # Normalize lone newlines and strip the extra character that the
        # cleaner regex captures before each newline run.
        text = _RE_LONELY_NEWLINES.sub(' \n', text)
        return _RE_NEWLINES_CLEANER.sub(lambda x: x.group()[:-1], text)
    # NOTE(review): Python 2 only -- relies on the `unicode` builtin.
    body = unicode(body, 'utf-8')
    title = unicode(title, 'utf-8')
    # Resolve the effective style: defaults, then the named style, then
    # per-call keyword overrides.
    astyle = dict(CFG_WRAP_TEXT_IN_A_BOX_STYLES['__DEFAULT'])
    if style in CFG_WRAP_TEXT_IN_A_BOX_STYLES:
        astyle.update(CFG_WRAP_TEXT_IN_A_BOX_STYLES[style])
    astyle.update(args)
    horiz_sep = astyle['horiz_sep']
    border = astyle['border']
    tab_str = astyle['tab_str'] * astyle['tab_num']
    # Usable text width once side borders and indentation are subtracted.
    max_col = max(astyle['max_col'] -
                  len(border[3]) - len(border[4]) - len(tab_str), 1)
    min_col = astyle['min_col']
    prefix = astyle['prefix']
    suffix = astyle['suffix']
    force_horiz = astyle['force_horiz']
    break_long = astyle['break_long']
    # Wrap the body one source row at a time, keeping blank lines.
    body = _clean_newlines(body)
    tmp_rows = [_wrap_row(row, max_col, break_long)
                for row in body.split('\n')]
    body_rows = []
    for rows in tmp_rows:
        if rows:
            body_rows += rows
        else:
            body_rows.append('')
    if not ''.join(body_rows).strip():
        # Concrete empty body
        body_rows = []
    # Same wrapping treatment for the title.
    title = _clean_newlines(title)
    tmp_rows = [_wrap_row(row, max_col, break_long)
                for row in title.split('\n')]
    title_rows = []
    for rows in tmp_rows:
        if rows:
            title_rows += rows
        else:
            title_rows.append('')
    if not ''.join(title_rows).strip():
        # Concrete empty title
        title_rows = []
    # Shrink the box to the widest wrapped row, but never below min_col.
    max_col = max([len(row) for row in body_rows + title_rows] + [min_col])
    # Build top/bottom borders; the fill character is repeated and sliced
    # so multi-character fills still land on the exact width.
    mid_top_border_len = max_col + \
        len(border[3]) + len(border[4]) - len(border[0]) - len(border[2])
    mid_bottom_border_len = max_col + \
        len(border[3]) + len(border[4]) - len(border[5]) - len(border[7])
    top_border = border[0] + \
        (border[1] * mid_top_border_len)[:mid_top_border_len] + border[2]
    bottom_border = border[5] + \
        (border[6] * mid_bottom_border_len)[:mid_bottom_border_len] + \
        border[7]
    # The title/body separator: either an (l, c, r) tuple or a repeated string.
    if isinstance(horiz_sep, tuple) and len(horiz_sep) == 3:
        horiz_line = horiz_sep[0] + \
            (horiz_sep[1] * (max_col + 2))[:(max_col + 2)] + horiz_sep[2]
    else:
        horiz_line = border[3] + (horiz_sep * max_col)[:max_col] + border[4]
    # Pad each row to max_col and wrap it with indentation and side borders.
    title_rows = [tab_str + border[3] + row +
                  ' ' * (max_col - len(row)) +
                  border[4] for row in title_rows]
    body_rows = [tab_str + border[3] + row +
                 ' ' * (max_col - len(row)) + border[4] for row in body_rows]
    ret = []
    if top_border:
        ret += [tab_str + top_border]
    ret += title_rows
    if title_rows or force_horiz:
        ret += [tab_str + horiz_line]
    ret += body_rows
    if bottom_border:
        ret += [tab_str + bottom_border]
    # NOTE(review): Python 2 -- returns a UTF-8 encoded byte string.
    return (prefix + '\n'.join(ret) + suffix).encode('utf-8')
|
r"""Return a nicely formatted text box.
e.g.
******************
** title **
**--------------**
** body **
******************
Indentation and newline are respected.
:param body: the main text
:param title: an optional title
:param style: the name of one of the style in CFG_WRAP_STYLES. By default
the double_star style is used.
You can further tune the desired style by setting various optional
parameters:
:param horiz_sep: a string that is repeated in order to produce a
separator row between the title and the body (if needed)
or a tuple of three characters in the form (l, c, r)
:param max_col: the maximum number of columns used by the box
(including indentation)
:param min_col: the symmetrical minimum number of columns
:param tab_str: a string to represent indentation
:param tab_num: the number of levels of indentation
:param border: a tuple of 8 elements in the form
(tl, t, tr, l, r, bl, b, br) of strings that represent the
different corners and sides of the box
:param prefix: a prefix string added before the box
:param suffix: a suffix string added after the box
:param break_long: whether to break long words in order to respect
max_col
:param force_horiz: True in order to print the horizontal line even when
there is no title
e.g.:
print wrap_text_in_a_box(title='prova',
body=' 123 prova.\n Vediamo come si indenta',
horiz_sep='-', style='no_border', max_col=20, tab_num=1)
prova
----------------
123 prova.
Vediamo come
si indenta
|
def x_forwarded_for(self):
    """X-Forwarded-For header value.

    This is the amended header so that it contains the previous IP address
    in the forwarding change.
    """
    remote_addr = self._request.META.get('REMOTE_ADDR')
    forwarded = self.headers.get('X-Forwarded-For')
    if forwarded:
        # Append the immediate peer to the existing forwarding chain.
        return '%s, %s' % (forwarded, remote_addr)
    return remote_addr
|
X-Forwarded-For header value.
This is the amended header so that it contains the previous IP address
in the forwarding change.
|
def get_client_class(self, client_class_name):
    """Returns a specific client class details from CPNR server."""
    # Single GET against the ClientClass resource for the named class.
    return self._do_request(
        'GET', self._build_url(['ClientClass', client_class_name]))
|
Returns a specific client class details from CPNR server.
|
def get_template(template_dict, parameter_overrides=None):
    """Return a processed copy of a SAM template dictionary.

    SAM plugins are run over the template and parameter values are
    substituted, then resource metadata is normalized in place.

    Parameters
    ----------
    template_dict : dict
        Unprocessed SAM template dictionary
    parameter_overrides : dict
        Optional dictionary of values for template parameters

    Returns
    -------
    dict
        Processed SAM template
    """
    if not template_dict:
        template_dict = {}
    else:
        template_dict = SamTranslatorWrapper(template_dict).run_plugins()
        template_dict = SamBaseProvider._resolve_parameters(
            template_dict, parameter_overrides)
    ResourceMetadataNormalizer.normalize(template_dict)
    return template_dict
|
Given a SAM template dictionary, return a cleaned copy of the template where SAM plugins have been run
and parameter values have been substituted.
Parameters
----------
template_dict : dict
unprocessed SAM template dictionary
parameter_overrides: dict
Optional dictionary of values for template parameters
Returns
-------
dict
Processed SAM template
|
def _get_username(self, username=None, use_config=True, config_filename=None):
"""Determine the username
If a username is given, this name is used. Otherwise the configuration
file will be consulted if `use_config` is set to True. The user is asked
for the username if the username is not available. Then the username is
stored in the configuration file.
:param username: Username (used directly if given)
:type username: ``str``
:param use_config: Whether to read username from configuration file
:type use_config: ``bool``
:param config_filename: Path to the configuration file
:type config_filename: ``str``
"""
if not username and use_config:
if self._config is None:
self._read_config(config_filename)
username = self._config.get("credentials", "username", fallback=None)
if not username:
username = input("Please enter your username: ").strip()
while not username:
username = input("No username specified. Please enter your username: ").strip()
if 'credendials' not in self._config:
self._config.add_section('credentials')
self._config.set("credentials", "username", username)
self._save_config()
return username
|
Determine the username
If a username is given, this name is used. Otherwise the configuration
file will be consulted if `use_config` is set to True. The user is asked
for the username if the username is not available. Then the username is
stored in the configuration file.
:param username: Username (used directly if given)
:type username: ``str``
:param use_config: Whether to read username from configuration file
:type use_config: ``bool``
:param config_filename: Path to the configuration file
:type config_filename: ``str``
|
def IsOutOfLineMethodDefinition(clean_lines, linenum):
    """Check if current line contains an out-of-line method definition.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.

    Returns:
      True if current line contains an out-of-line method definition.
    """
    # Walk backwards up to 10 lines looking for the start of the current
    # function signature.
    lower_bound = max(-1, linenum - 10)
    for i in xrange(linenum, lower_bound, -1):
        line = clean_lines.elided[i]
        if Match(r'^([^()]*\w+)\(', line):
            # Found the signature start; the definition is out-of-line
            # iff the function name is scope-qualified (Class::Method).
            return Match(r'^[^()]*\w+::\w+\(', line) is not None
    return False
|
Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
|
def insert(self, var, value, index=None):
    """Insert ``value`` into the list stored under ``var``.

    Appends to the end of the list when ``index`` is not provided.
    Raises ``KeyError`` when the stored value is not a list.
    """
    target = self.__get(var)
    if not isinstance(target, list):
        raise KeyError("%s: is not a list" % var)
    if index is None:
        target.append(value)
    else:
        target.insert(index, value)
    # Persist immediately when auto-save is enabled.
    if self.auto_save:
        self.save()
|
Insert at the index.
If the index is not provided appends to the end of the list.
|
def datafind_connection(server=None):
    """ Return a connection to the datafind server

    Parameters
    -----------
    server : {SERVER:PORT, string}, optional
        A string representation of the server and port.
        The port may be omitted.

    Returns
    --------
    connection
        The open connection to the datafind server.
    """
    if server:
        datafind_server = server
    else:
        # Fall back to the server name from the environment.
        if 'LIGO_DATAFIND_SERVER' not in os.environ:
            err = "Trying to obtain the ligo datafind server url from "
            err += "the environment, ${LIGO_DATAFIND_SERVER}, but that "
            err += "variable is not populated."
            raise ValueError(err)
        datafind_server = os.environ["LIGO_DATAFIND_SERVER"]
    # Credentials are only required off port 80 (heuristic: the server
    # string ending in "80" is treated as the plain-HTTP port).
    if datafind_server.endswith("80"):
        cert_file, key_file = None, None
    else:
        cert_file, key_file = glue.datafind.find_credential()
    # Split out an optional ":PORT" suffix from the server URL.
    host, _, port_str = datafind_server.partition(':')
    port = int(port_str) if port_str else None
    # Open an HTTPS connection when credentials are available, else HTTP.
    if cert_file and key_file:
        return glue.datafind.GWDataFindHTTPSConnection(
            host=host, port=port, cert_file=cert_file, key_file=key_file)
    return glue.datafind.GWDataFindHTTPConnection(host=host, port=port)
|
Return a connection to the datafind server
Parameters
-----------
server : {SERVER:PORT, string}, optional
A string representation of the server and port.
The port may be omitted.
Returns
--------
connection
The open connection to the datafind server.
|
def get_source(self, environment, template):
    '''
    Salt-specific loader to find imported jinja files.

    Jinja imports will be interpreted as originating from the top
    of each of the directories in the searchpath when the template
    name does not begin with './' or '../'. When a template name
    begins with './' or '../' then the import will be relative to
    the importing file.

    Raises TemplateNotFound when the template cannot be resolved.
    '''
    # FIXME: somewhere do separator replacement: '\\' => '/'
    _template = template
    if template.split('/', 1)[0] in ('..', '.'):
        is_relative = True
    else:
        is_relative = False
    # checks for relative '..' paths that step-out of file_roots
    if is_relative:
        # Starts with a relative path indicator
        if not environment or 'tpldir' not in environment.globals:
            log.warning(
                'Relative path "%s" cannot be resolved without an environment',
                template
            )
            # BUG FIX: TemplateNotFound requires the template name as an
            # argument; raising the bare class produced a TypeError
            # instead of the intended lookup error (cf. the raise below).
            raise TemplateNotFound(template)
        base_path = environment.globals['tpldir']
        _template = os.path.normpath('/'.join((base_path, _template)))
        if _template.split('/', 1)[0] == '..':
            log.warning(
                'Discarded template path "%s": attempts to'
                ' ascend outside of salt://', template
            )
            raise TemplateNotFound(template)
    self.check_cache(_template)
    if environment and template:
        # Publish template-location globals for the rendered template.
        tpldir = os.path.dirname(_template).replace('\\', '/')
        tplfile = _template
        if is_relative:
            tpldir = environment.globals.get('tpldir', tpldir)
            tplfile = template
        tpldata = {
            'tplfile': tplfile,
            'tpldir': '.' if tpldir == '' else tpldir,
            'tpldot': tpldir.replace('/', '.'),
        }
        environment.globals.update(tpldata)
    # pylint: disable=cell-var-from-loop
    for spath in self.searchpath:
        filepath = os.path.join(spath, _template)
        try:
            with salt.utils.files.fopen(filepath, 'rb') as ifile:
                contents = ifile.read().decode(self.encoding)
                mtime = os.path.getmtime(filepath)

                def uptodate():
                    # Jinja uses this closure to decide whether the
                    # cached template is still fresh.
                    try:
                        return os.path.getmtime(filepath) == mtime
                    except OSError:
                        return False
                return contents, filepath, uptodate
        except IOError:
            # there is no file under current path
            continue
    # pylint: enable=cell-var-from-loop
    # there is no template file within searchpaths
    raise TemplateNotFound(template)
|
Salt-specific loader to find imported jinja files.
Jinja imports will be interpreted as originating from the top
of each of the directories in the searchpath when the template
name does not begin with './' or '../'. When a template name
begins with './' or '../' then the import will be relative to
the importing file.
|
def add_section(self, section):
    """Register a block section of code to be used as substitutions.

    :param section: A block section of code to be used as substitutions
    :type section: Section
    """
    updated = self._ensure_append(section, self._sections)
    self._sections = updated
|
A block section of code to be used as substitutions
:param section: A block section of code to be used as substitutions
:type section: Section
|
def teetsv(table, source=None, encoding=None, errors='strict', write_header=True,
           **csvargs):
    """
    Convenience function, as :func:`petl.io.csv.teecsv` but with different
    default dialect (tab delimited).

    """
    # Default to the tab-delimited dialect unless the caller overrides it.
    if 'dialect' not in csvargs:
        csvargs['dialect'] = 'excel-tab'
    return teecsv(table, source=source, encoding=encoding, errors=errors,
                  write_header=write_header, **csvargs)
|
Convenience function, as :func:`petl.io.csv.teecsv` but with different
default dialect (tab delimited).
|
def fundrefxml2json(self, node):
    """Convert a FundRef 'skos:Concept' node into JSON.

    :param node: XML element describing a single funder concept.
    :returns: dict ready for JSON serialization with the funder's DOI,
        identifiers, name, acronyms, parent reference, country,
        type/subtype and remote created/modified timestamps.
    """
    doi = FundRefDOIResolver.strip_doi_host(
        self.get_attrib(node, 'rdf:about'))
    oaf_id = FundRefDOIResolver().resolve_by_doi(
        "http://dx.doi.org/" + doi)
    name = node.find('./skosxl:prefLabel/skosxl:Label/skosxl:literalForm',
                     namespaces=self.namespaces).text
    # Extract acronyms: altLabels whose usageFlag marks an abbreviated name.
    acronyms = []
    for n in node.findall('./skosxl:altLabel/skosxl:Label',
                          namespaces=self.namespaces):
        usagenode = n.find('./fref:usageFlag', namespaces=self.namespaces)
        if usagenode is not None:
            if self.get_attrib(usagenode, 'rdf:resource') == \
                    ('http://data.crossref.org/fundingdata'
                     '/vocabulary/abbrevName'):
                label = n.find('./skosxl:literalForm',
                               namespaces=self.namespaces)
                if label is not None:
                    acronyms.append(label.text)
    # Optional parent funder, referenced by URL.
    parent_node = node.find('./skos:broader', namespaces=self.namespaces)
    if parent_node is None:
        parent = {}
    else:
        parent = {
            "$ref": self.get_attrib(parent_node, 'rdf:resource'),
        }
    country_elem = node.find('./svf:country', namespaces=self.namespaces)
    country_url = self.get_attrib(country_elem, 'rdf:resource')
    country_code = self.cc_resolver.cc_from_url(country_url)
    # NOTE(review): assumes fundingBodyType/SubType elements are always
    # present -- .text on a missing element would raise AttributeError.
    type_ = node.find('./svf:fundingBodyType',
                      namespaces=self.namespaces).text
    subtype = node.find('./svf:fundingBodySubType',
                        namespaces=self.namespaces).text
    # (fix) removed a redundant second lookup of './svf:country' here;
    # country_elem was already computed above and is not used again.
    modified_elem = node.find('./dct:modified', namespaces=self.namespaces)
    created_elem = node.find('./dct:created', namespaces=self.namespaces)
    json_dict = {
        '$schema': self.schema_formatter.schema_url,
        'doi': doi,
        'identifiers': {
            'oaf': oaf_id,
        },
        'name': name,
        'acronyms': acronyms,
        'parent': parent,
        'country': country_code,
        'type': type_,
        'subtype': subtype.lower(),
        'remote_created': (created_elem.text if created_elem is not None
                           else None),
        'remote_modified': (modified_elem.text if modified_elem is not None
                            else None),
    }
    return json_dict
Convert a FundRef 'skos:Concept' node into JSON.
|
def _summarize_o_mutation_type(model):
    """
    Build the mutation io summary corresponding to the given model.
    """
    from nautilus.api.util import summarize_mutation_io

    # derive the object type name from the model
    object_type_name = get_model_string(model)

    # describe the mutation io for that object type
    return summarize_mutation_io(
        name=object_type_name,
        type=_summarize_object_type(model),
        required=False,
    )
|
This function creates the actual mutation io summary corresponding to the model
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.