code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def register_view(self, view):
    """Called by the view once it is ready to register itself.

    Wires the main window's 'destroy' signal to gtk.main_quit and seeds
    the label text from the model's counter.
    """
    # Quit the GTK main loop when the main window is destroyed.
    self.view['main_window'].connect('destroy', gtk.main_quit)
    # Show the model's current counter value in the label.
    self.view.set_text("%d" % self.model.counter)
|
This method is called by the view, that calls it when it is
ready to register itself. Here we connect the 'pressed' signal
of the button with a controller's method. Signal 'destroy'
for the main window is handled as well.
|
def openStream(self, source):
    """Produces a file object from source.
    source can be either a file object, local filename or a string.
    """
    # Anything with a read() method is treated as a ready-made stream;
    # everything else is wrapped in an in-memory buffer.
    if hasattr(source, 'read'):
        stream = source
    else:
        stream = BytesIO(source)
    # Verify the stream is seekable; if not, wrap it so it becomes so.
    try:
        stream.seek(stream.tell())
    except:  # pylint:disable=bare-except
        stream = BufferedStream(stream)
    return stream
|
Produces a file object from source.
source can be either a file object, local filename or a string.
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # Slab depth in km; see info in GMPEt_Inslab_med.dat
    hslab = 50
    # Replace the distances with their in-slab equivalents before
    # delegating to the parent GMPE.
    equiv_rjb, equiv_rrup = utils.get_equivalent_distance_inslab(
        rup.mag, dists.repi, hslab)
    dists.rjb = equiv_rjb
    dists.rrup = equiv_rrup
    mean, stddevs = super().get_mean_and_stddevs(
        sites, rup, dists, imt, stddev_types)
    # Apply the per-IMT site coefficient as a multiplicative factor in
    # linear space.
    coeffs = self.SITE_COEFFS[imt]
    adjusted_mean = np.log(np.exp(mean) * 10 ** coeffs['mf'])
    # NOTE(review): the parent's stddevs are discarded and replaced by a
    # flat sigma from get_sigma(imt).
    stddevs = [np.ones(len(dists.rrup)) * get_sigma(imt)]
    return adjusted_mean, stddevs
|
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
|
def sequence_matcher_similarity(state_a, state_b):
    """
    The `difflib.SequenceMatcher` ratio between the state addresses in the history of the path.
    :param state_a: The first state to compare
    :param state_b: The second state to compare
    """
    history_a = tuple(state_a.history.bbl_addrs)
    history_b = tuple(state_b.history.bbl_addrs)
    matcher = SequenceMatcher(a=history_a, b=history_b)
    return matcher.ratio()
|
The `difflib.SequenceMatcher` ratio between the state addresses in the history of the path.
:param state_a: The first state to compare
:param state_b: The second state to compare
|
def _get(self, target, alias):
    """
    Internal method to get a specific alias.

    Returns None when the target is unknown or the alias is absent.
    """
    if target in self._aliases:
        return self._aliases[target].get(alias)
    return None
|
Internal method to get a specific alias.
|
def find_block(context, *names):
    '''
    Find the first matching block in the current block_context
    '''
    blocks = context.render_context[BLOCK_CONTEXT_KEY]
    for candidate in names:
        match = blocks.get_block(candidate)
        if match is not None:
            return match
    raise template.TemplateSyntaxError('No widget found for: %r' % (names,))
|
Find the first matching block in the current block_context
|
def request(self, method, url, headers=None, raise_exception=True, **kwargs):
    """Main method for routing HTTP requests to the configured Vault base_uri.
    :param method: HTTP method to use with the request. E.g., GET, POST, etc.
    :type method: str
    :param url: Partial URL path to send the request to. This will be joined to the end of the instance's base_uri
        attribute.
    :type url: str | unicode
    :param headers: Additional headers to include with the request.
    :type headers: dict
    :param raise_exception: If True, raise an exception via utils.raise_for_error(). Set this parameter to False to
        bypass this functionality.
    :type raise_exception: bool
    :param kwargs: Additional keyword arguments to include in the requests call.
    :type kwargs: dict
    :return: The response of the request.
    :rtype: requests.Response
    """
    if '//' in url:
        # Vault CLI treats a double forward slash ('//') as a single forward slash for a given path.
        # To avoid issues with the requests module's redirection logic, we perform the same translation here.
        logger.warning('Replacing double-slashes ("//") in path with single slash ("/") to avoid Vault redirect response.')
        url = url.replace('//', '/')
    url = self.urljoin(self.base_uri, url)

    # Build the header set: caller-supplied headers plus the Vault
    # auth/namespace/wrapping headers derived from instance state.
    headers = headers or {}
    if self.token:
        headers['X-Vault-Token'] = self.token
    if self.namespace:
        headers['X-Vault-Namespace'] = self.namespace
    wrap_ttl = kwargs.pop('wrap_ttl', None)
    if wrap_ttl:
        headers['X-Vault-Wrap-TTL'] = str(wrap_ttl)

    # Instance-level kwargs are defaults; per-call kwargs win.
    call_kwargs = self._kwargs.copy()
    call_kwargs.update(kwargs)
    response = self.session.request(
        method=method,
        url=url,
        headers=headers,
        allow_redirects=self.allow_redirects,
        **call_kwargs
    )

    if raise_exception and 400 <= response.status_code < 600:
        text = errors = None
        if response.headers.get('Content-Type') == 'application/json':
            errors = response.json().get('errors')
        if errors is None:
            text = response.text
        utils.raise_for_error(response.status_code, text, errors=errors)
    return response
|
Main method for routing HTTP requests to the configured Vault base_uri.
:param method: HTTP method to use with the request. E.g., GET, POST, etc.
:type method: str
:param url: Partial URL path to send the request to. This will be joined to the end of the instance's base_uri
attribute.
:type url: str | unicode
:param headers: Additional headers to include with the request.
:type headers: dict
:param raise_exception: If True, raise an exception via utils.raise_for_error(). Set this parameter to False to
bypass this functionality.
:type raise_exception: bool
:param kwargs: Additional keyword arguments to include in the requests call.
:type kwargs: dict
:return: The response of the request.
:rtype: requests.Response
|
def make_openid_request(arq, keys, issuer, request_object_signing_alg, recv):
    """
    Construct the JWT to be passed by value (the request parameter) or by
    reference (request_uri).
    The request will be signed
    :param arq: The Authorization request
    :param keys: Keys to use for signing/encrypting. A KeyJar instance
    :param issuer: Who is signing this JSON Web Token
    :param request_object_signing_alg: Which signing algorithm to use
    :param recv: The intended receiver of the request
    :return: JWT encoded OpenID request
    """
    jwt_builder = JWT(key_jar=keys, iss=issuer,
                      sign_alg=request_object_signing_alg)
    return jwt_builder.pack(arq.to_dict(), owner=issuer, recv=recv)
|
Construct the JWT to be passed by value (the request parameter) or by
reference (request_uri).
The request will be signed
:param arq: The Authorization request
:param keys: Keys to use for signing/encrypting. A KeyJar instance
:param issuer: Who is signing this JSON Web Token
:param request_object_signing_alg: Which signing algorithm to use
:param recv: The intended receiver of the request
:return: JWT encoded OpenID request
|
def remove_peer(self, peer):
    """
    Remove one or multiple peers from the PEERS variable.

    :param peer: a peer URL (str) or a list of peer URLs to remove;
        list entries match any PEERS entry containing them as a
        substring, a plain string matches only exact entries
    :raises ValueError: if ``peer`` is neither a list nor a string
    """
    if isinstance(peer, list):
        for candidate in peer:
            check_url(candidate)
            # Iterate over a snapshot: removing from the list while
            # iterating it skips elements (bug in the original code).
            for known in list(self.PEERS):
                if candidate in known:
                    self.PEERS.remove(known)
    elif isinstance(peer, str):
        check_url(peer)
        for known in list(self.PEERS):
            if known == peer:
                self.PEERS.remove(known)
    else:
        raise ValueError('peer parameter did not pass url validation')
|
remove one or multiple peers from the PEERS variable
:param peer: a peer URL (string) or a list of peer URLs to remove
|
def dynamize_range_key_condition(self, range_key_condition):
    """
    Convert a layer2 range_key_condition parameter into the
    structure required by Layer1.
    """
    if not range_key_condition:
        return None
    result = None
    for range_value, range_condition in range_key_condition.items():
        if range_condition == 'BETWEEN':
            # BETWEEN requires a (low, high) tuple of values.
            if not isinstance(range_value, tuple):
                raise TypeError('BETWEEN condition requires a tuple value')
            attr_values = [self.dynamize_value(v) for v in range_value]
        elif isinstance(range_value, tuple):
            raise TypeError('Tuple can only be supplied with BETWEEN condition')
        else:
            attr_values = [self.dynamize_value(range_value)]
        result = {'AttributeValueList': attr_values,
                  'ComparisonOperator': range_condition}
    return result
|
Convert a layer2 range_key_condition parameter into the
structure required by Layer1.
|
def _log_default(self):
    """Start logging for this application.

    The default is to log to stdout using a StreamHandler.  The log
    level is taken from the ``log_level`` attribute and can be adjusted
    by setting that attribute.
    """
    log = logging.getLogger(self.__class__.__name__)
    log.setLevel(self.log_level)
    if sys.executable.endswith('pythonw.exe'):
        # pythonw has no console.  This should really go to a file, but
        # file-logging is only hooked up in parallel applications, so
        # discard output via the null device instead.
        handler = logging.StreamHandler(open(os.devnull, 'w'))
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(self.log_format))
    log.addHandler(handler)
    return log
|
Start logging for this application.
The default is to log to stdout using a StreamHandler. The log level
starts at logging.WARN, but this can be adjusted by setting the
``log_level`` attribute.
|
def end_of_history(self, current):  # (M->)
    u'''Move to the end of the input history, i.e., the line currently
    being entered.

    :param current: line buffer whose text is replaced with the most
        recent history entry.
    '''
    self.history_cursor = len(self.history)
    # Guard against an empty history: self.history[-1] raised
    # IndexError in the original implementation.
    if self.history:
        current.set_line(self.history[-1].get_line_text())
|
u'''Move to the end of the input history, i.e., the line currently
being entered.
|
def modifiedLaplacian(img):
    """'LAPM' focus-measure algorithm (Nayar89)."""
    # 1-D second-derivative kernel and a 3-tap Gaussian kernel.
    laplace_1d = np.array([-1, 2, -1])
    gauss_1d = cv2.getGaussianKernel(ksize=3, sigma=-1)
    # Separable second derivatives along x and y.
    deriv_x = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F,
                              kernelX=laplace_1d, kernelY=gauss_1d)
    deriv_y = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F,
                              kernelX=gauss_1d, kernelY=laplace_1d)
    focus_map = np.abs(deriv_x) + np.abs(deriv_y)
    return cv2.mean(focus_map)[0]
|
LAPM' algorithm (Nayar89)
|
def _doActualSave(self, fname, comment, set_ro=False, overwriteRO=False):
    """ Override this so we can handle case of file not writable, as
    well as to make our _lastSavedState copy.

    :param fname: target file name; when None or '' the task object's
                  own filename is used
    :param comment: comment string passed through to saveParList
    :param set_ro: when True, make the saved file read-only afterwards
                   (skipped when the file lives in the rc dir)
    :param overwriteRO: when True, an existing read-only file is made
                        writable before saving (save-as case)
    :return: the return value of saveParList
    """
    self.debug('Saving, file name given: '+str(fname)+', set_ro: '+\
               str(set_ro)+', overwriteRO: '+str(overwriteRO))
    cantWrite = False
    inInstArea = False
    if fname in (None, ''): fname = self._taskParsObj.getFilename()
    # now do some final checks then save
    try:
        if _isInstalled(fname): # check: may be installed but not read-only
            inInstArea = cantWrite = True
        else:
            # in case of save-as, allow overwrite of read-only file
            if overwriteRO and os.path.exists(fname):
                setWritePrivs(fname, True, True) # try make writable
            # do the save
            rv=self._taskParsObj.saveParList(filename=fname,comment=comment)
    except IOError:
        # No write permission on fname; fall through to the local-copy
        # logic below rather than failing the save outright.
        cantWrite = True
    # User does not have privs to write to this file. Get name of local
    # choice and try to use that.
    if cantWrite:
        fname = self._taskParsObj.getDefaultSaveFilename()
        # Tell them the context is changing, and where we are saving
        msg = 'Read-only config file for task "'
        if inInstArea:
            msg = 'Installed config file for task "'
        msg += self._taskParsObj.getName()+'" is not to be overwritten.'+\
               ' Values will be saved to: \n\n\t"'+fname+'".'
        showwarning(message=msg, title="Will not overwrite!")
        # Try saving to their local copy
        rv=self._taskParsObj.saveParList(filename=fname, comment=comment)
        # Treat like a save-as (update title for ALL save ops)
        self._saveAsPostSave_Hook(fname)
    # Limit write privs if requested (only if not in the rc dir)
    if set_ro and os.path.dirname(os.path.abspath(fname)) != \
       os.path.abspath(self._rcDir):
        cfgpars.checkSetReadOnly(fname)
    # Before returning, make a copy so we know what was last saved.
    # The dict() method returns a deep-copy dict of the keyvals.
    self._lastSavedState = self._taskParsObj.dict()
    return rv
|
Override this so we can handle case of file not writable, as
well as to make our _lastSavedState copy.
|
def search_filter(entities, filters):
    """Read all ``entities`` and locally filter them.
    This method can be used like so::
        entities = EntitySearchMixin(entities, {'name': 'foo'})
    In this example, only entities where ``entity.name == 'foo'`` holds
    true are returned. An arbitrary number of field names and values may be
    provided as filters.
    .. NOTE:: This method calls :meth:`EntityReadMixin.read`. As a result,
        this method only works when called on a class that also inherits
        from :class:`EntityReadMixin`.
    :param entities: A list of :class:`Entity` objects. All list items
        should be of the same type.
    :param filters: A dict in the form ``{field_name: field_value, …}``.
    :raises nailgun.entity_mixins.NoSuchFieldError: If any of the fields
        named in ``filters`` do not exist on the entities being filtered.
    :raises: ``NotImplementedError`` If any of the fields named in
        ``filters`` are a :class:`nailgun.entity_fields.OneToOneField` or
        :class:`nailgun.entity_fields.OneToManyField`.
    """
    # Sanity checks on the arguments.
    if not entities:
        return entities
    fields = entities[0].get_fields()  # assume all entities are identical
    if not set(filters).issubset(fields):
        raise NoSuchFieldError(
            'Valid filters are {0}, but received {1} instead.'
            .format(fields.keys(), filters.keys())
        )
    for field_name in filters:
        if isinstance(fields[field_name], (OneToOneField, OneToManyField)):
            raise NotImplementedError(
                'Search results cannot (yet?) be locally filtered by '
                '`OneToOneField`s and `OneToManyField`s. {0} is a {1}.'
                .format(field_name, type(fields[field_name]).__name__)
            )
    # Work on fresh copies so the caller's entities are not altered.
    matching = [entity.read() for entity in entities]
    for field_name, field_value in filters.items():
        matching = [
            candidate for candidate in matching
            if getattr(candidate, field_name) == field_value
        ]
    return matching
|
Read all ``entities`` and locally filter them.
This method can be used like so::
entities = EntitySearchMixin(entities, {'name': 'foo'})
In this example, only entities where ``entity.name == 'foo'`` holds
true are returned. An arbitrary number of field names and values may be
provided as filters.
.. NOTE:: This method calls :meth:`EntityReadMixin.read`. As a result,
this method only works when called on a class that also inherits
from :class:`EntityReadMixin`.
:param entities: A list of :class:`Entity` objects. All list items
should be of the same type.
:param filters: A dict in the form ``{field_name: field_value, …}``.
:raises nailgun.entity_mixins.NoSuchFieldError: If any of the fields
named in ``filters`` do not exist on the entities being filtered.
:raises: ``NotImplementedError`` If any of the fields named in
``filters`` are a :class:`nailgun.entity_fields.OneToOneField` or
:class:`nailgun.entity_fields.OneToManyField`.
|
def shareproject(self, project_id, group_id, group_access):
    """
    Allow to share project with group.
    :param project_id: The ID of a project
    :param group_id: The ID of a group
    :param group_access: Level of permissions for sharing
    :return: True is success
    """
    payload = {'id': project_id, 'group_id': group_id,
               'group_access': group_access}
    share_url = '{0}/{1}/share'.format(self.projects_url, project_id)
    response = requests.post(share_url, headers=self.headers,
                             data=payload, verify=self.verify_ssl)
    # GitLab answers 201 Created on success.
    return response.status_code == 201
|
Allow to share project with group.
:param project_id: The ID of a project
:param group_id: The ID of a group
:param group_access: Level of permissions for sharing
:return: True if successful
|
def isrchi(value, ndim, array):
    """
    Search for a given value within an integer array. Return
    the index of the first matching array entry, or -1 if the key
    value was not found.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrchi_c.html
    :param value: Key value to be found in array.
    :type value: int
    :param ndim: Dimension of array.
    :type ndim: int
    :param array: Integer array to search.
    :type array: Array of ints
    :return:
        The index of the first matching array element or -1
        if the value is not found.
    :rtype: int
    """
    # Marshal the Python arguments into ctypes form for CSPICE.
    c_value = ctypes.c_int(value)
    c_ndim = ctypes.c_int(ndim)
    c_array = stypes.toIntVector(array)
    return libspice.isrchi_c(c_value, c_ndim, c_array)
|
Search for a given value within an integer array. Return
the index of the first matching array entry, or -1 if the key
value was not found.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrchi_c.html
:param value: Key value to be found in array.
:type value: int
:param ndim: Dimension of array.
:type ndim: int
:param array: Integer array to search.
:type array: Array of ints
:return:
The index of the first matching array element or -1
if the value is not found.
:rtype: int
|
def start_after(self, document_fields):
    """Start query results after a particular document value.
    The result set will **exclude** the document specified by
    ``document_fields``.
    If the current query already has specified a start cursor -- either
    via this method or
    :meth:`~.firestore_v1beta1.query.Query.start_at` -- this will
    overwrite it.
    When the query is sent to the server, the ``document_fields`` will
    be used in the order given by fields set by
    :meth:`~.firestore_v1beta1.query.Query.order_by`.
    Args:
        document_fields (Union[~.firestore_v1beta1.\
            document.DocumentSnapshot, dict, list, tuple]): a document
            snapshot or a dictionary/list/tuple of fields representing a
            query results cursor. A cursor is a collection of values that
            represent a position in a query result set.
    Returns:
        ~.firestore_v1beta1.query.Query: A query with cursor. Acts as
        a copy of the current query, modified with the newly added
        "start after" cursor.
    """
    # Exclusive start cursor: before=False, start=True.
    query_with_cursor = self._cursor_helper(
        document_fields, before=False, start=True)
    return query_with_cursor
|
Start query results after a particular document value.
The result set will **exclude** the document specified by
``document_fields``.
If the current query already has specified a start cursor -- either
via this method or
:meth:`~.firestore_v1beta1.query.Query.start_at` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"start after" cursor.
|
def addTrack(self, track):
    """
    Add a track to be recorded.
    :param: track: An :class:`aiortc.AudioStreamTrack` or :class:`aiortc.VideoStreamTrack`.
    """
    container_format = self.__container.format.name
    if track.kind == 'audio':
        # Pick an audio codec that matches the container format.
        if container_format == 'wav':
            codec_name = 'pcm_s16le'
        elif container_format == 'mp3':
            codec_name = 'mp3'
        else:
            codec_name = 'aac'
        stream = self.__container.add_stream(codec_name)
    elif container_format == 'image2':
        # Still-image sequence output.
        stream = self.__container.add_stream('png', rate=30)
        stream.pix_fmt = 'rgb24'
    else:
        # Default video output: H.264.
        stream = self.__container.add_stream('libx264', rate=30)
        stream.pix_fmt = 'yuv420p'
    self.__tracks[track] = MediaRecorderContext(stream)
|
Add a track to be recorded.
:param: track: An :class:`aiortc.AudioStreamTrack` or :class:`aiortc.VideoStreamTrack`.
|
def post(self, request, *args, **kwargs):
    """ Validates subscription data before creating Subscription message
    """
    # Guard clause: reject requests that carry no 'data' payload at all,
    # rather than referencing a missing key below.
    if "data" not in request.data:
        message = {"data": ["This field is required."]}
        return Response(message, status=400)
    payload = request.data["data"]
    # This is a workaround for JSONField not liking blank/null refs
    if "metadata" not in payload:
        payload["metadata"] = {}
    if "initial_sequence_number" not in payload:
        payload["initial_sequence_number"] = payload.get(
            "next_sequence_number")
    subscription = SubscriptionSerializer(data=payload)
    if not subscription.is_valid():
        return Response(subscription.errors, status=400)
    subscription.save()
    return Response({"accepted": True}, status=201)
|
Validates subscription data before creating Subscription message
|
def get_slab(self, shift=0, tol=0.1, energy=None):
    """
    This method takes in shift value for the c lattice direction and
    generates a slab based on the given shift. You should rarely use this
    method. Instead, it is used by other generation algorithms to obtain
    all slabs.
    Arg:
        shift (float): A shift value in Angstrom that determines how much a
            slab should be shifted.
        tol (float): Tolerance to determine primitive cell.
        energy (float): An energy to assign to the slab.
    Returns:
        (Slab) A Slab object with a particular shifted oriented unit cell.
    """
    # Projected height of the oriented unit cell along c, and its
    # equivalent expressed in d_hkl spacings.
    h = self._proj_height
    p = h/self.parent.lattice.d_hkl(self.miller_index)
    # Layer counts for the slab and vacuum regions, measured either in
    # unit planes or in projected-height units.
    if self.in_unit_planes:
        nlayers_slab = int(math.ceil(self.min_slab_size / p))
        nlayers_vac = int(math.ceil(self.min_vac_size / p))
    else:
        nlayers_slab = int(math.ceil(self.min_slab_size / h))
        nlayers_vac = int(math.ceil(self.min_vac_size / h))
    nlayers = nlayers_slab + nlayers_vac
    species = self.oriented_unit_cell.species_and_occu
    props = self.oriented_unit_cell.site_properties
    # Replicate per-site properties once per slab layer.
    props = {k: v * nlayers_slab for k, v in props.items()}
    frac_coords = self.oriented_unit_cell.frac_coords
    # Apply the shift along c and wrap coordinates back into [0, 1).
    frac_coords = np.array(frac_coords) + np.array([0, 0, -shift])[None, :]
    frac_coords -= np.floor(frac_coords)
    a, b, c = self.oriented_unit_cell.lattice.matrix
    # Extend the c vector to hold the slab layers plus the vacuum gap.
    new_lattice = [a, b, nlayers * c]
    frac_coords[:, 2] = frac_coords[:, 2] / nlayers
    # Stack the shifted cell nlayers_slab times along c.
    all_coords = []
    for i in range(nlayers_slab):
        fcoords = frac_coords.copy()
        fcoords[:, 2] += i / nlayers
        all_coords.extend(fcoords)
    slab = Structure(new_lattice, species * nlayers_slab, all_coords,
                     site_properties=props)
    scale_factor = self.slab_scale_factor
    # Whether or not to orthogonalize the structure
    if self.lll_reduce:
        lll_slab = slab.copy(sanitize=True)
        # Carry the lattice mapping into the accumulated scale factor.
        mapping = lll_slab.lattice.find_mapping(slab.lattice)
        scale_factor = np.dot(mapping[2], scale_factor)
        slab = lll_slab
    # Whether or not to center the slab layer around the vacuum
    if self.center_slab:
        avg_c = np.average([c[2] for c in slab.frac_coords])
        slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
    if self.primitive:
        prim = slab.get_primitive_structure(tolerance=tol)
        if energy is not None:
            # Rescale the energy by the volume reduction of the
            # primitive cell.
            energy = prim.volume / slab.volume * energy
        slab = prim
    # Reorient the lattice to get the correct reduced cell
    ouc = self.oriented_unit_cell.copy()
    if self.primitive:
        #find a reduced ouc
        slab_l = slab.lattice
        # Constrain the primitive search so the ouc stays commensurate
        # with the slab's in-plane lattice.
        ouc = ouc.get_primitive_structure(constrain_latt={"a": slab_l.a, "b": slab_l.b,
                                                          "alpha": slab_l.alpha,
                                                          "beta": slab_l.beta,
                                                          "gamma": slab_l.gamma})
    return Slab(slab.lattice, slab.species_and_occu,
                slab.frac_coords, self.miller_index,
                ouc, shift, scale_factor, energy=energy,
                site_properties=slab.site_properties,
                reorient_lattice=self.reorient_lattice)
|
This method takes in shift value for the c lattice direction and
generates a slab based on the given shift. You should rarely use this
method. Instead, it is used by other generation algorithms to obtain
all slabs.
Arg:
shift (float): A shift value in Angstrom that determines how much a
slab should be shifted.
tol (float): Tolerance to determine primitive cell.
energy (float): An energy to assign to the slab.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
|
def find_standard_sakefile(settings):
    """Returns the filename of the appropriate sakefile"""
    error = settings["error"]
    custom = settings["customsake"]
    if custom:
        # A user-specified sakefile must exist.
        if not os.path.isfile(custom):
            error("Specified sakefile '{}' doesn't exist", custom)
            sys.exit(1)
        return custom
    # no custom specified, going over defaults in order
    for candidate in ("Sakefile", "Sakefile.yaml", "Sakefile.yml"):
        if os.path.isfile(candidate):
            return candidate
    error("Error: there is no Sakefile to read")
    sys.exit(1)
|
Returns the filename of the appropriate sakefile
|
def paintEvent(self, event):
    """Fills the panel background using QPalette."""
    # Nothing to paint for hidden or floating panels.
    if not self.isVisible() or self.position == self.Position.FLOATING:
        return
    # fill background
    self._background_brush = QBrush(QColor(self.editor.sideareas_color))
    self._foreground_pen = QPen(QColor(self.palette().windowText().color()))
    painter = QPainter(self)
    painter.fillRect(event.rect(), self._background_brush)
|
Fills the panel background using QPalette.
|
def get_urls(self, **kwargs):
    """
    Ensure the correct host by injecting the current site.
    """
    current_site = Site.objects.get(id=current_site_id())
    kwargs["site"] = current_site
    return super(DisplayableSitemap, self).get_urls(**kwargs)
|
Ensure the correct host by injecting the current site.
|
def mag_field_motors(RAW_IMU, SENSOR_OFFSETS, ofs, SERVO_OUTPUT_RAW, motor_ofs):
    '''calculate magnetic field strength from raw magnetometer'''
    field = [RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag]
    # Motor-throttle-corrected offsets.
    ofs = get_motor_offsets(SERVO_OUTPUT_RAW, ofs, motor_ofs)
    if SENSOR_OFFSETS is not None and ofs is not None:
        # Apply the motor offsets relative to the stored sensor offsets.
        field[0] += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
        field[1] += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
        field[2] += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
    return sqrt(field[0]**2 + field[1]**2 + field[2]**2)
|
calculate magnetic field strength from raw magnetometer
|
def bulk_get_or_create(self, data_list):
    """
    Get or create model records in bulk.

    ``data_list`` maps pre-generated record keys to the kwargs used to
    construct a model instance when no existing record is found, which
    avoids regenerating keys here and saves time.  Uses values instead
    of whole objects, which is much faster.
    Args:
        data_list: dict of record_key -> constructor kwargs
    Returns:
        The refreshed record lookup after a bulk create.
    """
    pending = dict()
    for record_key, record_config in data_list.items():
        if record_key in pending:
            continue
        if not self.get_instance(record_key):
            pending[record_key] = self.model_cls(**record_config)
    if pending:
        """
        TODO. I think we can optimize this. Switch to values, get the primary id
        Query set is just select the model with that ID. Return the model object without running the full queryset again. Should be a lot faster.
        """
        self.model_cls.objects.bulk_create(pending.values())
        self.set_record_lookup(True)
        return self.record_lookup
|
data_list is the data to get or create
We generate the query and set all the record keys based on passed in queryset
Then we loop over each item in the data_list, which has the keys already! No need to generate them. Should save a lot of time
Use values instead of the whole object, much faster
Args:
data_list:
Returns:
|
def parse_column_filter(definition):
    """Parse a `str` of the form 'column>50'
    Parameters
    ----------
    definition : `str`
        a column filter definition of the form ``<name><operator><threshold>``
        or ``<threshold><operator><name><operator><threshold>``, e.g.
        ``frequency >= 10``, or ``50 < snr < 100``
    Returns
    -------
    filters : `list` of `tuple`
        a `list` of filter 3-`tuple`s, where each `tuple` contains the
        following elements:
        - ``column`` (`str`) - the name of the column on which to operate
        - ``operator`` (`callable`) - the operator to call when evaluating
          the filter
        - ``operand`` (`anything`) - the argument to the operator function
    Raises
    ------
    ValueError
        if the filter definition cannot be parsed
    KeyError
        if any parsed operator string cannot be mapped to a function from
        the `operator` module
    Notes
    -----
    Strings that contain non-alphanumeric characters (e.g. hyphen `-`) should
    be quoted inside the filter definition, to prevent such characters
    being interpreted as operators, e.g. ``channel = X1:TEST`` should always
    be passed as ``channel = "X1:TEST"``.
    Examples
    --------
    >>> parse_column_filter("frequency>10")
    [('frequency', <function operator.gt>, 10.)]
    >>> parse_column_filter("50 < snr < 100")
    [('snr', <function operator.gt>, 50.), ('snr', <function operator.lt>, 100.)]
    >>> parse_column_filter('channel = "H1:TEST"')
    [('channel', <function operator.eq>, 'H1:TEST')]
    """  # noqa
    # parse definition into parts (skipping null tokens)
    parts = list(generate_tokens(StringIO(definition.strip()).readline))
    while parts[-1][0] in (token.ENDMARKER, token.NEWLINE):
        parts = parts[:-1]
    # parse simple definition: e.g: snr > 5
    if len(parts) == 3:
        a, b, c = parts  # pylint: disable=invalid-name
        if a[0] in [token.NAME, token.STRING]:  # e.g. snr > 5
            name = QUOTE_REGEX.sub('', a[1])
            oprtr = OPERATORS[b[1]]
            value = _float_or_str(c[1])
            return [(name, oprtr, value)]
        elif c[0] in [token.NAME, token.STRING]:  # e.g. 5 < snr
            # BUGFIX: the name is the third token and the operator the
            # second; the original read both from ``b``, so
            # OPERATORS_INV was keyed on the column name (KeyError).
            name = QUOTE_REGEX.sub('', c[1])
            oprtr = OPERATORS_INV[b[1]]
            value = _float_or_str(a[1])
            return [(name, oprtr, value)]
    # parse between definition: e.g: 5 < snr < 10
    elif len(parts) == 5:
        a, b, c, d, e = list(zip(*parts))[1]  # pylint: disable=invalid-name
        name = QUOTE_REGEX.sub('', c)
        return [(name, OPERATORS_INV[b], _float_or_str(a)),
                (name, OPERATORS[d], _float_or_str(e))]
    raise ValueError("Cannot parse filter definition from %r" % definition)
|
Parse a `str` of the form 'column>50'
Parameters
----------
definition : `str`
a column filter definition of the form ``<name><operator><threshold>``
or ``<threshold><operator><name><operator><threshold>``, e.g.
``frequency >= 10``, or ``50 < snr < 100``
Returns
-------
filters : `list` of `tuple`
a `list` of filter 3-`tuple`s, where each `tuple` contains the
following elements:
- ``column`` (`str`) - the name of the column on which to operate
- ``operator`` (`callable`) - the operator to call when evaluating
the filter
- ``operand`` (`anything`) - the argument to the operator function
Raises
------
ValueError
if the filter definition cannot be parsed
KeyError
    if any parsed operator string cannot be mapped to a function from
    the `operator` module
Notes
-----
Strings that contain non-alphanumeric characters (e.g. hyphen `-`) should
be quoted inside the filter definition, to prevent such characters
being interpreted as operators, e.g. ``channel = X1:TEST`` should always
be passed as ``channel = "X1:TEST"``.
Examples
--------
>>> parse_column_filter("frequency>10")
[('frequency', <function operator.gt>, 10.)]
>>> parse_column_filter("50 < snr < 100")
[('snr', <function operator.gt>, 50.), ('snr', <function operator.lt>, 100.)]
>>> parse_column_filter('channel = "H1:TEST"')
[('channel', <function operator.eq>, 'H1:TEST')]
|
def set_states(self, left_state, right_state):
    """
    Seed the comparison with a left/right pair of states.

    Builds a simulation manager that holds ``right_state`` in the
    'right' stash and ``left_state`` in the 'left' stash (also creating
    empty 'stashed_left'/'stashed_right' stashes), then hands it to
    ``set_simgr``.

    NOTE(review): the original docstring ("Checks that the specified
    paths stay the same over the next `depth` states.") describes the
    overall comparison, not this setter — presumably the check happens
    in set_simgr/stepping code; verify against the caller.
    """
    simgr = self.project.factory.simulation_manager(right_state)
    simgr.stash(to_stash='right')
    simgr.active.append(left_state)
    simgr.stash(to_stash='left')
    simgr.stash(to_stash='stashed_left')
    simgr.stash(to_stash='stashed_right')
    return self.set_simgr(simgr)
|
Checks that the specified paths stay the same over the next `depth` states.
|
def set_token(self, token):
    """Validate and set token
    :param token: the token (dict) to set
    """
    # A falsy token clears the stored one.
    if not token:
        self.token = None
        return
    expected_keys = ['token_type', 'refresh_token', 'access_token', 'scope', 'expires_in', 'expires_at']
    has_all_keys = isinstance(token, dict) and set(token) >= set(expected_keys)
    if not has_all_keys:
        raise InvalidUsage("Expected a token dictionary containing the following keys: {0}"
                           .format(expected_keys))
    # Set sanitized token: drop anything outside the expected keys.
    self.token = {k: v for k, v in token.items() if k in expected_keys}
|
Validate and set token
:param token: the token (dict) to set
|
def mentions_links(uri, s):
    """ Turns mentions-like strings into HTML links,
    @uri: /uri/ root for the hashtag-like
    @s: the #str string you're looking for |@|mentions in
    -> #str HTML link |<a href="/uri/mention">mention</a>|
    """
    for username, after in mentions_re.findall(s):
        # Build the lower-cased link target from the root uri.
        target = ('/' + (uri or "").lstrip("/") + quote(username)).lower()
        anchor = '<a href="{}">@{}</a>{}'.format(target, username, after)
        s = s.replace('@' + username, anchor)
    return s
|
Turns mentions-like strings into HTML links,
@uri: /uri/ root for the hashtag-like
@s: the #str string you're looking for |@|mentions in
-> #str HTML link |<a href="/uri/mention">mention</a>|
|
def fix_e271(self, result):
    """Fix extraneous whitespace around keywords."""
    line_index = result['line'] - 1
    original = self.source[line_index]
    fixed = fix_whitespace(original,
                           offset=result['column'] - 1,
                           replacement=' ')
    # Nothing changed: report no modified lines.
    if fixed == original:
        return []
    self.source[line_index] = fixed
|
Fix extraneous whitespace around keywords.
|
def build_url(base, seg, query=None):
    """
    Create a URL from a list of path segments and an optional dict of query
    parameters.
    """
    def clean_segment(segment):
        """
        Cleans the segment and encodes to UTF-8 if the segment is unicode.
        """
        segment = segment.strip('/')
        if isinstance(segment, basestring):
            segment = segment.encode('utf-8')
        return segment

    quoted_segments = (quote(clean_segment(s)) for s in seg)
    if query is None or len(query) == 0:
        query_string = ''
    else:
        query_string = "?" + urlencode(query)
    path = '/'.join(quoted_segments) + query_string
    # Guarantee exactly one trailing slash on the base before joining.
    adjusted_base = base.rstrip('/') + '/'
    return urljoin(str(adjusted_base), str(path))
|
Create a URL from a list of path segments and an optional dict of query
parameters.
|
def file_list(self, tgt_env):
    '''
    Get file list for the target environment using GitPython
    '''
    files = set()
    symlinks = {}
    tree = self.get_tree(tgt_env)
    if not tree:
        # Environment not found: report empty results.
        return files, symlinks
    root = self.root(tgt_env)
    if root:
        try:
            tree = tree / root
        except KeyError:
            return files, symlinks
        strip_root = lambda path: os.path.relpath(path, root)
    else:
        strip_root = lambda path: path
    for blob in tree.traverse():
        if not isinstance(blob, git.Blob):
            continue
        repo_path = salt.utils.path.join(
            self.mountpoint(tgt_env), strip_root(blob.path),
            use_posixpath=True)
        files.add(repo_path)
        if stat.S_ISLNK(blob.mode):
            # A symlink blob's contents are its link target.
            buf = six.StringIO()
            blob.stream_data(buf)
            buf.seek(0)
            symlinks[repo_path] = buf.read()
            buf.close()
    return files, symlinks
|
Get file list for the target environment using GitPython
|
def upload_file(token, channel_name, file_name):
    """Upload a local file to the given Slack channel."""
    Slacker(token).files.upload(file_name, channels=channel_name)
|
upload file to a channel
|
def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):
    """Consume an amount for a given key.

    Non-default rate/capacity can be given to override Throttler defaults.

    Returns:
        bool: whether the units could be consumed
    """
    # The per-key bucket enforces the rate/capacity limits.
    return self.get_bucket(key, rate, capacity, **kwargs).consume(amount)
|
Consume an amount for a given key.
Non-default rate/capacity can be given to override Throttler defaults.
Returns:
bool: whether the units could be consumed
|
def unlock(self, password):
    """ Unlock the key store with *password*.

        The password encrypts this masterpassword: the BIP38-encrypted
        private keys in the keys storage are decrypted with the
        masterpassword, which is itself stored encrypted (by *password*)
        in the configuration store.

        :param str password: Password to use for en-/de-cryption
    """
    self.password = password
    stored = self.config_key in self.config and self.config[self.config_key]
    if stored:
        # An encrypted masterpassword already exists: decrypt it.
        self._decrypt_masterpassword()
    else:
        # First use: create a fresh masterpassword and persist it encrypted.
        self._new_masterpassword(password)
        self._save_encrypted_masterpassword()
|
The password is used to encrypt this masterpassword. To
decrypt the keys stored in the keys database, one must use
BIP38, decrypt the masterpassword from the configuration
store with the user password, and use the decrypted
masterpassword to decrypt the BIP38 encrypted private keys
from the keys storage!
:param str password: Password to use for en-/de-cryption
|
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Pad with sentinel lines so list indices line up with 1-based line numbers
  # and iteration ends in a known way.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  ResetNolintSuppressions()

  # Whole-file checks run on the raw lines before comment stripping.
  CheckForCopyright(filename, lines, error)
  ProcessGlobalSuppresions(lines)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)

  if file_extension in GetHeaderExtensions():
    CheckForHeaderGuard(filename, clean_lines, error)

  # Per-line checks operate on the comment-stripped lines.
  for line in range(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # Check that the .cc file has included its header if it exists.
  if _IsSourceExtension(file_extension):
    CheckHeaderFileIncluded(filename, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
|
Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
|
def has_implementation(self, number, arch, abi_list=()):
    """
    Pretty much the intersection of SimLibrary.has_implementation() and SimSyscallLibrary.get().

    :param number:   The syscall number
    :param arch:     The architecture being worked with, as either a string name or an archinfo.Arch
    :param abi_list: A list of ABI names that could be used
    :return:         A bool of whether or not an implementation of the syscall is available
    """
    # Resolve the number to a canonical name, then defer to the base lookup.
    name = self._canonicalize(number, arch, abi_list)[0]
    return super(SimSyscallLibrary, self).has_implementation(name)
|
Pretty much the intersection of SimLibrary.has_implementation() and SimSyscallLibrary.get().
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
:return: A bool of whether or not an implementation of the syscall is available
|
def _Backward3_sat_v_P(P, T, x):
    """Backward equation for region 3 for saturated state, vs=f(P,x)

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]
    x : integer
        Vapor quality, [-]

    Returns
    -------
    v : float
        Specific volume, [m³/kg]

    Notes
    -----
    The vapor quality (x) can be 0 (saturated liquid) or 1 (saturated vapour)
    """
    # Pressure thresholds (MPa) selecting the auxiliary v(P,T) sub-region,
    # with a fallback label used above the highest threshold.
    if x == 0:
        thresholds = ((19.00881189, "c"), (21.0434, "s"), (21.9316, "u"))
        fallback = "y"
    else:
        thresholds = ((20.5, "t"), (21.0434, "r"), (21.9009, "x"))
        fallback = "z"

    region = fallback
    for limit, label in thresholds:
        if P < limit:
            region = label
            break
    return _Backward3x_v_PT(T, P, region)
|
Backward equation for region 3 for saturated state, vs=f(P,x)
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
x : integer
Vapor quality, [-]
Returns
-------
v : float
Specific volume, [m³/kg]
Notes
-----
The vapor quality (x) can be 0 (saturated liquid) or 1 (saturated vapour)
|
def batch_query_event_records(
        self,
        batch_size: int,
        filters: List[Tuple[str, Any]] = None,
        logical_and: bool = True,
) -> Iterator[List[EventRecord]]:
    """Batch query event records with a given batch size and an optional filter

    This is a generator function returning each batch to the caller to work with.
    The final (empty) batch is yielded as well, matching the paging contract.
    """
    offset = 0
    while True:
        batch = self._get_event_records(
            limit=batch_size,
            offset=offset,
            filters=filters,
            logical_and=logical_and,
        )
        offset += len(batch)
        yield batch
        if not batch:
            break
|
Batch query event records with a given batch size and an optional filter
This is a generator function returning each batch to the caller to work with.
|
def convert_elementwise_add(net, node, module, builder):
    """Convert an elementwise add layer from mxnet to coreml.

    Parameters
    ----------
    network: net
        A mxnet network object.

    layer: node
        Node to convert.

    module: module
        An module for MXNet

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    # The add op consumes the node's first two inputs.
    inputs, output = _get_input_output_name(net, node, [0, 1])
    builder.add_elementwise(node['name'], inputs, output, 'ADD')
|
Convert an elementwise add layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
An module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
|
def tickets_create_many(self, data, **kwargs):
    """Create several tickets in a single request.

    https://developer.zendesk.com/rest_api/docs/core/tickets#create-many-tickets
    """
    return self.call("/api/v2/tickets/create_many.json",
                     method="POST", data=data, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/tickets#create-many-tickets
|
def exec_function(ast, globals_map):
  """Execute a python code object in the given environment.

  Args:
    globals_map: Dictionary to use as the globals context.

  Returns:
    locals_map: Dictionary of locals from the environment after execution.
  """
  # NOTE(review): locals_map aliases globals_map, so top-level assignments
  # made by the executed code land in the shared dict -- presumably so
  # functions it defines can see each other; confirm with callers.
  # This uses the Python-2 exec statement; it is a syntax error on Python 3.
  locals_map = globals_map
  exec ast in globals_map, locals_map
  return locals_map
|
Execute a python code object in the given environment.
Args:
globals_map: Dictionary to use as the globals context.
Returns:
locals_map: Dictionary of locals from the environment after execution.
|
def get(self, block=True, timeout=None, method='pop'):
    """
    If *block* is True, this method blocks until an element can be removed from
    the deque with the specified *method*. If *block* is False, the function
    will raise #Empty if no elements are available.

    # Arguments
    block (bool): #True to block and wait until an element becomes available,
      #False otherwise.
    timeout (number, None): The timeout in seconds to use when waiting for
      an element (only with `block=True`).
    method (str): The name of the method to use to remove an element from the
      queue. Must be either `'pop'` or `'popleft'`.

    # Raises
    ValueError: If *method* has an invalid value.
    Timeout: If the *timeout* is exceeded.
    """
    if method not in ('pop', 'popleft'):
        raise ValueError('method must be "pop" or "popleft": {0!r}'.format(method))
    # time.monotonic() replaces time.clock(), which was removed in Python 3.8
    # and measured CPU time (not wall time) on Unix, so waits spent sleeping
    # were never counted against the timeout.
    t_start = time.monotonic()
    while not self:
        if not block:
            raise self.Empty
        if timeout is None:
            wait(self)
        else:
            t_delta = time.monotonic() - t_start
            if t_delta > timeout:
                raise Timeout
            wait(self, timeout - t_delta)
    return getattr(self, method)()
|
If *block* is True, this method blocks until an element can be removed from
the deque with the specified *method*. If *block* is False, the function
will raise #Empty if no elements are available.
# Arguments
block (bool): #True to block and wait until an element becomes available,
#False otherwise.
timeout (number, None): The timeout in seconds to use when waiting for
an element (only with `block=True`).
method (str): The name of the method to use to remove an element from the
queue. Must be either `'pop'` or `'popleft'`.
# Raises
ValueError: If *method* has an invalid value.
Timeout: If the *timeout* is exceeded.
|
def run_pylint():
    """Run pylint on this process's command-line arguments, exiting with
    status 1 on Ctrl-C instead of a traceback."""
    from pylint.lint import Run
    args = sys.argv[1:]
    try:
        Run(args)
    except KeyboardInterrupt:
        sys.exit(1)
|
run pylint
|
def __neighbor_indexes_distance_matrix(self, index_point):
    """!
    @brief Return neighbors of the specified object in case of distance matrix.

    @param[in] index_point (uint): Index of the point whose neighbors should be found.

    @return (list) Indexes of the neighbors within the connectivity radius.
    """
    # One row of the distance matrix holds distances from this point to all.
    row = self.__pointer_data[index_point]
    neighbors = []
    for other, distance in enumerate(row):
        if other != index_point and distance <= self.__eps:
            neighbors.append(other)
    return neighbors
|
!
@brief Return neighbors of the specified object in case of distance matrix.
@param[in] index_point (uint): Index point whose neighbors should be found.
@return (list) List of indexes of neighbors in line the connectivity radius.
|
def _included_frames(frame_list, frame_format):
    """frame_list should be a list of filenames"""
    # All frames are assumed to live in the directory of the first one.
    frame_dir = os.path.dirname(frame_list[0])
    return INCLUDED_FRAMES.format(Nframes=len(frame_list),
                                  frame_dir=frame_dir,
                                  frame_format=frame_format)
|
frame_list should be a list of filenames
|
def _update_process_resources(self, process, vals):
    """Updates the resources info in :attr:`processes` dictionary.
    """
    for resource in ("cpus",):
        if self.processes[process][resource]:
            # Already populated; leave it alone.
            continue
        try:
            self.processes[process][resource] = vals[0]["cpus"]
        except KeyError:
            # The trace column is not present.
            pass
|
Updates the resources info in :attr:`processes` dictionary.
|
def publishCommand(self, typeId, deviceId, commandId, msgFormat, data=None, qos=0, on_publish=None):
    """
    Publish a command to a device

    # Parameters
    typeId (string) : The type of the device this command is to be published to
    deviceId (string): The id of the device this command is to be published to
    command (string) : The name of the command
    msgFormat (string) : The format of the command payload
    data (dict) : The command data
    qos (int) : The equivalent MQTT semantics of quality of service using the same constants (optional, defaults to `0`)
    on_publish (function) : A function that will be called when receipt of the publication is confirmed.  This has
        different implications depending on the qos:
        - qos 0 : the client has asynchronously begun to send the event
        - qos 1 and 2 : the client has confirmation of delivery from WIoTP

    Returns False if the command could not be sent (QuickStart account, or
    no connection within 10 seconds); True once the publish was accepted.
    """
    if self._config.isQuickstart():
        self.logger.warning("QuickStart applications do not support sending commands")
        return False

    # Wait (up to 10s) for the MQTT connection to be established.
    if not self.connectEvent.wait(timeout=10):
        return False
    else:
        topic = "iot-2/type/%s/id/%s/cmd/%s/fmt/%s" % (typeId, deviceId, commandId, msgFormat)

        # Raise an exception if there is no codec for this msgFormat
        if self.getMessageCodec(msgFormat) is None:
            raise MissingMessageEncoderException(msgFormat)

        payload = self.getMessageCodec(msgFormat).encode(data, datetime.now())

        result = self.client.publish(topic, payload=payload, qos=qos, retain=False)
        if result[0] == paho.MQTT_ERR_SUCCESS:
            # Because we are dealing with aync pub/sub model and callbacks it is possible that
            # the _onPublish() callback for this mid is called before we obtain the lock to place
            # the mid into the _onPublishCallbacks list.
            #
            # _onPublish knows how to handle a scenario where the mid is not present (no nothing)
            # in this scenario we will need to invoke the callback directly here, because at the time
            # the callback was invoked the mid was not yet in the list.
            with self._messagesLock:
                if result[1] in self._onPublishCallbacks:
                    # paho callback beat this thread so call callback inline now
                    del self._onPublishCallbacks[result[1]]
                    if on_publish is not None:
                        on_publish()
                else:
                    # this thread beat paho callback so set up for call later
                    self._onPublishCallbacks[result[1]] = on_publish
            return True
        else:
            return False
|
Publish a command to a device
# Parameters
typeId (string) : The type of the device this command is to be published to
deviceId (string): The id of the device this command is to be published to
command (string) : The name of the command
msgFormat (string) : The format of the command payload
data (dict) : The command data
qos (int) : The equivalent MQTT semantics of quality of service using the same constants (optional, defaults to `0`)
on_publish (function) : A function that will be called when receipt of the publication is confirmed. This has
different implications depending on the qos:
- qos 0 : the client has asynchronously begun to send the event
- qos 1 and 2 : the client has confirmation of delivery from WIoTP
|
def ensure_path_exists(dir_path):
    """
    Make sure that a path exists.

    :param dir_path: directory path to create (intermediate dirs included)
    :return: True if the directory was created, False if it already existed
    """
    # EAFP: attempting the creation directly avoids the check-then-create
    # race of an exists()/makedirs() pair when two processes run this
    # concurrently.
    try:
        os.makedirs(dir_path)
        return True
    except FileExistsError:
        return False
|
Make sure that a path exists
|
def _peek_job(self, pos):
    """
    Return the job currently at position `pos` without removing it from the
    job queue. An IndexError will be raised if that position does not
    currently exist in the job list.

    :param int pos: Position of the job to get.
    :return: The job
    """
    if pos >= len(self._job_info_queue):
        raise IndexError()
    return self._job_info_queue[pos].job
|
Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised
if that position does not currently exist in the job list.
:param int pos: Position of the job to get.
:return: The job
|
def translate_key(jsonkey):
    "helper for translate_*"
    # The JSON payload is a 3-tuple: (name, primary-key sequence, field).
    name, pkey, field = ujson.loads(jsonkey)
    return FieldKey(name, tuple(pkey), field)
|
helper for translate_*
|
def crc16_nojit(s, crc=0):
    """CRC16 implementation according to CCITT standards."""
    # bytearray yields integers on both Python 2 and 3.
    for byte in bytearray(s):
        crc = ((crc << 8) & 0xFFFF) ^ _crc16_tab[((crc >> 8) ^ byte) & 0xFF]
        crc &= 0xFFFF
    return crc
|
CRC16 implementation according to CCITT standards.
|
def get_item_metadata(self, handle):
    """Return dictionary containing all metadata associated with handle.

    In other words all the metadata added using the ``add_item_metadata``
    method.

    :param handle: handle for accessing an item before the dataset is
                   frozen
    :returns: dictionary containing item metadata
    """
    identifier = generate_identifier(handle)
    prefix = self.fragments_key_prefix + '{}'.format(identifier)
    metadata = {}
    for blob in self._blobservice.list_blobs(
            self.uuid, include='metadata', prefix=prefix):
        # Blob names look like "<prefix>.<key>.json": the metadata key is
        # the second-to-last dotted component.
        key = blob.name.split('.')[-2]
        metadata[key] = json.loads(self.get_text(blob.name))
    return metadata
|
Return dictionary containing all metadata associated with handle.
In other words all the metadata added using the ``add_item_metadata``
method.
:param handle: handle for accessing an item before the dataset is
frozen
:returns: dictionary containing item metadata
|
def plotMDS(data, theOrders, theLabels, theColors, theAlphas, theSizes,
            theMarkers, options):
    """Plot the MDS data.

    :param data: the data to plot (MDS values).
    :param theOrders: the order of the populations to plot.
    :param theLabels: the names of the populations to plot.
    :param theColors: the colors of the populations to plot.
    :param theAlphas: the alpha value for the populations to plot.
    :param theSizes: the sizes of the markers for each population to plot.
    :param theMarkers: the type of marker for each population to plot.
    :param options: the options.

    :type data: list of numpy.array
    :type theOrders: list
    :type theLabels: list
    :type theColors: list
    :type theAlphas: list
    :type theSizes: list
    :type theMarkers: list
    :type options: argparse.Namespace

    NOTE(review): this function uses the Python-2 print statement near the
    end (``print str(e)``); it will not compile on Python 3 as-is.
    """
    # Do the import
    # Force a non-interactive backend unless rendering to screen (X11).
    import matplotlib as mpl
    if options.format != "X11" and mpl.get_backend() != "agg":
        mpl.use("Agg")
    import matplotlib.pyplot as plt
    if options.format != "X11":
        plt.ioff()

    fig = plt.figure()
    try:
        fig.subplots_adjust(right=options.adjust_right,
                            left=options.adjust_left,
                            bottom=options.adjust_bottom,
                            top=options.adjust_top)
    except ValueError as e:
        raise ProgramError(e)
    ax = fig.add_subplot(111)

    # Setting the axis
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_position(("outward", 9))
    ax.spines["left"].set_position(("outward", 9))

    # The plot
    # One scatter series per population, in the requested order.
    plotObject = []
    labels = []
    for i, index in enumerate(theOrders):
        try:
            tmp, = ax.plot(data[0][i], data[1][i], theMarkers[i],
                           color=theColors[i], mec=theColors[i],
                           markersize=theSizes[i], alpha=theAlphas[i])
        except ValueError as e:
            msg = "Problem with markers: %(e)s" % locals()
            raise ProgramError(msg)
        plotObject.append(tmp)
        labels.append(index)

    # The legend
    prop = mpl.font_manager.FontProperties(size=options.legend_size)
    leg = ax.legend(plotObject, labels, loc=options.legend_position,
                    numpoints=1, fancybox=True, prop=prop,
                    ncol=options.legend_ncol)
    leg.get_frame().set_alpha(0.5)

    # The title and XY labels
    ax.set_title(options.title, fontsize=options.title_fontsize, weight="bold")
    ax.set_xlabel(options.xlabel, fontsize=options.label_fontsize)
    ax.set_ylabel(options.ylabel, fontsize=options.label_fontsize)

    # Changing the size of the tick labels
    for tick in ax.yaxis.get_major_ticks() + ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(options.axis_fontsize)

    if options.format == "X11":
        # Show the plot
        plt.show()
    else:
        # Write to "<out>.<format>"; distinguish color errors from others.
        fileName = options.out + "." + options.format
        try:
            plt.savefig(fileName, dpi=300)
        except IOError:
            msg = "%(fileName)s: can't write file" % locals()
            raise ProgramError(msg)
        except ValueError as e:
            colorError = False
            for errorMsg in str(e).split("\n"):
                if errorMsg.startswith("to_rgb"):
                    colorError = True
            if colorError:
                msg = "problem with the population colors"
                raise ProgramError(msg)
            else:
                print str(e)
|
Plot the MDS data.
:param data: the data to plot (MDS values).
:param theOrders: the order of the populations to plot.
:param theLabels: the names of the populations to plot.
:param theColors: the colors of the populations to plot.
:param theAlphas: the alpha value for the populations to plot.
:param theSizes: the sizes of the markers for each population to plot.
:param theMarkers: the type of marker for each population to plot.
:param options: the options.
:type data: list of numpy.array
:type theOrders: list
:type theLabels: list
:type theColors: list
:type theAlphas: list
:type theSizes: list
:type theMarkers: list
:type options: argparse.Namespace
|
def start(self):
    """ Make an HTTP request with a specific method.

    When the request is asynchronous, it is dispatched on a background
    thread and the transaction id is returned immediately; otherwise the
    request result is returned directly.
    """
    # TODO : Use Timeout here and _ignore_request_idle
    from .nurest_session import NURESTSession
    session = NURESTSession.get_current_session()

    # 'async' became a reserved keyword in Python 3.7, so the attribute can
    # no longer be read with dot syntax; getattr reads the same attribute
    # while letting this module compile on modern Python.
    if getattr(self, 'async'):
        thread = threading.Thread(target=self._make_request, kwargs={'session': session})
        thread.is_daemon = False
        thread.start()
        return self.transaction_id

    return self._make_request(session=session)
|
Make an HTTP request with a specific method
|
def get_range(self):
    """Return the lowest and the highest note of the bar in a tuple.

    Falls back to the sentinels (100000, -1) when the bar holds no notes.
    """
    lowest, highest = 100000, -1
    for container in self.bar:
        for note in container[2]:
            # Two independent checks (not if/elif): the original elif meant
            # a single note could only set the minimum, leaving the maximum
            # stuck at the -1 sentinel.
            if int(note) < int(lowest):
                lowest = note
            if int(note) > int(highest):
                highest = note
    return (lowest, highest)
|
Return the highest and the lowest note in a tuple.
|
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None):
    '''
    Make a web call to GoGrid

    .. versionadded:: 2015.8.0
    '''
    vm_ = get_configured_provider()
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    sharedsecret = config.get_cloud_config_value(
        'sharedsecret', vm_, __opts__, search_global=False
    )

    # Build the request path from the optional action/command parts.
    path = 'https://api.gogrid.com/api/'
    if action:
        path += action
    if command:
        path += '/{0}'.format(command)
    log.debug('GoGrid URL: %s', path)

    if not isinstance(args, dict):
        args = {}

    # GoGrid signs each request with md5(apikey + sharedsecret + unix time).
    epoch = six.text_type(int(time.time()))
    args['sig'] = salt.utils.hashutils.md5_digest(
        ''.join((apikey, sharedsecret, epoch)))
    args['format'] = 'json'
    args['v'] = '1.0'
    args['api_key'] = apikey

    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    # DELETE responses carry no JSON body worth decoding.
    decode = method != 'DELETE'

    result = salt.utils.http.query(
        path,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        opts=__opts__,
    )
    log.debug('GoGrid Response Status Code: %s', result['status'])

    return result['dict']
|
Make a web call to GoGrid
.. versionadded:: 2015.8.0
|
def __query_cmd(self, command, device=None):
    """Run a home-automation switchcmd, optionally scoped to one device AIN."""
    url = u'%s&switchcmd=%s' % (self.__homeauto_url_with_sid(), command)
    if device is not None:
        url = '%s&ain=%s' % (url, device)
    if self.__debug:
        print(u'Query Command URI: ' + url)
    return self.__query(url)
|
Calls a command
|
def get_container(self, path):
    """Return single container."""
    if settings.container_permitted(path):
        return self._get_container(path)
    raise errors.NotPermittedException(
        "Access to container \"%s\" is not permitted." % path)
|
Return single container.
|
def connect(self):
    # type: () -> None
    """
    Connect to the configured server and authenticate.

    Returns:
        None
    """
    kind = self.connection_type.lower()
    common = dict(host=self.host, port=self.port,
                  local_hostname=self.local_hostname,
                  source_address=self.source_address)
    if kind == 'ssl':
        self.server = smtplib.SMTP_SSL(timeout=self.timeout, **common)
    elif kind == 'lmtp':
        # smtplib.LMTP takes no timeout parameter.
        self.server = smtplib.LMTP(**common)
    else:
        self.server = smtplib.SMTP(timeout=self.timeout, **common)
    self.server.login(self.username, self.password)
|
Connect to server
Returns:
None
|
def get_assessment_admin_session(self):
    """Gets the ``OsidSession`` associated with the assessment administration service.

    return: (osid.assessment.AssessmentAdminSession) - an
            ``AssessmentAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_admin()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_admin()`` is ``true``.*
    """
    if self.supports_assessment_admin():
        # pylint: disable=no-member
        return sessions.AssessmentAdminSession(runtime=self._runtime)
    raise errors.Unimplemented()
|
Gets the ``OsidSession`` associated with the assessment administration service.
return: (osid.assessment.AssessmentAdminSession) - an
``AssessmentAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_admin()`` is ``true``.*
|
def aliases(self):
    """
    Aliases for the UBI volume: the ``'ubi${INDEX}:${LABEL}'`` string
    followed by the device node itself. The former is used to identify
    the device in the /proc/mounts table rather than being a true alias.
    """
    mounts_name = 'ubi{}:{}'.format(self.device.parent.sys_number, self.label)
    return [mounts_name, self.node]
|
Aliases for UBI volume. This property evaluates to the device node itself
plus the ``'ubi${INDEX}:${LABEL}'`` string. The latter is used to
identify the device in /proc/mounts table, and is not really an alias.
|
def create(source_name, size, metadata_backend=None, storage_backend=None):
    """
    Creates a thumbnail file and its relevant metadata. Returns a
    Thumbnail instance.
    """
    storage = storage_backend if storage_backend is not None \
        else backends.storage.get_backend()
    meta = metadata_backend if metadata_backend is not None \
        else backends.metadata.get_backend()

    # Generate the thumbnail, then run the configured post-processors.
    thumbnail_file = post_processors.process(
        processors.process(storage.open(source_name), size), size)

    name = storage.save(get_thumbnail_name(source_name, size), thumbnail_file)
    metadata = meta.add_thumbnail(source_name, size, name)
    return Thumbnail(metadata=metadata, storage=storage)
|
Creates a thumbnail file and its relevant metadata. Returns a
Thumbnail instance.
|
def validate_arguments(args):
    """
    Validate that the necessary arguments for a normal or diff analysis are
    specified.

    :param: args: Command line arguments namespace
    """
    if args.diff and not args.output_dir:
        logger.error('No Output location specified')
        print_usage()
        sys.exit(0)
    if not args.diff and not args.output_dir:
        print_usage()
        sys.exit(0)
|
Validate that the necessary arguments for a normal or diff analysis are specified.
:param: args: Command line arguments namespace
|
def write_bytes(fp, data):
    """
    Write *data* to the file object and return the number of bytes written.

    :return: written byte size
    """
    start = fp.tell()
    fp.write(data)
    written = fp.tell() - start
    # Sanity-check that the stream consumed the whole payload.
    assert written == len(data), 'written=%d, expected=%d' % (written, len(data))
    return written
|
Write bytes to the file object and returns bytes written.
:return: written byte size
|
def Serialize(self, writer):
    """
    Serialize full object.

    Args:
        writer (neo.IO.BinaryWriter):
    """
    super(SpentCoinState, self).Serialize(writer)
    writer.WriteUInt256(self.TransactionHash)
    writer.WriteUInt32(self.TransactionHeight)
    # Items are length-prefixed, then written as (index, height) pairs.
    writer.WriteVarInt(len(self.Items))
    for spent_item in self.Items:
        writer.WriteUInt16(spent_item.index)
        writer.WriteUInt32(spent_item.height)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
def defBoroCnst(self,BoroCnstArt):
    '''
    Defines the constrained portion of the consumption function as cFuncNowCnst,
    an attribute of self.

    Parameters
    ----------
    BoroCnstArt : float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with.  If it is less than the natural borrowing constraint,
        then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
        rowing constraint.

    Returns
    -------
    None

    NOTE(review): the BoroCnstArt parameter itself is never read -- the
    body uses self.BoroCnstArt instead; confirm callers set that attribute.
    '''
    # Make temporary grids of income shocks and next period income values
    ShkCount = self.TranShkValsNext.size
    pLvlCount = self.pLvlGrid.size
    PermShkVals_temp = np.tile(np.reshape(self.PermShkValsNext,(1,ShkCount)),(pLvlCount,1))
    TranShkVals_temp = np.tile(np.reshape(self.TranShkValsNext,(1,ShkCount)),(pLvlCount,1))
    pLvlNext_temp = np.tile(np.reshape(self.pLvlNextFunc(self.pLvlGrid),(pLvlCount,1)),(1,ShkCount))*PermShkVals_temp

    # Find the natural borrowing constraint for each persistent income level
    # (worst case over shock realizations, discounted back by Rfree).
    aLvlMin_candidates = (self.mLvlMinNext(pLvlNext_temp) - TranShkVals_temp*pLvlNext_temp)/self.Rfree
    aLvlMinNow = np.max(aLvlMin_candidates,axis=1)
    self.BoroCnstNat = LinearInterp(np.insert(self.pLvlGrid,0,0.0),np.insert(aLvlMinNow,0,0.0))

    # Define the minimum allowable mLvl by pLvl as the greater of the natural and artificial borrowing constraints
    if self.BoroCnstArt is not None:
        self.BoroCnstArt = LinearInterp(np.array([0.0,1.0]),np.array([0.0,self.BoroCnstArt]))
        self.mLvlMinNow = UpperEnvelope(self.BoroCnstArt,self.BoroCnstNat)
    else:
        self.mLvlMinNow = self.BoroCnstNat

    # Define the constrained consumption function as "consume all" shifted by mLvlMin
    cFuncNowCnstBase = BilinearInterp(np.array([[0.,0.],[1.,1.]]),np.array([0.0,1.0]),np.array([0.0,1.0]))
    self.cFuncNowCnst = VariableLowerBoundFunc2D(cFuncNowCnstBase,self.mLvlMinNow)
|
Defines the constrained portion of the consumption function as cFuncNowCnst,
an attribute of self.
Parameters
----------
BoroCnstArt : float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
Returns
-------
None
|
def _validate_response(self, response):
    """
    Raise if the response is an HTTP error or carries a non-OK status code.

    :param response: requests.models.Response
    :raises: pybomb.exceptions.InvalidResponseException
    :raises: pybomb.exceptions.BadRequestException
    """
    try:
        response.raise_for_status()
    except HTTPError as http_error:
        raise BadRequestException(str(http_error))

    body = response.json()
    if body["status_code"] != self.RESPONSE_STATUS_OK:
        message = "Response code {0}: {1}".format(
            body["status_code"], body["error"])
        raise InvalidResponseException(message)
|
:param response: requests.models.Response
:raises: pybomb.exceptions.InvalidResponseException
:raises: pybomb.exceptions.BadRequestException
|
def from_tibiadata(cls, content, vocation=None):
    """Builds a highscores object from a TibiaData highscores response.
    Notes
    -----
    Since TibiaData.com's response doesn't contain any indication of the vocation filter applied,
    :py:attr:`vocation` can't be determined from the response, so the attribute must be assigned manually.
    If the attribute is known, it can be passed for it to be assigned in this method.
    Parameters
    ----------
    content: :class:`str`
        The JSON content of the response.
    vocation: :class:`VocationFilter`, optional
        The vocation filter to assign to the results. Note that this won't affect the parsing.
    Returns
    -------
    :class:`Highscores`
        The highscores contained in the page, or None if the content is for the highscores of a nonexistent world.
    Raises
    ------
    InvalidContent
        If content is not a JSON string of the highscores response."""
    json_content = parse_json(content)
    try:
        highscores_json = json_content["highscores"]
        # TibiaData marks a nonexistent world with an "error" key inside "data".
        if "error" in highscores_json["data"]:
            return None
        world = highscores_json["world"]
        category = highscores_json["type"]
        highscores = cls(world, category)
        for entry in highscores_json["data"]:
            # Most categories rank by "level"; achievements, loyalty points and
            # experience rank by "points" instead.
            value_key = "level"
            if highscores.category in [Category.ACHIEVEMENTS, Category.LOYALTY_POINTS, Category.EXPERIENCE]:
                value_key = "points"
            # Experience and loyalty entries carry one extra field each
            # ("level" and "title" respectively), hence the dedicated classes.
            if highscores.category == Category.EXPERIENCE:
                highscores.entries.append(ExpHighscoresEntry(entry["name"], entry["rank"], entry["voc"],
                                                             entry[value_key], entry["level"]))
            elif highscores.category == Category.LOYALTY_POINTS:
                highscores.entries.append(LoyaltyHighscoresEntry(entry["name"], entry["rank"], entry["voc"],
                                                                 entry[value_key], entry["title"]))
            else:
                highscores.entries.append(HighscoresEntry(entry["name"], entry["rank"], entry["voc"],
                                                          entry[value_key]))
        highscores.results_count = len(highscores.entries)
    except KeyError:
        # Any missing key means the payload doesn't match the expected schema.
        raise InvalidContent("content is not a TibiaData highscores response.")
    if isinstance(vocation, VocationFilter):
        highscores.vocation = vocation
    return highscores
|
Builds a highscores object from a TibiaData highscores response.
Notes
-----
Since TibiaData.com's response doesn't contain any indication of the vocation filter applied,
:py:attr:`vocation` can't be determined from the response, so the attribute must be assigned manually.
If the attribute is known, it can be passed for it to be assigned in this method.
Parameters
----------
content: :class:`str`
The JSON content of the response.
vocation: :class:`VocationFilter`, optional
The vocation filter to assign to the results. Note that this won't affect the parsing.
Returns
-------
:class:`Highscores`
The highscores contained in the page, or None if the content is for the highscores of a nonexistent world.
Raises
------
InvalidContent
If content is not a JSON string of the highscores response.
|
def list_my(self):
    """ Find organization that has the current identity as the owner or as the member """
    org_list = self.call_contract_command("Registry", "listOrganizations", [])
    rez_owner = []
    rez_member = []
    # Idiom cleanup: the original iterated with enumerate() but never used the
    # index, used redundant parentheses in conditions, and ended a statement
    # with a semicolon.
    for org_id in org_list:
        (found, org_id, org_name, owner, members, serviceNames,
         repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id])
        if not found:
            raise Exception("Organization was removed during this call. Please retry.")
        # The same organization may legitimately appear in both lists.
        if self.ident.address == owner:
            rez_owner.append((org_name, bytes32_to_str(org_id)))
        if self.ident.address in members:
            rez_member.append((org_name, bytes32_to_str(org_id)))
    if rez_owner:
        self._printout("# Organizations you are the owner of")
        self._printout("# OrgName OrgId")
        for org_name, org_id in rez_owner:
            self._printout("%s %s" % (org_name, org_id))
    if rez_member:
        self._printout("# Organizations you are the member of")
        self._printout("# OrgName OrgId")
        for org_name, org_id in rez_member:
            self._printout("%s %s" % (org_name, org_id))
|
Find organization that has the current identity as the owner or as the member
|
def xmlns(source):
    """
    Returns a map of prefix to namespace for the given XML file.
    Only declarations seen before the first element closes are collected.
    """
    prefix_map = {}
    for event, payload in iterparse(source, ("end", "start-ns", "end-ns")):
        if event == "start-ns":
            # payload is a (prefix, uri) pair; '' is the default namespace.
            prefix_map[payload[0]] = payload[1]
        elif event == "end":
            # First closing tag: every root-level declaration has been seen.
            break
    # Rewind the stream so callers can parse it again.
    if hasattr(source, "seek"):
        source.seek(0)
    return prefix_map
|
Returns a map of prefix to namespace for the given XML file.
|
def target_Orange_table(self):
    '''
    Returns the target table as an Orange example table.
    :rtype: orange.ExampleTable
    '''
    table = self.db.target_table
    cls_att = self.db.target_att
    # Prefer a pre-built Orange table when available; convert otherwise.
    if self.db.orng_tables:
        return self.db.orng_tables[table]
    return self.convert_table(table, cls_att=cls_att)
|
Returns the target table as an Orange example table.
:rtype: orange.ExampleTable
|
def get_attachment(self, project, build_id, timeline_id, record_id, type, name, **kwargs):
    """GetAttachment.
    [Preview API] Gets a specific attachment.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param str timeline_id: The ID of the timeline.
    :param str record_id: The ID of the timeline record.
    :param str type: The type of the attachment.
    :param str name: The name of the attachment.
    :rtype: object
    """
    # (route key, raw value, serializer name, serializer type) — only
    # non-None values are serialized into the route.
    param_specs = (
        ('project', project, 'project', 'str'),
        ('buildId', build_id, 'build_id', 'int'),
        ('timelineId', timeline_id, 'timeline_id', 'str'),
        ('recordId', record_id, 'record_id', 'str'),
        ('type', type, 'type', 'str'),
        ('name', name, 'name', 'str'),
    )
    route_values = {}
    for route_key, raw_value, param_name, param_type in param_specs:
        if raw_value is not None:
            route_values[route_key] = self._serialize.url(param_name, raw_value, param_type)
    response = self._send(http_method='GET',
                          location_id='af5122d3-3438-485e-a25a-2dbbfde84ee6',
                          version='5.0-preview.2',
                          route_values=route_values,
                          accept_media_type='application/octet-stream')
    # Optional progress callback for the streamed download.
    callback = kwargs.get("callback")
    return self._client.stream_download(response, callback=callback)
|
GetAttachment.
[Preview API] Gets a specific attachment.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param str timeline_id: The ID of the timeline.
:param str record_id: The ID of the timeline record.
:param str type: The type of the attachment.
:param str name: The name of the attachment.
:rtype: object
|
def rewind(self, count):
    """Rewind index."""
    remaining = self._index - count
    if remaining < 0:  # pragma: no cover
        raise ValueError("Can't rewind past beginning!")
    self._index = remaining
|
Rewind index.
|
def get_language_from_abbr(self, abbr):
    """Get language full name from abbreviation."""
    matches = (lang['language_string']
               for lang in self.user_data.languages
               if lang['language'] == abbr)
    # First match wins; None when the abbreviation is unknown.
    return next(matches, None)
|
Get language full name from abbreviation.
|
def collapse_if_tuple(abi):
    """Converts a tuple from a dict to a parenthesized list of its types.
    >>> from eth_utils.abi import collapse_if_tuple
    >>> collapse_if_tuple(
    ...     {
    ...         'components': [
    ...             {'name': 'anAddress', 'type': 'address'},
    ...             {'name': 'anInt', 'type': 'uint256'},
    ...             {'name': 'someBytes', 'type': 'bytes'},
    ...         ],
    ...         'type': 'tuple',
    ...     }
    ... )
    '(address,uint256,bytes)'
    """
    abi_type = abi["type"]
    # Non-tuple types pass through untouched.
    if not abi_type.startswith("tuple"):
        return abi_type
    inner = ",".join(collapse_if_tuple(component) for component in abi["components"])
    # Whatever follows "tuple" is the array suffix: "", "[]" or "[k]".
    array_suffix = abi_type[len("tuple"):]
    return "({}){}".format(inner, array_suffix)
|
Converts a tuple from a dict to a parenthesized list of its types.
>>> from eth_utils.abi import collapse_if_tuple
>>> collapse_if_tuple(
... {
... 'components': [
... {'name': 'anAddress', 'type': 'address'},
... {'name': 'anInt', 'type': 'uint256'},
... {'name': 'someBytes', 'type': 'bytes'},
... ],
... 'type': 'tuple',
... }
... )
'(address,uint256,bytes)'
|
def fw_romaji_lt(full, regular):
    '''
    Generates a lookup table with the fullwidth rōmaji characters
    on the left side, and the regular rōmaji characters as the values.

    Both sequences are paired element-wise and are expected to be of
    equal length.
    '''
    # Idiom fix: element-wise pairing is exactly dict(zip(...)) — the manual
    # index loop added nothing.
    return dict(zip(full, regular))
|
Generates a lookup table with the fullwidth rōmaji characters
on the left side, and the regular rōmaji characters as the values.
|
def _facet_counts(items):
    """Returns facet counts as dict.
    Given the `items()` on the raw dictionary from Elasticsearch this processes
    it and returns the counts keyed on the facet name provided in the original
    query.
    """
    # One FacetResult per facet, keyed by the facet's name.
    return {name: FacetResult(name, data) for name, data in items}
|
Returns facet counts as dict.
Given the `items()` on the raw dictionary from Elasticsearch this processes
it and returns the counts keyed on the facet name provided in the original
query.
|
def license(self, value=None):
    """No arguments: Get the document's license from metadata
    Argument: Set the document's license in metadata
    """
    uses_native = (self.metadatatype == "native")
    # Setter half: store in the native metadata dict or the fallback slot.
    if value is not None:
        if uses_native:
            self.metadata['license'] = value
        else:
            self._license = value
    # Getter half: always return the current value (after any update above).
    if uses_native:
        return self.metadata['license'] if 'license' in self.metadata else None
    return self._license
|
No arguments: Get the document's license from metadata
Argument: Set the document's license in metadata
|
def next(self):
    """NEXT command.
    Returns the (article number, message id) pair reported by the server.
    """
    code, message = self.command("NEXT")
    # 223 is the only success status for NEXT.
    if code != 223:
        raise NNTPReplyError(code, message)
    fields = message.split(None, 3)
    try:
        article = int(fields[0])
        ident = fields[1]
    except (IndexError, ValueError):
        raise NNTPDataError("Invalid NEXT status")
    return article, ident
|
NEXT command.
|
def scroll(self):
    '''
    Perform scroll action.
    Usage:
    d().scroll(steps=50) # default vertically and forward
    d().scroll.horiz.forward(steps=100)
    d().scroll.vert.backward(steps=100)
    d().scroll.horiz.toBeginning(steps=100, max_swipes=100)
    d().scroll.vert.toEnd(steps=100)
    d().scroll.horiz.to(text="Clock")
    '''
    # One scroll gesture forward or backward along the chosen axis.
    def __scroll(vertical, forward, steps=100):
        method = self.jsonrpc.scrollForward if forward else self.jsonrpc.scrollBackward
        return method(self.selector, vertical, steps)
    # Scroll repeatedly (up to max_swipes) until the start of the list.
    def __scroll_to_beginning(vertical, steps=100, max_swipes=1000):
        return self.jsonrpc.scrollToBeginning(self.selector, vertical, max_swipes, steps)
    # Scroll repeatedly (up to max_swipes) until the end of the list.
    def __scroll_to_end(vertical, steps=100, max_swipes=1000):
        return self.jsonrpc.scrollToEnd(self.selector, vertical, max_swipes, steps)
    # Scroll until an element matching the selector kwargs becomes visible.
    def __scroll_to(vertical, **kwargs):
        return self.jsonrpc.scrollTo(self.selector, Selector(**kwargs), vertical)
    # param_to_property exposes the fluent `.vert/.horiz` + action attribute
    # chain; the "dimention"/"horizental" spellings are part of the public API
    # and must not be corrected.
    @param_to_property(
        dimention=["vert", "vertically", "vertical", "horiz", "horizental", "horizentally"],
        action=["forward", "backward", "toBeginning", "toEnd", "to"])
    def _scroll(dimention="vert", action="forward", **kwargs):
        vertical = dimention in ["vert", "vertically", "vertical"]
        if action in ["forward", "backward"]:
            return __scroll(vertical, action == "forward", **kwargs)
        elif action == "toBeginning":
            return __scroll_to_beginning(vertical, **kwargs)
        elif action == "toEnd":
            return __scroll_to_end(vertical, **kwargs)
        elif action == "to":
            return __scroll_to(vertical, **kwargs)
    return _scroll
|
Perform scroll action.
Usage:
d().scroll(steps=50) # default vertically and forward
d().scroll.horiz.forward(steps=100)
d().scroll.vert.backward(steps=100)
d().scroll.horiz.toBeginning(steps=100, max_swipes=100)
d().scroll.vert.toEnd(steps=100)
d().scroll.horiz.to(text="Clock")
|
def _get_linked_entities(self) -> Dict[str, Dict[str, Tuple[str, str, List[int]]]]:
    """
    This method gets entities from the current utterance and finds which tokens they
    are linked to. The entities are divided into two main groups, ``numbers`` and
    ``strings``. We rely on these entities later for updating the valid actions and
    the grammar.
    """
    # Linking is computed against the most recent utterance only.
    current_tokenized_utterance = [] if not self.tokenized_utterances \
            else self.tokenized_utterances[-1]
    # We generate a dictionary where the key is the type eg. ``number`` or ``string``.
    # The value is another dictionary where the key is the action and the value is a tuple
    # of the nonterminal, the string value and the linking score.
    entity_linking_scores: Dict[str, Dict[str, Tuple[str, str, List[int]]]] = {}
    number_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
    string_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
    # Each add_to_number_linking_scores call registers one numeric nonterminal:
    # the first argument is the set of default values that are always valid.
    # Get time range start
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_time_range_start_from_utterance,
                                      current_tokenized_utterance,
                                      'time_range_start')
    self.add_to_number_linking_scores({'1200'},
                                      number_linking_scores,
                                      get_time_range_end_from_utterance,
                                      current_tokenized_utterance,
                                      'time_range_end')
    self.add_to_number_linking_scores({'0', '1', '60', '41'},
                                      number_linking_scores,
                                      get_numbers_from_utterance,
                                      current_tokenized_utterance,
                                      'number')
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_costs_from_utterance,
                                      current_tokenized_utterance,
                                      'fare_round_trip_cost')
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_costs_from_utterance,
                                      current_tokenized_utterance,
                                      'fare_one_direction_cost')
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_flight_numbers_from_utterance,
                                      current_tokenized_utterance,
                                      'flight_number')
    self.add_dates_to_number_linking_scores(number_linking_scores,
                                            current_tokenized_utterance)
    # Add string linking dict.
    string_linking_dict: Dict[str, List[int]] = {}
    for tokenized_utterance in self.tokenized_utterances:
        string_linking_dict = get_strings_from_utterance(tokenized_utterance)
    strings_list = AtisWorld.sql_table_context.strings_list
    # NOTE(review): the second production is missing a space before "->"
    # (compare the line above) — presumably intentional to match the grammar's
    # action strings; confirm before "fixing".
    strings_list.append(('flight_airline_code_string -> ["\'EA\'"]', 'EA'))
    strings_list.append(('airline_airline_name_string-> ["\'EA\'"]', 'EA'))
    # We construct the linking scores for strings from the ``string_linking_dict`` here.
    for string in strings_list:
        # One 0/1 score per token of the current utterance.
        entity_linking = [0 for token in current_tokenized_utterance]
        # string_linking_dict has the strings and linking scores from the last utterance.
        # If the string is not in the last utterance, then the linking scores will be all 0.
        for token_index in string_linking_dict.get(string[1], []):
            entity_linking[token_index] = 1
        action = string[0]
        string_linking_scores[action] = (action.split(' -> ')[0], string[1], entity_linking)
    entity_linking_scores['number'] = number_linking_scores
    entity_linking_scores['string'] = string_linking_scores
    return entity_linking_scores
|
This method gets entities from the current utterance and finds which tokens they are linked to.
The entities are divided into two main groups, ``numbers`` and ``strings``. We rely on these
entities later for updating the valid actions and the grammar.
|
def log(self, *args, **kwargs):
    """Log a statement from this component.

    Forwards *args/**kwargs to hfoslog, tagging the entry with this
    component's unique name and the caller's source location. Pass
    ``exc=True`` inside an ``except`` block to append the current traceback.
    """
    # Code object of the *caller* (one frame up) — used to attribute the
    # log entry to the function that invoked log().
    func = inspect.currentframe().f_back.f_code
    # Dump the message + the name of this function to the log.
    if 'exc' in kwargs and kwargs['exc'] is True:
        exc_type, exc_obj, exc_tb = exc_info()
        # Point the location at the line that raised, and append the
        # extracted traceback as an extra logged argument.
        line_no = exc_tb.tb_lineno
        # print('EXCEPTION DATA:', line_no, exc_type, exc_obj, exc_tb)
        args += traceback.extract_tb(exc_tb),
    else:
        line_no = func.co_firstlineno
    sourceloc = "[%.10s@%s:%i]" % (
        func.co_name,
        func.co_filename,
        line_no
    )
    hfoslog(sourceloc=sourceloc, emitter=self.uniquename, *args, **kwargs)
|
Log a statement from this component
|
def _fetchSequence(ac, startIndex=None, endIndex=None):
    """Fetch sequences from NCBI using the eself interface.
    An interbase interval may be optionally provided with startIndex and
    endIndex. NCBI eself will return just the requested subsequence, which
    might greatly reduce payload sizes (especially with chromosome-scale
    sequences). When wrapped is True, return list of sequence lines rather
    than concatenated sequence.
    >>> len(_fetchSequence('NP_056374.2'))
    1596
    Pass the desired interval rather than using Python's [] slice
    operator.
    >>> _fetchSequence('NP_056374.2',0,10)
    'MESRETLSSS'
    >>> _fetchSequence('NP_056374.2')[0:10]
    'MESRETLSSS'
    """
    base_url = (
        "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
        "db=nucleotide&id={ac}&rettype=fasta&retmode=text")
    if startIndex is None or endIndex is None:
        url = base_url.format(ac=ac)
    else:
        # NCBI coordinates are 1-based inclusive; convert the interbase
        # interval [startIndex, endIndex) accordingly.
        ranged_url = base_url + "&seq_start={start}&seq_stop={stop}"
        url = ranged_url.format(ac=ac, start=startIndex + 1, stop=endIndex)
    resp = requests.get(url)
    resp.raise_for_status()
    # Drop the FASTA header line; keep the wrapped sequence lines.
    seqlines = resp.content.splitlines()[1:]
    print("{ac}[{s},{e}) => {n} lines ({u})".format(
        ac=ac, s=startIndex, e=endIndex, n=len(seqlines), u=url))
    # return response as list of lines, already line wrapped
    return seqlines
|
Fetch sequences from NCBI using the eself interface.
An interbase interval may be optionally provided with startIndex and
endIndex. NCBI eself will return just the requested subsequence, which
might greatly reduce payload sizes (especially with chromosome-scale
sequences). When wrapped is True, return list of sequence lines rather
than concatenated sequence.
>>> len(_fetchSequence('NP_056374.2'))
1596
Pass the desired interval rather than using Python's [] slice
operator.
>>> _fetchSequence('NP_056374.2',0,10)
'MESRETLSSS'
>>> _fetchSequence('NP_056374.2')[0:10]
'MESRETLSSS'
|
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.
    :rtype: str
    """
    parts = urlparse(url, new_scheme)
    scheme = parts.scheme
    netloc = parts.netloc
    path = parts.path
    # urlparse sometimes decides there is no netloc and leaves the host in
    # path instead; assume that happened and swap the two when netloc is empty.
    if not netloc:
        netloc, path = path, netloc
    return urlunparse((scheme, netloc, path, parts.params, parts.query, parts.fragment))
|
Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
|
def _combine_variant_collections(cls, combine_fn, variant_collections, kwargs):
"""
Create a single VariantCollection from multiple different collections.
Parameters
----------
cls : class
Should be VariantCollection
combine_fn : function
Function which takes any number of sets of variants and returns
some combination of them (typically union or intersection).
variant_collections : tuple of VariantCollection
kwargs : dict
Optional dictionary of keyword arguments to pass to the initializer
for VariantCollection.
"""
kwargs["variants"] = combine_fn(*[set(vc) for vc in variant_collections])
kwargs["source_to_metadata_dict"] = cls._merge_metadata_dictionaries(
[vc.source_to_metadata_dict for vc in variant_collections])
kwargs["sources"] = set.union(*([vc.sources for vc in variant_collections]))
for key, value in variant_collections[0].to_dict().items():
# If some optional parameter isn't explicitly specified as an
# argument to union() or intersection() then use the same value
# as the first VariantCollection.
#
# I'm doing this so that the meaning of VariantCollection.union
# and VariantCollection.intersection with a single argument is
# the identity function (rather than setting optional parameters
# to their default values.
if key not in kwargs:
kwargs[key] = value
return cls(**kwargs)
|
Create a single VariantCollection from multiple different collections.
Parameters
----------
cls : class
Should be VariantCollection
combine_fn : function
Function which takes any number of sets of variants and returns
some combination of them (typically union or intersection).
variant_collections : tuple of VariantCollection
kwargs : dict
Optional dictionary of keyword arguments to pass to the initializer
for VariantCollection.
|
def eth_call(self, from_, to=None, gas=None,
             gas_price=None, value=None, data=None,
             block=BLOCK_TAG_LATEST):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call
    :param from_: From account address
    :type from_: str
    :param to: To account address (optional)
    :type to: str
    :param gas: Gas amount for current transaction (optional)
    :type gas: int
    :param gas_price: Gas price for current transaction (optional)
    :type gas_price: int
    :param value: Amount of ether to send (optional)
    :type value: int
    :param data: Additional data for transaction (optional)
    :type data: hex
    :param block: Block tag or number (optional)
    :type block: int or BLOCK_TAGS
    :rtype: str
    """
    block = validate_block(block)
    call_obj = {'from': from_}
    # Optional fields: (json key, raw value, converter-or-None). They are
    # added only when supplied, in the order the JSON-RPC docs list them.
    optional_fields = (
        ('to', to, None),
        ('gas', gas, hex),
        ('gasPrice', gas_price, hex),
        ('value', value, lambda eth: hex(ether_to_wei(eth))),
        ('data', data, None),
    )
    for key, raw, convert in optional_fields:
        if raw is not None:
            call_obj[key] = convert(raw) if convert else raw
    return (yield from self.rpc_call('eth_call', [call_obj, block]))
|
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call
:param from_: From account address
:type from_: str
:param to: To account address (optional)
:type to: str
:param gas: Gas amount for current transaction (optional)
:type gas: int
:param gas_price: Gas price for current transaction (optional)
:type gas_price: int
:param value: Amount of ether to send (optional)
:type value: int
:param data: Additional data for transaction (optional)
:type data: hex
:param block: Block tag or number (optional)
:type block: int or BLOCK_TAGS
:rtype: str
|
def fit_spectrum(self, specFunc, initPars, freePars=None):
    """ Fit for the free parameters of a spectral function
    Parameters
    ----------
    specFunc : `~fermipy.spectrum.SpectralFunction`
        The Spectral Function
    initPars : `~numpy.ndarray`
        The initial values of the parameters
    freePars : `~numpy.ndarray`
        Boolean array indicating which parameters should be free in
        the fit.
    Returns
    -------
    params : `~numpy.ndarray`
        Best-fit parameters.
    spec_vals : `~numpy.ndarray`
        The values of the best-fit spectral model in each energy bin.
    ts_spec : float
        The TS of the best-fit spectrum
    chi2_vals : `~numpy.ndarray`
        Array of chi-squared values for each energy bin.
    chi2_spec : float
        Global chi-squared value for the sum of all energy bins.
    pval_spec : float
        p-value of chi-squared for the best-fit spectrum.
    """
    # Wrap a bare spectral function in a functor; SEDFunctor instances
    # are used as-is.
    if not isinstance(specFunc, SEDFunctor):
        specFunc = self.create_functor(specFunc, initPars,
                                       scale=specFunc.scale)
    if freePars is None:
        # Default: every parameter is free.
        freePars = np.empty(len(initPars), dtype=bool)
        freePars.fill(True)
    initPars = np.array(initPars)
    freePars = np.array(freePars)
    # Objective for the minimizer: fill the free slots of the parameter
    # vector, evaluate the model, and score it via self.__call__.
    def fToMin(x):
        xp = np.array(specFunc.params)
        xp[freePars] = x
        return self.__call__(specFunc(xp))
    result = fmin(fToMin, initPars[freePars], disp=False, xtol=1e-6)
    # Splice the fitted values back into the full parameter vector.
    out_pars = specFunc.params
    out_pars[freePars] = np.array(result)
    spec_vals = specFunc(out_pars)
    spec_npred = np.zeros(len(spec_vals))
    # Convert model values to predicted counts by scaling with the reference
    # spectrum's npred-to-(e)flux ratio, depending on the functor type.
    if isinstance(specFunc, spectrum.SEDFluxFunctor):
        spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_flux
    elif isinstance(specFunc, spectrum.SEDEFluxFunctor):
        spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_eflux
    ts_spec = self.TS_spectrum(spec_vals)
    chi2_vals = self.chi2_vals(spec_vals)
    chi2_spec = np.sum(chi2_vals)
    # Survival function of chi2 with one degree of freedom per energy bin.
    pval_spec = stats.distributions.chi2.sf(chi2_spec, len(spec_vals))
    return dict(params=out_pars, spec_vals=spec_vals,
                spec_npred=spec_npred,
                ts_spec=ts_spec, chi2_spec=chi2_spec,
                chi2_vals=chi2_vals, pval_spec=pval_spec)
|
Fit for the free parameters of a spectral function
Parameters
----------
specFunc : `~fermipy.spectrum.SpectralFunction`
The Spectral Function
initPars : `~numpy.ndarray`
The initial values of the parameters
freePars : `~numpy.ndarray`
Boolean array indicating which parameters should be free in
the fit.
Returns
-------
params : `~numpy.ndarray`
Best-fit parameters.
spec_vals : `~numpy.ndarray`
The values of the best-fit spectral model in each energy bin.
ts_spec : float
The TS of the best-fit spectrum
chi2_vals : `~numpy.ndarray`
Array of chi-squared values for each energy bin.
chi2_spec : float
Global chi-squared value for the sum of all energy bins.
pval_spec : float
p-value of chi-squared for the best-fit spectrum.
|
def _initialize(self):
"""Set up and normalize initial data once input data is specified."""
self.y_transform = self.y - numpy.mean(self.y)
self.y_transform /= numpy.std(self.y_transform)
self.x_transforms = [numpy.zeros(len(self.y)) for _xi in self.x]
self._compute_sorted_indices()
|
Set up and normalize initial data once input data is specified.
|
def expr_code(self, expr):
    """Generate a Python expression for `expr`."""
    if "|" in expr:
        # Filters apply left-to-right: a|f|g  ->  c_g(c_f(code(a))).
        first, *filters = expr.split("|")
        code = self.expr_code(first)
        for filt in filters:
            self.all_vars.add(filt)
            code = "c_%s(%s)" % (filt, code)
        return code
    if "." in expr:
        # Dotted access goes through the runtime `dot` helper.
        base, *attrs = expr.split(".")
        code = self.expr_code(base)
        quoted_attrs = ", ".join(repr(attr) for attr in attrs)
        return "dot(%s, %s)" % (code, quoted_attrs)
    # Plain name: record it and reference the corresponding context var.
    self.all_vars.add(expr)
    return "c_%s" % expr
|
Generate a Python expression for `expr`.
|
def delete(self, doc_id: str) -> bool:
    """Delete a document with id.

    Returns True on success, False when Elasticsearch rejects the request.
    """
    try:
        self.instance.delete(self.index, self.doc_type, doc_id)
    except RequestError as ex:
        # Log and report failure instead of propagating.
        logging.error(ex)
        return False
    return True
|
Delete a document with id.
|
def _add_childTnLst(self):
    """Add `./p:timing/p:tnLst/p:par/p:cTn/p:childTnLst` descendant.
    Any existing `p:timing` child element is ruthlessly removed and
    replaced.
    """
    # Drop whatever timing tree exists before inserting the replacement.
    self.remove(self.get_or_add_timing())
    new_timing = parse_xml(self._childTnLst_timing_xml())
    self._insert_timing(new_timing)
    return new_timing.xpath('./p:tnLst/p:par/p:cTn/p:childTnLst')[0]
|
Add `./p:timing/p:tnLst/p:par/p:cTn/p:childTnLst` descendant.
Any existing `p:timing` child element is ruthlessly removed and
replaced.
|
def ipa_substrings(unicode_string, single_char_parsing=False):
    """
    Return a list of (non-empty) substrings of the given string,
    where each substring is either:
    1. the longest Unicode string starting at the current index
    representing a (known) valid IPA character, or
    2. a single Unicode character (which is not IPA valid).
    If ``single_char_parsing`` is ``False``,
    parse the string one Unicode character at a time,
    that is, do not perform the greedy parsing.
    For example, if ``s = u"\u006e\u0361\u006d"``,
    with ``single_char_parsing=True`` the result will be
    a list with a single element: ``[u"\u006e\u0361\u006d"]``,
    while ``single_char_parsing=False`` will yield a list with three elements:
    ``[u"\u006e", u"\u0361", u"\u006d"]``.
    Return ``None`` if ``unicode_string`` is ``None``.
    :param str unicode_string: the Unicode string to be parsed
    :param bool single_char_parsing: if ``True``, parse one Unicode character at a time
    :rtype: list of str
    """
    # Delegate to the generic splitter using the IPA dictionary tables.
    split_options = dict(
        string=unicode_string,
        dictionary=UNICODE_TO_IPA,
        max_key_length=UNICODE_TO_IPA_MAX_KEY_LENGTH,
        single_char_parsing=single_char_parsing,
    )
    return split_using_dictionary(**split_options)
|
Return a list of (non-empty) substrings of the given string,
where each substring is either:
1. the longest Unicode string starting at the current index
representing a (known) valid IPA character, or
2. a single Unicode character (which is not IPA valid).
If ``single_char_parsing`` is ``False``,
parse the string one Unicode character at a time,
that is, do not perform the greedy parsing.
For example, if ``s = u"\u006e\u0361\u006d"``,
with ``single_char_parsing=True`` the result will be
a list with a single element: ``[u"\u006e\u0361\u006d"]``,
while ``single_char_parsing=False`` will yield a list with three elements:
``[u"\u006e", u"\u0361", u"\u006d"]``.
Return ``None`` if ``unicode_string`` is ``None``.
:param str unicode_string: the Unicode string to be parsed
:param bool single_char_parsing: if ``True``, parse one Unicode character at a time
:rtype: list of str
|
def cosinebell(n, fraction):
    """Return a cosine bell spanning n pixels, masking a fraction of pixels
    Parameters
    ----------
    n : int
        Number of pixels.
    fraction : float
        Length fraction over which the data will be masked.
    """
    bell = np.ones(n)
    nmasked = int(fraction * n)
    # Taper both ends symmetrically with a half-cosine ramp rising from 0 to 1.
    for i in range(nmasked):
        taper = 0.5 * (1 - np.cos(np.pi * float(i) / float(nmasked)))
        bell[i] = taper
        bell[n - 1 - i] = taper
    return bell
|
Return a cosine bell spanning n pixels, masking a fraction of pixels
Parameters
----------
n : int
Number of pixels.
fraction : float
Length fraction over which the data will be masked.
|
def polygons_obb(polygons):
    """
    Find the OBBs for a list of shapely.geometry.Polygons
    """
    transforms = []
    rectangles = []
    for polygon in polygons:
        transform, rectangle = polygon_obb(polygon)
        transforms.append(transform)
        rectangles.append(rectangle)
    return np.array(transforms), np.array(rectangles)
|
Find the OBBs for a list of shapely.geometry.Polygons
|
def sed_or_dryrun(*args, **kwargs):
    """
    Wrapper around Fabric's contrib.files.sed() to give it a dryrun option.
    http://docs.fabfile.org/en/0.9.1/api/contrib/files.html#fabric.contrib.files.sed

    In dryrun mode the equivalent shell command is printed instead of executed.
    """
    dryrun = get_dryrun(kwargs.get('dryrun'))
    # `dryrun` is consumed here; Fabric's sed() does not accept it.
    kwargs.pop('dryrun', None)
    use_sudo = kwargs.get('use_sudo', False)
    if dryrun:
        # Mirror sed()'s positional/keyword argument handling.
        context = dict(
            filename=args[0] if len(args) >= 1 else kwargs['filename'],
            before=args[1] if len(args) >= 2 else kwargs['before'],
            after=args[2] if len(args) >= 3 else kwargs['after'],
            backup=args[3] if len(args) >= 4 else kwargs.get('backup', '.bak'),
            limit=kwargs.get('limit', ''),
        )
        # Bug fix: the command previously contained a literal "(unknown)"
        # placeholder instead of interpolating the target filename.
        cmd = 'sed -i{backup} -r -e "/{limit}/ s/{before}/{after}/g" {filename}'.format(**context)
        cmd_run = 'sudo' if use_sudo else 'run'
        if BURLAP_COMMAND_PREFIX:
            print('%s %s: %s' % (render_command_prefix(), cmd_run, cmd))
        else:
            print(cmd)
    else:
        from fabric.contrib.files import sed
        sed(*args, **kwargs)
|
Wrapper around Fabric's contrib.files.sed() to give it a dryrun option.
http://docs.fabfile.org/en/0.9.1/api/contrib/files.html#fabric.contrib.files.sed
|
def combined_analysis(using):
    """
    Combine the analysis in ES with the analysis defined in Python. The one in
    Python takes precedence
    """
    python_analysis = collect_analysis(using)
    es_analysis = existing_analysis(using)
    if es_analysis == DOES_NOT_EXIST:
        return python_analysis
    # Every Python-side section/entry is added to (or overrides) the ES-side
    # analysis; missing sections adopt the Python dict wholesale.
    for section, py_section in python_analysis.items():
        es_section = es_analysis.setdefault(section, py_section)
        for name, value in py_section.items():
            es_section[name] = value
    return es_analysis
|
Combine the analysis in ES with the analysis defined in Python. The one in
Python takes precedence
|
def type_map(gtype, fn):
    """Map fn over all child types of gtype.

    :param gtype: GType whose child types are visited.
    :param fn: Python callable matching the VipsTypeMap2Fn signature.
    :return: result of the underlying vips_type_map call.
    """
    # NOTE(review): `cb` is kept alive only by this local reference —
    # presumably vips_type_map invokes it synchronously before returning;
    # confirm, since the callback becomes collectable once this function exits.
    cb = ffi.callback('VipsTypeMap2Fn', fn)
    return vips_lib.vips_type_map(gtype, cb, ffi.NULL, ffi.NULL)
|
Map fn over all child types of gtype.
|
def clean_restricted_chars(path, restricted_chars=restricted_chars):
    '''
    Get path without restricted characters.
    :param path: path
    :return: path without restricted characters
    :rtype: str or unicode (depending on given path)
    '''
    cleaned = path
    # Substitute each restricted character with an underscore.
    for forbidden in restricted_chars:
        cleaned = cleaned.replace(forbidden, '_')
    return cleaned
|
Get path without restricted characters.
:param path: path
:return: path without restricted characters
:rtype: str or unicode (depending on given path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.