code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def get(self, request, enterprise_uuid, course_id):
    """
    Handle the enrollment of enterprise learner in the provided course.

    Based on `enterprise_uuid` in URL, the view will decide which
    enterprise customer's course enrollment record should be created.
    Depending on the value of query parameter `course_mode` then learner
    will be either redirected to LMS dashboard for audit modes or
    redirected to ecommerce basket flow for payment of premium modes.

    :param request: The incoming HTTP request; `course_mode` and `catalog`
        are read from its query string.
    :param enterprise_uuid: UUID of the enterprise customer (from the URL).
    :param course_id: The course run the learner is enrolling in.
    :returns: An HTTP redirect, or an error page when no course modes exist.
    """
    enrollment_course_mode = request.GET.get('course_mode')
    enterprise_catalog_uuid = request.GET.get('catalog')
    # Redirect the learner to LMS dashboard in case no course mode is
    # provided as query parameter `course_mode`
    if not enrollment_course_mode:
        return redirect(LMS_DASHBOARD_URL)
    enrollment_api_client = EnrollmentApiClient()
    course_modes = enrollment_api_client.get_course_modes(course_id)
    # Verify that the request user belongs to the enterprise against the
    # provided `enterprise_uuid`.
    enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
    enterprise_customer_user = get_enterprise_customer_user(request.user.id, enterprise_customer.uuid)
    if not course_modes:
        # No modes at all for this course run: show a coded error page and
        # log enough context to trace the failure.
        context_data = get_global_context(request, enterprise_customer)
        error_code = 'ENTHCE000'
        log_message = (
            'No course_modes for course_id {course_id} for enterprise_catalog_uuid '
            '{enterprise_catalog_uuid}.'
            'The following error was presented to '
            'user {userid}: {error_code}'.format(
                userid=request.user.id,
                enterprise_catalog_uuid=enterprise_catalog_uuid,
                course_id=course_id,
                error_code=error_code
            )
        )
        return render_page_with_error_code_message(request, context_data, error_code, log_message)
    # Find the mode matching the requested slug; fall back to the dashboard
    # when the requested mode is not offered for this course.
    selected_course_mode = None
    for course_mode in course_modes:
        if course_mode['slug'] == enrollment_course_mode:
            selected_course_mode = course_mode
            break
    if not selected_course_mode:
        return redirect(LMS_DASHBOARD_URL)
    # Create the Enterprise backend database records for this course
    # enrollment
    __, created = EnterpriseCourseEnrollment.objects.get_or_create(
        enterprise_customer_user=enterprise_customer_user,
        course_id=course_id,
    )
    if created:
        # Only emit the analytics event for a brand-new enrollment record.
        track_enrollment('course-landing-page-enrollment', request.user.id, course_id, request.get_full_path())
    # Record (or refresh) the learner's data-sharing consent for this course.
    DataSharingConsent.objects.update_or_create(
        username=enterprise_customer_user.username,
        course_id=course_id,
        enterprise_customer=enterprise_customer_user.enterprise_customer,
        defaults={
            'granted': True
        },
    )
    audit_modes = getattr(settings, 'ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES', ['audit', 'honor'])
    if selected_course_mode['slug'] in audit_modes:
        # In case of Audit course modes enroll the learner directly through
        # enrollment API client and redirect the learner to dashboard.
        enrollment_api_client.enroll_user_in_course(
            request.user.username, course_id, selected_course_mode['slug']
        )
        return redirect(LMS_COURSEWARE_URL.format(course_id=course_id))
    # redirect the enterprise learner to the ecommerce flow in LMS
    # Note: LMS start flow automatically detects the paid mode
    premium_flow = LMS_START_PREMIUM_COURSE_FLOW_URL.format(course_id=course_id)
    if enterprise_catalog_uuid:
        # Propagate the catalog so downstream ecommerce can apply it.
        premium_flow += '?catalog={catalog_uuid}'.format(
            catalog_uuid=enterprise_catalog_uuid
        )
    return redirect(premium_flow)
|
Handle the enrollment of enterprise learner in the provided course.
Based on `enterprise_uuid` in URL, the view will decide which
enterprise customer's course enrollment record should be created.
Depending on the value of query parameter `course_mode` then learner
will be either redirected to LMS dashboard for audit modes or
redirected to ecommerce basket flow for payment of premium modes.
|
def get_file_sha1(filename_or_io):
    '''
    Compute the SHA1 hex digest of a file or file-like object, reading in
    fixed-size chunks so large files stay memory friendly. Digests are
    memoised in ``FILE_SHAS`` under the file's cache key when one exists.
    '''
    file_data = get_file_io(filename_or_io)
    cache_key = file_data.cache_key
    # Serve a previously computed digest when we have one cached.
    if cache_key and cache_key in FILE_SHAS:
        return FILE_SHAS[cache_key]
    hasher = sha1()
    with file_data as file_io:
        while True:
            chunk = file_io.read(BLOCKSIZE)
            if not chunk:
                break
            # Text-mode reads yield unicode; hash the UTF-8 byte form.
            if isinstance(chunk, six.text_type):
                chunk = chunk.encode('utf-8')
            hasher.update(chunk)
    digest = hasher.hexdigest()
    if cache_key:
        FILE_SHAS[cache_key] = digest
    return digest
|
Calculates the SHA1 of a file or file object using a buffer to handle larger files.
|
def author(self, value):
    """
    Setter for **self.__author** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    if value is not None:
        message = "'{0}' attribute: '{1}' type is not 'unicode'!".format(
            "author", value)
        assert type(value) is unicode, message
    self.__author = value
|
Setter for **self.__author** attribute.
:param value: Attribute value.
:type value: unicode
|
def get_as_nullable_integer(self, key):
    """
    Converts map element into an integer or returns None if conversion is not possible.

    :param key: an index of element to get.
    :return: integer value of the element or None if conversion is not supported.
    """
    # Delegate the nullable conversion to the shared converter.
    return IntegerConverter.to_nullable_integer(self.get(key))
|
Converts map element into an integer or returns None if conversion is not possible.
:param key: an index of element to get.
:return: integer value of the element or None if conversion is not supported.
|
def from_hoy(cls, hoy, leap_year=False):
    """Create Ladybug Datetime from an hour of the year.

    Args:
        hoy: A float value 0 <= and < 8760
    """
    # Convert hours to whole minutes and reuse the minute-of-year factory.
    minute_of_year = round(hoy * 60)
    return cls.from_moy(minute_of_year, leap_year)
|
Create Ladybug Datetime from an hour of the year.
Args:
hoy: A float value 0 <= and < 8760
|
async def kick_chat_member(self, chat_id: typing.Union[base.Integer, base.String], user_id: base.Integer,
                           until_date: typing.Union[base.Integer, None] = None) -> base.Boolean:
    """
    Use this method to kick a user from a group, a supergroup or a channel.
    In the case of supergroups and channels, the user will not be able to return to the group
    on their own using invite links, etc., unless unbanned first.
    The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.

    Note: In regular groups (non-supergroups), this method will only work if the 'All Members Are Admins' setting
    is off in the target group.
    Otherwise members may only be removed by the group's creator or by the member that added them.

    Source: https://core.telegram.org/bots/api#kickchatmember

    :param chat_id: Unique identifier for the target group or username of the target supergroup or channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param user_id: Unique identifier of the target user
    :type user_id: :obj:`base.Integer`
    :param until_date: Date when the user will be unbanned, unix time
    :type until_date: :obj:`typing.Union[base.Integer, None]`
    :return: Returns True on success
    :rtype: :obj:`base.Boolean`
    """
    until_date = prepare_arg(until_date)
    # NOTE: generate_payload(**locals()) turns the current local names
    # (chat_id, user_id, until_date — presumably `self` is filtered out by
    # the helper) into the API payload, so local variable names here must
    # match the Bot API parameter names exactly.
    payload = generate_payload(**locals())
    result = await self.request(api.Methods.KICK_CHAT_MEMBER, payload)
    return result
|
Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
|
def requirements(work_dir, hive_root, with_requirements,
                 with_dockerfile, active_module, active_module_file):
    """Compile a fresh set of dependency files.

    Collects the list of active modules (from a file and/or arguments),
    then optionally regenerates requirements files, the Dockerfile's
    HIVE_PACKAGES argument, and the migrations' models.txt / tasks.txt
    lists under `work_dir`.
    """
    import sys
    # Make hive importable before resolving module paths.
    sys.path.insert(0, hive_root)
    hive_root = os.path.abspath(os.path.expanduser(hive_root))
    work_dir = work_dir or os.path.join(
        os.environ.get('FANTASY_APP_PATH',
                       os.getcwd()))
    work_dir = os.path.expanduser(work_dir)
    requirements_root = os.path.join(work_dir, 'requirements')
    migrate_root = os.path.join(work_dir, 'migrations')
    # active_modules: order is significant and must be preserved.
    active_module_paths = []
    active_module_list = []
    if active_module_file:
        # One module name per line; '#' starts a comment, blank lines skipped.
        with open(active_module_file, 'r') as fp:
            for l in fp:
                pkg = l.split('#')[0].strip()
                if pkg:
                    active_module_list.append(l.strip("\n"))
                pass
    active_module_list += active_module
    # Resolve each module to its on-disk package directory.
    for m in active_module_list:
        try:
            mod = importlib.import_module(m)
            active_module_paths.append(os.path.dirname(mod.__file__))
        except ImportError:
            click.echo('module "%s" not found.' % m, color="yellow")
            pass
        pass

    def build_requirements():
        """Build the requirements files.

        The requirements are split across two files:
        - hive.txt: copied directly from the hive project
        - hive-modules.txt: dependencies collected from the active modules

        .. note::
            requirements must be order-independent, because we deduplicate
            them with a set and they end up sorted by value.
        """
        if not os.path.exists(requirements_root):
            os.makedirs(requirements_root)
            pass
        click.echo(click.style("Generate hive requirements...", fg="yellow"))
        shutil.copy(
            os.path.join(hive_root, 'requirements.txt'),
            os.path.join(requirements_root, 'hive.txt')
        )
        click.echo(click.style("Generate hive-module requirements...",
                               fg="yellow"))
        # Gather every module-level requirements.txt that exists.
        requirements_files = []
        for m in active_module_paths:
            t = os.path.join(m, 'requirements.txt')
            if os.path.exists(t):
                requirements_files.append(t)
            pass
        # Deduplicate package lines across all module requirement files.
        module_packages = set()
        with fileinput.input(requirements_files) as fp:
            for line in fp:
                pkg = line.split('#')[0].strip()
                if pkg:
                    module_packages.add(pkg)
                pass
        with click.open_file(
                os.path.join(requirements_root, 'hive-modules.txt'),
                'w') as fp:
            for p in module_packages:
                fp.write("%s\n" % p)
            pass
        pass

    def build_dockerfile():
        """Update the Dockerfile's HIVE_PACKAGES build argument in place."""
        # Only modules living inside hive_root go into HIVE_PACKAGES,
        # expressed as paths relative to the hive root.
        modules_in_hive = map(
            lambda x: x.replace(hive_root, '').lstrip('/'),
            filter(lambda x: x.startswith(hive_root),
                   active_module_paths))
        modules_path = ' '.join(modules_in_hive)
        docker_file = os.path.join(
            os.path.dirname(requirements_root),
            'Dockerfile'
        )
        # update Dockerfile
        if os.path.exists(docker_file):
            click.echo(click.style("Found Dockerfile,try update...",
                                   fg="yellow"))
            with open(docker_file, 'r') as fp:
                buffer = fp.read()
                pass
            import re
            replaced = re.sub('ARG HIVE_PACKAGES=".*"',
                              'ARG HIVE_PACKAGES="%s"' % modules_path,
                              buffer)
            with open(docker_file, 'w') as fp:
                fp.write(replaced)
                pass
            pass
        pass

    def build_migrations():
        # Pair each module path with its name, keeping only those that
        # actually ship a models.py.
        models_pairs = filter(
            lambda pair: os.path.exists(pair[0]),
            map(lambda x: (os.path.join(x[0], 'models.py'), x[1]),
                [(v, active_module_list[i]) for i, v in
                 enumerate(active_module_paths)]))
        try:
            # zip(*...) raises ValueError when models_pairs is empty.
            _, models = zip(*models_pairs)
        except ValueError:
            click.echo(click.style("No models found,"
                                   "is it include in "
                                   "your PYTHONPATH?\n"
                                   "Modules: %s" %
                                   ','.join(active_module_list),
                                   fg="yellow"))
            return
        click.echo(click.style("Found models.txt,try update...",
                               fg="yellow"))
        with open(os.path.join(migrate_root, 'models.txt'), 'w') as fp:
            for p in models:
                fp.write("%s\n" % p)
            pass
        pass

    def build_tasks():
        # Same pattern as build_migrations, but for tasks.py -> tasks.txt.
        tasks_pairs = filter(
            lambda pair: os.path.exists(pair[0]),
            map(lambda x: (os.path.join(x[0], 'tasks.py'), x[1]),
                [(v, active_module_list[i]) for i, v in
                 enumerate(active_module_paths)]))
        try:
            _, tasks = zip(*tasks_pairs)
        except ValueError:
            click.echo(click.style("No tasks found,"
                                   "is it include in "
                                   "your PYTHONPATH?\n"
                                   "Modules: %s" %
                                   ','.join(active_module_list),
                                   fg="yellow"))
            return
        click.echo(click.style("Found tasks.txt,try update...",
                               fg="yellow"))
        with open(os.path.join(migrate_root, 'tasks.txt'), 'w') as fp:
            for p in tasks:
                fp.write("%s\n" % p)
            pass

    if with_requirements:
        build_requirements()
    if with_dockerfile:
        build_dockerfile()
    if os.path.exists(migrate_root):
        build_migrations()
    # NOTE(review): same condition checked twice; presumably intentional so
    # tasks are rebuilt whenever migrations are -- confirm.
    if os.path.exists(migrate_root):
        build_tasks()
    click.echo(click.style("Generate done...", fg="yellow"))
    pass
|
编译全新依赖文件
|
def delete_edges(self, edges: Iterable[Tuple[str, str]]):
    """Remove each edge in *edges* that is present in the graph; edges
    that do not exist are silently skipped."""
    for u, v in edges:
        if not self.has_edge(u, v):
            continue
        self.remove_edge(u, v)
|
Iterate over a set of edges and remove the ones that are present in
the graph.
|
def lock(self, key, lease_time=-1):
    """
    Acquire the lock for the given key, either indefinitely (default) or
    for the given lease time in seconds.

    If the lock is unavailable the calling thread blocks until it is
    acquired. The lock's scope is this map only, and it applies to this
    key alone. Locks are re-entrant: a key locked N times must be
    unlocked N times before another thread can take it.

    **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
    of __hash__ and __eq__ defined in key's class.**

    :param key: (object), the key to lock.
    :param lease_time: (int), time in seconds to wait before releasing the lock (optional).
    """
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(
        multi_map_lock_codec,
        serialized_key,
        key=serialized_key,
        thread_id=thread_id(),
        ttl=to_millis(lease_time),
        reference_id=self.reference_id_generator.get_and_increment())
|
Acquires the lock for the specified key infinitely or for the specified lease time if provided.
If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies
dormant until the lock has been acquired.
Scope of the lock is this map only. Acquired lock is only for the key in this map.
Locks are re-entrant; so, if the key is locked N times, it should be unlocked N times before another thread can
acquire it.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the key to lock.
:param lease_time: (int), time in seconds to wait before releasing the lock (optional).
|
async def _send_to_messenger_profile(self, page, content):
    """
    The messenger profile API handles all meta-information about the bot,
    like the menu. This allows to submit data to this API endpoint.

    Skips the POST entirely when the remote profile already contains the
    submitted content; failures are logged and reported, never raised.

    :param page: page dict from the configuration
    :param content: content to be sent to Facebook (as dict)
    """
    # Human-readable list of the profile fields being updated (for logs).
    log_name = ', '.join(repr(x) for x in content.keys())
    page_id = page['page_id']
    # Fetch the current remote state; if our content is already a subset
    # of it, there is nothing to send.
    current = await self._get_messenger_profile(page, content.keys())
    if dict_is_subset(content, current):
        logger.info('Page %s: %s is already up to date', page_id, log_name)
        return
    params = {
        'access_token': page['page_token'],
    }
    headers = {
        'content-type': 'application/json',
    }
    post = self.session.post(
        PROFILE_ENDPOINT,
        params=params,
        headers=headers,
        data=ujson.dumps(content)
    )
    # noinspection PyBroadException
    # Best-effort update: any failure is logged and reported, but must not
    # propagate to the caller.
    try:
        async with post as r:
            await self._handle_fb_response(r)
    except Exception:
        logger.exception('Page %s: %s could not be set', page_id, log_name)
        reporter.report()
    else:
        logger.info('Page %s: %s was updated', page_id, log_name)
|
The messenger profile API handles all meta-information about the bot,
like the menu. This allows to submit data to this API endpoint.
:param page: page dict from the configuration
:param content: content to be sent to Facebook (as dict)
|
def send_trending_data(events):
    """Build per-site data point payloads for trending content and write
    them to influxdb. Write failures are logged, not raised.
    """
    # Keep only the 100 highest-count (site, content_id) pairs.
    top_hits = sorted(events.items(), key=lambda item: item[1],
                      reverse=True)[:100]
    # Group the surviving points by site.
    bodies = {}
    for (site, content_id), count in top_hits:
        # Drop entries with an empty site or a malformed content id.
        if not len(site) or not re.match(CONTENT_ID_REGEX, content_id):
            continue
        bodies.setdefault(site, []).append([content_id, count])
    for site, points in bodies.items():
        name = "{}_trending".format(site)
        # send payload to influxdb; best-effort per site
        try:
            payload = [{
                "name": name,
                "columns": ["content_id", "value"],
                "points": points,
            }]
            INFLUXDB_CLIENT.write_points(payload)
        except Exception as e:
            LOGGER.exception(e)
|
creates data point payloads for trending data to influxdb
|
def lcp(s1, s2):
    '''longest common prefix

    >>> lcp('abcdx', 'abcdy'), lcp('', 'a'), lcp('x', 'yz')
    (4, 0, 0)
    '''
    limit = min(len(s1), len(s2))
    for idx in range(limit):
        if s1[idx] != s2[idx]:
            return idx
    # One string is a prefix of the other (or both are empty).
    return limit
|
longest common prefix
>>> lcp('abcdx', 'abcdy'), lcp('', 'a'), lcp('x', 'yz')
(4, 0, 0)
|
def log(msg, delay=0.5, chevrons=True, verbose=True):
    """Log a message to stdout, optionally prefixed with chevrons, then
    pause for *delay* seconds."""
    if verbose:
        prefix = "\n❯❯ " if chevrons else ""
        click.echo(prefix + msg)
    time.sleep(delay)
|
Log a message to stdout.
|
def channels_remove_moderator(self, room_id, user_id, **kwargs):
    """Removes the role of moderator from a user in the current channel."""
    payload = dict(roomId=room_id, userId=user_id, kwargs=kwargs)
    return self.__call_api_post('channels.removeModerator', **payload)
|
Removes the role of moderator from a user in the current channel.
|
def get_queue_bindings(self, vhost, qname):
    """
    Return a list of dicts, one dict per binding. The dict format coming
    from RabbitMQ for queue named 'testq' is:

    {"source":"sourceExch","vhost":"/","destination":"testq",
     "destination_type":"queue","routing_key":"*.*","arguments":{},
     "properties_key":"%2A.%2A"}
    """
    # URL-encode both path components ('' means encode '/' too).
    encoded_vhost = quote(vhost, '')
    encoded_qname = quote(qname, '')
    path = Client.urls['bindings_on_queue'] % (encoded_vhost, encoded_qname)
    return self._call(path, 'GET')
|
Return a list of dicts, one dict per binding. The dict format coming
from RabbitMQ for queue named 'testq' is:
{"source":"sourceExch","vhost":"/","destination":"testq",
"destination_type":"queue","routing_key":"*.*","arguments":{},
"properties_key":"%2A.%2A"}
|
def get(self, sid):
    """
    Constructs a MemberContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.chat.v2.service.channel.member.MemberContext
    :rtype: twilio.rest.chat.v2.service.channel.member.MemberContext
    """
    solution = self._solution
    return MemberContext(
        self._version,
        service_sid=solution['service_sid'],
        channel_sid=solution['channel_sid'],
        sid=sid,
    )
|
Constructs a MemberContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.chat.v2.service.channel.member.MemberContext
:rtype: twilio.rest.chat.v2.service.channel.member.MemberContext
|
def bear_push(title, content, send_key=None):
    """Push a message to all WeChat subscribers via PushBear
    (see https://pushbear.ftqq.com/admin/#/).

    :param title: str, message title
    :param content: str, message body (up to 64Kb, may be empty,
        MarkDown supported)
    :param send_key: str, channel send_key obtained from PushBear
    :return: None
    :raises ValueError: when no send_key is configured
    """
    if not send_key:
        raise ValueError("请配置通道send_key,如果还没有,"
                         "可以到这里创建通道获取:https://pushbear.ftqq.com/admin/#/")
    api = "https://pushbear.ftqq.com/sub"
    payload = {'text': title, 'desp': content, "sendkey": send_key}
    requests.post(api, data=payload)
|
使用PushBear推送消息给所有订阅者微信,关于PushBear,
请参考:https://pushbear.ftqq.com/admin/#/
:param title: str
消息标题
:param content: str
消息内容,最长64Kb,可空,支持MarkDown
:param send_key: str
从[PushBear](https://pushbear.ftqq.com/admin/#/)获取的通道send_key
:return: None
|
async def await_event(self, event=None, timeout=30):
    """Wait for an event from QTM.

    :param event: A :class:`qtm.QRTEvent` to wait for a specific event.
        Otherwise wait for any event.
    :param timeout: Max time to wait for event.
    :rtype: A :class:`qtm.QRTEvent`
    """
    # Delegate entirely to the underlying protocol implementation.
    received = await self._protocol.await_event(event, timeout=timeout)
    return received
|
Wait for an event from QTM.
:param event: A :class:`qtm.QRTEvent`
to wait for a specific event. Otherwise wait for any event.
:param timeout: Max time to wait for event.
:rtype: A :class:`qtm.QRTEvent`
|
def generate_moffat_profile(seeing_fwhm, alpha):
    """Generate a normalized Moffat profile from its FWHM and alpha"""
    # FWHM-to-gamma conversion factor for a Moffat profile.
    width_factor = 2 * math.sqrt(2**(1.0 / alpha) - 1)
    gamma = seeing_fwhm / width_factor
    # Peak amplitude giving a unit-normalized profile.
    amplitude = 1.0 / math.pi * (alpha - 1) / gamma**2
    return Moffat2D(amplitude=amplitude,
                    x_mean=0.0,
                    y_mean=0.0,
                    gamma=gamma,
                    alpha=alpha)
|
Generate a normalized Moffat profile from its FWHM and alpha
|
def add(self, origin, rel, target, attrs=None):
    '''
    Add one relationship to the model

    origin - origin of the relationship (similar to an RDF subject)
    rel - type IRI of the relationship (similar to an RDF predicate)
    target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
    attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}

    Raises ValueError when origin or rel is falsy.
    '''
    if not origin:
        raise ValueError('Relationship origin cannot be null')
    if not rel:
        raise ValueError('Relationship ID cannot be null')
    attrs = attrs or {}
    # Look up the existing document for this origin, if any.
    origin_item = self._db_coll.find_one({'origin': origin})
    # Abbreviate before storing -- presumably compacts IRIs; behaviour for
    # boolean/float targets depends on _abbreviate (TODO confirm).
    rel = self._abbreviate(rel)
    target = self._abbreviate(target)
    rel_info = {'rid': rel, 'instances': [[target, attrs]]}
    if origin_item is None:
        # First relationship for this origin: create a new document.
        self._db_coll.insert_one(
            {
                'origin': origin,
                'rels': [rel_info],
            }
        )
    else:
        # NOTE(review): a new rel entry is appended even when one with the
        # same 'rid' already exists, so repeated calls accumulate entries.
        origin_item['rels'].append(rel_info)
        self._db_coll.replace_one(
            {'origin': origin}, origin_item
        )
    return
|
Add one relationship to the model
origin - origin of the relationship (similar to an RDF subject)
rel - type IRI of the relationship (similar to an RDF predicate)
target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
|
def _minimally_quoted_parameter_value(value):
    """
    Per RFC 7231 (https://tools.ietf.org/html/rfc7231#section-3.1.1.1),
    parameter values need no quoting when they are a "token"; token
    characters are defined by RFC 7230
    (https://tools.ietf.org/html/rfc7230#section-3.2.6). Any other value
    must be sent as a "quoted-string", so values containing non-token
    characters are quoted here.
    """
    token_pattern = "^[{charset}]*$".format(
        charset=MediaType.RFC7320_TOKEN_CHARSET)
    if re.match(token_pattern, value):
        return value
    return MediaType._quote(value)
|
Per RFC 7321 (https://tools.ietf.org/html/rfc7231#section-3.1.1.1):
Parameters values don't need to be quoted if they are a "token".
Token characters are defined by RFC 7320 (https://tools.ietf.org/html/rfc7230#section-3.2.6).
Otherwise, parameters values can be a "quoted-string".
So we will quote values that contain characters other than the standard token characters.
|
def get_welcome_response():
    """Build the skill's welcome response. Session attributes could be
    initialized here if we wanted any.
    """
    session_attributes = {}
    card_title = "Welcome"
    speech_output = ("Welcome to the Alexa Skills Kit sample. "
                     "Please tell me your favorite color by saying, "
                     "my favorite color is red")
    # Reprompt text is spoken when the user does not reply to the welcome
    # message or says something that is not understood.
    reprompt_text = ("Please tell me your favorite color by saying, "
                     "my favorite color is red.")
    should_end_session = False
    speechlet = build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session)
    return build_response(session_attributes, speechlet)
|
If we wanted to initialize the session to have some attributes we could
add those here
|
def get_resource_subscription(self, device_id, resource_path, fix_path=True):
    """Read subscription status.

    :param device_id: Name of device to set the subscription on (Required)
    :param resource_path: The resource path on device to observe (Required)
    :param fix_path: Removes leading / on resource_path if found
    :returns: status of subscription: True when subscribed, False when the
        backend responds 404 (no subscription)
    """
    # When path starts with / we remove the slash, as the API can't handle //.
    # Keep the original path around however, as we use that for queue registration.
    fixed_path = resource_path
    if fix_path and resource_path.startswith("/"):
        fixed_path = resource_path[1:]
    api = self._get_api(mds.SubscriptionsApi)
    try:
        api.check_resource_subscription(device_id, fixed_path)
    except Exception as e:
        # Bugfix: previously this read ``e.status`` unconditionally, which
        # raised AttributeError (masking the real error) for any exception
        # type lacking a ``status`` attribute. Use getattr so only a true
        # 404 maps to False and everything else propagates unchanged.
        if getattr(e, 'status', None) == 404:
            return False
        raise
    return True
|
Read subscription status.
:param device_id: Name of device to set the subscription on (Required)
:param resource_path: The resource path on device to observe (Required)
:param fix_path: Removes leading / on resource_path if found
:returns: status of subscription
|
def get_update_service(self):
    """Return a HPEUpdateService object

    :returns: The UpdateService object
    """
    # Resolve the UpdateService sub-resource URL from this resource.
    url = utils.get_subresource_path_by(self, 'UpdateService')
    return update_service.HPEUpdateService(
        self._conn, url, redfish_version=self.redfish_version)
|
Return a HPEUpdateService object
:returns: The UpdateService object
|
def title(self, value=None):
    """Get or set the document's title from/in the metadata

    No arguments: Get the document's title from metadata
    Argument: Set the document's title in metadata
    """
    if value is not None:
        # Store in native metadata when available, otherwise on the instance.
        if self.metadatatype == "native":
            self.metadata['title'] = value
        else:
            self._title = value
    # Read back from wherever titles live for this metadata type.
    if self.metadatatype == "native":
        if 'title' in self.metadata:
            return self.metadata['title']
        return None
    return self._title
|
Get or set the document's title from/in the metadata
No arguments: Get the document's title from metadata
Argument: Set the document's title in metadata
|
def set(self, section, option, value=None):
    """
    Extends :meth:`~configparser.ConfigParser.set` by decoding byte
    strings to unicode before delegating to the parent implementation.
    """
    def _as_text(item):
        # Only byte strings are converted; everything else passes through.
        return item.decode('utf8') if isinstance(item, bytes) else item
    return super(VSGConfigParser, self).set(
        _as_text(section), _as_text(option), _as_text(value))
|
Extends :meth:`~configparser.ConfigParser.set` by auto formatting byte strings into unicode strings.
|
def down_by_name(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
    """Sync files whose filename attribute is highest in alphanumeric order"""
    listed = command.list_files(*filters, remote_dir=remote_dir)
    by_name = sorted(listed, key=lambda f: f.filename)
    # Take the last `count` entries (the alphanumerically greatest names).
    to_sync = by_name[-count:]
    _notify_sync(Direction.down, to_sync)
    # Download greatest-first.
    down_by_files(list(reversed(to_sync)), local_dir=local_dir)
|
Sync files whose filename attribute is highest in alphanumeric order
|
def iplot(self, places=-1, c_poly='default', c_holes='default',
          c_sop='r', s_sop=25, extra_height=0, ret=False, ax=None):
    """
    Improved plot that allows to visualize the Places in the Space
    selectively. It also allows to plot polygons and holes in
    different colors and to change the size and the color of the
    set of points.

    The points can be plotted accordingly to a ndarray colormap.

    :param places: Indexes of the Places to visualize.
    :type places: int, list or ndarray
    :param c_poly: Polygons color.
    :type c_poly: matplotlib color, 'default' or 't' (transparent)
    :param c_holes: Holes color.
    :type c_holes: matplotlib color, 'default' or 't' (transparent)
    :param c_sop: Set of points color.
    :type c_sop: matplotlib color or colormap
    :param s_sop: Set of points size.
    :type s_sop: float or ndarray
    :param ret: If True, returns the figure. It can be used to add
        more elements to the plot or to modify it.
    :type ret: bool
    :param ax: If a matplotlib axes given, this method will
        represent the plot on top of this axes. This is used to
        represent multiple plots from multiple geometries,
        overlapping them recursively.
    :type ax: mplot3d.Axes3D, None
    :returns: None, axes
    :rtype: None, mplot3d.Axes3D
    """
    # The default -1 selects every Place in the Space.
    if places == -1:
        places = range(len(self.places))
    elif type(places) == int:
        places = [places]
    places = np.array(places)
    # Negative indexes count from the end, like normal Python indexing.
    places[places < 0] = len(self.places) + places[places < 0]
    places = np.unique(places)
    # Build a temporary Space containing only the selected Places.
    aux_space = Space([self[i] for i in places])
    for place in aux_space:
        # Each Place draws itself onto the shared 3D axes; reuse `ax` so
        # the plots overlap in one figure.
        ax = place.iplot(c_poly, c_holes, c_sop, s_sop, extra_height,
                         ret=True, ax=ax)
    aux_space.center_plot(ax)
    if ret: return ax
|
Improved plot that allows to visualize the Places in the Space
selectively. It also allows to plot polygons and holes in
different colors and to change the size and the color of the
set of points.
The points can be plotted accordingly to a ndarray colormap.
:param places: Indexes of the Places to visualize.
:type places: int, list or ndarray
:param c_poly: Polygons color.
:type c_poly: matplotlib color, 'default' or 't' (transparent)
:param c_holes: Holes color.
:type c_holes: matplotlib color, 'default' or 't' (transparent)
:param c_sop: Set of points color.
:type c_sop: matplotlib color or colormap
:param s_sop: Set of points size.
:type s_sop: float or ndarray
:param ret: If True, returns the figure. It can be used to add
more elements to the plot or to modify it.
:type ret: bool
:param ax: If a matplotlib axes given, this method will
represent the plot on top of this axes. This is used to
represent multiple plots from multiple geometries,
overlapping them recursively.
:type ax: mplot3d.Axes3D, None
:returns: None, axes
:rtype: None, mplot3d.Axes3D
|
def binomial(n):
    """
    Return all binomial coefficients for a given order.

    For n > 5, scipy.special.binom is used, below we hardcode
    to avoid the scipy.special dependency.

    Parameters
    --------------
    n : int
      Order

    Returns
    ---------------
    binom : (n + 1,) int
      Binomial coefficients of a given order
    """
    # Pascal's-triangle rows for small n, avoiding the scipy import.
    hardcoded = {
        1: [1, 1],
        2: [1, 2, 1],
        3: [1, 3, 3, 1],
        4: [1, 4, 6, 4, 1],
        5: [1, 5, 10, 10, 5, 1],
    }
    if n in hardcoded:
        return hardcoded[n]
    from scipy.special import binom
    return binom(n, np.arange(n + 1))
|
Return all binomial coefficients for a given order.
For n > 5, scipy.special.binom is used, below we hardcode
to avoid the scipy.special dependency.
Parameters
--------------
n : int
Order
Returns
---------------
binom : (n + 1,) int
Binomial coefficients of a given order
|
def ascii(graph):
    """Format graph as an ASCII art."""
    # Imported lazily to avoid circular imports at module load time.
    from .._ascii import DAG
    from .._echo import echo_via_pager
    rendered = str(DAG(graph))
    echo_via_pager(rendered)
|
Format graph as an ASCII art.
|
def _replication_request(command, host=None, core_name=None, params=None):
    '''
    PRIVATE METHOD
    Performs the requested replication command and returns a dictionary with
    success, errors and data as keys. The data object will contain the JSON
    response.

    command : str
        The replication command to execute.
    host : str (None)
        The solr host to query. __opts__['host'] is default
    core_name: str (None)
        The name of the solr core if using cores. Leave this blank if you are
        not using cores or if you want to check all cores.
    params : list<str> ([])
        Any additional parameters you want to send. Should be a list of
        strings in name=value format, e.g. ['name=value']

    Return: dict<str, obj>::

        {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
    '''
    # The command itself is always the first extra query parameter.
    extra = ["command={0}".format(command)]
    if params is not None:
        extra.extend(params)
    url = _format_url('replication', host=host, core_name=core_name,
                      extra=extra)
    return _http_request(url)
|
PRIVATE METHOD
Performs the requested replication command and returns a dictionary with
success, errors and data as keys. The data object will contain the JSON
response.
command : str
The replication command to execute.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name: str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
params : list<str> ([])
Any additional parameters you want to send. Should be a lsit of
strings in name=value format. e.g. ['name=value']
Return: dict<str, obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
|
def get_issuer_keys(self, issuer):
    """
    Get all the keys that belong to an entity.

    :param issuer: The entity ID
    :return: A possibly empty list of keys
    """
    # Flatten the keys of every key bundle registered for this issuer.
    return [key
            for bundle in self.issuer_keys[issuer]
            for key in bundle.keys()]
|
Get all the keys that belong to an entity.
:param issuer: The entity ID
:return: A possibly empty list of keys
|
def sum_tbl(tbl, kfield, vfields):
    """
    Aggregate a composite array and compute the totals on a given key.

    >>> dt = numpy.dtype([('name', (bytes, 10)), ('value', int)])
    >>> tbl = numpy.array([('a', 1), ('a', 2), ('b', 3)], dt)
    >>> sum_tbl(tbl, 'name', ['value'])['value']
    array([3, 3])
    """
    # Output dtype: the key field, each value field, plus a 'counts' column
    # recording how many input records fell into each group.
    pairs = [(n, tbl.dtype[n]) for n in [kfield] + vfields]
    dt = numpy.dtype(pairs + [('counts', int)])

    def sum_all(group):
        # Reduce one group of records to a single summed record.
        vals = numpy.zeros(1, dt)[0]
        for rec in group:
            for vfield in vfields:
                vals[vfield] += rec[vfield]
            vals['counts'] += 1
        # All records in the group share the key, so the last one's will do.
        vals[kfield] = rec[kfield]
        return vals
    # NOTE: `groupby` here takes (iterable, keyfunc, aggfunc) -- presumably a
    # project helper, not itertools.groupby (TODO confirm).
    rows = groupby(tbl, operator.itemgetter(kfield), sum_all).values()
    # Copy the aggregated rows field-by-field into the structured result.
    array = numpy.zeros(len(rows), dt)
    for i, row in enumerate(rows):
        for j, name in enumerate(dt.names):
            array[i][name] = row[j]
    return array
|
Aggregate a composite array and compute the totals on a given key.
>>> dt = numpy.dtype([('name', (bytes, 10)), ('value', int)])
>>> tbl = numpy.array([('a', 1), ('a', 2), ('b', 3)], dt)
>>> sum_tbl(tbl, 'name', ['value'])['value']
array([3, 3])
|
def cohort_queryplan(plan):
    """
    Input:
    {
     'source': 'kronos',                       # Name of data source from settings
     'cohort':
      {'stream': CohortTest.EMAIL_STREAM,      # Kronos stream to define cohort from.
       'transform': lambda x: x,               # Transformations on the kstream.
       'start': date.now(),                    # The day of the first cohort.
       'unit': DateUnit.XX,                    # Users are in the same cohort
                                               # if they are in the same day/week.
       'cohorts': 5                            # How many cohorts (days/weeks/months)
                                               # to track.
       'grouping_key': 'user'},                # What key in an event should we tie
                                               # to a key in the action stream?
     'action':
      {'stream': CohortTest.FRONTPAGE_STREAM,  # Stream users take actions on.
       'transform': lambda x: x                # Transformations on the stream.
       'unit': DateUnit.XX,                    # Track events in day/week/months.
       'repetitions': 14                       # How many days/weeks/months to track.
       'grouping_key': 'user_id'}              # What key in an event should we tie
                                               # to a key in the action stream?
    }

    Output:
    A metis-compatible query plan to return a cohort analysis.
    """
    cohort = plan['cohort']
    action = plan['action']
    source = plan['source']

    # Calculate the start and end dates, in Kronos time, of the
    # beginning and end of the cohort and action streams that will be
    # relevant.
    cohort_start = datetime_to_kronos_time(_date_to_datetime(cohort['start']))
    cohort_span = timedelta(**{cohort['unit']: cohort['cohorts']})
    cohort_end = cohort['start'] + cohort_span
    action_span = timedelta(**{action['unit']: action['repetitions']})
    action_end = cohort_end + action_span
    # +1 turns the inclusive end timestamps into exclusive upper bounds.
    cohort_end = datetime_to_kronos_time(_date_to_datetime(cohort_end)) + 1
    action_end = datetime_to_kronos_time(_date_to_datetime(action_end)) + 1

    # Bucket each stream by its grouping key and time unit.
    left = _cohort_stream_transform(source,
                                    cohort['stream'], cohort_start, cohort_end,
                                    cohort.get('transform'),
                                    cohort['grouping_key'], cohort['unit'])
    right = _cohort_stream_transform(source,
                                     action['stream'], cohort_start, action_end,
                                     action.get('transform'),
                                     action['grouping_key'], action['unit'])

    # Width of the action-tracking window, in Kronos time units.
    additional_action_time = (DateUnit.unit_to_kronos_time(action['unit']) *
                              action['repetitions'])

    left.alias = 'cohort'
    right.alias = 'action'

    # Join cohort members to their actions: same user, action at-or-after
    # cohort entry, and within the action-tracking window.
    joined = Join(left,
                  right,
                  (Condition(Condition.Op.EQ,
                             Property('cohort.%s' % cohort['grouping_key']),
                             Property('action.%s' % action['grouping_key'])) &
                   Condition(Condition.Op.GTE,
                             Property('action.%s' % TIMESTAMP_FIELD),
                             Property('cohort.%s' % TIMESTAMP_FIELD)) &
                   Condition(Condition.Op.LT,
                             Property('action.%s' % TIMESTAMP_FIELD),
                             Add([Property('cohort.%s' % TIMESTAMP_FIELD),
                                  Constant(additional_action_time)]))))

    # First aggregation: one row per (cohort date, user, action step).
    user_aggregated = Aggregate(
        joined,
        GroupBy([Property('cohort.date', alias=TIMESTAMP_FIELD),
                 Property('cohort.%s' % cohort['grouping_key'], alias='group'),
                 Floor([Subtract([Property('action.%s' % TIMESTAMP_FIELD),
                                  Property('cohort.%s' % TIMESTAMP_FIELD)]),
                        Constant(DateUnit.unit_to_kronos_time(action['unit']))],
                       alias='action_step')]),
        [Count([], alias='count')]
    )

    # Second aggregation: collapse users into per-(date, step) action counts.
    aggregated = Aggregate(
        user_aggregated,
        GroupBy([Property(TIMESTAMP_FIELD, alias=TIMESTAMP_FIELD),
                 Property('action_step', alias='action_step')]),
        [Count([], alias='cohort_actions')])

    # TODO(marcua): Also sum up the cohort sizes, join with the plan.
    return aggregated.to_dict()
|
Input:
{
'source': 'kronos', # Name of data source from settings
'cohort':
{'stream': CohortTest.EMAIL_STREAM, # Kronos stream to define cohort from.
'transform': lambda x: x, # Transformations on the kstream.
'start': date.now(), # The day of the first cohort.
'unit': DateUnit.XX, # Users are in the same cohort
# if they are in the same day/week.
'cohorts': 5 # How many cohorts (days/weeks/months)
# to track.
'grouping_key': 'user'}, # What key in an event should we tie
# to a key in the action stream?
'action':
{'stream': CohortTest.FRONTPAGE_STREAM, # Stream users take actions on.
'transform': lambda x: x # Transformations on the stream.
'unit': DateUnit.XX, # Track events in day/week/months.
'repetitions': 14 # How many days/weeks/months to track.
'grouping_key': 'user_id'} # What key in an event should we tie
# to a key in the action stream?
}
Output:
A metis-compatible query plan to return a cohort analysis.
|
def from_string(cls, s):
    """Return a :class:`JobStatus` instance from its string representation.

    Unknown strings are logged and mapped to the ``UNKNOWN`` status
    instead of raising, so callers always receive a valid instance.
    """
    for num, text in cls._STATUS_TABLE.items():
        if text == s:
            return cls(num)
    # Lazy %-style argument: the message is only built if the record is
    # actually emitted (and a bad format can't raise here).
    logger.warning("Got unknown status: %s", s)
    return cls.from_string("UNKNOWN")
|
Return a :class:`JobStatus` instance from its string representation.
|
def choose_key(gpg_private_keys):
    """Show a GPG key selection dialog and return the chosen key, or None."""
    labels = []
    label2key = {}
    current_key_index = None

    # Build one selectable label per uid of each private key.
    for index, key in enumerate(gpg_private_keys):
        fingerprint = key['fingerprint']
        if fingerprint == config["gpg_key_fingerprint"]:
            current_key_index = index
        for uid_string in key['uids']:
            label = '"' + uid_string + ' (' + fingerprint + ')'
            labels.append(label)
            label2key[label] = key

    msg = _('Choose a GPG key for signing pyspread save files.\n'
            'The GPG key must not have a passphrase set.')
    dlg = wx.SingleChoiceDialog(None, msg, _('Choose key'), labels,
                                wx.CHOICEDLG_STYLE)

    # Relabel the dialog's stock buttons (third- and second-to-last children).
    children = list(dlg.GetChildren())
    children[-3].SetLabel(_("Use chosen key"))
    children[-2].SetLabel(_("Create new key"))

    if current_key_index is not None:
        # Preselect the currently configured key.
        dlg.SetSelection(current_key_index)

    chosen = None
    if dlg.ShowModal() == wx.ID_OK:
        chosen = label2key[dlg.GetStringSelection()]
    dlg.Destroy()
    return chosen
|
Displays gpg key choice and returns key
|
def hidden_item_tags(self):
    """Return the list of tags which hide an item from the 'ls' output."""
    raw = self.cp.get('ls', 'hidden_item_tags')
    # pylint: disable=no-member
    if raw == '':
        return []
    return [tag.strip() for tag in raw.split(',')]
|
Returns a list of tags which hide an item from the 'ls' output.
|
def numpyStr(array, format='%f', includeIndices=False, includeZeros=True):
    """Pretty print a numpy array using the given format string for each value.

    Parameters
    ------------------------------------------------------------
    array: The numpy array to print. This can be either a 1D vector or 2D matrix
    format: The format string to use for each value
    includeIndices: If true, include [row,col] label for each value
    includeZeros: Can only be set to False if includeIndices is on.
        If True, include 0 values in the print-out
        If False, exclude 0 values from the print-out.

    Returns the string representation.
    """
    shape = array.shape
    assert (len(shape) <= 2)
    items = ['[']
    if len(shape) == 1:
        if includeIndices:
            format = '%d:' + format
            if includeZeros:
                rowItems = [format % (c, x) for (c, x) in enumerate(array)]
            else:
                rowItems = [format % (c, x) for (c, x) in enumerate(array) if x != 0]
        else:
            rowItems = [format % (x) for x in array]
        items.extend(rowItems)
    else:
        (rows, cols) = shape
        if includeIndices:
            format = '%d,%d:' + format
        # Bug fix: `xrange` is Python 2 only; use `range` for Python 3.
        for r in range(rows):
            if includeIndices:
                rowItems = [format % (r, c, x) for c, x in enumerate(array[r])]
            else:
                rowItems = [format % (x) for x in array[r]]
            if r > 0:
                items.append('')
            items.append('[')
            items.extend(rowItems)
            if r < rows - 1:
                items.append(']\n')
            else:
                items.append(']')
    items.append(']')
    return ' '.join(items)
|
Pretty print a numpy matrix using the given format string for each
value. Return the string representation
Parameters:
------------------------------------------------------------
array: The numpy array to print. This can be either a 1D vector or 2D matrix
format: The format string to use for each value
includeIndices: If true, include [row,col] label for each value
includeZeros: Can only be set to False if includeIndices is on.
If True, include 0 values in the print-out
If False, exclude 0 values from the print-out.
|
def download_align(from_idx, to_idx, _params):
    """Download and unpack GRID-corpus align archives for speakers in a range.

    Speaker index 0 is skipped (there is no speaker s0).

    :param from_idx: first speaker index (inclusive)
    :param to_idx: last speaker index (exclusive)
    :param _params: dict containing 'align_path', the target directory
    :return: tuple (succ, fail) of sets of speaker indices
    """
    succ = set()
    fail = set()
    for idx in range(from_idx, to_idx):
        if idx == 0:
            continue
        name = 's' + str(idx)
        script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/align/{nm}.tar".format(nm=name)
        down_sc = 'cd {align_path} && wget {script} && \
tar -xvf {nm}.tar'.format(script=script,
                          nm=name,
                          align_path=_params['align_path'])
        print(down_sc)
        # Bug fix: os.system does not raise OSError on command failure, so
        # the previous try/except never recorded failures. Use the exit
        # status to classify success/failure instead.
        status = os.system(down_sc)
        if status == 0:
            succ.add(idx)
        else:
            fail.add(idx)
    return (succ, fail)
|
download aligns
|
def predict(self, X):
    """Predict inside or outside AD for X.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    Returns
    -------
    ad : array of shape = [n_samples]
        Array contains True (reaction in AD) and False (reaction residing outside AD).
    """
    # Fail fast if fit() has not produced the inverse influence matrix.
    check_is_fitted(self, ['inverse_influence_matrix'])
    # Validate/convert the input.
    X = check_array(X)
    leverages = self.__find_leverages(X, self.inverse_influence_matrix)
    return leverages <= self.threshold_value
|
Predict inside or outside AD for X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
ad : array of shape = [n_samples]
Array contains True (reaction in AD) and False (reaction residing outside AD).
|
def geopotential_to_height(geopot):
    r"""Compute height from a given geopotential.

    Parameters
    ----------
    geopotential : `pint.Quantity`
        Geopotential (array_like)

    Returns
    -------
    `pint.Quantity`
        The corresponding height value(s)

    Notes
    -----
    Derived from definition of geopotential in [Hobbs2006]_ pg.14 Eq.1.8.
    Inverse of ``height_to_geopotential``.
    """
    # Invert the height -> geopotential relation:
    #   height = 1 / (1/Re - geopot / (G * me)) - Re
    scaled = geopot / (mpconsts.G * mpconsts.me)
    return ((1 / mpconsts.Re) - scaled) ** -1 - mpconsts.Re
|
r"""Compute height from a given geopotential.
Parameters
----------
geopotential : `pint.Quantity`
Geopotential (array_like)
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> from metpy.constants import g, G, me, Re
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0,10000, num = 11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305888
39251.39289118 49056.54621087 58858.62446525 68657.62910064
78453.56156253 88246.42329545 98036.21574306], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
Notes
-----
Derived from definition of geopotential in [Hobbs2006]_ pg.14 Eq.1.8.
|
def get_focus(self, filt=False, samples=None, subset=None, nominal=False):
    """
    Collect all data from all samples into a single array.
    Data from standards is not collected.

    Parameters
    ----------
    filt : str, dict or bool
        Either logical filter expression contained in a str,
        a dict of expressions specifying the filter string to
        use for each analyte or a boolean. Passed to `grab_filt`.
    samples : str or list
        which samples to get
    subset : str or int
        which subset to get
    nominal : bool
        if True, strip uncertainties and store nominal values only.

    Returns
    -------
    None
    """
    if samples is not None:
        subset = self.make_subset(samples)
    samples = self._get_samples(subset)

    # Accumulate per-sample arrays; filtered-out points become NaN.
    focus = {'uTime': []}
    for analyte in self.analytes:
        focus[analyte] = []

    for name in samples:
        sample = self.data[name]
        focus['uTime'].append(sample.uTime)
        keep = sample.filt.grab_filt(filt)
        for analyte in self.analytes:
            arr = sample.focus[analyte].copy()
            arr[~keep] = np.nan
            focus[analyte].append(arr)

    if nominal:
        self.focus.update({k: nominal_values(np.concatenate(v))
                           for k, v in focus.items()})
    else:
        self.focus.update({k: np.concatenate(v) for k, v in focus.items()})
    return
|
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
|
def reboot(self, target_mode=None, timeout_ms=None):
    """Reboots the device.

    Args:
        target_mode: Normal reboot when unspecified (or None). Can specify
            other target modes, such as 'recovery' or 'bootloader'.
        timeout_ms: Optional timeout in milliseconds to wait for a response.

    Returns:
        Usually the empty string. Depends on the bootloader and the target_mode.
    """
    response = self._simple_command('reboot', arg=target_mode,
                                    timeout_ms=timeout_ms)
    return response
|
Reboots the device.
Args:
target_mode: Normal reboot when unspecified (or None). Can specify
other target modes, such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode.
|
def make_argument_subquery(arg):
    """
    Decide when a Join argument needs to be wrapped in a subquery
    """
    if isinstance(arg, (GroupBy, Projection)):
        return Subquery.create(arg)
    if arg.restriction:
        return Subquery.create(arg)
    return arg
|
Decide when a Join argument needs to be wrapped in a subquery
|
def setAccessRules(self, pid, public=False):
    """
    Set access rules for a resource. Current only allows for setting the public or private setting.

    :param pid: The HydroShare ID of the resource
    :param public: True if the resource should be made public.
    :return: The resource ID confirmed by the server.
    """
    url = "{url_base}/resource/accessRules/{pid}/".format(url_base=self.url_base,
                                                          pid=pid)
    params = {'public': public}
    r = self._request('PUT', url, data=params)
    # Translate error statuses into the library's exception hierarchy.
    if r.status_code == 403:
        raise HydroShareNotAuthorized(('PUT', url))
    if r.status_code == 404:
        raise HydroShareNotFound((pid,))
    if r.status_code != 200:
        raise HydroShareHTTPException((url, 'PUT', r.status_code, params))
    resource = r.json()
    assert(resource['resource_id'] == pid)
    return resource['resource_id']
|
Set access rules for a resource. Current only allows for setting the public or private setting.
:param pid: The HydroShare ID of the resource
:param public: True if the resource should be made public.
|
def img_search_bing(album):
    """Search Bing Images for album art and return the first result URL.

    :param album: album name; " Album Art" is appended to the query.
    :return: URL string of the first image hit, or None when the response
        contains no 'value' results (e.g. API error or empty result set).
    """
    setup()
    album = album + " Album Art"
    # Removed dead local `api_key = "Key"`: the real key is the module-level
    # BING_KEY configured by setup().
    endpoint = "https://api.cognitive.microsoft.com/bing/v5.0/images/search"
    links_dict = {}
    headers = {'Ocp-Apim-Subscription-Key': str(BING_KEY)}
    param = {'q': album, 'count': '1'}
    response = requests.get(endpoint, headers=headers, params=param)
    response = response.json()
    try:
        for key, item in enumerate(response['value']):
            links_dict[str(key)] = str(item['contentUrl'])
        # KeyError here also covers an empty result list (no "0" entry).
        return links_dict["0"]
    except KeyError:
        return None
|
Bing image search
|
def current_values(self):
    """Return a dict of all the 'current' parameters."""
    # Map output keys to the instance attributes that back them.
    attr_map = {
        'date': 'current_session_date',
        'score': 'current_sleep_score',
        'stage': 'current_sleep_stage',
        'breakdown': 'current_sleep_breakdown',
        'tnt': 'current_tnt',
        'bed_temp': 'current_bed_temp',
        'room_temp': 'current_room_temp',
        'resp_rate': 'current_resp_rate',
        'heart_rate': 'current_heart_rate',
        'processing': 'current_session_processing',
    }
    return {key: getattr(self, attr) for key, attr in attr_map.items()}
|
Return a dict of all the 'current' parameters.
|
def _move_cursor_to_column(self, column):
    """
    Moves the cursor to the specified column, padding the current line
    with spaces if it is shorter than the target column.
    """
    last_col = len(self._cursor.block().text())
    self._cursor.movePosition(self._cursor.EndOfBlock)
    # Idiom fix: build the padding with string multiplication instead of a
    # += loop (a negative width yields '', i.e. no padding required).
    padding = ' ' * (column - last_col)
    if padding:
        self._cursor.insertText(padding)
    self._cursor.movePosition(self._cursor.StartOfBlock)
    self._cursor.movePosition(self._cursor.Right, self._cursor.MoveAnchor, column)
    self._last_cursor_pos = self._cursor.position()
|
Moves the cursor to the specified column, if possible.
|
def _discover_mac(self):
    """ Discovers MAC address of device.

    Discovery is done by sending a UDP broadcast.
    All configured devices reply. The response contains
    the MAC address in both needed formats.
    Discovery of multiple switches must be done synchronously.

    :returns: Tuple of MAC address and reversed MAC address.
    """
    mac = mac_reversed = None
    request = MAGIC + DISCOVERY
    response = self._udp_transact(request, self._discovery_resp,
                                  broadcast=True,
                                  timeout=DISCOVERY_TIMEOUT)
    if response:
        mac, mac_reversed = response
    if mac is None:
        # No (usable) reply within the discovery timeout.
        raise S20Exception("Couldn't discover {}".format(self.host))
    return (mac, mac_reversed)
|
Discovers MAC address of device.
Discovery is done by sending a UDP broadcast.
All configured devices reply. The response contains
the MAC address in both needed formats.
Discovery of multiple switches must be done synchronously.
:returns: Tuple of MAC address and reversed MAC address.
|
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is re-evaluated for each test sample with the non-important features set to an imputed value.

    Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
    being able to estimate the full data covariance matrix (and inverse) accurately. So X_train.shape[0] should
    be significantly bigger than X_train.shape[1].
    """
    X_train, X_test = to_array(X_train, X_test)

    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]

    # keep nkeep top features for each test explanation
    # Empirical covariance, regularized slightly so the observed-block inverse exists.
    C = np.cov(X_train.T)
    C += np.eye(C.shape[0]) * 1e-6
    X_test_tmp = X_test.copy()
    yp_masked_test = np.zeros(y_test.shape)
    # Tiny deterministic noise so argsort breaks attribution ties reproducibly.
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nkeep[i] < X_test.shape[1]:
            # Rank features by attribution (descending); observe the top nkeep[i],
            # impute the rest.
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            observe_inds = ordering[:nkeep[i]]
            impute_inds = ordering[nkeep[i]:]

            # impute missing data assuming it follows a multivariate normal distribution
            # (conditional mean of the imputed block given the observed block)
            Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
            Cio = C[impute_inds,:][:,observe_inds]
            impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])

            X_test_tmp[i, impute_inds] = impute
    # Predictions are computed once on the fully imputed matrix (the zeros
    # initialization above is overwritten wholesale here).
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
|
The model is re-evaluated for each test sample with the non-important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accurately. So X_train.shape[0] should
be significantly bigger than X_train.shape[1].
|
def _get_ignore_from_manifest(filename):
    """Gather the various ignore patterns from a MANIFEST.in.

    Returns a list of standard ignore patterns and a list of regular
    expressions to ignore.
    """
    class _StrictTextFile(TextFile):
        def error(self, msg, line=None):  # pragma: nocover
            # (this is never called by TextFile in current versions of CPython)
            raise Failure(self.gen_error(msg, line))

        def warn(self, msg, line=None):
            warning(self.gen_error(msg, line))

    template = _StrictTextFile(filename,
                               strip_comments=True,
                               skip_blanks=True,
                               join_lines=True,
                               lstrip_ws=True,
                               rstrip_ws=True,
                               collapse_join=True)
    try:
        lines = template.readlines()
    finally:
        template.close()
    return _get_ignore_from_manifest_lines(lines)
|
Gather the various ignore patterns from a MANIFEST.in.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
|
def get_public_rooms(self, **kwargs):
    """
    Get a listing of all public rooms with their names and IDs
    """
    request = GetPublicRooms(settings=self.settings, **kwargs)
    return request.call(**kwargs)
|
Get a listing of all public rooms with their names and IDs
|
def selected(script, face=True, vert=True):
    """ Delete selected vertices and/or faces

    Note: if the mesh has no faces (e.g. a point cloud) you must
    set face=False, or the vertices will not be deleted

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        face (bool): if True the selected faces will be deleted. If vert
            is also True, then all the vertices surrounded by those faces will
            also be deleted. Note that if no faces are selected (only vertices)
            then this filter will not do anything. For example, if you want to
            delete a point cloud selection, you must set this to False.
        vert (bool): if True the selected vertices will be deleted.

    Raises:
        ValueError: if both face and vert are False (nothing to delete).

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    if face and vert:
        filter_xml = ' <filter name="Delete Selected Faces and Vertices"/>\n'
    elif face and not vert:
        filter_xml = ' <filter name="Delete Selected Faces"/>\n'
    elif not face and vert:
        filter_xml = ' <filter name="Delete Selected Vertices"/>\n'
    else:
        # Bug fix: this case previously fell through to an UnboundLocalError
        # on filter_xml; fail with a clear message instead.
        raise ValueError('At least one of "face" or "vert" must be True')
    util.write_filter(script, filter_xml)
    return None
|
Delete selected vertices and/or faces
Note: if the mesh has no faces (e.g. a point cloud) you must
set face=False, or the vertices will not be deleted
Args:
script: the FilterScript object or script filename to write
the filter to.
face (bool): if True the selected faces will be deleted. If vert
is also True, then all the vertices surrounded by those faces will
also be deleted. Note that if no faces are selected (only vertices)
then this filter will not do anything. For example, if you want to
delete a point cloud selection, you must set this to False.
vert (bool): if True the selected vertices will be deleted.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
|
async def on_reaction_add(reaction, user):
    """The on_reaction_add event handler for this module

    Args:
        reaction (discord.Reaction): Input reaction
        user (discord.User): The user that added the reaction
    """
    # Simplify reaction info
    server = reaction.message.server
    emoji = reaction.emoji

    data = datatools.get_data()

    # Ignore servers where this module is not activated.
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Commands section
    # Only handle reactions from other users, not the bot's own.
    if user != reaction.message.channel.server.me:
        # Require an active (non-destroyed) player cached for this server.
        if server.id not in _data.cache or _data.cache[server.id].state == 'destroyed':
            return

        try:
            # The reaction is only relevant when added to the player's embed.
            valid_reaction = reaction.message.id == _data.cache[server.id].embed.sent_embed.id
        except AttributeError:
            # Embed not sent yet; nothing to handle.
            pass
        else:
            if valid_reaction:
                # Remove reaction (best effort: the message may be gone or
                # the bot may lack permission).
                try:
                    await client.remove_reaction(reaction.message, emoji, user)
                except discord.errors.NotFound:
                    pass
                except discord.errors.Forbidden:
                    pass

                # Commands: each control emoji maps to a player action.
                if emoji == "⏯":
                    await _data.cache[server.id].toggle()
                if emoji == "⏹":
                    await _data.cache[server.id].stop()
                if emoji == "⏭":
                    await _data.cache[server.id].skip("1")
                if emoji == "⏮":
                    await _data.cache[server.id].rewind("1")
                if emoji == "🔀":
                    await _data.cache[server.id].shuffle()
                if emoji == "🔉":
                    await _data.cache[server.id].setvolume('-')
                if emoji == "🔊":
                    await _data.cache[server.id].setvolume('+')
|
The on_message event handler for this module
Args:
reaction (discord.Reaction): Input reaction
user (discord.User): The user that added the reaction
|
def require(self, fieldname, allow_blank=False):
    """Ensure *fieldname* is present (and, unless allowed, non-blank) in the request form.

    :param fieldname: name of the required form field.
    :param allow_blank: when True, an empty/falsy value is accepted.
    :raises Exception: if the field is missing, or blank while allow_blank is False.
    """
    if self.request.form and fieldname not in self.request.form.keys():
        raise Exception("Required field not found in request: %s" % fieldname)
    # Bug fix: the original condition `(not value or allow_blank)` raised
    # whenever allow_blank was True, and the message never interpolated
    # the field name.
    if self.request.form and not self.request.form[fieldname] and not allow_blank:
        raise Exception("Required field %s may not have blank value" % fieldname)
|
fieldname is required
|
def by_date(self, chamber, date):
    """Return votes cast in a chamber on a single day."""
    day = parse_date(date)
    # A single day is just a range whose start and end coincide.
    return self.by_range(chamber, day, day)
|
Return votes cast in a chamber on a single day
|
def plugins(self):
    """
    Get the set of plugins that this field may display.
    """
    from fluent_contents import extensions
    if self._plugins is None:
        # No explicit list configured: all registered plugins are allowed.
        return extensions.plugin_pool.get_plugins()
    try:
        return extensions.plugin_pool.get_plugins_by_name(*self._plugins)
    except extensions.PluginNotFound as e:
        message = str(e) + (
            " Update the plugin list of '{0}.{1}' field or "
            "FLUENT_CONTENTS_PLACEHOLDER_CONFIG['{2}'] setting.".format(
                self.model._meta.object_name, self.name, self.slot))
        raise extensions.PluginNotFound(message)
|
Get the set of plugins that this field may display.
|
def uniform(self, a: float, b: float, precision: int = 15) -> float:
    """Get a random number in the range [a, b) or [a, b] depending on rounding.

    :param a: Minimum value.
    :param b: Maximum value.
    :param precision: Round a number to a given
        precision in decimal digits, default is 15.
    """
    span = b - a
    return round(a + span * self.random(), precision)
|
Get a random number in the range [a, b) or [a, b] depending on rounding.
:param a: Minimum value.
:param b: Maximum value.
:param precision: Round a number to a given
precision in decimal digits, default is 15.
|
def refresh(self) -> None:
    """Update the actual simulation values based on the toy-value pairs.

    Usually, one does not need to call refresh explicitly.  The
    "magic" methods __call__, __setattr__, and __delattr__ invoke
    it automatically, when required.

    Instantiate a 1-dimensional |SeasonalParameter| object:

    >>> from hydpy.core.parametertools import SeasonalParameter
    >>> class Par(SeasonalParameter):
    ...     NDIM = 1
    ...     TYPE = float
    ...     TIME = None
    >>> par = Par(None)
    >>> par.simulationstep = '1d'
    >>> par.shape = (None,)

    When a |SeasonalParameter| object does not contain any toy-value
    pairs yet, the method |SeasonalParameter.refresh| sets all actual
    simulation values to zero:

    >>> par.values = 1.
    >>> par.refresh()
    >>> par.values[0]
    0.0

    When there is only one toy-value pair, its values are relevant
    for all actual simulation values:

    >>> par.toy_1 = 2.  # calls refresh automatically
    >>> par.values[0]
    2.0

    Method |SeasonalParameter.refresh| performs a linear interpolation
    for the central time points of each simulation time step. Hence,
    in the following example, the original values of the toy-value
    pairs do not show up:

    >>> par.toy_12_31 = 4.
    >>> from hydpy import round_
    >>> round_(par.values[0])
    2.00274
    >>> round_(par.values[-2])
    3.99726
    >>> par.values[-1]
    3.0

    If one wants to preserve the original values in this example, one
    would have to set the corresponding toy instances in the middle of
    some simulation step intervals:

    >>> del par.toy_1
    >>> del par.toy_12_31
    >>> par.toy_1_1_12 = 2
    >>> par.toy_12_31_12 = 4.
    >>> par.values[0]
    2.0
    >>> round_(par.values[1])
    2.005479
    >>> round_(par.values[-2])
    3.994521
    >>> par.values[-1]
    4.0
    """
    if not self:
        # No toy-value pairs at all: zero every simulation value.
        self.values[:] = 0.
    elif len(self) == 1:
        # A single toy-value pair applies uniformly to every step.
        values = list(self._toy2values.values())[0]
        self.values[:] = self.apply_timefactor(values)
    else:
        # Interpolate linearly at the centre of each simulation step.
        for idx, date in enumerate(
                timetools.TOY.centred_timegrid(self.simulationstep)):
            values = self.interp(date)
            self.values[idx] = self.apply_timefactor(values)
|
Update the actual simulation values based on the toy-value pairs.
Usually, one does not need to call refresh explicitly. The
"magic" methods __call__, __setattr__, and __delattr__ invoke
it automatically, when required.
Instantiate a 1-dimensional |SeasonalParameter| object:
>>> from hydpy.core.parametertools import SeasonalParameter
>>> class Par(SeasonalParameter):
... NDIM = 1
... TYPE = float
... TIME = None
>>> par = Par(None)
>>> par.simulationstep = '1d'
>>> par.shape = (None,)
When a |SeasonalParameter| object does not contain any toy-value
pairs yet, the method |SeasonalParameter.refresh| sets all actual
simulation values to zero:
>>> par.values = 1.
>>> par.refresh()
>>> par.values[0]
0.0
When there is only one toy-value pair, its values are relevant
for all actual simulation values:
>>> par.toy_1 = 2. # calls refresh automatically
>>> par.values[0]
2.0
Method |SeasonalParameter.refresh| performs a linear interpolation
for the central time points of each simulation time step. Hence,
in the following example, the original values of the toy-value
pairs do not show up:
>>> par.toy_12_31 = 4.
>>> from hydpy import round_
>>> round_(par.values[0])
2.00274
>>> round_(par.values[-2])
3.99726
>>> par.values[-1]
3.0
If one wants to preserve the original values in this example, one
would have to set the corresponding toy instances in the middle of
some simulation step intervals:
>>> del par.toy_1
>>> del par.toy_12_31
>>> par.toy_1_1_12 = 2
>>> par.toy_12_31_12 = 4.
>>> par.values[0]
2.0
>>> round_(par.values[1])
2.005479
>>> round_(par.values[-2])
3.994521
>>> par.values[-1]
4.0
|
def _extract_dot15d4address(pkt, source=True):
    """This function extracts the source/destination address of a 6LoWPAN
    from its upper Dot15d4Data (802.15.4 data) layer.

    params:
        - source: if True, the address is the source one. Otherwise, it is the
          destination.
    returns: the packed & processed address
    """
    underlayer = pkt.underlayer
    # Walk up the layer stack until the 802.15.4 data layer is found.
    while underlayer is not None and not isinstance(underlayer, Dot15d4Data):  # noqa: E501
        underlayer = underlayer.underlayer
    # Bug fix: use isinstance here as well; the previous `type(...) ==` test
    # disagreed with the isinstance guard above for Dot15d4Data subclasses.
    if isinstance(underlayer, Dot15d4Data):
        addr = underlayer.src_addr if source else underlayer.dest_addr
        # NOTE(review): the *destination* address mode is consulted even when
        # extracting the source address -- confirm this matches the intent.
        if underlayer.underlayer.fcf_destaddrmode == 3:
            # 64-bit extended address embedded in the link-local prefix.
            tmp_ip = LINK_LOCAL_PREFIX[0:8] + struct.pack(">Q", addr)  # noqa: E501
            # Turn off the bit 7.
            tmp_ip = tmp_ip[0:8] + struct.pack("B", (orb(tmp_ip[8]) ^ 0x2)) + tmp_ip[9:16]  # noqa: E501
        elif underlayer.underlayer.fcf_destaddrmode == 2:
            # 16-bit short address, padded per RFC 4944 (0000:00ff:fe00:xxxx).
            tmp_ip = LINK_LOCAL_PREFIX[0:8] + \
                b"\x00\x00\x00\xff\xfe\x00" + \
                struct.pack(">Q", addr)[6:]
        else:
            # Bug fix: previously fell through to an UnboundLocalError on
            # tmp_ip; raise a clear error for unsupported address modes.
            raise Exception("Unimplemented: unsupported address mode %r" % underlayer.underlayer.fcf_destaddrmode)  # noqa: E501
        return tmp_ip
    else:
        # Most of the times, it's necessary the IEEE 802.15.4 data to extract this address  # noqa: E501
        raise Exception('Unimplemented: IP Header is contained into IEEE 802.15.4 frame, in this case it\'s not available.')
|
This function extracts the source/destination address of a 6LoWPAN
from its upper Dot15d4Data (802.15.4 data) layer.
params:
- source: if True, the address is the source one. Otherwise, it is the
destination.
returns: the packed & processed address
|
def to_statement(self, parameter_values):
    """
    With the given values for each parameter, this method will return a policy statement that can be used
    directly with IAM.

    :param dict parameter_values: Dict containing values for each parameter defined in the template
    :return dict: Dictionary containing policy statement
    :raises InvalidParameterValues: If parameter values is not a valid dictionary or does not contain values
        for all parameters
    :raises InsufficientParameterValues: If the parameter values don't have values for all required parameters
    """
    missing = self.missing_parameter_values(parameter_values)
    if missing:
        # str() of elements of list to prevent any `u` prefix from being displayed in user-facing error message
        missing_names = [str(m) for m in missing]
        raise InsufficientParameterValues(
            "Following required parameters of template '{}' don't have values: {}"
            .format(self.name, missing_names))

    # Keep only values for parameters the template declares, so that "Ref"
    # resolution cannot substitute unintended (malicious or accidental) inputs.
    relevant_values = {}
    for name, value in parameter_values.items():
        if name in self.parameters:
            relevant_values[name] = value

    # Only "Ref" is supported
    resolver = IntrinsicsResolver(relevant_values,
                                  {RefAction.intrinsic_name: RefAction()})
    # Resolve against a deep copy so the stored template stays pristine.
    return resolver.resolve_parameter_refs(copy.deepcopy(self.definition))
|
With the given values for each parameter, this method will return a policy statement that can be used
directly with IAM.
:param dict parameter_values: Dict containing values for each parameter defined in the template
:return dict: Dictionary containing policy statement
:raises InvalidParameterValues: If parameter values is not a valid dictionary or does not contain values
for all parameters
:raises InsufficientParameterValues: If the parameter values don't have values for all required parameters
|
def kde(data, grid, package, **kwargs):
    """
    Kernel Density Estimation.

    Parameters
    ----------
    package : str
        Backend whose kernel density estimation to use. One of
        `['statsmodels-u', 'statsmodels-m', 'scipy', 'sklearn']`;
        `'statsmodels'` is accepted as an alias for `'statsmodels-m'`.
    data : numpy.array
        Data points used to compute a density estimator. It
        has `n x p` dimensions, representing n points and p
        variables.
    grid : numpy.array
        Data points at which the density will be estimated. It
        has `m x p` dimensions, representing m points and p
        variables.

    Returns
    -------
    out : numpy.array
        Density estimate. Has `m x 1` dimensions.
    """
    # Normalize the bare 'statsmodels' alias to the multivariate backend.
    backend = 'statsmodels-m' if package == 'statsmodels' else package
    return KDE_FUNCS[backend](data, grid, **kwargs)
|
Kernel Density Estimation
Parameters
----------
package : str
Package whose kernel density estimation to use.
Should be one of
`['statsmodels-u', 'statsmodels-m', 'scipy', 'sklearn']`.
data : numpy.array
Data points used to compute a density estimator. It
has `n x p` dimensions, representing n points and p
variables.
grid : numpy.array
    Data points at which the density will be estimated. It
has `m x p` dimensions, representing m points and p
variables.
Returns
-------
out : numpy.array
Density estimate. Has `m x 1` dimensions
|
def check_for_cores(self):
    """! @brief Init task: verify that at least one core was discovered."""
    # Guard clause: nothing to do when at least one core exists.
    if len(self.cores) > 0:
        return
    # Allow the user to override the exception to enable uses like chip bringup.
    if self.session.options.get('allow_no_cores', False):
        logging.error("No cores were discovered!")
    else:
        raise exceptions.DebugError("No cores were discovered!")
|
! @brief Init task: verify that at least one core was discovered.
|
def _indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
predicate = lambda line: line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield prefix + line if predicate(line) else line
return "".join(prefixed_lines())
|
Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
|
def get_sites_in_sphere(self, pt, r, include_index=False, include_image=False):
    """
    Find all sites within a sphere from the point. This includes sites
    in other periodic images.

    Algorithm:

    1. place sphere of radius r in crystal and determine minimum supercell
       (parallelpiped) which would contain a sphere of radius r. for this
       we need the projection of a_1 on a unit vector perpendicular
       to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
       determine how many a_1"s it will take to contain the sphere.
       Nxmax = r * length_of_b_1 / (2 Pi)
    2. keep points falling within r.

    Args:
        pt (3x1 array): cartesian coordinates of center of sphere.
        r (float): Radius of sphere.
        include_index (bool): Whether the non-supercell site index
            is included in the returned data
        include_image (bool): Whether to include the supercell image
            is included in the returned data

    Returns:
        [(site, dist) ...] since most of the time, subsequent processing
        requires the distance.
    """
    # Wrap all fractional coordinates into [0, 1) before the lattice search.
    site_fcoords = np.mod(self.frac_coords, 1)
    neighbors = []
    for fcoord, dist, i, img in self._lattice.get_points_in_sphere(
            site_fcoords, pt, r):
        # Rebuild a site at the (possibly image-shifted) fractional coords,
        # carrying over the original site's species and properties.
        nnsite = PeriodicSite(self[i].species,
                              fcoord, self._lattice,
                              properties=self[i].properties)
        # Get the neighbor data
        nn_data = (nnsite, dist) if not include_index else (nnsite, dist, i)
        if include_image:
            # Append the periodic image vector as a trailing tuple element.
            nn_data += (img,)
        neighbors.append(nn_data)
    return neighbors
|
Find all sites within a sphere from the point. This includes sites
in other periodic images.
Algorithm:
1. place sphere of radius r in crystal and determine minimum supercell
(parallelpiped) which would contain a sphere of radius r. for this
we need the projection of a_1 on a unit vector perpendicular
to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
determine how many a_1"s it will take to contain the sphere.
Nxmax = r * length_of_b_1 / (2 Pi)
2. keep points falling within r.
Args:
pt (3x1 array): cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
include_image (bool): Whether to include the supercell image
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
|
def get_board_mapping_parent_items(self, team_context, child_backlog_context_category_ref_name, workitem_ids):
    """GetBoardMappingParentItems.

    [Preview API] Returns the list of parent field filter model for the given list of workitem ids

    :param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
    :param str child_backlog_context_category_ref_name: Reference name of the child backlog context category.
    :param [int] workitem_ids: Work item ids whose parent mappings are requested.
    :rtype: [ParentChildWIMap]
    """
    project = None
    team = None
    if team_context is not None:
        # Prefer the stable ids over display names when both are present.
        if team_context.project_id:
            project = team_context.project_id
        else:
            project = team_context.project
        if team_context.team_id:
            team = team_context.team_id
        else:
            team = team_context.team
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'string')
    if team is not None:
        route_values['team'] = self._serialize.url('team', team, 'string')
    query_parameters = {}
    if child_backlog_context_category_ref_name is not None:
        query_parameters['childBacklogContextCategoryRefName'] = self._serialize.query('child_backlog_context_category_ref_name', child_backlog_context_category_ref_name, 'str')
    if workitem_ids is not None:
        # The REST endpoint expects the id list as one comma-separated string.
        workitem_ids = ",".join(map(str, workitem_ids))
        query_parameters['workitemIds'] = self._serialize.query('workitem_ids', workitem_ids, 'str')
    response = self._send(http_method='GET',
                          location_id='186abea3-5c35-432f-9e28-7a15b4312a0e',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[ParentChildWIMap]', self._unwrap_collection(response))
|
GetBoardMappingParentItems.
[Preview API] Returns the list of parent field filter model for the given list of workitem ids
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str child_backlog_context_category_ref_name:
:param [int] workitem_ids:
:rtype: [ParentChildWIMap]
|
def from_labels_and_predictions(labels, predictions, num_classes):
    '''Compute a confusion matrix from labels and predictions.

    A drop-in replacement for tf.confusion_matrix that works on CPU data
    and not tensors.

    Params
    ------
    labels : array-like
        1-D array of real labels for classification
    predictions: array-like
        1-D array of predicted label classes
    num_classes: scalar
        Total number of classes

    Returns
    -------
    matrix : NxN array
        Array of shape [num_classes, num_classes] containing the confusion values.
    '''
    assert len(labels) == len(predictions)
    matrix = np.zeros((num_classes, num_classes), dtype=np.int32)
    # Tally each (true, predicted) pair into its cell.
    for true_label, predicted in zip(labels, predictions):
        matrix[true_label, predicted] += 1
    return matrix
|
Compute a confusion matrix from labels and predictions.
A drop-in replacement for tf.confusion_matrix that works on CPU data
and not tensors.
Params
------
labels : array-like
1-D array of real labels for classification
predictions: array-like
1-D array of predicted label classes
num_classes: scalar
Total number of classes
Returns
-------
matrix : NxN array
Array of shape [num_classes, num_classes] containing the confusion values.
|
def create_with(
    cls, event: str = None, observable: T.Union[str, Observable] = None
) -> T.Callable[..., "ObservableProperty"]:
    """Build a partial constructor with *event* and *observable* preset.

    The returned callable constructs the class with those two keyword
    arguments already bound."""
    preset_ctor = functools.partial(cls, event=event, observable=observable)
    return preset_ctor
|
Creates a partial application of ObservableProperty with
event and observable preset.
|
def iter_relation(self):
    """Iterate through all (point, element) pairs in the relation."""
    # Each point of the input space pairs with the relation restricted to it.
    for pt in iter_points(self.inputs):
        yield (pt, self.restrict(pt))
|
Iterate through all (point, element) pairs in the relation.
|
def hash_args(*args, **kwargs):
    """Define a unique string for any set of representable args.

    Builds ``"arg1_arg2:key1=val1_key2=val2"`` from ``str()`` of every value
    and returns its MD5 hex digest.

    :param args: positional values; each must have a meaningful ``str()``.
    :param kwargs: keyword values; keys and values must have meaningful ``str()``.
    :return str: 32-character hexadecimal MD5 digest.
    """
    arg_string = '_'.join(str(arg) for arg in args)
    # Python-2 era six.iteritems()/six.b() replaced with native equivalents.
    # .encode('latin-1') matches six.b()'s encoding, so digests are unchanged.
    kwarg_string = '_'.join(str(key) + '=' + str(value)
                            for key, value in kwargs.items())
    combined = ':'.join([arg_string, kwarg_string])
    hasher = md5()
    hasher.update(combined.encode('latin-1'))
    return hasher.hexdigest()
|
Define a unique string for any set of representable args.
|
def alphafilter(request, queryset, template):
    """
    Render the template with the queryset filtered by the first
    '__istartswith' parameter found in the request's GET data.
    """
    # Only the first matching GET key is used for filtering.
    filter_key = next(
        (key for key in request.GET.keys() if '__istartswith' in key), None)
    lookup = {} if filter_key is None else {str(filter_key): request.GET[filter_key]}
    return render_to_response(
        template,
        {'objects': queryset.filter(**lookup),
         'unfiltered_objects': queryset},
        context_instance=RequestContext(request)
    )
|
Render the template with the filtered queryset
|
def update_domain_base_path_mapping(self, domain_name, lambda_name, stage, base_path):
    """
    Update domain base path mapping on API Gateway if it was changed

    :param domain_name: Custom domain whose base path mapping is updated.
    :param lambda_name: Lambda function name used to look up the REST API id.
    :param stage: API Gateway stage the mapping should point at.
    :param base_path: Desired base path; None is sent as the empty (root) path.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        print("Warning! Can't update base path mapping!")
        return
    base_path_mappings = self.apigateway_client.get_base_path_mappings(domainName=domain_name)
    found = False
    for base_path_mapping in base_path_mappings.get('items', []):
        if base_path_mapping['restApiId'] == api_id and base_path_mapping['stage'] == stage:
            found = True
            if base_path_mapping['basePath'] != base_path:
                # Patch the existing mapping in place instead of recreating it.
                self.apigateway_client.update_base_path_mapping(domainName=domain_name,
                                                basePath=base_path_mapping['basePath'],
                                                patchOperations=[
                                                    {"op" : "replace",
                                                     "path" : "/basePath",
                                                     "value" : '' if base_path is None else base_path}
                                                ])
    if not found:
        # No mapping exists yet for this API/stage pair -- create one.
        self.apigateway_client.create_base_path_mapping(
            domainName=domain_name,
            basePath='' if base_path is None else base_path,
            restApiId=api_id,
            stage=stage
        )
|
Update domain base path mapping on API Gateway if it was changed
|
def get_metric(run_id, metric_id):
    """
    Get a specific Sacred metric from the database.

    Returns a JSON response or HTTP 404 if not found.

    :param run_id: Id of the run the metric belongs to.
    :param metric_id: Id of the metric to fetch.

    Issue: https://github.com/chovanecm/sacredboard/issues/58
    """
    data = current_app.config["data"]  # type: DataStorage
    dao = data.get_metrics_dao()
    metric = dao.get(run_id, metric_id)
    # Render the metric through a template so the client receives
    # ready-to-consume JSON.
    return Response(render_template(
        "api/metric.js",
        run_id=metric["run_id"],
        metric_id=metric["metric_id"],
        name=metric["name"],
        steps=metric["steps"],
        timestamps=metric["timestamps"],
        values=metric["values"]),
        mimetype="application/json")
|
Get a specific Sacred metric from the database.
Returns a JSON response or HTTP 404 if not found.
Issue: https://github.com/chovanecm/sacredboard/issues/58
|
def tradingStatusSSE(symbols=None, on_data=None, token='', version=''):
    '''Stream IEX DEEP Trading-status messages over SSE.

    A Trading status message indicates the current trading status of a
    security. For IEX-listed securities, IEX acts as the primary market and
    has the authority to institute a trading halt or pause due to news
    dissemination or regulatory reasons; for non-IEX-listed securities, IEX
    abides by halts and pauses instituted by the primary or listing market.

    IEX first disseminates a full pre-market spin of Trading status messages,
    sending "T" (Trading) for every security eligible for trading at the
    start of the Pre-Market Session; securities absent from the spin should
    be assumed operationally halted in the IEX Trading System. After the
    spin, status changes are relayed per security when it is: Halted,
    Paused*, Released into an Order Acceptance Period*, or Released for
    trading.

    *Paused and Order Acceptance Period statuses are disseminated for
    IEX-listed securities only; pauses on non-IEX-listed securities are
    treated simply as halts.

    https://iexcloud.io/docs/api/#deep-trading-status

    Args:
        symbols (string); Tickers to request
        on_data (function): Callback on data
        token (string); Access token
        version (string); API version
    '''
    return _runSSE('trading-status', symbols, on_data, token, version)
|
The Trading status message is used to indicate the current trading status of a security.
For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.
For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.
IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.
In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.
After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:
Halted
Paused*
Released into an Order Acceptance Period*
Released for trading
*The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.
https://iexcloud.io/docs/api/#deep-trading-status
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
|
def binary_operator(op):
    """
    Factory function for making binary operator methods on a Factor subclass.

    Returns a function, "binary_operator" suitable for implementing functions
    like __add__.

    :param op: Operator symbol string (e.g. '+') to build a method for.
    :return: Closure implementing the binary operator for Factor subclasses.
    """
    # When combining a Factor with a NumericalExpression, we use this
    # attrgetter instance to defer to the commuted implementation of the
    # NumericalExpression operator.
    commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
    @with_doc("Binary Operator: '%s'" % op)
    @with_name(method_name_for_op(op))
    @coerce_numbers_to_my_dtype
    def binary_operator(self, other):
        # This can't be hoisted up a scope because the types returned by
        # binop_return_type aren't defined when the top-level function is
        # invoked in the class body of Factor.
        return_type = binop_return_type(op)
        if isinstance(self, NumExprFactor):
            # Merge this expression with `other` into one combined expression.
            self_expr, other_expr, new_inputs = self.build_binary_op(
                op, other,
            )
            return return_type(
                "({left}) {op} ({right})".format(
                    left=self_expr,
                    op=op,
                    right=other_expr,
                ),
                new_inputs,
                dtype=binop_return_dtype(op, self.dtype, other.dtype),
            )
        elif isinstance(other, NumExprFactor):
            # NumericalExpression overrides ops to correctly handle merging of
            # inputs. Look up and call the appropriate reflected operator with
            # ourself as the input.
            return commuted_method_getter(other)(self)
        elif isinstance(other, Term):
            if self is other:
                # Same term on both sides: bind a single input (x_0 op x_0).
                return return_type(
                    "x_0 {op} x_0".format(op=op),
                    (self,),
                    dtype=binop_return_dtype(op, self.dtype, other.dtype),
                )
            return return_type(
                "x_0 {op} x_1".format(op=op),
                (self, other),
                dtype=binop_return_dtype(op, self.dtype, other.dtype),
            )
        elif isinstance(other, Number):
            return return_type(
                "x_0 {op} ({constant})".format(op=op, constant=other),
                binds=(self,),
                # .dtype access is safe here because coerce_numbers_to_my_dtype
                # will convert any input numbers to numpy equivalents.
                dtype=binop_return_dtype(op, self.dtype, other.dtype)
            )
        raise BadBinaryOperator(op, self, other)
    return binary_operator
|
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
|
def parse_simpleexprsp(self, tup_tree):
    # pylint: disable=unused-argument
    """
    This Function not implemented. This response is for export senders
    (indication senders) so it is not implemented in the pywbem
    client.
    """
    message = _format("Internal Error: Parsing support for element {0!A} is not "
                      "implemented", name(tup_tree))
    raise CIMXMLParseError(message, conn_id=self.conn_id)
|
This Function not implemented. This response is for export senders
(indication senders) so it is not implemented in the pywbem
client.
|
def env(key, default=None, required=False):
    """
    Retrieves environment variables and returns Python natives. The (optional)
    default will be returned if the environment variable does not exist.

    :param str key: Name of the environment variable.
    :param default: Value returned when the variable is unset.
    :param bool required: When True, the variable is unset, and no default was
        supplied, raise ImproperlyConfigured instead of returning None.
    :return: The value parsed via ast.literal_eval when it is a valid Python
        literal, the raw string otherwise, or ``default`` when unset.
    :raises ImproperlyConfigured: Variable unset, required, and no default.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # BUG FIX: the old check `if default or not required` raised even when
        # a falsy default (0, '', False) was explicitly supplied.
        if default is not None or not required:
            return default
        raise ImproperlyConfigured(
            "Missing required environment variable '%s'" % key)
    try:
        # Parse literals: '1' -> 1, 'True' -> True, '[1, 2]' -> [1, 2].
        return ast.literal_eval(value)
    except (SyntaxError, ValueError):
        # Not a Python literal; return the raw string unchanged.
        return value
|
Retrieves environment variables and returns Python natives. The (optional)
default will be returned if the environment variable does not exist.
|
def cmd_long(self, args):
    '''execute supplied command long

    args[0] is either a numeric command id or a MAVLink command name
    (with or without the MAV_CMD_ prefix); args[1:] are up to seven
    float parameters, zero-padded.
    '''
    if len(args) < 1:
        print("Usage: long <command> [arg1] [arg2]...")
        return
    command = None
    if args[0].isdigit():
        command = int(args[0])
    else:
        # SECURITY FIX: resolve the name with getattr() instead of eval() --
        # eval() executed arbitrary user-typed expressions and also crashed
        # with an uncaught SyntaxError on malformed input.
        command = getattr(mavutil.mavlink, args[0], None)
        if command is None:
            # Allow the MAV_CMD_ prefix to be omitted.
            command = getattr(mavutil.mavlink, "MAV_CMD_" + args[0], None)
    if command is None:
        print("Unknown command long ({0})".format(args[0]))
        return
    # MAVLink COMMAND_LONG always carries exactly seven float params.
    floating_args = [ float(x) for x in args[1:] ]
    while len(floating_args) < 7:
        floating_args.append(float(0))
    self.master.mav.command_long_send(self.settings.target_system,
                                      self.settings.target_component,
                                      command,
                                      0,
                                      *floating_args)
|
execute supplied command long
|
def set_unavailable(self):
    """Sets the agent availability to False."""
    # Publish an explicit "not available" presence with no show value.
    unavailable_state = PresenceState(available=False, show=PresenceShow.NONE)
    self.set_presence(unavailable_state)
|
Sets the agent availability to False.
|
def get_group_value(self, token, match):
    """Return value of regex match for the specified group.

    The group name is '<token.name>_<self.group>'; a missing group yields
    the empty string. When self.func is callable it post-processes the
    captured value."""
    group_name = '{}_{}'.format(token.name, self.group)
    try:
        raw = match.group(group_name)
    except IndexError:
        # Named group not present in the pattern -> empty value.
        raw = ''
    if callable(self.func):
        return self.func(raw)
    return raw
|
Return value of regex match for the specified group
|
def send_rsp_recv_cmd(self, target, data, timeout):
    """While operating as *target* send response *data* to the remote
    device and return new command data if received within
    *timeout* seconds.
    """
    # Thin delegation to the base class implementation; presumably exists
    # to anchor this docstring on the subclass -- confirm.
    return super(Device, self).send_rsp_recv_cmd(target, data, timeout)
|
While operating as *target* send response *data* to the remote
device and return new command data if received within
*timeout* seconds.
|
def get_thumbprint(self):
    """
    Calculates the current thumbprint of the item being tracked.

    Runs a shell pipeline that md5sums every file under self.base_dir
    matching self.extensions (space-separated glob patterns), sorts the
    per-file sums by path, and md5sums that listing, yielding one digest
    that changes whenever any tracked file changes.
    """
    # Local import keeps this fix self-contained.
    import shlex
    extensions = self.extensions.split(' ')
    # ROBUSTNESS FIX: shell-quote the patterns and the base directory so
    # paths containing spaces or shell metacharacters cannot break (or
    # inject into) the command line.
    name_str = ' -or '.join('-name %s' % shlex.quote(ext) for ext in extensions)
    cmd = ('find ' + shlex.quote(self.base_dir)
           + r' -type f \( ' + name_str + r' \) -exec md5sum {} \; | sort -k 2 | md5sum')
    return getoutput(cmd)
|
Calculates the current thumbprint of the item being tracked.
|
def apply_lens(df, lens='pca', dist='euclidean', n_dim=2, **kwargs):
    """
    Project observations into a low-dimensional image under a lens function.

    input: N x F dataframe of observations
    output: N x n_dim image of input data under lens function

    :param df: N x F pandas DataFrame of observations.
    :param lens: 'pca', 'mds', or 'neighbor' (spectral embedding).
    :param dist: 'euclidean' or 'correlation' distance metric.
    :param n_dim: Dimensionality of the image; only 2 is supported.
    :param kwargs: Extra arguments forwarded to the underlying estimator.
    :return: N x n_dim pandas DataFrame indexed like ``df``.
    :raises ValueError: On an unsupported lens/dist/n_dim combination.
    """
    # BUG FIX: the original `raise '<string>'` statements are invalid in
    # Python 3 (raising a str is itself a TypeError); raise ValueError with
    # the same messages instead.
    if n_dim != 2:
        raise ValueError('error: image of data set must be two-dimensional')
    if dist not in ['euclidean', 'correlation']:
        raise ValueError('error: only euclidean and correlation distance metrics are supported')
    if lens == 'pca' and dist != 'euclidean':
        raise ValueError('error: PCA requires the use of euclidean distance metric')
    if lens == 'pca':
        df_lens = pd.DataFrame(decomposition.PCA(n_components=n_dim, **kwargs).fit_transform(df), df.index)
    elif lens == 'mds':
        D = metrics.pairwise.pairwise_distances(df, metric=dist)
        df_lens = pd.DataFrame(manifold.MDS(n_components=n_dim, **kwargs).fit_transform(D), df.index)
    elif lens == 'neighbor':
        D = metrics.pairwise.pairwise_distances(df, metric=dist)
        df_lens = pd.DataFrame(manifold.SpectralEmbedding(n_components=n_dim, **kwargs).fit_transform(D), df.index)
    else:
        raise ValueError('error: only PCA, MDS, neighborhood lenses are supported')
    return df_lens
|
input: N x F dataframe of observations
output: N x n_dim image of input data under lens function
|
def p_route_version(self, p):
    # NOTE: the docstring below is a PLY grammar production parsed by yacc,
    # not documentation -- do not edit it without updating the grammar.
    """route_version : COLON INTEGER
                     | empty"""
    # len(p) > 2 means the optional ':<int>' suffix was present.
    if len(p) > 2:
        if p[2] <= 0:
            # Record the error but still propagate the parsed value.
            msg = "Version number should be a positive integer."
            self.errors.append((msg, p.lineno(2), self.path))
        p[0] = p[2]
    else:
        # No explicit version -> default to version 1.
        p[0] = 1
|
route_version : COLON INTEGER
| empty
|
def energy(self, spins, break_aux_symmetry=True):
    """A formula for the exact energy of Theta with spins fixed.

    Args:
        spins (dict): Spin values for a subset of the variables in Theta.
        break_aux_symmetry (bool, optional): Default True. If True, break
            the aux variable symmetry by setting all aux variable to 1
            for one of the feasible configurations. If the energy ranges
            are not symmetric then this can make finding models impossible.

    Returns:
        Formula for the exact energy of Theta with spins fixed.
    """
    # Work on a copy so the stored theta is never mutated by fixing spins.
    subtheta = self.theta.copy()
    subtheta.fix_variables(spins)
    # we need aux variables
    # Each call gets a fresh, uniquely-numbered set of aux variables.
    av = next(self._auxvar_counter)
    auxvars = {v: Symbol('aux{}_{}'.format(av, v), BOOL) for v in subtheta.linear}
    if break_aux_symmetry and av == 0:
        # without loss of generality, we can assume that the aux variables are all
        # spin-up for one configuration
        self.assertions.update(set(auxvars.values()))
    trees = self._trees
    if not trees:
        # if there are no variables to eliminate, then the offset of
        # subtheta is the exact value and we can just return it
        assert not subtheta.linear and not subtheta.quadratic
        return subtheta.offset
    energy = Plus(self.message(trees, {}, subtheta, auxvars), subtheta.offset)
    return energy
|
A formula for the exact energy of Theta with spins fixed.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
break_aux_symmetry (bool, optional): Default True. If True, break
the aux variable symmetry by setting all aux variable to 1
for one of the feasible configurations. If the energy ranges
are not symmetric then this can make finding models impossible.
Returns:
Formula for the exact energy of Theta with spins fixed.
|
def density(pressure, temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
    r"""Calculate density.

    This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
    The implementation uses the formula outlined in [Hobbs2006]_ pg.67.

    Parameters
    ----------
    temperature: `pint.Quantity`
        The temperature
    pressure: `pint.Quantity`
        Total atmospheric pressure
    mixing : `pint.Quantity`
        dimensionless mass mixing ratio
    molecular_weight_ratio : `pint.Quantity` or float, optional
        The ratio of the molecular weight of the constituent gas to that assumed
        for air. Defaults to the ratio for water vapor to dry air.
        (:math:`\epsilon\approx0.622`).

    Returns
    -------
    `pint.Quantity`
        The corresponding density of the parcel

    Notes
    -----
    .. math:: \rho = \frac{p}{R_dT_v}
    """
    # Virtual temperature folds the moisture contribution into the
    # temperature term, so the dry-air gas constant Rd can be used.
    virttemp = virtual_temperature(temperature, mixing, molecular_weight_ratio)
    return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3)
|
r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
temperature: `pint.Quantity`
The temperature
pressure: `pint.Quantity`
Total atmospheric pressure
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
|
def file_to_str(fname):
    """
    Read a file into a string

    PRE: fname is a small file (to avoid hogging memory and its discontents)

    :param fname: Path of the file to read.
    :return str: The file contents with universal newline translation.
    """
    # BUG FIX: the 'U' open mode was deprecated and removed in Python 3.11
    # (raises ValueError). Default text mode already performs the same
    # universal-newline translation.
    with open(fname, 'r') as fd:
        return fd.read()
|
Read a file into a string
PRE: fname is a small file (to avoid hogging memory and its discontents)
|
def prioritize():
    """
    Yield the messages in the queue in the order they should be sent.

    High-priority messages are drained first (oldest first); one
    medium-priority message is yielded at a time whenever no high-priority
    ones remain; low-priority messages only when both other queues are
    empty. The outer loop exits once no non-deferred messages remain.

    NOTE(review): every ``.count()`` call issues a fresh query, so this
    generator re-polls the database continuously while messages exist --
    appears intentional for a long-running send loop; confirm.
    """
    while True:
        hp_qs = Message.objects.high_priority().using('default')
        mp_qs = Message.objects.medium_priority().using('default')
        lp_qs = Message.objects.low_priority().using('default')
        while hp_qs.count() or mp_qs.count():
            while hp_qs.count():
                for message in hp_qs.order_by("when_added"):
                    yield message
            # Re-check high priority between every medium-priority message.
            while hp_qs.count() == 0 and mp_qs.count():
                yield mp_qs.order_by("when_added")[0]
        while hp_qs.count() == 0 and mp_qs.count() == 0 and lp_qs.count():
            yield lp_qs.order_by("when_added")[0]
        if Message.objects.non_deferred().using('default').count() == 0:
            break
|
Yield the messages in the queue in the order they should be sent.
|
def get(self, timeout=None):
    """Retrieve results from all the output tubes.

    :param timeout: Optional timeout forwarded to each ``tube.get`` call.
    :return: ``(valid, result)`` tuple when ``timeout`` is set, otherwise
        the result alone.

    NOTE(review): ``result`` is overwritten on each loop iteration, so only
    the final tube's value is actually returned -- the loop still drains
    every tube; confirm this is the intended semantics for multiple tubes.
    """
    valid = False
    result = None
    for tube in self._output_tubes:
        if timeout:
            valid, result = tube.get(timeout)
            if valid:
                # Unwrap the single-element payload on a valid timed get.
                result = result[0]
        else:
            result = tube.get()[0]
    if timeout:
        return valid, result
    return result
|
Retrieve results from all the output tubes.
|
def get(method, hmc, uri, uri_parms, logon_required):
    """Operation: List Load Activation Profiles (requires classic mode).

    :param method: HTTP method name (used in error reporting).
    :param hmc: Mocked HMC holding the CPC resources.
    :param uri: Request URI (used in error reporting).
    :param uri_parms: Parsed URI parameters; [0] is the CPC oid, [1] the
        raw query string.
    :param logon_required: Unused here; part of the common handler signature.
    :return dict: {'load-activation-profiles': [...]} with only the short
        property set ('element-uri', 'name') per profile.
    """
    cpc_oid = uri_parms[0]
    query_str = uri_parms[1]
    try:
        cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
    except KeyError:
        raise InvalidResourceError(method, uri)
    assert not cpc.dpm_enabled  # TODO: Verify error or empty result?
    result_profiles = []
    filter_args = parse_query_parms(method, uri, query_str)
    for profile in cpc.load_activation_profiles.list(filter_args):
        result_profile = {}
        # List operations return only the short property set.
        for prop in profile.properties:
            if prop in ('element-uri', 'name'):
                result_profile[prop] = profile.properties[prop]
        result_profiles.append(result_profile)
    return {'load-activation-profiles': result_profiles}
|
Operation: List Load Activation Profiles (requires classic mode).
|
def is_lop(ch, block_op_pairs_dict=get_block_op_pairs('{}[]()')):
    '''
    Return True when *ch* is a left (opening) block operator.

    # is_lop('{',block_op_pairs_dict)  -> True
    # is_lop('[',block_op_pairs_dict)  -> True
    # is_lop('}',block_op_pairs_dict)  -> False
    # is_lop(']',block_op_pairs_dict)  -> False
    # is_lop('a',block_op_pairs_dict)  -> False
    '''
    # Keys are 1-based indexes; element [0] of each pair is the opener.
    pair_count = len(block_op_pairs_dict)
    return any(block_op_pairs_dict[idx][0] == ch
               for idx in range(1, pair_count + 1))
|
# is_lop('{',block_op_pairs_dict)
# is_lop('[',block_op_pairs_dict)
# is_lop('}',block_op_pairs_dict)
# is_lop(']',block_op_pairs_dict)
# is_lop('a',block_op_pairs_dict)
|
def query(*args, **kwargs):
    '''
    Query the node for specific information.

    Parameters:

    * **scope**: Specify scope of the query.

       * **System**: Return system data.

       * **Software**: Return software information.

       * **Services**: Return known services.

       * **Identity**: Return user accounts information for this system.

          accounts
            Can be either 'local', 'remote' or 'all' (equal to "local,remote").
            Remote accounts cannot be resolved on all systems, but only
            those, which supports 'passwd -S -a'.

          disabled
            True (or False, default) to return only disabled accounts.

    * **payload**: Payload scope parameters:

       filter
         Include only results which path starts from the filter string.

       time
         Display time in Unix ticks or format according to the configured TZ (default)
         Values: ticks, tz (default)

       size
         Format size. Values: B, KB, MB, GB

       type
         Include payload type.
         Values (comma-separated): directory (or dir), link, file (default)
         Example (returns everything): type=directory,link,file

       owners
         Resolve UID/GID to an actual names or leave them numeric (default).
         Values: name (default), id

       brief
         Return just a list of payload elements, if True. Default: False.

    * **all**: Return all information (default).

    CLI Example:

    .. code-block:: bash

        salt '*' inspector.query scope=system
        salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
    '''
    # NOTE(review): `_` appears to lazily load the inspector submodule named
    # 'query' -- confirm against the module-level helper.
    query = _("query")
    try:
        return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
    except InspectorQueryException as ex:
        # Known query failures surface as standard execution errors.
        raise CommandExecutionError(ex)
    except Exception as ex:
        # Log the underlying failure before re-raising.
        log.error(_get_error_message(ex))
        raise Exception(ex)
|
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
|
def get_lecture_filename(combined_section_lectures_nums,
                         section_dir,
                         secnum,
                         lecnum,
                         lecname,
                         title,
                         fmt):
    """
    Build the destination filename for a lecture resource.

    @param combined_section_lectures_nums: Flag that indicates whether
        section lectures should have combined numbering.
    @type combined_section_lectures_nums: bool

    @param section_dir: Path to current section directory.
    @type section_dir: str

    @param secnum: Section number.
    @type secnum: int

    @param lecnum: Lecture number.
    @type lecnum: int

    @param lecname: Lecture name.
    @type lecname: str

    @param title: Resource title.
    @type title: str

    @param fmt: Format of the resource (pdf, csv, etc)
    @type fmt: str

    @return: Lecture file name.
    @rtype: str
    """
    # FIXME: this is a quick and dirty solution to Filename too long
    # problem. We need to think of a more general way to solve this
    # issue.
    fmt = fmt[:FORMAT_MAX_LENGTH]
    title = title[:TITLE_MAX_LENGTH]
    # Numbers are 0-based internally; filenames use 1-based numbering.
    if combined_section_lectures_nums:
        basename = format_combine_number_resource(
            secnum + 1, lecnum + 1, lecname, title, fmt)
    else:
        basename = format_resource(lecnum + 1, lecname, title, fmt)
    return os.path.join(section_dir, basename)
|
Prepare a destination lecture filename.
@param combined_section_lectures_nums: Flag that indicates whether
section lectures should have combined numbering.
@type combined_section_lectures_nums: bool
@param section_dir: Path to current section directory.
@type section_dir: str
@param secnum: Section number.
@type secnum: int
@param lecnum: Lecture number.
@type lecnum: int
@param lecname: Lecture name.
@type lecname: str
@param title: Resource title.
@type title: str
@param fmt: Format of the resource (pdf, csv, etc)
@type fmt: str
@return: Lecture file name.
@rtype: str
|
def _get_block_publisher(self, state_hash):
    """Returns the block publisher based on the consensus module set by the
    "sawtooth_settings" transaction family.

    Args:
        state_hash (str): The current state root hash for reading settings.

    Returns:
        The configured consensus module's BlockPublisher.

    Raises:
        InvalidGenesisStateError: if any errors occur getting the
            BlockPublisher.
    """
    state_view = self._state_view_factory.create_view(state_hash)
    try:
        # Stub publisher: batch publication must be rejected during genesis.
        class BatchPublisher:
            def send(self, transactions):
                # Consensus implementations are expected to have handling
                # in place for genesis operation. This should includes
                # adding any authorization and registrations required
                # for the genesis node to the Genesis Batch list and
                # detecting validation of the Genesis Block and handle it
                # correctly. Batch publication is not allowed during
                # genesis operation since there is no network to validate
                # the batch yet.
                raise InvalidGenesisConsensusError(
                    'Consensus cannot send transactions during genesis.')
        consensus = ConsensusFactory.get_configured_consensus_module(
            NULL_BLOCK_IDENTIFIER,
            state_view)
        return consensus.BlockPublisher(
            BlockCache(self._block_store),
            state_view_factory=self._state_view_factory,
            batch_publisher=BatchPublisher(),
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=self._identity_signer.get_public_key().as_hex())
    except UnknownConsensusModuleError as e:
        # Surface misconfiguration as a genesis-specific error.
        raise InvalidGenesisStateError(e)
|
Returns the block publisher based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
state_hash (str): The current state root hash for reading settings.
Raises:
InvalidGenesisStateError: if any errors occur getting the
BlockPublisher.
|
def output_args(f):
    """decorator for output-formatting args

    applied to %pxresult and %%px
    """
    # The shared output-formatting options for the px magics, in the
    # order they should be applied to the decorated function.
    px_options = [
        magic_arguments.argument('-r', action="store_const", dest='groupby',
            const='order',
            help="collate outputs in order (same as group-outputs=order)"
        ),
        magic_arguments.argument('-e', action="store_const", dest='groupby',
            const='engine',
            help="group outputs by engine (same as group-outputs=engine)"
        ),
        magic_arguments.argument('--group-outputs', dest='groupby', type=str,
            choices=['engine', 'order', 'type'], default='type',
            help="""Group the outputs in a particular way.
        Choices are:
        type: group outputs of all engines by type (stdout, stderr, displaypub, etc.).
        engine: display all output for each engine together.
        order: like type, but individual displaypub output from each engine is collated.
          For example, if multiple plots are generated by each engine, the first
          figure of each engine will be displayed, then the second of each, etc.
        """
        ),
        magic_arguments.argument('-o', '--out', dest='save_name', type=str,
            help="""store the AsyncResult object for this computation
                 in the global namespace under this name.
                 """
        ),
    ]
    # Stack each option onto f, consuming the list front-to-back so the
    # application order matches the declaration order above.
    while px_options:
        f = px_options.pop(0)(f)
    return f
|
decorator for output-formatting args
applied to %pxresult and %%px
|
def _Connect(self):
    """Connects to an Elasticsearch server.

    Builds the host specification from the configured host, port and
    optional URL prefix, then creates the Elasticsearch client using
    optional HTTP basic authentication and the configured SSL settings.
    """
    elastic_host = {'host': self._host, 'port': self._port}
    if self._url_prefix:
      elastic_host['url_prefix'] = self._url_prefix

    # Only pass credentials when a username was configured.
    elastic_http_auth = None
    if self._username is not None:
      elastic_http_auth = (self._username, self._password)

    self._client = elasticsearch.Elasticsearch(
        [elastic_host],
        http_auth=elastic_http_auth,
        use_ssl=self._use_ssl,
        ca_certs=self._ca_certs
    )

    # Note: trailing space added to the first fragment; the adjacent
    # string literals concatenate, and without it the message rendered
    # as "port: 9200URL prefix ...".
    logger.debug(
        ('Connected to Elasticsearch server: {0:s} port: {1:d} '
         'URL prefix {2!s}.').format(self._host, self._port, self._url_prefix))
|
Connects to an Elasticsearch server.
|
def draw_special_char_key(self, surface, key):
    """Default drawing method for special char key. Drawn as character key.

    :param surface: Surface background should be drawn in.
    :param key: Target key to be drawn.
    """
    # A special-char key renders like a character key with a fixed
    # label: 'Ab' while activated, '#' otherwise.
    key.value = u'Ab' if key.is_activated() else u'#'
    self.draw_character_key(surface, key, True)
|
Default drawing method for special char key. Drawn as character key.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
|
def rows(self):
    """Iterate over all of the rows.

    For each section yields: a blank spacer row (skipped for the 'Root'
    section), a 'Section' header row with the section value and property
    names, then one row per term with the 'root.' prefix stripped and
    the term title-cased.
    """
    for s_name, s in self.sections.items():
        # Yield the section header
        if s.name != 'Root':
            yield ['']  # Unnecessary, but makes for nice formatting. Should actually be done just before write
        yield ['Section', s.value] + s.property_names
        # Yield all of the rows for terms in the section
        for row in s.rows:
            term, value = row
            term = term.replace('root.', '').title()
            try:
                yield [term] + value
            except TypeError:
                # value is a scalar (not list-like), so list
                # concatenation fails; wrap it in a list instead.
                yield [term] + [value]
|
Iterate over all of the rows
|
def results(self, Pc):
    r"""
    Determine which pores and throats are filled with invading phase at
    the specified capillary pressure, and build arrays indicating the
    occupancy status of each pore and throat at that pressure.

    Parameters
    ----------
    Pc : scalar
        The capillary pressure for which an invading phase configuration
        is desired.

    Returns
    -------
    A dictionary describing the distribution of the invading phase at
    the specified capillary pressure, containing:

    **'pore.occupancy'** : A value between 0 and 1 indicating the
    fractional volume of each pore that is invaded.  If no late pore
    filling model was applied, then this will only be integer values
    (either filled or not).

    **'throat.occupancy'** : The same as 'pore.occupancy' but for throats.

    This dictionary can be passed directly to the ``update`` method of
    the *Phase* object, after which models or algorithms can access the
    values.
    """
    occupancy = {}
    # An element counts as invaded once its recorded invasion pressure
    # has been reached (<= Pc); cast the boolean mask to float so it
    # can be used directly as a fractional occupancy.
    for element in ['pore', 'throat']:
        invaded = self[element + '.invasion_pressure'] <= Pc
        occupancy[element + '.occupancy'] = sp.array(invaded, dtype=float)
    return occupancy
|
This method determines which pores and throats are filled with invading
phase at the specified capillary pressure, and creates several arrays
indicating the occupancy status of each pore and throat for the given
pressure.
Parameters
----------
Pc : scalar
The capillary pressure for which an invading phase configuration
is desired.
Returns
-------
A dictionary containing an assortment of data about distribution
of the invading phase at the specified capillary pressure. The data
include:
**'pore.occupancy'** : A value between 0 and 1 indicating the
fractional volume of each pore that is invaded. If no late pore
filling model was applied, then this will only be integer values
(either filled or not).
**'throat.occupancy'** : The same as 'pore.occupancy' but for throats.
This dictionary can be passed directly to the ``update`` method of
the *Phase* object. These values can then be accessed by models
or algorithms.
|
def convert_directory_2_to_3(meas_fname="magic_measurements.txt", input_dir=".",
                             output_dir=".", meas_only=False, data_model=None):
    """
    Convert 2.0 measurements file into 3.0 measurements file.
    Merge and convert specimen, sample, site, and location data.
    Also translates criteria data.

    Parameters
    ----------
    meas_fname : name of measurement file (do not include full path,
        default is "magic_measurements.txt")
    input_dir : name of input directory (default is ".")
    output_dir : name of output directory (default is ".")
    meas_only : boolean, convert only measurement data (default is False)
    data_model : data_model3.DataModel object (default is None)

    Returns
    ---------
    NewMeas : 3.0 measurements data (output of pmag.convert_items)
    upgraded : list of files successfully upgraded to 3.0
    no_upgrade: list of 2.5 files not upgraded to 3.0

    Returns (False, False, False) if the measurements file does not exist.
    """
    # Per-type mappings from 2.5 column names to 3.0 column names.
    convert = {'specimens': map_magic.spec_magic2_2_magic3_map,
               'samples': map_magic.samp_magic2_2_magic3_map,
               'sites': map_magic.site_magic2_2_magic3_map,
               'locations': map_magic.loc_magic2_2_magic3_map,
               'ages': map_magic.age_magic2_2_magic3_map}
    full_name = os.path.join(input_dir, meas_fname)
    if not os.path.exists(full_name):
        print("-W- {} is not a file".format(full_name))
        return False, False, False
    # read in data model 2.5 measurements file
    data2, filetype = magic_read(full_name)
    # convert list of dicts to 3.0
    NewMeas = convert_items(data2, map_magic.meas_magic2_2_magic3_map)
    # write 3.0 output to file
    ofile = os.path.join(output_dir, 'measurements.txt')
    magic_write(ofile, NewMeas, 'measurements')
    upgraded = []
    if os.path.exists(ofile):
        print("-I- 3.0 format measurements file was successfully created: {}".format(ofile))
        upgraded.append("measurements.txt")
    else:
        print("-W- 3.0 format measurements file could not be created")
    # convert the remaining 2.5 files unless only measurements were requested
    no_upgrade = []
    if not meas_only:
        # try to convert specimens, samples, sites, & locations
        for dtype in ['specimens', 'samples', 'sites', 'locations', 'ages']:
            mapping = convert[dtype]
            res = convert_and_combine_2_to_3(
                dtype, mapping, input_dir, output_dir, data_model)
            if res:
                upgraded.append(res)
        # try to upgrade criteria file
        if os.path.exists(os.path.join(input_dir, 'pmag_criteria.txt')):
            crit_file = convert_criteria_file_2_to_3(input_dir=input_dir,
                                                     output_dir=output_dir,
                                                     data_model=data_model)[0]
            if crit_file:
                upgraded.append(crit_file)
            else:
                no_upgrade.append("pmag_criteria.txt")
        # create list of all un-upgradeable files
        for fname in os.listdir(input_dir):
            # files already produced/consumed by the 3.0 conversion are fine
            if fname in ['measurements.txt', 'specimens.txt', 'samples.txt',
                         'sites.txt', 'locations.txt']:
                continue
            elif 'rmag' in fname:
                no_upgrade.append(fname)
            elif fname in ['pmag_results.txt', 'er_synthetics.txt', 'er_images.txt',
                           'er_plots.txt']:
                no_upgrade.append(fname)
    return NewMeas, upgraded, no_upgrade
|
Convert 2.0 measurements file into 3.0 measurements file.
Merge and convert specimen, sample, site, and location data.
Also translates criteria data.
Parameters
----------
meas_fname : name of measurement file (do not include full path,
default is "magic_measurements.txt")
input_dir : name of input directory (default is ".")
output_dir : name of output directory (default is ".")
meas_only : boolean, convert only measurement data (default is False)
data_model : data_model3.DataModel object (default is None)
Returns
---------
NewMeas : 3.0 measurements data (output of pmag.convert_items)
upgraded : list of files successfully upgraded to 3.0
no_upgrade: list of 2.5 files not upgraded to 3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.