def startup_config_file(self):
"""
Returns the startup-config file for this IOU VM.
:returns: path to config file. None if the file doesn't exist
"""
path = os.path.join(self.working_dir, 'startup-config.cfg')
if os.path.exists(path):
return path
else:
return None
def parse_clusterflow_runfiles(self, f):
""" Parse run files generated by Cluster Flow.
Currently gets pipeline IDs and associated steps."""
data = dict()
in_comment = False
seen_pipeline = False
cf_file = False
for l in f['f']:
l = l.rstrip()
# Check that this is from Cluster Flow
if 'Cluster Flow' in l:
cf_file = True
# Header
if l.startswith('Pipeline: '):
data['pipeline_name'] = l[10:]
if l.startswith('Pipeline ID: '):
data['pipeline_id'] = l[13:]
if l.startswith('Created at '):
data['pipeline_start'] = l[11:]
# Config settings
if l.startswith('@'):
s = l.split(None, 1)
key = s[0].replace('@', '').strip()
            # s has at most two elements because of split(None, 1); keys
            # without a value fall through to the IndexError branch
            try:
                data[key] = s[1]
            except IndexError:
                data[key] = True
# Comments
if l.startswith('/*'):
in_comment = True
if l.startswith('*/'):
in_comment = False
if in_comment:
if 'comment' not in data:
data['comment'] = ''
data['comment'] += l+"\n"
# Pipeline steps
if l.strip().startswith('#'):
if 'pipeline_steps' not in data:
data['pipeline_steps'] = []
data['pipeline_steps'].append(l)
seen_pipeline = True
# Step output files
elif seen_pipeline:
s = l.split("\t")
if len(s) > 1:
if 'files' not in data:
data['files'] = OrderedDict()
if s[0] not in data['files']:
data['files'][s[0]] = []
data['files'][s[0]].append(s[1:])
# Parse the start date
dt = None
if 'pipeline_id' in data:
s = data['pipeline_id'].split('_')
dt = datetime.datetime.fromtimestamp(int(s[-1]))
elif 'pipeline_start' in data:
dt_r = re.match(r'(\d{2}):(\d{2}), (\d{2})-(\d{2})-(\d{4})', data['pipeline_start'])
if dt_r:
dt = datetime.datetime(
int(dt_r.group(5)), # year
int(dt_r.group(4)), # month
int(dt_r.group(3)), # day
int(dt_r.group(1)), # hour
int(dt_r.group(2)) # minute
)
# Not a Cluster Flow file (eg. Nextflow .run file)
if not cf_file:
return None
if dt is not None:
data['pipeline_start_dateparts'] = {
'year': dt.year,
'month': dt.month,
'day': dt.day,
'hour': dt.hour,
'minute': dt.minute,
'second': dt.second,
'microsecond': dt.microsecond,
'timestamp': time.mktime(dt.timetuple())
}
# Cluster Flow v0.4 and before did not print the pipeline ID in run files
# Try to guess - will be wrong as no microsecond info, but hopefully unique
# and reproducible for other run files
if 'pipeline_id' not in data:
if 'pipeline_name' in data and 'pipeline_start_dateparts' in data:
log.debug('Trying to guess pipeline ID for file "{}"'.format(f['fn']))
data['pipeline_id'] = 'cf_{}_{}'.format(data['pipeline_name'], data['pipeline_start_dateparts']['timestamp'])
return data
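# A minimal usage sketch for parse_clusterflow_runfiles, assuming the MultiQC
# convention visible above: each file is a dict carrying its lines under 'f'
# and its name under 'fn'. The values below are made up for illustration.
example_runfile = {
    'fn': 'clusterflow_example.run',
    'f': [
        'Cluster Flow run file',
        'Pipeline: sra_bowtie',
        'Pipeline ID: sra_bowtie_1469119400',
        '@genome GRCh38',
        '#sra_dump',
        '#\tbowtie',
    ],
}
# parsed = self.parse_clusterflow_runfiles(example_runfile)
# parsed['pipeline_name']  == 'sra_bowtie'
# parsed['genome']         == 'GRCh38'
# parsed['pipeline_steps'] == ['#sra_dump', '#\tbowtie']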
def getPrice(self, searches):
""" Prices all quest items and returns result
Searches the shop wizard x times (x being number given in searches) for each
quest item and finds the lowest price for each item. Combines all item prices
and sets KitchenQuest.npSpent to the final value. Returns whether or not this
process was successful.
Parameters:
searches (int) -- The number of times to search the Shop Wizard for each quest item
Returns
bool - True if successful, otherwise False
"""
totalPrice = 0
for item in self.items:
res = ShopWizard.priceItem(self.usr, item.name, searches, ShopWizard.RETLOW)
if not res:
self.failedItem = item.name
return False
item.price, item.owner, item.id = res
totalPrice += item.price
self.npSpent = totalPrice
return True
async def get_novel(self, term, hide_nsfw=False):
"""
    If term is an ID, this will return details for that specific ID. If it's a string, it will return the details of the first search result for that term.
Returned Dictionary Has the following structure:
Please note, if it says list or dict, it means the python types.
Indentation indicates level. So English is ['Titles']['English']
    'Titles' - Contains all the titles found for the visual novel
'English' - English title of the novel
'Alt' - Alternative title (Usually the Japanese one, but other languages exist)
'Aliases' - A list of str that define the aliases as given in VNDB.
'Img' - Link to the Image shown on VNDB for that Visual Novel
'Length' - Length given by VNDB
'Developers' - A list containing the Developers of the VN.
'Publishers' - A list containing the Publishers of the VN.
'Tags' - Contains 3 lists of different tag categories
'Content' - List of tags that have to do with the story's content as defined by VNDB. Ex: Edo Era
'Technology' - List of tags that have to do with the VN's technology. Ex: Protagonist with a Face (Wew Lad, 21st century)
'Erotic' - List of tags that have to do with the VN's sexual content. Ex: Tentacles
'Releases' - A list of dictionaries. They have the following format.
'Date' - Date VNDB lists for release
'Ages' - Age group appropriate for as determined on VNDB
'Platform' - Release Platform
'Name' - The name for this particular Release
'ID' - The id for this release, also doubles as the link if you append https://vndb.org/ to it
'Description' - Contains novel description text if there is any.
'ID' - The id for this novel, also doubles as the link if you append https://vndb.org/ to it
:param term: id or name to get details of.
:param hide_nsfw: bool if 'Img' should filter links flagged as NSFW or not. (no reason to be kwargs...yet)
:return dict: Dictionary with the parsed results of a novel
"""
if not term.isdigit() and not term.startswith('v'):
try:
vnid = await self.search_vndb('v', term)
vnid = vnid[0]['id']
except VNDBOneResult as e:
vnid = e.vnid
else:
vnid = str(term)
if not vnid.startswith('v'):
vnid = 'v' + vnid
async with self.session.get(self.base_url + "/{}".format(vnid), headers=self.headers) as response:
if response.status == 404:
raise aiohttp.HttpBadRequest("VNDB reported that there is no data for ID {}".format(vnid))
text = await response.text()
soup = BeautifulSoup(text, 'lxml')
data = {'titles': {'english': [], 'alt': [], 'aliases': []}, 'img': None, 'length': None, 'developers': [], 'publishers': [], 'tags': {}, 'releases': {}, 'id': vnid}
data['titles']['english'] = soup.find_all('div', class_='mainbox')[0].h1.string
try:
data['titles']['alt'] = soup.find_all('h2', class_='alttitle')[0].string
except IndexError:
data['titles']['alt'] = None
try:
imgdiv = soup.find_all('div', class_='vnimg')[0]
if not (hide_nsfw and 'class' in imgdiv.p.attrs):
data['img'] = 'https:' + imgdiv.img.get('src')
except AttributeError:
pass
for item in soup.find_all('tr'):
if 'class' in item.attrs or len(list(item.children)) == 1:
continue
if item.td.string == 'Aliases':
tlist = []
for alias in list(item.children)[1:]:
tlist.append(alias.string)
data['titles']['aliases'] = tlist
elif item.td.string == 'Length':
data['length'] = list(item.children)[1].string
        elif item.td.string == 'Developer':
            tl = []
            # use a separate loop variable so the outer 'item' is not clobbered
            for child in list(list(item.children)[1].children):
                if isinstance(child, NavigableString):
                    continue
                if 'href' in child.attrs:
                    tl.append(child.string)
            data['developers'] = tl
            del tl
        elif item.td.string == 'Publishers':
            tl = []
            for child in list(list(item.children)[1].children):
                if isinstance(child, NavigableString):
                    continue
                if 'href' in child.attrs:
                    tl.append(child.string)
            data['publishers'] = tl
conttags = []
techtags = []
erotags = []
test = soup.find('div', attrs={'id': 'vntags'})
if test:
for item in list(test.children):
if isinstance(item, NavigableString):
continue
if 'class' not in item.attrs:
continue
if 'cont' in " ".join(item.get('class')):
conttags.append(item.a.string)
if 'tech' in " ".join(item.get('class')):
techtags.append(item.a.string)
if 'ero' in " ".join(item.get('class')):
erotags.append(item.a.string)
data['tags']['content'] = conttags if len(conttags) else None
data['tags']['technology'] = techtags if len(techtags) else None
data['tags']['erotic'] = erotags if len(erotags) else None
del conttags
del techtags
del erotags
releases = []
cur_lang = None
for item in list(soup.find('div', class_='mainbox releases').table.children):
if isinstance(item, NavigableString):
continue
if 'class' in item.attrs:
if cur_lang is None:
cur_lang = item.td.abbr.get('title')
else:
data['releases'][cur_lang] = releases
releases = []
cur_lang = item.td.abbr.get('title')
else:
temp_rel = {'date': 0, 'ages': 0, 'platform': 0, 'name': 0, 'id': 0}
children = list(item.children)
temp_rel['date'] = children[0].string
temp_rel['ages'] = children[1].string
temp_rel['platform'] = children[2].abbr.get('title')
temp_rel['name'] = children[3].a.string
temp_rel['id'] = children[3].a.get('href')[1:]
del children
releases.append(temp_rel)
del temp_rel
if len(releases) > 0 and cur_lang is not None:
data['releases'][cur_lang] = releases
del releases
del cur_lang
desc = ""
for item in list(soup.find_all('td', class_='vndesc')[0].children)[1].contents:
if not isinstance(item, NavigableString):
continue
if item.startswith('['):
continue
if item.endswith(']'):
continue
desc += item.string + "\n"
data['description'] = desc
return data
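# get_novel is a coroutine, so it must be awaited. A hedged usage sketch:
# 'client' is a hypothetical instance of the class that defines get_novel;
# only the method's signature and return shape come from the code above.
import asyncio

async def _demo(client):
    novel = await client.get_novel('Steins;Gate', hide_nsfw=True)
    print(novel['titles']['english'])
    print('https://vndb.org/' + novel['id'])  # the ID doubles as the URL path

# asyncio.get_event_loop().run_until_complete(_demo(client))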
def id(self):
""" Unique identifier of user object"""
return sa.Column(sa.Integer, primary_key=True, autoincrement=True)
def load_weight(weight_file: str,
weight_name: str,
weight_file_cache: Dict[str, Dict]) -> mx.nd.NDArray:
"""
    Load weight from a file or the cache if it was loaded before.
:param weight_file: Weight file.
:param weight_name: Weight name.
:param weight_file_cache: Cache of loaded files.
:return: Loaded weight.
"""
logger.info('Loading input weight file: %s', weight_file)
if weight_file.endswith(".npy"):
return np.load(weight_file)
elif weight_file.endswith(".npz"):
if weight_file not in weight_file_cache:
weight_file_cache[weight_file] = np.load(weight_file)
return weight_file_cache[weight_file][weight_name]
else:
if weight_file not in weight_file_cache:
weight_file_cache[weight_file] = mx.nd.load(weight_file)
return weight_file_cache[weight_file]['arg:%s' % weight_name].asnumpy()
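# A short usage sketch for load_weight: the cache dict is shared across calls
# so each .npz or MXNet .params file is only read from disk once. File and
# weight names here are hypothetical.
weight_cache = {}  # shared Dict[str, Dict] cache
# w_enc = load_weight('model.npz', 'encoder_weight', weight_cache)  # reads file
# w_dec = load_weight('model.npz', 'decoder_weight', weight_cache)  # cache hit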
def base_url(self):
"""Base URL for resolving resource URLs"""
if self.doc.package_url:
return self.doc.package_url
return self.doc._ref
def tag_clause_annotations(self):
"""Tag clause annotations in ``words`` layer.
Depends on morphological analysis.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__clause_segmenter is None:
self.__clause_segmenter = load_default_clausesegmenter()
return self.__clause_segmenter.tag(self)
def set_differentiable_objective(self):
"""Function that constructs minimization objective from dual variables."""
# Checking if graphs are already created
if self.vector_g is not None:
return
# Computing the scalar term
bias_sum = 0
for i in range(0, self.nn_params.num_hidden_layers):
bias_sum = bias_sum + tf.reduce_sum(
tf.multiply(self.nn_params.biases[i], self.lambda_pos[i + 1]))
lu_sum = 0
for i in range(0, self.nn_params.num_hidden_layers + 1):
lu_sum = lu_sum + tf.reduce_sum(
tf.multiply(tf.multiply(self.lower[i], self.upper[i]),
self.lambda_lu[i]))
self.scalar_f = -bias_sum - lu_sum + self.final_constant
# Computing the vector term
g_rows = []
for i in range(0, self.nn_params.num_hidden_layers):
if i > 0:
current_row = (self.lambda_neg[i] + self.lambda_pos[i] -
self.nn_params.forward_pass(self.lambda_pos[i+1],
i, is_transpose=True) +
tf.multiply(self.lower[i]+self.upper[i],
self.lambda_lu[i]) +
tf.multiply(self.lambda_quad[i],
self.nn_params.biases[i-1]))
else:
current_row = (-self.nn_params.forward_pass(self.lambda_pos[i+1],
i, is_transpose=True)
+ tf.multiply(self.lower[i]+self.upper[i],
self.lambda_lu[i]))
g_rows.append(current_row)
# Term for final linear term
g_rows.append((self.lambda_pos[self.nn_params.num_hidden_layers] +
self.lambda_neg[self.nn_params.num_hidden_layers] +
self.final_linear +
tf.multiply((self.lower[self.nn_params.num_hidden_layers]+
self.upper[self.nn_params.num_hidden_layers]),
self.lambda_lu[self.nn_params.num_hidden_layers])
+ tf.multiply(
self.lambda_quad[self.nn_params.num_hidden_layers],
self.nn_params.biases[
self.nn_params.num_hidden_layers-1])))
self.vector_g = tf.concat(g_rows, axis=0)
self.unconstrained_objective = self.scalar_f + 0.5 * self.nu
def draw_line(self, img, pixmapper, pt1, pt2, colour, linewidth):
'''draw a line on the image'''
pix1 = pixmapper(pt1)
pix2 = pixmapper(pt2)
(width, height) = image_shape(img)
(ret, pix1, pix2) = cv2.clipLine((0, 0, width, height), pix1, pix2)
if ret is False:
return
cv2.line(img, pix1, pix2, colour, linewidth)
cv2.circle(img, pix2, linewidth*2, colour)
def DeserializeExclusiveData(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Raises:
Exception: If the transaction type is incorrect or if there are no claims.
"""
self.Type = TransactionType.StateTransaction
self.Descriptors = reader.ReadSerializableArray('neo.Core.State.StateDescriptor.StateDescriptor')
def challenge(self,
shutit,
task_desc,
expect=None,
hints=None,
congratulations='OK',
failed='FAILED',
expect_type='exact',
challenge_type='command',
timeout=None,
check_exit=None,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
echo=True,
escape=False,
pause=1,
loglevel=logging.DEBUG,
follow_on_context=None,
difficulty=1.0,
reduction_per_minute=0.2,
reduction_per_reset=0,
reduction_per_hint=0.5,
grace_period=30,
new_stage=True,
final_stage=False,
num_stages=None):
"""Set the user a task to complete, success being determined by matching the output.
Either pass in regexp(s) desired from the output as a string or a list, or an md5sum of the output wanted.
@param follow_on_context On success, move to this context. A dict of information about that context.
context = the type of context, eg docker, bash
ok_container_name = if passed, send user to this container
reset_container_name = if resetting, send user to this container
@param challenge_type Behaviour of challenge made to user
command = check for output of single command
golf = user gets a pause point, and when leaving, command follow_on_context['check_command'] is run to check the output
"""
shutit = self.shutit
if new_stage and shutit.build['exam_object']:
if num_stages is None:
num_stages = shutit.build['exam_object'].num_stages
elif shutit.build['exam_object'].num_stages < 1:
shutit.build['exam_object'].num_stages = num_stages
elif shutit.build['exam_object'].num_stages > 0:
shutit.fail('Error! num_stages passed in should be None if already set in exam object (ie > 0)') # pragma: no cover
curr_stage = str(shutit.build['exam_object'].curr_stage)
if num_stages > 0:
task_desc = 80*'*' + '\n' + '* QUESTION ' + str(curr_stage) + '/' + str(num_stages) + '\n' + 80*'*' + '\n' + task_desc
else:
task_desc = 80*'*' + '\n' + '* QUESTION \n' + 80*'*' + '\n' + task_desc
shutit.build['exam_object'].new_stage(difficulty=difficulty,
reduction_per_minute=reduction_per_minute,
reduction_per_reset=reduction_per_reset,
reduction_per_hint=reduction_per_hint,
grace_period=grace_period)
# If this is an exam, then remove history.
self.send(ShutItSendSpec(self,
send=' history -c',
check_exit=False,
ignore_background=True))
# don't catch CTRL-C, pass it through.
shutit.build['ctrlc_passthrough'] = True
preserve_newline = False
skipped = False
if expect_type == 'regexp':
if isinstance(expect, str):
expect = [expect]
if not isinstance(expect, list):
shutit.fail('expect_regexps should be list') # pragma: no cover
elif expect_type == 'md5sum':
preserve_newline = True
elif expect_type == 'exact':
pass
else:
shutit.fail('Must pass either expect_regexps or md5sum in') # pragma: no cover
if hints:
shutit.build['pause_point_hints'] = hints
else:
shutit.build['pause_point_hints'] = []
if challenge_type == 'command':
help_text = shutit_util.colorise('32','''\nType 'help' or 'h' to get a hint, 'exit' to skip, 'shutitreset' to reset state.''')
ok = False
while not ok:
shutit.log(shutit_util.colorise('32','''\nChallenge!'''),transient=True, level=logging.INFO)
if hints:
shutit.log(shutit_util.colorise('32',help_text),transient=True, level=logging.INFO)
time.sleep(pause)
# TODO: bash path completion
send = shutit_util.get_input(task_desc + ' => ',color='31')
if not send or send.strip() == '':
continue
if send in ('help','h'):
if hints:
shutit.log(help_text,transient=True, level=logging.CRITICAL)
shutit.log(shutit_util.colorise('32',hints.pop()),transient=True, level=logging.CRITICAL)
else:
shutit.log(help_text,transient=True, level=logging.CRITICAL)
shutit.log(shutit_util.colorise('32','No hints left, sorry! CTRL-g to reset state, CTRL-s to skip this step, CTRL-] to submit for checking'),transient=True, level=logging.CRITICAL)
time.sleep(pause)
continue
if send == 'shutitreset':
self._challenge_done(shutit, result='reset',follow_on_context=follow_on_context,final_stage=False)
continue
if send == 'shutitquit':
self._challenge_done(shutit, result='reset',follow_on_context=follow_on_context,final_stage=True)
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if send == 'exit':
self._challenge_done(shutit, result='exited',follow_on_context=follow_on_context,final_stage=True)
shutit.build['pause_point_hints'] = []
return True
output = self.send_and_get_output(send,
timeout=timeout,
retry=1,
record_command=record_command,
echo=echo,
loglevel=loglevel,
fail_on_empty_before=False,
preserve_newline=preserve_newline)
md5sum_output = md5(output).hexdigest()
shutit.log('output: ' + output + ' is md5sum: ' + md5sum_output, level=logging.DEBUG)
if expect_type == 'md5sum':
output = md5sum_output
if output == expect:
ok = True
elif expect_type == 'exact':
if output == expect:
ok = True
elif expect_type == 'regexp':
for regexp in expect:
if shutit.match_string(output, regexp):
ok = True
break
if not ok and failed:
if shutit.build['exam_object']:
shutit.build['exam_object'].add_fail()
shutit.build['exam_object'].end_timer()
shutit.log('\n\n' + shutit_util.colorise('32','failed') + '\n',transient=True, level=logging.CRITICAL)
self._challenge_done(shutit, result='failed',final_stage=final_stage)
continue
elif challenge_type == 'golf':
# pause, and when done, it checks your working based on check_command.
ok = False
# hints
if hints:
# TODO: debug this, it doesn't work!
task_desc_new = task_desc + '\r\n\r\nHit CTRL-h for help, CTRL-g to reset state, CTRL-s to skip, CTRL-] to submit for checking'
else:
task_desc_new = '\r\n' + task_desc
while not ok:
if shutit.build['exam_object'] and new_stage:
shutit.build['exam_object'].start_timer()
# Set the new_stage to False, as we're in a loop that doesn't need to mark a new state.
new_stage = False
self.pause_point(shutit_util.colorise('31',task_desc_new),color='31')
if shutit_global.shutit_global_object.signal_id == 8:
if shutit.build['exam_object']:
shutit.build['exam_object'].add_hint()
if shutit.build['pause_point_hints']:
shutit.log(shutit_util.colorise('31','\r\n========= HINT ==========\r\n\r\n' + shutit.build['pause_point_hints'].pop(0)),transient=True, level=logging.CRITICAL)
else:
shutit.log(shutit_util.colorise('31','\r\n\r\n' + 'No hints available!'),transient=True, level=logging.CRITICAL)
time.sleep(1)
# clear the signal
shutit_global.shutit_global_object.signal_id = 0
continue
elif shutit_global.shutit_global_object.signal_id == 17:
# clear the signal and ignore CTRL-q
shutit_global.shutit_global_object.signal_id = 0
continue
elif shutit_global.shutit_global_object.signal_id == 7:
if shutit.build['exam_object']:
shutit.build['exam_object'].add_reset()
shutit.log(shutit_util.colorise('31','\r\n========= RESETTING STATE ==========\r\n\r\n'),transient=True, level=logging.CRITICAL)
self._challenge_done(shutit, result='reset', follow_on_context=follow_on_context,final_stage=False)
# clear the signal
shutit_global.shutit_global_object.signal_id = 0
# Get the new target child, which is the new 'self'
target_child = shutit.get_shutit_pexpect_session_from_id('target_child')
return target_child.challenge(
shutit,
task_desc=task_desc,
expect=expect,
hints=hints,
congratulations=congratulations,
failed=failed,
expect_type=expect_type,
challenge_type=challenge_type,
timeout=timeout,
check_exit=check_exit,
fail_on_empty_before=fail_on_empty_before,
record_command=record_command,
exit_values=exit_values,
echo=echo,
escape=escape,
pause=pause,
loglevel=loglevel,
follow_on_context=follow_on_context,
new_stage=False
)
elif shutit_global.shutit_global_object.signal_id == 19:
if shutit.build['exam_object']:
shutit.build['exam_object'].add_skip()
shutit.build['exam_object'].end_timer()
# Clear the signal.
shutit_global.shutit_global_object.signal_id = 0
# Skip test.
shutit.log('\r\nTest skipped... please wait', level=logging.CRITICAL,transient=True)
skipped=True
self._challenge_done(shutit, result='skipped',follow_on_context=follow_on_context,skipped=True,final_stage=final_stage)
return True
elif shutit_global.shutit_global_object.signal_id == 29:
# Clear the signal
shutit_global.shutit_global_object.signal_id = 0
else:
shutit.log('Signal not handled: ' + str(shutit_global.shutit_global_object.signal_id), level=logging.CRITICAL,transient=True)
shutit.log('\r\nState submitted, checking your work...', level=logging.CRITICAL,transient=True)
check_command = follow_on_context.get('check_command')
output = self.send_and_get_output(check_command,
timeout=timeout,
retry=1,
record_command=record_command,
echo=False,
loglevel=loglevel,
fail_on_empty_before=False,
preserve_newline=preserve_newline)
shutit.log('output: ' + output, level=logging.DEBUG)
md5sum_output = md5(output).hexdigest()
if expect_type == 'md5sum':
shutit.log('output: ' + output + ' is md5sum: ' + md5sum_output, level=logging.DEBUG)
output = md5sum_output
if output == expect:
ok = True
elif expect_type == 'exact':
if output == expect:
ok = True
elif expect_type == 'regexp':
for regexp in expect:
if shutit.match_string(output,regexp):
ok = True
break
if not ok and failed:
shutit.log('\r\n\n' + shutit_util.colorise('31','Failed! CTRL-g to reset state, CTRL-h for a hint, CTRL-] to submit for checking') + '\n',transient=True, level=logging.CRITICAL)
# No second chances if exam!
if shutit.build['exam_object']:
shutit.build['exam_object'].add_fail()
shutit.build['exam_object'].end_timer()
self._challenge_done(shutit, result='failed_test',follow_on_context=follow_on_context,final_stage=final_stage)
return False
else:
continue
else:
shutit.fail('Challenge type: ' + challenge_type + ' not supported') # pragma: no cover
self._challenge_done(shutit,
result='ok',
follow_on_context=follow_on_context,
congratulations=congratulations,
skipped=skipped,
final_stage=final_stage)
if shutit.build['exam_object']:
shutit.build['exam_object'].add_ok()
shutit.build['exam_object'].end_timer()
# Tidy up hints
shutit.build['pause_point_hints'] = []
return True
async def FindActionTagsByPrefix(self, prefixes):
'''
prefixes : typing.Sequence[str]
Returns -> typing.Sequence[~Entity]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Action',
request='FindActionTagsByPrefix',
version=3,
params=_params)
_params['prefixes'] = prefixes
reply = await self.rpc(msg)
return reply
def parse_response(fields, records):
"""Parse an API response into usable objects.
Args:
fields (list[str]): List of strings indicating the fields that
are represented in the records, in the order presented in
the records.::
[
'number1',
'number2',
'number3',
'first_name',
'last_name',
'company',
'street',
'city',
'state',
'zip',
]
records (list[dict]): A really crappy data structure representing
records as returned by Five9::
[
{
'values': {
'data': [
'8881234567',
None,
None,
'Dave',
'Lasley',
'LasLabs Inc',
None,
'Las Vegas',
'NV',
'89123',
]
}
}
]
Returns:
list[dict]: List of parsed records.
"""
data = [i['values']['data'] for i in records]
return [
{fields[idx]: row for idx, row in enumerate(d)}
for d in data
]
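# A self-contained example of parse_response using the structures documented
# above: 'fields' gives the column order, 'records' carries one row per entry.
example_fields = ['number1', 'first_name', 'last_name']
example_records = [
    {'values': {'data': ['8881234567', 'Dave', 'Lasley']}},
    {'values': {'data': ['8887654321', 'Ada', 'Lovelace']}},
]
# parse_response(example_fields, example_records) returns:
# [{'number1': '8881234567', 'first_name': 'Dave', 'last_name': 'Lasley'},
#  {'number1': '8887654321', 'first_name': 'Ada', 'last_name': 'Lovelace'}]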
def function(self,p):
"""
Return a square-wave grating (alternating black and white bars).
"""
return np.around(
0.5 +
0.5*np.sin(pi*(p.duty_cycle-0.5)) +
0.5*np.sin(p.frequency*2*pi*self.pattern_y + p.phase))
def is_repeated_suggestion(params, history):
"""
Parameters
----------
params : dict
Trial param set
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
Returns
-------
is_repeated_suggestion : bool
"""
    return any(params == hparams and hstatus == 'SUCCEEDED'
               for hparams, hscore, hstatus in history)
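# Example: only a SUCCEEDED evaluation with identical params counts as a repeat.
example_history = [
    ({'lr': 0.1}, 0.92, 'SUCCEEDED'),
    ({'lr': 0.5}, 0.40, 'FAILED'),
]
# is_repeated_suggestion({'lr': 0.1}, example_history)  -> True
# is_repeated_suggestion({'lr': 0.5}, example_history)  -> False (run failed)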
def get(cls):
"""
Use the masking function (``sigprocmask(2)`` or ``pthread_sigmask(3)``)
to obtain the mask of blocked signals
:returns:
A fresh :class:`sigprocmask` object.
The returned object behaves as it was constructed with the list of
currently blocked signals, ``setmask=False`` and as if the
:meth:`block()` was immediately called.
        That is, calling :meth:`unblock()` will cause those signals not to
be blocked anymore while calling :meth:`block()` will re-block them (if
they were unblocked after this method returns).
"""
mask = sigset_t()
sigemptyset(mask)
cls._do_mask(0, None, mask)
signals = []
for sig_num in range(1, NSIG):
if sigismember(mask, sig_num):
signals.append(sig_num)
self = cls(signals)
self._is_active = True
self._old_mask = mask
return self
def get_series_by_name(self, series_name):
"""Perform lookup for series
:param str series_name: series name found within filename
    :returns: tuple of (series result, error message); exactly one is None
    :rtype: tuple
"""
try:
return self.api.search_series(name=series_name), None
except exceptions.TVDBRequestException as err:
LOG.exception('search for series %s failed', series_name)
return None, _as_str(err)
def username(self, value):
"""gets/sets the username"""
if isinstance(value, str):
self._username = value
self._handler = None
def read_time(self, content):
""" Core function used to generate the read_time for content.
Parameters:
:param content: Instance of pelican.content.Content
Returns:
None
"""
if get_class_name(content) in self.content_type_supported:
# Exit if readtime is already set
if hasattr(content, 'readtime'):
return None
default_lang_conf = self.lang_settings['default']
lang_conf = self.lang_settings.get(content.lang, default_lang_conf)
avg_reading_wpm = lang_conf['wpm']
num_words = len(content._content.split())
# Floor division so we don't have to convert float -> int
minutes = num_words // avg_reading_wpm
# Get seconds to read, then subtract our minutes as seconds from
# the time to get remainder seconds
seconds = int((num_words / avg_reading_wpm * 60) - (minutes * 60))
minutes_str = self.pluralize(
minutes,
lang_conf['min_singular'],
lang_conf['min_plural']
)
seconds_str = self.pluralize(
seconds,
lang_conf['sec_singular'],
lang_conf['sec_plural']
)
content.readtime = minutes
content.readtime_string = minutes_str
content.readtime_with_seconds = (minutes, seconds,)
content.readtime_string_with_seconds = "{}, {}".format(
minutes_str, seconds_str)
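# A worked example of the arithmetic above, with 525 words at 200 wpm:
num_words, avg_reading_wpm = 525, 200
minutes = num_words // avg_reading_wpm                              # 2
seconds = int((num_words / avg_reading_wpm * 60) - (minutes * 60))  # 37
# so content.readtime == 2 and content.readtime_with_seconds == (2, 37)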
def _reference_rmvs(self, removes):
"""Prints all removed packages
"""
print("")
self.msg.template(78)
msg_pkg = "package"
if len(removes) > 1:
msg_pkg = "packages"
print("| Total {0} {1} removed".format(len(removes), msg_pkg))
self.msg.template(78)
for pkg in removes:
if not GetFromInstalled(pkg).name():
print("| Package {0} removed".format(pkg))
else:
print("| Package {0} not found".format(pkg))
self.msg.template(78)
print("")
def get_delivery_stats(api_key=None, secure=None, test=None, **request_args):
'''Get delivery stats for your Postmark account.
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`DeliveryStatsResponse`
'''
return _default_delivery_stats.get(api_key=api_key, secure=secure,
test=test, **request_args)
def tasks_by_tag(self, registry_tag):
""" Get tasks from registry by its tag
:param registry_tag: any hash-able object
:return: Return task (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is not True) or \
list of tasks
"""
if registry_tag not in self.__registry.keys():
return None
tasks = self.__registry[registry_tag]
return tasks if self.__multiple_tasks_per_tag__ is True else tasks[0]
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
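# A sketch of what unchanged() returns, assuming the surrounding differ object
# stores the pre-change dict in current_dict and nested diffs in _diffs (the
# values below are hypothetical):
# current_dict = {'a': 1, 'b': {'c': 2, 'd': 3}}
# _diffs       = {'b': {'d': {'old': 3, 'new': 4}}}
# differ.unchanged()  -> ['a', 'b.c']   # 'b.d' changed, so it is omitted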
def merge_svg_files(svg_file1, svg_file2, x_coord, y_coord, scale=1):
    Merge `svg_file2` into `svg_file1` at the given position (`x_coord`, `y_coord`) with the given `scale`.
Parameters
----------
svg_file1: str or svgutils svg document object
Path to a '.svg' file.
svg_file2: str or svgutils svg document object
Path to a '.svg' file.
x_coord: float
Horizontal axis position of the `svg_file2` content.
y_coord: float
Vertical axis position of the `svg_file2` content.
scale: float
Scale to apply to `svg_file2` content.
Returns
-------
`svg1` svgutils object with the content of 'svg_file2'
"""
svg1 = _check_svg_file(svg_file1)
svg2 = _check_svg_file(svg_file2)
svg2_root = svg2.getroot()
svg1.append([svg2_root])
svg2_root.moveto(x_coord, y_coord, scale=scale)
return svg1
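# A hedged usage sketch, assuming _check_svg_file accepts file paths and that
# the returned svgutils figure object is saved with .save(); the file names
# are hypothetical.
# merged = merge_svg_files('background.svg', 'inset.svg',
#                          x_coord=10, y_coord=20, scale=0.5)
# merged.save('merged.svg')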
def update_object_from_dictionary_representation(dictionary, instance):
"""Given a dictionary and an object instance, will set all object attributes equal to the dictionary's keys and
values. Assumes dictionary does not have any keys for which object does not have attributes
@type dictionary: dict
@param dictionary: Dictionary representation of the object
@param instance: Object instance to populate
@return: None
"""
for key, value in dictionary.iteritems():
if hasattr(instance, key):
setattr(instance, key, value)
return instance
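# A self-contained example: attributes already present on the instance are
# overwritten, while keys without a matching attribute are silently skipped.
class _Point(object):
    def __init__(self):
        self.x = 0
        self.y = 0

p = update_object_from_dictionary_representation({'x': 3, 'z': 9}, _Point())
# p.x == 3, p.y == 0, and 'z' is ignored since _Point has no such attribute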
def switch_toggle(self, device):
"""Toggles the current state of the given device"""
state = self.get_state(device)
if(state == '1'):
return self.switch_off(device)
elif(state == '0'):
return self.switch_on(device)
else:
return state
def requires_libsodium(func):
"""
Mark a function as requiring libsodium.
If no libsodium support is detected, a `RuntimeError` is thrown.
"""
@wraps(func)
def wrapper(*args, **kwargs):
libsodium_check()
return func(*args, **kwargs)
return wrapper
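# Decorator usage sketch; libsodium_check() (referenced above) is expected to
# raise RuntimeError when libsodium support is missing, so the wrapped body
# only runs when the library is available. 'encrypt' is a hypothetical name.
@requires_libsodium
def encrypt(data):
    pass  # code that relies on libsodium goes here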
def connect_head_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
"""connect_head_namespaced_pod_proxy_with_path # noqa: E501
connect HEAD requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_head_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_head_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
else:
(data) = self.connect_head_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
return data
def _write_scalar(self, name:str, scalar_value, iteration:int)->None:
"Writes single scalar value to Tensorboard."
tag = self.metrics_root + name
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
def ip_between(ip, start, finish):
"""Checks to see if IP is between start and finish"""
if is_IPv4Address(ip) and is_IPv4Address(start) and is_IPv4Address(finish):
return IPAddress(ip) in IPRange(start, finish)
else:
return False
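# Example calls, assuming the netaddr-style semantics of the helpers used
# above (IPRange is inclusive of both endpoints):
# ip_between('192.168.1.5', '192.168.1.1', '192.168.1.10')  -> True
# ip_between('10.0.0.1', '192.168.1.1', '192.168.1.10')     -> False
# ip_between('not-an-ip', '192.168.1.1', '192.168.1.10')    -> False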
def _get_npcap_config(param_key):
"""
Get a Npcap parameter matching key in the registry.
List:
AdminOnly, DefaultFilterSettings, DltNull, Dot11Adapters, Dot11Support
LoopbackAdapter, LoopbackSupport, NdisImPlatformBindingOptions, VlanSupport
WinPcapCompatible
"""
hkey = winreg.HKEY_LOCAL_MACHINE
node = r"SYSTEM\CurrentControlSet\Services\npcap\Parameters"
try:
key = winreg.OpenKey(hkey, node)
dot11_adapters, _ = winreg.QueryValueEx(key, param_key)
winreg.CloseKey(key)
except WindowsError:
return None
return dot11_adapters
def sentiment(self):
"""Return a tuple of form (polarity, subjectivity ) where polarity
is a float within the range [-1.0, 1.0] and subjectivity is a float
within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
very subjective.
:rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
"""
#: Enhancement Issue #2
#: adapted from 'textblob.en.sentiments.py'
#: Return type declaration
_RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
_polarity = 0
_subjectivity = 0
for s in self.sentences:
_polarity += s.polarity
_subjectivity += s.subjectivity
try:
polarity = _polarity / len(self.sentences)
except ZeroDivisionError:
polarity = 0.0
try:
subjectivity = _subjectivity / len(self.sentences)
except ZeroDivisionError:
subjectivity = 0.0
return _RETURN_TYPE(polarity, subjectivity)
def process(inFile,force=False,newpath=None, inmemory=False, num_cores=None,
headerlets=True, align_to_gaia=True):
""" Run astrodrizzle on input file/ASN table
using default values for astrodrizzle parameters.
"""
# We only need to import this package if a user run the task
import drizzlepac
from drizzlepac import processInput # used for creating new ASNs for _flc inputs
from stwcs import updatewcs
from drizzlepac import alignimages
# interpret envvar variable, if specified
if envvar_compute_name in os.environ:
val = os.environ[envvar_compute_name].lower()
if val not in envvar_bool_dict:
msg = "ERROR: invalid value for {}.".format(envvar_compute_name)
msg += " \n Valid Values: on, off, yes, no, true, false"
raise ValueError(msg)
align_to_gaia = envvar_bool_dict[val]
if envvar_new_apriori_name in os.environ:
# Reset ASTROMETRY_STEP_CONTROL based on this variable
# This provides backward-compatibility until ASTROMETRY_STEP_CONTROL
# gets removed entirely.
val = os.environ[envvar_new_apriori_name].lower()
if val not in envvar_dict:
msg = "ERROR: invalid value for {}.".format(envvar_new_apriori_name)
msg += " \n Valid Values: on, off, yes, no, true, false"
raise ValueError(msg)
os.environ[envvar_old_apriori_name] = envvar_dict[val]
if headerlets or align_to_gaia:
from stwcs.wcsutil import headerlet
# Open the input file
try:
# Make sure given filename is complete and exists...
inFilename = fileutil.buildRootname(inFile,ext=['.fits'])
if not os.path.exists(inFilename):
print("ERROR: Input file - %s - does not exist." % inFilename)
return
except TypeError:
print("ERROR: Inappropriate input file.")
return
#If newpath was specified, move all files to that directory for processing
if newpath:
orig_processing_dir = os.getcwd()
new_processing_dir = _createWorkingDir(newpath,inFilename)
_copyToNewWorkingDir(new_processing_dir,inFilename)
os.chdir(new_processing_dir)
# Initialize for later use...
_mname = None
_new_asn = None
_calfiles = []
# Identify WFPC2 inputs to account for differences in WFPC2 inputs
wfpc2_input = fits.getval(inFilename, 'instrume') == 'WFPC2'
cal_ext = None
# Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
if '_asn' in inFilename:
# We are working with an ASN table.
# Use asnutil code to extract filename
inFilename = _lowerAsn(inFilename)
_new_asn = [inFilename]
_asndict = asnutil.readASNTable(inFilename,None,prodonly=False)
_cal_prodname = _asndict['output'].lower()
#_fname = fileutil.buildRootname(_cal_prodname,ext=['_drz.fits'])
# Retrieve the first member's rootname for possible use later
_fimg = fits.open(inFilename, memmap=False)
for name in _fimg[1].data.field('MEMNAME'):
if name[-1] != '*':
_mname = name.split('\0', 1)[0].lower()
break
_fimg.close()
del _fimg
else:
# Check to see if input is a _RAW file
# If it is, strip off the _raw.fits extension...
_indx = inFilename.find('_raw')
if _indx < 0: _indx = len(inFilename)
# ... and build the CALXXX product rootname.
if wfpc2_input:
# force code to define _c0m file as calibrated product to be used
cal_ext = ['_c0m.fits']
_mname = fileutil.buildRootname(inFilename[:_indx], ext=cal_ext)
_cal_prodname = inFilename[:_indx]
# Reset inFilename to correspond to appropriate input for
# drizzle: calibrated product name.
inFilename = _mname
if _mname is None:
errorMsg = 'Could not find calibrated product!'
raise Exception(errorMsg)
# Create trailer filenames based on ASN output filename or
# on input name for single exposures
if '_raw' in inFile:
# Output trailer file to RAW file's trailer
_trlroot = inFile[:inFile.find('_raw')]
elif '_asn' in inFile:
# Output trailer file to ASN file's trailer, not product's trailer
_trlroot = inFile[:inFile.find('_asn')]
else:
# Default: trim off last suffix of input filename
# and replacing with .tra
_indx = inFile.rfind('_')
if _indx > 0:
_trlroot = inFile[:_indx]
else:
_trlroot = inFile
_trlfile = _trlroot + '.tra'
# Open product and read keyword value
# Check to see if product already exists...
dkey = 'DRIZCORR'
# ...if product does NOT exist, interrogate input file
# to find out whether 'dcorr' has been set to PERFORM
# Check if user wants to process again regardless of DRIZCORR keyword value
if force:
dcorr = 'PERFORM'
else:
if _mname :
_fimg = fits.open(fileutil.buildRootname(_mname,ext=['_raw.fits']), memmap=False)
_phdr = _fimg['PRIMARY'].header
if dkey in _phdr:
dcorr = _phdr[dkey]
else:
dcorr = None
_fimg.close()
del _fimg
else:
dcorr = None
time_str = _getTime()
_tmptrl = _trlroot + '_tmp.tra'
_drizfile = _trlroot + '_pydriz'
_drizlog = _drizfile + ".log" # the '.log' gets added automatically by astrodrizzle
_alignlog = _trlroot + '_align.log'
if dcorr == 'PERFORM':
if '_asn.fits' not in inFilename:
# Working with a singleton
# However, we always want to make sure we always use
# a calibrated product as input, if available.
_infile = fileutil.buildRootname(_cal_prodname, ext=cal_ext)
_infile_flc = fileutil.buildRootname(_cal_prodname,ext=['_flc.fits'])
_cal_prodname = _infile
_inlist = _calfiles = [_infile]
# Add CTE corrected filename as additional input if present
if os.path.exists(_infile_flc) and _infile_flc != _infile:
_inlist.append(_infile_flc)
else:
# Working with an ASN table...
_infile = inFilename
flist,duplist = processInput.checkForDuplicateInputs(_asndict['order'])
_calfiles = flist
if len(duplist) > 0:
origasn = processInput.changeSuffixinASN(inFilename,'flt')
dupasn = processInput.changeSuffixinASN(inFilename,'flc')
_inlist = [origasn,dupasn]
else:
_inlist = [_infile]
# We want to keep the original specification of the calibration
# product name, though, not a lower-case version...
_cal_prodname = inFilename
_new_asn.extend(_inlist) # kept so we can delete it when finished
# check to see whether FLC files are also present, and need to be updated
# generate list of FLC files
align_files = None
_calfiles_flc = [f.replace('_flt.fits','_flc.fits') for f in _calfiles]
    # ensure these files exist; if not, blank them out
# Also pick out what files will be used for additional alignment to GAIA
if not os.path.exists(_calfiles_flc[0]):
_calfiles_flc = None
align_files = _calfiles
align_update_files = None
else:
align_files = _calfiles_flc
align_update_files = _calfiles
# Run updatewcs on each list of images
updatewcs.updatewcs(_calfiles)
if _calfiles_flc:
updatewcs.updatewcs(_calfiles_flc)
if align_to_gaia:
# Perform additional alignment on the FLC files, if present
###############
#
# call hlapipeline code here on align_files list of files
#
###############
# Create trailer marker message for start of align_to_GAIA processing
_trlmsg = _timestamp("Align_to_GAIA started ")
print(_trlmsg)
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
_trlmsg = ""
# Create an empty astropy table so it can be used as input/output for the perform_align function
#align_table = Table()
try:
align_table = alignimages.perform_align(align_files,update_hdr_wcs=True, runfile=_alignlog)
for row in align_table:
if row['status'] == 0:
trlstr = "Successfully aligned {} to {} astrometric frame\n"
_trlmsg += trlstr.format(row['imageName'], row['catalog'])
else:
trlstr = "Could not align {} to absolute astrometric frame\n"
_trlmsg += trlstr.format(row['imageName'])
except Exception:
# Something went wrong with alignment to GAIA, so report this in
# trailer file
_trlmsg = "EXCEPTION encountered in alignimages...\n"
_trlmsg += " No correction to absolute astrometric frame applied!\n"
# Write the perform_align log to the trailer file...(this will delete the _alignlog)
_appendTrlFile(_trlfile,_alignlog)
# Append messages from this calling routine post-perform_align
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
_trlmsg = ""
#Check to see whether there are any additional input files that need to
# be aligned (namely, FLT images)
if align_update_files and align_table:
# Apply headerlets from alignment to FLT version of the files
for fltfile, flcfile in zip(align_update_files, align_files):
row = align_table[align_table['imageName']==flcfile]
headerletFile = row['headerletFile'][0]
if headerletFile != "None":
headerlet.apply_headerlet_as_primary(fltfile, headerletFile,
attach=True, archive=True)
# append log file contents to _trlmsg for inclusion in trailer file
_trlstr = "Applying headerlet {} as Primary WCS to {}\n"
_trlmsg += _trlstr.format(headerletFile, fltfile)
else:
_trlmsg += "No absolute astrometric headerlet applied to {}\n".format(fltfile)
        # Finally, append any further messages associated with alignment from this calling routine
_trlmsg += _timestamp('Align_to_GAIA completed ')
print(_trlmsg)
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
# Run astrodrizzle and send its processing statements to _trlfile
_pyver = drizzlepac.astrodrizzle.__version__
for _infile in _inlist: # Run astrodrizzle for all inputs
# Create trailer marker message for start of astrodrizzle processing
_trlmsg = _timestamp('astrodrizzle started ')
_trlmsg += __trlmarker__
_trlmsg += '%s: Processing %s with astrodrizzle Version %s\n' % (time_str,_infile,_pyver)
print(_trlmsg)
# Write out trailer comments to trailer file...
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
_pyd_err = _trlroot+'_pydriz.stderr'
try:
b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,runfile=_drizfile,
configobj='defaults',in_memory=inmemory,
num_cores=num_cores, **pipeline_pars)
except Exception as errorobj:
_appendTrlFile(_trlfile,_drizlog)
_appendTrlFile(_trlfile,_pyd_err)
_ftrl = open(_trlfile,'a')
_ftrl.write('ERROR: Could not complete astrodrizzle processing of %s.\n' % _infile)
_ftrl.write(str(sys.exc_info()[0])+': ')
_ftrl.writelines(str(errorobj))
_ftrl.write('\n')
_ftrl.close()
print('ERROR: Could not complete astrodrizzle processing of %s.' % _infile)
raise Exception(str(errorobj))
# Now, append comments created by PyDrizzle to CALXXX trailer file
print('Updating trailer file %s with astrodrizzle comments.' % _trlfile)
_appendTrlFile(_trlfile,_drizlog)
# Save this for when astropy.io.fits can modify a file 'in-place'
# Update calibration switch
_fimg = fits.open(_cal_prodname, mode='update', memmap=False)
_fimg['PRIMARY'].header[dkey] = 'COMPLETE'
_fimg.close()
del _fimg
# Enforce pipeline convention of all lower-case product
# names
_prodlist = glob.glob('*drz.fits')
for _prodname in _prodlist:
_plower = _prodname.lower()
if _prodname != _plower: os.rename(_prodname,_plower)
else:
# Create default trailer file messages when astrodrizzle is not
# run on a file. This will typically apply only to BIAS,DARK
# and other reference images.
# Start by building up the message...
_trlmsg = _timestamp('astrodrizzle skipped ')
_trlmsg = _trlmsg + __trlmarker__
_trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\n' % (time_str,inFilename)
_trlmsg = _trlmsg + ' astrodrizzle will not be run at this time.\n'
print(_trlmsg)
# Write message out to temp file and append it to full trailer file
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
# Append final timestamp to trailer file...
_final_msg = '%s: Finished processing %s \n' % (time_str,inFilename)
_final_msg += _timestamp('astrodrizzle completed ')
_trlmsg += _final_msg
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
# If we created a new ASN table, we need to remove it
if _new_asn is not None:
for _name in _new_asn: fileutil.removeFile(_name)
# Clean up any generated OrIg_files directory
if os.path.exists("OrIg_files"):
# check to see whether this directory is empty
flist = glob.glob('OrIg_files/*.fits')
if len(flist) == 0:
os.rmdir("OrIg_files")
else:
print('OrIg_files directory NOT removed as it still contained images...')
# If headerlets have already been written out by alignment code,
# do NOT write out this version of the headerlets
if headerlets:
# Generate headerlets for each updated FLT image
hlet_msg = _timestamp("Writing Headerlets started")
for fname in _calfiles:
frootname = fileutil.buildNewRootname(fname)
hname = "%s_flt_hlet.fits"%frootname
# Write out headerlet file used by astrodrizzle, however,
# do not overwrite any that was already written out by alignimages
if not os.path.exists(hname):
hlet_msg += "Created Headerlet file %s \n"%hname
try:
headerlet.write_headerlet(fname,'OPUS',output='flt', wcskey='PRIMARY',
author="OPUS",descrip="Default WCS from Pipeline Calibration",
attach=False,clobber=True,logging=False)
except ValueError:
hlet_msg += _timestamp("SKIPPED: Headerlet not created for %s \n"%fname)
# update trailer file to log creation of headerlet files
hlet_msg += _timestamp("Writing Headerlets completed")
ftrl = open(_trlfile,'a')
ftrl.write(hlet_msg)
ftrl.close()
# If processing was done in a temp working dir, restore results to original
# processing directory, return to original working dir and remove temp dir
if newpath:
_restoreResults(new_processing_dir,orig_processing_dir)
os.chdir(orig_processing_dir)
_removeWorkingDir(new_processing_dir)
# Provide feedback to user
print(_final_msg)
def _addSpecfile(self, specfile, path):
"""Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory used for loading and saving ``mrc`` files
"""
datatypeStatus = {'rm': False, 'ci': False, 'smi': False, 'sai': False,
'si': False
}
self.info[specfile] = {'path': path, 'status': datatypeStatus}
def policyChange(self, updateParams, func):
""" update defaultPolicy dict """
for k,v in updateParams.items():
k = k.replace('-','_')
c = globals()[k](v)
        try:
            self.defaultPolicies[k] = getattr(c, func)(self.defaultPolicies[k])
        except Exception:
            raise
def serialize_list(out, lst, delimiter=u'', max_length=20):
"""This method is used to serialize list of text
pieces like ["some=u'Another'", "blah=124"]
Depending on how many lines are in these items,
they are concatenated in row or as a column.
Concatenation result is appended to the `out` argument.
"""
have_multiline_items = any(map(is_multiline, lst))
result_will_be_too_long = sum(map(len, lst)) > max_length
if have_multiline_items or result_will_be_too_long:
padding = len(out)
add_padding = padding_adder(padding)
# we need to add padding to all lines
# except the first one
head, rest = cut_head(lst)
rest = map(add_padding, rest)
        # add padding to the head, but not to its first line
head = add_padding(head, ignore_first_line=True)
# now join lines back
lst = chain((head,), rest)
delimiter += u'\n'
else:
delimiter += u' '
return out + delimiter.join(lst)
|
This method is used to serialize a list of text
pieces like ["some=u'Another'", "blah=124"].
Depending on how many lines are in these items,
they are concatenated in a row or as a column.
The concatenation result is appended to the `out` argument.
|
def toggle_state(self, state, active=TOGGLE):
"""
Toggle the given state for this conversation.
    The state will be set if ``active`` is ``True``; otherwise the state will be removed.
If ``active`` is not given, it will default to the inverse of the current state
(i.e., ``False`` if the state is currently set, ``True`` if it is not; essentially
toggling the state).
For example::
conv.toggle_state('{relation_name}.foo', value=='foo')
This will set the state if ``value`` is equal to ``foo``.
"""
if active is TOGGLE:
active = not self.is_state(state)
if active:
self.set_state(state)
else:
self.remove_state(state)
|
Toggle the given state for this conversation.
The state will be set if ``active`` is ``True``; otherwise the state will be removed.
If ``active`` is not given, it will default to the inverse of the current state
(i.e., ``False`` if the state is currently set, ``True`` if it is not; essentially
toggling the state).
For example::
conv.toggle_state('{relation_name}.foo', value=='foo')
This will set the state if ``value`` is equal to ``foo``.
|
def dragEnterEvent(self, event):
"""
Listens for query's being dragged and dropped onto this tree.
:param event | <QDragEnterEvent>
"""
data = event.mimeData()
if data.hasFormat('application/x-orb-table') and \
data.hasFormat('application/x-orb-query'):
tableName = self.tableTypeName()
if nstr(data.data('application/x-orb-table')) == tableName:
event.acceptProposedAction()
return
elif data.hasFormat('application/x-orb-records'):
event.acceptProposedAction()
return
super(XOrbRecordBox, self).dragEnterEvent(event)
|
Listens for query's being dragged and dropped onto this tree.
:param event | <QDragEnterEvent>
|
def parse_requirements_list(requirements_list):
"""
    Take a list and return a list of dicts with {package, version} based on the requirement specs
    :param requirements_list: list of requirement strings
    :return: list of dicts with 'package' and 'version' keys
"""
req_list = []
for requirement in requirements_list:
requirement_no_comments = requirement.split('#')[0].strip()
# if matching requirement line (Thing==1.2.3), update dict, continue
req_match = re.match(
r'\s*(?P<package>[^\s\[\]]+)(?P<extras>\[\S+\])?==(?P<version>\S+)',
requirement_no_comments
)
if req_match:
req_list.append({
'package': req_match.group('package'),
'version': req_match.group('version'),
})
return req_list
|
Take a list and return a list of dicts with {package, version} based on the requirement specs
:param requirements_list: list of requirement strings
:return: list of dicts with 'package' and 'version' keys
|
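A quick self-contained check of the pinning regex above; the requirement strings are made-up examples, and the function defined above is assumed to be in scope:

reqs = [
    'requests==2.20.0  # HTTP client',   # exact pin: captured
    'lxml[html5]==4.2.5',                # pin with extras: captured
    'flask>=1.0',                        # not an exact pin: skipped
]
print(parse_requirements_list(reqs))
# [{'package': 'requests', 'version': '2.20.0'},
#  {'package': 'lxml', 'version': '4.2.5'}]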
def get_length_task_loss(config: LossConfig) -> 'Loss':
"""
Returns a Loss instance.
:param config: Loss configuration.
:return: Instance implementing Loss.
"""
if config.length_task_link is not None:
if config.length_task_link == C.LINK_NORMAL:
return MSELoss(config,
output_names=[C.LENRATIO_OUTPUT_NAME],
label_names=[C.LENRATIO_LABEL_NAME])
elif config.length_task_link == C.LINK_POISSON:
return PoissonLoss(config,
output_names=[C.LENRATIO_OUTPUT_NAME],
label_names=[C.LENRATIO_LABEL_NAME])
else:
raise ValueError("unknown link function name for length task: %s" % config.length_task_link)
return None
|
Returns a Loss instance.
:param config: Loss configuration.
:return: Instance implementing Loss.
|
def listen(self):
'''Listen for events as they come in'''
try:
self._pubsub.subscribe(self._channels)
for message in self._pubsub.listen():
if message['type'] == 'message':
yield message
finally:
self._channels = []
|
Listen for events as they come in
|
def random_string(length=8, charset=None):
'''
Generates a string with random characters. If no charset is specified, only
letters and digits are used.
Args:
length (int) length of the returned string
charset (string) list of characters to choose from
Returns:
(str) with random characters from charset
Raises:
-
'''
if length < 1:
raise ValueError('Length must be > 0')
if not charset:
charset = string.letters + string.digits
return ''.join(random.choice(charset) for unused in xrange(length))
|
Generates a string with random characters. If no charset is specified, only
letters and digits are used.
Args:
length (int) length of the returned string
charset (string) list of characters to choose from
Returns:
(str) with random characters from charset
Raises:
-
|
def upload_predictions(self, file_path, tournament=1):
"""Upload predictions from file.
Args:
file_path (str): CSV file with predictions that will get uploaded
tournament (int): ID of the tournament (optional, defaults to 1)
Returns:
str: submission_id
Example:
>>> api = NumerAPI(secret_key="..", public_id="..")
>>> api.upload_predictions()
'93c46857-fed9-4594-981e-82db2b358daf'
"""
self.logger.info("uploading predictions...")
auth_query = '''
query($filename: String!
$tournament: Int!) {
submission_upload_auth(filename: $filename
tournament: $tournament) {
filename
url
}
}
'''
arguments = {'filename': os.path.basename(file_path),
'tournament': tournament}
submission_resp = self.raw_query(auth_query, arguments,
authorization=True)
submission_auth = submission_resp['data']['submission_upload_auth']
with open(file_path, 'rb') as fh:
requests.put(submission_auth['url'], data=fh.read())
create_query = '''
mutation($filename: String!
$tournament: Int!) {
create_submission(filename: $filename
tournament: $tournament) {
id
}
}
'''
arguments = {'filename': submission_auth['filename'],
'tournament': tournament}
create = self.raw_query(create_query, arguments, authorization=True)
self.submission_id = create['data']['create_submission']['id']
return self.submission_id
|
Upload predictions from file.
Args:
file_path (str): CSV file with predictions that will get uploaded
tournament (int): ID of the tournament (optional, defaults to 1)
Returns:
str: submission_id
Example:
>>> api = NumerAPI(secret_key="..", public_id="..")
>>> api.upload_predictions()
'93c46857-fed9-4594-981e-82db2b358daf'
|
def calculate_metrics(self, model, train_loader, valid_loader, metrics_dict):
"""Add standard and custom metrics to metrics_dict"""
# Check whether or not it's time for validation as well
self.log_count += 1
log_valid = (
valid_loader is not None
and self.valid_every_X
and not (self.log_count % self.valid_every_X)
)
metrics_dict = {}
# Calculate custom metrics
if self.config["log_train_metrics_func"] is not None:
func = self.config["log_train_metrics_func"]
func_list = func if isinstance(func, list) else [func]
for func in func_list:
metrics_dict = self._calculate_custom_metrics(
model, train_loader, func, metrics_dict, split="train"
)
if self.config["log_valid_metrics_func"] is not None and log_valid:
func = self.config["log_valid_metrics_func"]
func_list = func if isinstance(func, list) else [func]
for func in func_list:
metrics_dict = self._calculate_custom_metrics(
model, valid_loader, func, metrics_dict, split="valid"
)
# Calculate standard metrics
metrics_dict = self._calculate_standard_metrics(
model, train_loader, self.log_train_metrics, metrics_dict, "train"
)
if log_valid:
metrics_dict = self._calculate_standard_metrics(
model, valid_loader, self.log_valid_metrics, metrics_dict, "valid"
)
return metrics_dict
|
Add standard and custom metrics to metrics_dict
|
def _new_from_cdata(cls, cdata: Any) -> "Color":
"""new in libtcod-cffi"""
return cls(cdata.r, cdata.g, cdata.b)
|
new in libtcod-cffi
|
def _is_bhyve_hyper():
'''
    Returns a bool indicating whether or not this node is a bhyve hypervisor
'''
sysctl_cmd = 'sysctl hw.vmm.create'
vmm_enabled = False
try:
stdout = subprocess.Popen(sysctl_cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
vmm_enabled = len(salt.utils.stringutils.to_str(stdout).split('"')[1]) != 0
except IndexError:
pass
return vmm_enabled
|
Returns a bool indicating whether or not this node is a bhyve hypervisor
|
def _ttm_me_compute(self, V, edims, sdims, transp):
"""
Assume Y = T x_i V_i for i = 1...n can fit into memory
"""
shapeY = np.copy(self.shape)
# Determine size of Y
for n in np.union1d(edims, sdims):
shapeY[n] = V[n].shape[1] if transp else V[n].shape[0]
# Allocate Y (final result) and v (vectors for elementwise computations)
Y = zeros(shapeY)
shapeY = array(shapeY)
v = [None for _ in range(len(edims))]
for i in range(np.prod(shapeY[edims])):
        rsubs = unravel_index(i, shapeY[edims])  # numpy expects the flat index first, then the shape
|
Assume Y = T x_i V_i for i = 1...n can fit into memory
|
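As a side note on the unravel_index call above: numpy's unravel_index takes the flat index first and the shape second, e.g.:

import numpy as np
# Convert flat index 5 of a 2x3 array into (row, col) subscripts.
print(np.unravel_index(5, (2, 3)))  # (1, 2)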
def list_eids(self):
"""
Returns a list of all known eids
"""
entities = self.list()
return sorted([int(eid) for eid in entities])
|
Returns a list of all known eids
|
def get_stranger_info(self, *, user_id, no_cache=False):
"""
        Get stranger info
        ------------
        :param int user_id: QQ number (must not be the logged-in account)
        :param bool no_cache: whether to bypass the cache (cached data may be stale, but responds faster)
        :return: { "user_id": (QQ number: int), "nickname": (nickname: str), "sex": (gender: str in ['male', 'female', 'unknown']), "age": (age: int) }
        :rtype: dict[ str, int | str ]
        ------------
        ======== ========= ======================================
        Response data
        -----------------------------------------------------------
        Type     Field     Description
        ======== ========= ======================================
        int      user_id   QQ number
        str      nickname  nickname
        str      sex       gender, `male`, `female` or `unknown`
        int      age       age
        ======== ========= ======================================
"""
return super().__getattr__('get_stranger_info') \
(user_id=user_id, no_cache=no_cache)
|
Get stranger info
------------
:param int user_id: QQ number (must not be the logged-in account)
:param bool no_cache: whether to bypass the cache (cached data may be stale, but responds faster)
:return: { "user_id": (QQ number: int), "nickname": (nickname: str), "sex": (gender: str in ['male', 'female', 'unknown']), "age": (age: int) }
:rtype: dict[ str, int | str ]
------------
======== ========= ======================================
Response data
-----------------------------------------------------------
Type     Field     Description
======== ========= ======================================
int      user_id   QQ number
str      nickname  nickname
str      sex       gender, `male`, `female` or `unknown`
int      age       age
======== ========= ======================================
|
def set_file_type(self, doc, type_value):
"""
Raises OrderError if no package or file defined.
Raises CardinalityError if more than one type set.
Raises SPDXValueError if type is unknown.
"""
type_dict = {
'SOURCE': file.FileType.SOURCE,
'BINARY': file.FileType.BINARY,
'ARCHIVE': file.FileType.ARCHIVE,
'OTHER': file.FileType.OTHER
}
if self.has_package(doc) and self.has_file(doc):
if not self.file_type_set:
self.file_type_set = True
if type_value in type_dict.keys():
self.file(doc).type = type_dict[type_value]
return True
else:
raise SPDXValueError('File::Type')
else:
raise CardinalityError('File::Type')
else:
raise OrderError('File::Type')
|
Raises OrderError if no package or file defined.
Raises CardinalityError if more than one type set.
Raises SPDXValueError if type is unknown.
|
def mouse_event(dwFlags: int, dx: int, dy: int, dwData: int, dwExtraInfo: int) -> None:
"""mouse_event from Win32."""
ctypes.windll.user32.mouse_event(dwFlags, dx, dy, dwData, dwExtraInfo)
|
mouse_event from Win32.
|
def _sample(probability_vec):
"""Return random binary string, with given probabilities."""
return map(int,
numpy.random.random(probability_vec.size) <= probability_vec)
|
Return a random binary vector, sampled elementwise with the given probabilities.
|
def cprint(message, status=None):
"""color printing based on status:
None -> BRIGHT
'ok' -> GREEN
'err' -> RED
'warn' -> YELLOW
"""
# TODO use less obscure dict, probably "error", "warn", "success" as keys
status = {'warn': Fore.YELLOW, 'err': Fore.RED,
'ok': Fore.GREEN, None: Style.BRIGHT}[status]
print(status + message + Style.RESET_ALL)
|
color printing based on status:
None -> BRIGHT
'ok' -> GREEN
'err' -> RED
'warn' -> YELLOW
|
def three_way_information_gain(W, X, Y, Z, base=2):
"""Calculates the three-way information gain between three variables, I(W;X;Y;Z), in the given base
IG(W;X;Y;Z) indicates the information gained about variable Z by the joint variable W_X_Y, after removing
the information that W, X, and Y have about Z individually and jointly in pairs. Thus, 3-way information gain
measures the synergistic predictive value of variables W, X, and Y about variable Z.
Parameters
----------
W: array-like (# samples)
An array of values for which to compute the 3-way information gain
X: array-like (# samples)
An array of values for which to compute the 3-way information gain
Y: array-like (# samples)
An array of values for which to compute the 3-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 3-way information gain
base: integer (default: 2)
The base in which to calculate 3-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation:
IG(W;X;Y;Z) = I(W,X,Y;Z) - IG(W;X;Z) - IG(W;Y;Z) - IG(X;Y;Z) - I(W;Z) - I(X;Z) - I(Y;Z)
"""
W_X_Y = ['{}{}{}'.format(w, x, y) for w, x, y in zip(W, X, Y)]
return (mutual_information(W_X_Y, Z, base=base) -
two_way_information_gain(W, X, Z, base=base) -
two_way_information_gain(W, Y, Z, base=base) -
two_way_information_gain(X, Y, Z, base=base) -
mutual_information(W, Z, base=base) -
mutual_information(X, Z, base=base) -
mutual_information(Y, Z, base=base))
|
Calculates the three-way information gain, IG(W;X;Y;Z), among the three variables W, X, and Y about Z, in the given base
IG(W;X;Y;Z) indicates the information gained about variable Z by the joint variable W_X_Y, after removing
the information that W, X, and Y have about Z individually and jointly in pairs. Thus, 3-way information gain
measures the synergistic predictive value of variables W, X, and Y about variable Z.
Parameters
----------
W: array-like (# samples)
An array of values for which to compute the 3-way information gain
X: array-like (# samples)
An array of values for which to compute the 3-way information gain
Y: array-like (# samples)
An array of values for which to compute the 3-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 3-way information gain
base: integer (default: 2)
The base in which to calculate 3-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation:
IG(W;X;Y;Z) = I(W,X,Y;Z) - IG(W;X;Z) - IG(W;Y;Z) - IG(X;Y;Z) - I(W;Z) - I(X;Z) - I(Y;Z)
|
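The helpers referenced above (mutual_information, two_way_information_gain) are not shown in this excerpt; a minimal entropy-based sketch of what they compute, for illustration only:

import numpy as np
from collections import Counter

def _entropy(labels, base=2):
    # Shannon entropy of a sequence of discrete labels.
    counts = np.array(list(Counter(labels).values()), dtype=float)
    probs = counts / counts.sum()
    return -np.sum(probs * np.log(probs)) / np.log(base)

def mutual_information(X, Y, base=2):
    # I(X;Y) = H(X) + H(Y) - H(X,Y)
    joint = ['{}_{}'.format(x, y) for x, y in zip(X, Y)]
    return _entropy(X, base) + _entropy(Y, base) - _entropy(joint, base)

def two_way_information_gain(X, Y, Z, base=2):
    # IG(X;Y;Z) = I(X,Y;Z) - I(X;Z) - I(Y;Z)
    X_Y = ['{}_{}'.format(x, y) for x, y in zip(X, Y)]
    return (mutual_information(X_Y, Z, base=base)
            - mutual_information(X, Z, base=base)
            - mutual_information(Y, Z, base=base))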
def do(self, params):
"""发起对 api 的请求并过滤返回结果
:param params: 交易所需的动态参数"""
request_params = self.create_basic_params()
request_params.update(params)
response_data = self.request(request_params)
try:
format_json_data = self.format_response_data(response_data)
# pylint: disable=broad-except
except Exception:
# Caused by server force logged out
return None
return_data = self.fix_error_data(format_json_data)
try:
self.check_login_status(return_data)
except exceptions.NotLoginError:
self.autologin()
return return_data
|
Send a request to the API and filter the returned result
:param params: dynamic parameters required for the transaction
|
async def on_isupport_maxlist(self, value):
""" Limits on channel modes involving lists. """
self._list_limits = {}
for entry in value.split(','):
modes, limit = entry.split(':')
# Assign limit to mode group and add lookup entry for mode.
self._list_limits[frozenset(modes)] = int(limit)
for mode in modes:
self._list_limit_groups[mode] = frozenset(modes)
|
Limits on channel modes involving lists.
|
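A standalone illustration of the parsing above; the MAXLIST token value is a made-up example:

value = 'beI:100,q:50'
list_limits = {}
list_limit_groups = {}
for entry in value.split(','):
    modes, limit = entry.split(':')
    # Assign the limit to the mode group and index each mode.
    list_limits[frozenset(modes)] = int(limit)
    for mode in modes:
        list_limit_groups[mode] = frozenset(modes)
print(list_limits)              # {frozenset({'b', 'e', 'I'}): 100, frozenset({'q'}): 50}
print(list_limit_groups['b'])   # frozenset({'b', 'e', 'I'})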
def bartlett(timeseries, segmentlength, noverlap=None, window=None, plan=None):
# pylint: disable=unused-argument
"""Calculate an PSD of this `TimeSeries` using Bartlett's method
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
    noverlap : `int`
        number of samples to overlap between segments; ignored here, since
        Bartlett's method uses non-overlapping segments
window : `tuple`, `str`, optional
window parameters to apply to timeseries prior to FFT
plan : `REAL8FFTPlan`, optional
LAL FFT plan to use when generating average spectrum
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
lal.REAL8AverageSpectrumWelch
"""
return _lal_spectrum(timeseries, segmentlength, noverlap=0,
method='welch', window=window, plan=plan)
|
Calculate a PSD of this `TimeSeries` using Bartlett's method
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
    number of samples to overlap between segments; ignored here, since
    Bartlett's method uses non-overlapping segments
window : `tuple`, `str`, optional
window parameters to apply to timeseries prior to FFT
plan : `REAL8FFTPlan`, optional
LAL FFT plan to use when generating average spectrum
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
lal.REAL8AverageSpectrumWelch
|
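Since Bartlett's method is Welch's method with zero overlap, the same kind of estimate can be sketched with scipy alone (illustrative; the function above goes through the LAL backend instead):

import numpy as np
from scipy import signal

fs = 1024.0
t = np.arange(0, 4, 1 / fs)
x = np.sin(2 * np.pi * 60 * t) + 0.5 * np.random.randn(t.size)
# Bartlett's method: average non-overlapping periodograms.
freqs, psd = signal.welch(x, fs=fs, nperseg=256, noverlap=0)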
def fit_partial(
self,
interactions,
user_features=None,
item_features=None,
sample_weight=None,
epochs=1,
num_threads=1,
verbose=False,
):
"""
Fit the model.
Fit the model. Unlike fit, repeated calls to this method will
cause training to resume from the current model state.
For details on how to use feature matrices, see the documentation
on the :class:`lightfm.LightFM` class.
Arguments
---------
interactions: np.float32 coo_matrix of shape [n_users, n_items]
the matrix containing
user-item interactions. Will be converted to
numpy.float32 dtype if it is not of that type.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
sample_weight: np.float32 coo_matrix of shape [n_users, n_items], optional
matrix with entries expressing weights of individual
interactions from the interactions matrix.
Its row and col arrays must be the same as
those of the interactions matrix. For memory
                    efficiency it's possible to use the same arrays
for both weights and interaction matrices.
Defaults to weight 1.0 for all interactions.
Not implemented for the k-OS loss.
epochs: int, optional
number of epochs to run
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
verbose: bool, optional
whether to print progress messages.
If `tqdm` is installed, a progress bar will be displayed instead.
Returns
-------
LightFM instance
the fitted model
"""
# We need this in the COO format.
# If that's already true, this is a no-op.
interactions = interactions.tocoo()
if interactions.dtype != CYTHON_DTYPE:
interactions.data = interactions.data.astype(CYTHON_DTYPE)
sample_weight_data = self._process_sample_weight(interactions, sample_weight)
n_users, n_items = interactions.shape
(user_features, item_features) = self._construct_feature_matrices(
n_users, n_items, user_features, item_features
)
for input_data in (
user_features.data,
item_features.data,
interactions.data,
sample_weight_data,
):
self._check_input_finite(input_data)
if self.item_embeddings is None:
# Initialise latent factors only if this is the first call
# to fit_partial.
self._initialize(
self.no_components, item_features.shape[1], user_features.shape[1]
)
# Check that the dimensionality of the feature matrices has
# not changed between runs.
if not item_features.shape[1] == self.item_embeddings.shape[0]:
raise ValueError("Incorrect number of features in item_features")
if not user_features.shape[1] == self.user_embeddings.shape[0]:
raise ValueError("Incorrect number of features in user_features")
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
for _ in self._progress(epochs, verbose=verbose):
self._run_epoch(
item_features,
user_features,
interactions,
sample_weight_data,
num_threads,
self.loss,
)
self._check_finite()
return self
|
Fit the model.
Fit the model. Unlike fit, repeated calls to this method will
cause training to resume from the current model state.
For details on how to use feature matrices, see the documentation
on the :class:`lightfm.LightFM` class.
Arguments
---------
interactions: np.float32 coo_matrix of shape [n_users, n_items]
the matrix containing
user-item interactions. Will be converted to
numpy.float32 dtype if it is not of that type.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
sample_weight: np.float32 coo_matrix of shape [n_users, n_items], optional
matrix with entries expressing weights of individual
interactions from the interactions matrix.
Its row and col arrays must be the same as
those of the interactions matrix. For memory
efficiency it's possible to use the same arrays
for both weights and interaction matrices.
Defaults to weight 1.0 for all interactions.
Not implemented for the k-OS loss.
epochs: int, optional
number of epochs to run
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
verbose: bool, optional
whether to print progress messages.
If `tqdm` is installed, a progress bar will be displayed instead.
Returns
-------
LightFM instance
the fitted model
|
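Typical resumable-training usage of fit_partial; the toy interaction matrix and hyperparameters below are placeholders:

import numpy as np
from scipy.sparse import coo_matrix
from lightfm import LightFM

rng = np.random.RandomState(42)
# Sparse binary interactions for 30 users and 40 items.
interactions = coo_matrix((rng.rand(30, 40) > 0.8).astype(np.float32))
model = LightFM(no_components=16, loss='warp')
for epoch in range(5):
    # Each call resumes training from the current model state.
    model.fit_partial(interactions, epochs=1, num_threads=1)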
def table_create(self, remove_existing=False):
"""Creates all tables.
"""
for engine in self.engines():
tables = self._get_tables(engine, create_drop=True)
logger.info('Create all tables for %s', engine)
            self.metadata.create_all(engine, tables=tables)
|
Creates all tables.
|
def _to_rule(self, lark_rule):
"""Converts a lark rule, (lhs, rhs, callback, options), to a Rule."""
assert isinstance(lark_rule.origin, NT)
assert all(isinstance(x, Symbol) for x in lark_rule.expansion)
return Rule(
lark_rule.origin, lark_rule.expansion,
weight=lark_rule.options.priority if lark_rule.options and lark_rule.options.priority else 0,
alias=lark_rule)
|
Converts a lark rule, (lhs, rhs, callback, options), to a Rule.
|
def import_libsvm_sparse(filename):
"""Imports dataset file in libsvm sparse format"""
from sklearn.datasets import load_svmlight_file
X, y = load_svmlight_file(filename)
return Dataset(X.toarray().tolist(), y.tolist())
|
Imports dataset file in libsvm sparse format
|
def random_forest(self):
""" Random Forest.
This function runs random forest and stores the,
1. Model
2. Model name
3. Max score
4. Metrics
"""
model = RandomForestRegressor(random_state=42)
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = np.mean(scores)
self.models.append(model)
self.model_names.append('Random Forest Regressor')
self.max_scores.append(mean_score)
self.metrics['Random Forest Regressor'] = {}
self.metrics['Random Forest Regressor']['R2'] = mean_score
self.metrics['Random Forest Regressor']['Adj R2'] = self.adj_r2(mean_score, self.baseline_in.shape[0], self.baseline_in.shape[1])
|
Random Forest.
This function runs random forest and stores the,
1. Model
2. Model name
3. Max score
4. Metrics
|
def emit(self, record, closed=False):
'''Do nothing'''
HierarchicalOutput.emit(self, record, closed)
|
Do nothing
|
def model_eval(sess, x, y, predictions, X_test=None, Y_test=None,
feed=None, args=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
  :param X_test: numpy array with evaluation inputs
  :param Y_test: numpy array with evaluation outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
"""
global _model_eval_cache
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
# Define accuracy symbolically
key = (y, predictions)
if key in _model_eval_cache:
correct_preds = _model_eval_cache[key]
else:
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
_model_eval_cache[key] = correct_preds
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
      # affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
feed_dict = {x: X_cur, y: Y_cur}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
|
Compute the accuracy of a TF model on some data
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with evaluation inputs
:param Y_test: numpy array with evaluation outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
|
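The fixed-size-batch bookkeeping above (pad the last batch, then count only its real rows) can be checked with a plain numpy sketch:

import math
import numpy as np

def batched_accuracy(pred_labels, true_labels, batch_size=32):
    # Accuracy over fixed-size batches; only real rows of the last batch count.
    n = len(true_labels)
    nb_batches = int(math.ceil(float(n) / batch_size))
    correct = 0
    for batch in range(nb_batches):
        start = batch * batch_size
        end = min(n, start + batch_size)
        correct += np.sum(pred_labels[start:end] == true_labels[start:end])
    return correct / float(n)

preds = np.array([0, 1, 1, 0, 1])
truth = np.array([0, 1, 0, 0, 1])
print(batched_accuracy(preds, truth, batch_size=2))  # 0.8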
def run_false_positive_experiment_dim(
numActive = 128,
dim = 500,
numSamples = 1000,
numDendrites = 500,
synapses = 24,
numTrials = 10000,
seed = 42,
nonlinearity = sigmoid_nonlinearity(11.5, 5)):
"""
  Run an experiment to test the false positive rate based on the number of
  synapses per dendrite, dimension and sparsity. Uses two competing neurons,
  following the P&M model.
Based on figure 5B in the original SDR paper.
"""
numpy.random.seed(seed)
fps = []
fns = []
totalUnclassified = 0
for trial in range(numTrials):
# data = generate_evenly_distributed_data_sparse(dim = dim,
# num_active = numActive,
# num_samples = numSamples)
# labels = numpy.asarray([1 for i in range(numSamples / 2)] +
# [-1 for i in range(numSamples / 2)])
# flipped_labels = labels * -1
    negData = generate_evenly_distributed_data_sparse(dim=dim,
                                                      num_active=numActive,
                                                      num_samples=numSamples // 2)
    posData = generate_evenly_distributed_data_sparse(dim=dim,
                                                      num_active=numActive,
                                                      num_samples=numSamples // 2)
    halfLabels = numpy.asarray([1 for _ in range(numSamples // 2)])
    flippedHalfLabels = halfLabels * -1
    neuron = Neuron(size=synapses * numDendrites,
                    num_dendrites=numDendrites,
                    dendrite_length=synapses,
                    dim=dim, nonlinearity=nonlinearity)
    neg_neuron = Neuron(size=synapses * numDendrites,
                        num_dendrites=numDendrites,
                        dendrite_length=synapses,
                        dim=dim, nonlinearity=nonlinearity)
neuron.HTM_style_initialize_on_positive_data(posData)
neg_neuron.HTM_style_initialize_on_positive_data(negData)
# Get error for positively labeled data
fp, fn, uc = get_error(posData, halfLabels, [neuron], [neg_neuron])
totalUnclassified += uc
fps.append(fp)
fns.append(fn)
# Get error for negatively labeled data
fp, fn, uc = get_error(negData, flippedHalfLabels, [neuron], [neg_neuron])
totalUnclassified += uc
fps.append(fp)
fns.append(fn)
print "Error with n = {} : {} FP, {} FN, {} unclassified".format(
dim, sum(fps), sum(fns), totalUnclassified)
result = {
"dim": dim,
"totalFP": sum(fps),
"totalFN": sum(fns),
"total mistakes": sum(fns + fps) + totalUnclassified,
"error": float(sum(fns + fps) + totalUnclassified) / (numTrials * numSamples),
"totalSamples": numTrials * numSamples,
"a": numActive,
"num_dendrites": numDendrites,
"totalUnclassified": totalUnclassified,
"synapses": 24,
"seed": seed,
}
return result
|
Run an experiment to test the false positive rate based on the number of
synapses per dendrite, dimension and sparsity. Uses two competing neurons,
following the P&M model.
Based on figure 5B in the original SDR paper.
|
def is_multidex(self):
"""
Test if the APK has multiple DEX files
:return: True if multiple dex found, otherwise False
"""
        dexre = re.compile(r"^classes(\d+)?\.dex$")
return len([instance for instance in self.get_files() if dexre.search(instance)]) > 1
|
Test if the APK has multiple DEX files
:return: True if multiple dex found, otherwise False
|
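The (raw-string) DEX filename pattern can be sanity-checked directly:

import re

dexre = re.compile(r"^classes(\d+)?\.dex$")
names = ['classes.dex', 'classes2.dex', 'classes10.dex', 'resources.arsc']
matches = [n for n in names if dexre.search(n)]
print(matches)           # ['classes.dex', 'classes2.dex', 'classes10.dex']
print(len(matches) > 1)  # True -> multidex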
def scrape(self, selector, cleaner=None, processor=None):
"""Scrape the value for this field from the selector."""
# Apply CSS or XPath expression to the selector
selected = selector.xpath(self.selection) if self.xpath else selector.css(self.selection)
# Extract the value and apply regular expression if specified
value = selected.re(self.re) if self.re else selected.extract(raw=self.raw, cleaner=cleaner)
return self._post_scrape(value, processor=processor)
|
Scrape the value for this field from the selector.
|
def load_gettext_translations(directory: str, domain: str) -> None:
"""Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
Three steps are required to have your app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
"""
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if lang.startswith("."):
continue # skip .svn, etc
if os.path.isfile(os.path.join(directory, lang)):
continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
_translations[lang] = gettext.translation(
domain, directory, languages=[lang]
)
except Exception as e:
gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
|
Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
Three steps are required to have your app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
|
def add_grad(left, right):
"""Recursively add the gradient of two objects.
Args:
left: The left value to add. Can be either an array, a number, list or
dictionary.
right: The right value. Must be of the same type (recursively) as the left.
Returns:
      The sum of the two gradients, which will be of the same type.
"""
# We assume that initial gradients are always identity WRT add_grad.
# We also assume that only init_grad could have created None values.
assert left is not None and right is not None
left_type = type(left)
right_type = type(right)
if left_type is ZeroGradient:
return right
if right_type is ZeroGradient:
return left
return grad_adders[(left_type, right_type)](left, right)
|
Recursively add the gradient of two objects.
Args:
left: The left value to add. Can be either an array, a number, list or
dictionary.
right: The right value. Must be of the same type (recursively) as the left.
Returns:
The sum of the two gradients, which will be of the same type.
|
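grad_adders itself is defined elsewhere in the module; a minimal sketch of the (left_type, right_type) dispatch table it implies, with illustrative entries only:

import numpy as np

grad_adders = {
    (np.ndarray, np.ndarray): lambda l, r: l + r,
    (float, float): lambda l, r: l + r,
    # Containers recurse through add_grad element by element.
    (list, list): lambda l, r: [add_grad(a, b) for a, b in zip(l, r)],
    (dict, dict): lambda l, r: {k: add_grad(l[k], r[k]) for k in l},
}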
def refine_cell(self, tilde_obj):
'''
NB only used for perovskite_tilting app
'''
        try:
            lattice, positions, numbers = spg.refine_cell(tilde_obj['structures'][-1], symprec=self.accuracy, angle_tolerance=self.angle_tolerance)
except Exception as ex:
self.error = 'Symmetry finder error: %s' % ex
else:
self.refinedcell = Atoms(numbers=numbers, cell=lattice, scaled_positions=positions, pbc=tilde_obj['structures'][-1].get_pbc())
self.refinedcell.periodicity = sum(self.refinedcell.get_pbc())
self.refinedcell.dims = abs(det(tilde_obj['structures'][-1].cell))
|
NB only used for perovskite_tilting app
|
def iresolve(self, *keys):
'''
Iterates over resolved instances for given provider keys.
:param keys: Provider keys
:type keys: tuple
:return: Iterator of resolved instances
:rtype: generator
'''
for key in keys:
missing = self.get_missing_deps(key)
if missing:
raise UnresolvableError("Missing dependencies for %s: %s" % (key, missing))
provider = self._providers.get(key)
if not provider:
raise UnresolvableError("Provider does not exist for %s" % key)
yield provider()
|
Iterates over resolved instances for given provider keys.
:param keys: Provider keys
:type keys: tuple
:return: Iterator of resolved instances
:rtype: generator
|
def _read_from_socket(self):
"""Read data from the socket.
:rtype: bytes
"""
if not self.use_ssl:
if not self.socket:
raise socket.error('connection/socket error')
return self.socket.recv(MAX_FRAME_SIZE)
with self._rd_lock:
if not self.socket:
raise socket.error('connection/socket error')
return self.socket.read(MAX_FRAME_SIZE)
|
Read data from the socket.
:rtype: bytes
|
def run(uri,
user_entry_point,
args,
env_vars=None,
wait=True,
capture_error=False,
runner=_runner.ProcessRunnerType,
extra_opts=None):
# type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None
"""Download, prepare and executes a compressed tar file from S3 or provided directory as an user
entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command
arguments.
If the entry point is:
- A Python package: executes the packages as >>> env_vars python -m module_name + args
- A Python script: executes the script as >>> env_vars python module_name + args
- Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import entry_point
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>entry_point.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
uri (str): the location of the module.
user_entry_point (str): name of the user provided entry point
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written (default: None).
wait (bool): If the user entry point should be run to completion before this method returns
(default: True).
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to
be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType).
extra_opts (dict): Additional options for running the entry point (default: None).
Currently, this only applies for MPI.
Returns:
sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for
executing the entry point.
"""
env_vars = env_vars or {}
env_vars = env_vars.copy()
_files.download_and_extract(uri, user_entry_point, _env.code_dir)
install(user_entry_point, _env.code_dir, capture_error)
_env.write_env_vars(env_vars)
return _runner.get(runner, user_entry_point, args, env_vars, extra_opts).run(wait, capture_error)
|
Downloads, prepares and executes a compressed tar file from S3 or a provided directory as a user
entry point. Runs the user entry point, passing env_vars as environment variables and args as command
arguments.
If the entry point is:
- A Python package: executes the packages as >>> env_vars python -m module_name + args
- A Python script: executes the script as >>> env_vars python module_name + args
- Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import entry_point
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>entry_point.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
uri (str): the location of the module.
user_entry_point (str): name of the user provided entry point
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written (default: None).
wait (bool): If the user entry point should be run to completion before this method returns
(default: True).
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to
be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType).
extra_opts (dict): Additional options for running the entry point (default: None).
Currently, this only applies for MPI.
Returns:
sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for
executing the entry point.
|
def load_json_from_string(string):
"""Load schema from JSON string"""
try:
json_data = json.loads(string)
except ValueError as e:
raise ValueError('Given string is not valid JSON: {}'.format(e))
else:
return json_data
|
Load schema from JSON string
|
def disconnect_pools(self):
"""Disconnects all connections from the internal pools."""
with self._lock:
            for pool in self._pools.values():
pool.disconnect()
self._pools.clear()
|
Disconnects all connections from the internal pools.
|
def chmod(path, mode, recursive=False):
"""Emulates bash chmod command
This method sets the file permissions to the specified mode.
:param path: (str) Full path to the file or directory
:param mode: (str) Mode to be set (e.g. 0755)
:param recursive: (bool) Set True to make a recursive call
:return: int exit code of the chmod command
:raises CommandError
"""
log = logging.getLogger(mod_logger + '.chmod')
# Validate args
if not isinstance(path, basestring):
msg = 'path argument is not a string'
log.error(msg)
raise CommandError(msg)
if not isinstance(mode, basestring):
msg = 'mode argument is not a string'
log.error(msg)
raise CommandError(msg)
# Ensure the item exists
if not os.path.exists(path):
msg = 'Item not found: {p}'.format(p=path)
log.error(msg)
raise CommandError(msg)
# Create the chmod command
command = ['chmod']
# Make it recursive if specified
if recursive:
command.append('-R')
command.append(mode)
command.append(path)
try:
result = run_command(command)
except CommandError:
raise
log.info('chmod command exited with code: {c}'.format(c=result['code']))
return result['code']
|
Emulates bash chmod command
This method sets the file permissions to the specified mode.
:param path: (str) Full path to the file or directory
:param mode: (str) Mode to be set (e.g. 0755)
:param recursive: (bool) Set True to make a recursive call
:return: int exit code of the chmod command
:raises CommandError
|
def handle_get_passphrase(self, conn, _):
"""Allow simple GPG symmetric encryption (using a passphrase)."""
p1 = self.client.device.ui.get_passphrase('Symmetric encryption:')
p2 = self.client.device.ui.get_passphrase('Re-enter encryption:')
if p1 == p2:
result = b'D ' + util.assuan_serialize(p1.encode('ascii'))
keyring.sendline(conn, result, confidential=True)
else:
log.warning('Passphrase does not match!')
|
Allow simple GPG symmetric encryption (using a passphrase).
|
def _as_array(arr, dtype=None):
"""Convert an object to a numerical NumPy array.
Avoid a copy if possible.
"""
if arr is None:
return None
if isinstance(arr, np.ndarray) and dtype is None:
return arr
if isinstance(arr, integer_types + (float,)):
arr = [arr]
out = np.asarray(arr)
if dtype is not None:
if out.dtype != dtype:
out = out.astype(dtype)
if out.dtype not in _ACCEPTED_ARRAY_DTYPES:
raise ValueError("'arr' seems to have an invalid dtype: "
"{0:s}".format(str(out.dtype)))
return out
|
Convert an object to a numerical NumPy array.
Avoid a copy if possible.
|
def _parse_req(requnit, reqval):
''' Parse a non-day fixed value '''
assert reqval[0] != '='
try:
retn = []
for val in reqval.split(','):
if requnit == 'month':
                if val[0].isdigit():
                    retn.append(int(val))  # must be a month (1-12)
else:
try:
retn.append(list(calendar.month_abbr).index(val.title()))
except ValueError:
retn.append(list(calendar.month_name).index(val.title()))
else:
retn.append(int(val))
except ValueError:
return None
if not retn:
return None
return retn[0] if len(retn) == 1 else retn
|
Parse a non-day fixed value
|
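The month-name fallback above relies on the calendar module's lookup tables, where index 0 is the empty string, so month numbers come out 1-based:

import calendar

print(list(calendar.month_abbr).index('Feb'))       # 2
print(list(calendar.month_name).index('February'))  # 2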
def cp_als(X, rank, random_state=None, init='randn', **options):
"""Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
```
"""
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'CP_ALS', **options)
# Main optimization loop.
while result.still_optimizing:
# Iterate over each tensor mode.
for n in range(X.ndim):
# i) Normalize factors to prevent singularities.
U.rebalance()
# ii) Compute the N-1 gram matrices.
components = [U[j] for j in range(X.ndim) if j != n]
grams = sci.multiply.reduce([sci.dot(u.T, u) for u in components])
# iii) Compute Khatri-Rao product.
kr = khatri_rao(components)
# iv) Form normal equations and solve via Cholesky
c = linalg.cho_factor(grams, overwrite_a=False)
p = unfold(X, n).dot(kr)
U[n] = linalg.cho_solve(c, p.T, overwrite_b=False).T
# U[n] = linalg.solve(grams, unfold(X, n).dot(kr).T).T
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[-1].T.dot(U[-1])
# obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX
obj = linalg.norm(U.full() - X) / normX
# Update result
result.update(obj)
# Finalize and return the optimization result.
return result.finalize()
|
Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
```
|
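For reference, the Khatri-Rao product used in step iii is the column-wise Kronecker product; a minimal numpy version for two factor matrices (illustrative, not the library's implementation):

import numpy as np

def khatri_rao_pair(A, B):
    # Column-wise Kronecker product of (I, R) and (J, R) -> (I*J, R).
    I, R = A.shape
    J, R2 = B.shape
    assert R == R2, 'factor matrices must share the same number of columns'
    # Column r of the result is kron(A[:, r], B[:, r]).
    return (A[:, None, :] * B[None, :, :]).reshape(I * J, R)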
def execute(self, **minimize_options):
"""
Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance
"""
minimizer_ans = self.minimizer.execute(**minimize_options)
try: # to build covariance matrix
cov_matrix = minimizer_ans.covariance_matrix
except AttributeError:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
else:
if cov_matrix is None:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
finally:
minimizer_ans.covariance_matrix = cov_matrix
# Overwrite the DummyModel with the current model
minimizer_ans.model = self.model
minimizer_ans.gof_qualifiers['r_squared'] = r_squared(self.model, minimizer_ans, self.data)
return minimizer_ans
|
Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance
|
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
|
Starts the web UI.
|
def get_url(pif, dataset, version=1, site="https://citrination.com"):
"""
Construct the URL of a PIF on a site
:param pif: to construct URL for
:param dataset: the pif will belong to
:param version: of the PIF (default: 1)
:param site: for the dataset (default: https://citrination.com)
:return: the URL as a string
"""
return "{site}/datasets/{dataset}/version/{version}/pif/{uid}".format(
uid=pif.uid, version=version, dataset=dataset, site=site
)
|
Construct the URL of a PIF on a site
:param pif: to construct URL for
:param dataset: the pif will belong to
:param version: of the PIF (default: 1)
:param site: for the dataset (default: https://citrination.com)
:return: the URL as a string
|
def _adapt_WSDateTime(dt):
"""Return unix timestamp of the datetime like input.
If conversion overflows high, return sint64_max ,
if underflows, return 0
"""
try:
ts = int(
(dt.replace(tzinfo=pytz.utc)
- datetime(1970,1,1,tzinfo=pytz.utc)
).total_seconds()
)
except (OverflowError,OSError):
if dt < datetime.now():
ts = 0
else:
ts = 2**63-1
return ts
|
Return the Unix timestamp of the datetime-like input.
If the conversion overflows high, return sint64_max;
if it underflows, return 0.
|
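The same epoch arithmetic with only the standard library, for illustration (the function above uses pytz):

from datetime import datetime, timezone

dt = datetime(2019, 1, 1)
ts = int((dt.replace(tzinfo=timezone.utc)
          - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())
print(ts)  # 1546300800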
def _build_tpm(tpm):
"""Validate the TPM passed by the user and convert to multidimensional
form.
"""
tpm = np.array(tpm)
validate.tpm(tpm)
# Convert to multidimensional state-by-node form
if is_state_by_state(tpm):
tpm = convert.state_by_state2state_by_node(tpm)
else:
tpm = convert.to_multidimensional(tpm)
utils.np_immutable(tpm)
return (tpm, utils.np_hash(tpm))
|
Validate the TPM passed by the user and convert to multidimensional
form.
|
def getWeights(self, fromName, toName):
"""
Gets the weights of the connection between two layers (argument strings).
"""
for connection in self.connections:
if connection.fromLayer.name == fromName and \
connection.toLayer.name == toName:
return connection.weight
raise NetworkError('Connection was not found.', (fromName, toName))
|
Gets the weights of the connection between two layers (argument strings).
|
def add_voice(self, voices, item):
""" Adds a voice to the list
"""
voice = None
if item.get('type') == 'title':
voice = self.get_title_voice(item)
elif item.get('type') == 'app':
voice = self.get_app_voice(item)
elif item.get('type') == 'model':
voice = self.get_app_model_voice(item)
elif item.get('type') == 'free':
voice = self.get_free_voice(item)
if voice:
voices.append(voice)
|
Adds a voice to the list
|
def print_png(o):
"""
A function to display sympy expression using inline style LaTeX in PNG.
"""
s = latex(o, mode='inline')
# mathtext does not understand certain latex flags, so we try to replace
# them with suitable subs.
s = s.replace('\\operatorname','')
s = s.replace('\\overline', '\\bar')
png = latex_to_png(s)
return png
|
A function to display sympy expression using inline style LaTeX in PNG.
|
def slice_orthogonal(dataset, x=None, y=None, z=None,
generate_triangles=False, contour=False):
"""Creates three orthogonal slices through the dataset on the three
caresian planes. Yields a MutliBlock dataset of the three slices
Parameters
----------
x : float
The X location of the YZ slice
y : float
The Y location of the XZ slice
z : float
The Z location of the XY slice
generate_triangles: bool, optional
        If this is enabled (``False`` by default), the output will be
        triangles; otherwise, the output will be the intersection polygons.
contour : bool, optional
If True, apply a ``contour`` filter after slicing
"""
output = vtki.MultiBlock()
# Create the three slices
if x is None:
x = dataset.center[0]
if y is None:
y = dataset.center[1]
if z is None:
z = dataset.center[2]
output[0, 'YZ'] = dataset.slice(normal='x', origin=[x,y,z], generate_triangles=generate_triangles)
output[1, 'XZ'] = dataset.slice(normal='y', origin=[x,y,z], generate_triangles=generate_triangles)
output[2, 'XY'] = dataset.slice(normal='z', origin=[x,y,z], generate_triangles=generate_triangles)
return output
|
Creates three orthogonal slices through the dataset on the three
Cartesian planes. Returns a MultiBlock dataset of the three slices.
Parameters
----------
x : float
The X location of the YZ slice
y : float
The Y location of the XZ slice
z : float
The Z location of the XY slice
generate_triangles: bool, optional
If this is enabled (``False`` by default), the output will be
triangles; otherwise, the output will be the intersection polygons.
contour : bool, optional
If True, apply a ``contour`` filter after slicing
|
def deregister_all(self, *events):
"""
Deregisters all handler functions, or those registered against the given event(s).
"""
if events:
for event in events:
self._handler_dict[event] = []
else:
self._handler_dict = {}
|
Deregisters all handler functions, or those registered against the given event(s).
|
def update_vm_result(self, context, msg):
"""Update VM's result field in the DB.
        The result reflects the success or failure of the operation when an
agent processes the vm info.
"""
args = jsonutils.loads(msg)
agent = context.get('agent')
port_id = args.get('port_uuid')
result = args.get('result')
LOG.debug('update_vm_result received from %(agent)s: '
'%(port_id)s %(result)s', {'agent': agent,
'port_id': port_id,
'result': result})
# Add the request into queue for processing.
event_type = 'agent.vm_result.update'
payload = {'port_id': port_id, 'result': result}
timestamp = time.ctime()
data = (event_type, payload)
# TODO(nlahouti) use value defined in constants
pri = self.obj.PRI_LOW_START + 10
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request vm result update into queue.')
return 0
|
Update VM's result field in the DB.
The result reflects the success or failure of the operation when an
agent processes the vm info.
|
def getCustomDict(cls):
""" Returns a dict of all temporary values in custom configuration file
"""
if not os.path.exists(cls.getPath()):
return dict()
properties = Configuration._readConfigFile(os.path.basename(
cls.getPath()), os.path.dirname(cls.getPath()))
values = dict()
for propName in properties:
if 'value' in properties[propName]:
values[propName] = properties[propName]['value']
return values
|
Returns a dict of all temporary values in custom configuration file
|
def get_update(self, z=None):
"""
Computes the new estimate based on measurement `z` and returns it
without altering the state of the filter.
Parameters
----------
z : (dim_z, 1): array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
Returns
-------
(x, P) : tuple
State vector and covariance array of the update.
"""
if z is None:
return self.x, self.P
z = reshape_z(z, self.dim_z, self.x.ndim)
R = self.R
H = self.H
P = self.P
x = self.x
# error (residual) between measurement and prediction
y = z - dot(H, x)
# common subexpression for speed
PHT = dot(P, H.T)
# project system uncertainty into measurement space
S = dot(H, PHT) + R
# map system uncertainty into kalman gain
K = dot(PHT, self.inv(S))
# predict new x with residual scaled by the kalman gain
x = x + dot(K, y)
# P = (I-KH)P(I-KH)' + KRK'
I_KH = self._I - dot(K, H)
P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)
return x, P
|
Computes the new estimate based on measurement `z` and returns it
without altering the state of the filter.
Parameters
----------
z : (dim_z, 1) array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
Returns
-------
(x, P) : tuple
State vector and covariance array of the update.
|
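Because the update never mutates the filter, it is handy for what-if checks. A short usage sketch, assuming the method belongs to a filterpy-style KalmanFilter (the toy model and numbers are illustrative):

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([[0.], [0.]])          # state: position, velocity
kf.F = np.array([[1., 1.], [0., 1.]])  # constant-velocity transition
kf.H = np.array([[1., 0.]])            # measure position only
kf.P *= 10.                            # inflated initial uncertainty
kf.R = np.array([[2.]])                # measurement noise

x_new, P_new = kf.get_update(z=1.3)    # candidate posterior
assert np.allclose(kf.x, 0.)           # the filter itself is untouched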
def request(self, url,
            json="",
            data="",
            username="",
            password="",
            headers=None,
            timeout=30):
"""This is overridden on module initialization. This function will make
an HTTP POST to a given url. Either json/data will be what is posted to
the end point. he HTTP request needs to be basicAuth when username and
password are provided. a headers dict maybe provided,
whatever the values are should be applied.
Args:
url (str): url to send the POST
json (dict, optional): Dict of the JSON to POST
data (dict, optional): Dict, presumed flat structure of
key/value of request to place as
www-form
username (str, optional): Username for basic auth. Must be
uncluded as part of password.
password (str, optional): Password for basic auth. Must be
included as part of username.
headers (dict, optional): Key/Value pairs of headers to include
Returns:
str: Raw request placed
str: Raw response received
int: HTTP status code, eg 200,404,401
dict: Key/Value pairs of the headers received.
:param timout:
"""
raise NotImplementedError('request of HTTPClient should have been '
'overridden on initialization. '
'Otherwise, can be overridden to '
'supply your own post method')
|
This is overridden on module initialization. This function will make
an HTTP POST to a given url. Either json/data will be what is posted to
the end point. The HTTP request needs to be basicAuth when username and
password are provided. A headers dict may be provided, and
whatever the values are should be applied.
Args:
    url (str): url to send the POST
    json (dict, optional): Dict of the JSON to POST
    data (dict, optional): Dict, presumed flat structure of
                           key/value of request to place as
                           www-form
    username (str, optional): Username for basic auth. Must be
                              included as part of password.
    password (str, optional): Password for basic auth. Must be
                              included as part of username.
    headers (dict, optional): Key/Value pairs of headers to include
    timeout (int, optional): Request timeout in seconds. Defaults to 30.
Returns:
    str: Raw request placed
    str: Raw response received
    int: HTTP status code, eg 200, 404, 401
    dict: Key/Value pairs of the headers received.
|
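One possible override, sketched with the requests library (requests, and the exact return shape, are assumptions consistent with the docstring above, not the project's canonical implementation):

import requests

def request(url, json="", data="", username="", password="",
            headers=None, timeout=30):
    # Use basic auth only when both credentials are supplied.
    auth = (username, password) if username and password else None
    resp = requests.post(url,
                         json=json or None,
                         data=data or None,
                         auth=auth,
                         headers=headers,
                         timeout=timeout)
    raw_request = resp.request.body or ""
    return raw_request, resp.text, resp.status_code, dict(resp.headers)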
def interlink_translated_content(generator):
'''Make translations link to the native locations
for generators that may contain translated content
'''
inspector = GeneratorInspector(generator)
for content in inspector.all_contents():
interlink_translations(content)
|
Make translations link to the native locations
for generators that may contain translated content
|
def count_subgraph_sizes(graph: BELGraph, annotation: str = 'Subgraph') -> Counter[int]:
"""Count the number of nodes in each subgraph induced by an annotation.
    :param graph: A BEL graph
    :param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: A dictionary from {annotation value: number of nodes}
"""
return count_dict_values(group_nodes_by_annotation(graph, annotation))
|
Count the number of nodes in each subgraph induced by an annotation.
:param graph: A BEL graph
:param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: A dictionary from {annotation value: number of nodes}
|
def start_engine(self):
'''
Start the child processes (one per device OS)
'''
if self.disable_security is True:
log.warning('***Not starting the authenticator process due to disable_security being set to True***')
else:
log.debug('Generating the private key')
self.__priv_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
log.debug('Generating the signing key')
self.__signing_key = nacl.signing.SigningKey.generate()
# start the keepalive thread for the auth sub-process
self._processes.append(self._start_auth_proc())
log.debug('Starting the internal proxy')
proc = self._start_pub_px_proc()
self._processes.append(proc)
# publisher process start
pub_id = 0
for pub in self.publisher:
publisher_type, publisher_opts = list(pub.items())[0]
proc = self._start_pub_proc(publisher_type,
publisher_opts,
pub_id)
self._processes.append(proc)
pub_id += 1
# device process start
log.info('Starting child processes for each device type')
started_os_proc = []
for device_os, device_config in self.config_dict.items():
if not self._whitelist_blacklist(device_os):
log.debug('Not starting process for %s (whitelist-blacklist logic)', device_os)
# Ignore devices that are not in the whitelist (if defined),
# or those operating systems that are on the blacklist.
# This way we can prevent starting unwanted sub-processes.
continue
log.debug('Will start %d worker process(es) for %s', self.device_worker_processes, device_os)
for proc_index in range(self.device_worker_processes):
self._processes.append(self._start_dev_proc(device_os,
device_config))
started_os_proc.append(device_os)
# start the server process
self._processes.append(self._start_srv_proc(started_os_proc))
# start listener process
for lst in self.listener:
listener_type, listener_opts = list(lst.items())[0]
proc = self._start_lst_proc(listener_type,
listener_opts)
self._processes.append(proc)
thread = threading.Thread(target=self._check_children)
thread.start()
|
Start the child processes (one per device OS)
|
def trigger_show_by_id(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/triggers#getting-triggers"
api_path = "/api/v2/triggers/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/triggers#getting-triggers
|
def save_screenshot(driver, name, folder=None):
"""
Saves a screenshot to the current directory (or to a subfolder if provided)
If the folder provided doesn't exist, it will get created.
The screenshot will be in PNG format.
"""
if "." not in name:
name = name + ".png"
if folder:
abs_path = os.path.abspath('.')
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
screenshot_path = "%s/%s" % (file_path, name)
else:
screenshot_path = name
try:
element = driver.find_element_by_tag_name('body')
element_png = element.screenshot_as_png
with open(screenshot_path, "wb") as file:
file.write(element_png)
except Exception:
if driver:
driver.get_screenshot_as_file(screenshot_path)
else:
pass
|
Saves a screenshot to the current directory (or to a subfolder if provided)
If the folder provided doesn't exist, it will get created.
The screenshot will be in PNG format.
|
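Hypothetical usage with Selenium's Chrome driver (selenium and a local chromedriver are assumptions; save_screenshot is the helper above):

from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com")
save_screenshot(driver, "home", folder="screenshots")  # -> ./screenshots/home.png
driver.quit()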