text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def create_hotkey(self, folder, description, modifiers, key, contents):
"""
Create a text hotkey
Usage: C{engine.create_hotkey(folder, description, modifiers, key, contents)}
When the given hotkey is pressed, it will be replaced with the given
text. Modifiers must be given as a list of strings, with the following
values permitted:
<ctrl>
<alt>
<super>
<hyper>
<meta>
<shift>
The key must be an unshifted character (i.e. lowercase)
@param folder: folder to place the abbreviation in, retrieved using C{engine.get_folder()}
@param description: description for the phrase
@param modifiers: modifiers to use with the hotkey (as a list)
@param key: the hotkey
@param contents: the expansion text
@raise Exception: if the specified hotkey is not unique
"""
modifiers.sort()
if not self.configManager.check_hotkey_unique(modifiers, key, None, None):
raise Exception("The specified hotkey and modifier combination is already in use")
self.monitor.suspend()
p = model.Phrase(description, contents)
p.modes.append(model.TriggerMode.HOTKEY)
p.set_hotkey(modifiers, key)
folder.add_item(p)
p.persist()
self.monitor.unsuspend()
self.configManager.config_altered(False) | [
"def",
"create_hotkey",
"(",
"self",
",",
"folder",
",",
"description",
",",
"modifiers",
",",
"key",
",",
"contents",
")",
":",
"modifiers",
".",
"sort",
"(",
")",
"if",
"not",
"self",
".",
"configManager",
".",
"check_hotkey_unique",
"(",
"modifiers",
",",
"key",
",",
"None",
",",
"None",
")",
":",
"raise",
"Exception",
"(",
"\"The specified hotkey and modifier combination is already in use\"",
")",
"self",
".",
"monitor",
".",
"suspend",
"(",
")",
"p",
"=",
"model",
".",
"Phrase",
"(",
"description",
",",
"contents",
")",
"p",
".",
"modes",
".",
"append",
"(",
"model",
".",
"TriggerMode",
".",
"HOTKEY",
")",
"p",
".",
"set_hotkey",
"(",
"modifiers",
",",
"key",
")",
"folder",
".",
"add_item",
"(",
"p",
")",
"p",
".",
"persist",
"(",
")",
"self",
".",
"monitor",
".",
"unsuspend",
"(",
")",
"self",
".",
"configManager",
".",
"config_altered",
"(",
"False",
")"
] | 37.526316 | 23.052632 |
def visitExponentExpression(self, ctx):
"""
expression: expression EXPONENT expression
"""
arg1 = conversions.to_decimal(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_decimal(self.visit(ctx.expression(1)), self._eval_context)
return conversions.to_decimal(decimal_pow(arg1, arg2), ctx) | [
"def",
"visitExponentExpression",
"(",
"self",
",",
"ctx",
")",
":",
"arg1",
"=",
"conversions",
".",
"to_decimal",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"arg2",
"=",
"conversions",
".",
"to_decimal",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"return",
"conversions",
".",
"to_decimal",
"(",
"decimal_pow",
"(",
"arg1",
",",
"arg2",
")",
",",
"ctx",
")"
] | 50.571429 | 19.142857 |
def _parse_sheet(workbook, sheet):
"""
The universal spreadsheet parser. Parse chron or paleo tables of type ensemble/model/summary.
:param str name: Filename
:param obj workbook: Excel Workbook
:param dict sheet: Sheet path and naming info
:return dict dict: Table metadata and numeric data
"""
logger_excel.info("enter parse_sheet: {}".format(sheet["old_name"]))
# Markers to track where we are on the sheet
ensemble_on = False
var_header_done = False
metadata_on = False
metadata_done = False
data_on = False
notes = False
# Open the sheet from the workbook
temp_sheet = workbook.sheet_by_name(sheet["old_name"])
filename = sheet["filename"]
# Store table metadata and numeric data separately
table_name = "{}DataTableName".format(sheet["paleo_chron"])
# Organize our root table data
table_metadata = OrderedDict()
table_metadata[table_name] = sheet["new_name"]
table_metadata['filename'] = filename
table_metadata['missingValue'] = 'nan'
if "ensemble" in sheet["new_name"]:
ensemble_on = True
# Store all CSV in here by rows
table_data = {filename: []}
# Master list of all column metadata
column_metadata = []
# Index tracks which cells are being parsed
num_col = 0
num_row = 0
nrows = temp_sheet.nrows
col_total = 0
# Tracks which "number" each metadata column is assigned
col_add_ct = 1
header_keys = []
variable_keys = []
variable_keys_lower = []
mv = ""
try:
# Loop for every row in the sheet
for i in range(0, nrows):
# Hold the contents of the current cell
cell = temp_sheet.cell_value(num_row, num_col)
row = temp_sheet.row(num_row)
# Skip all template lines
if isinstance(cell, str):
# Note and missing value entries are rogue. They are not close to the other data entries.
if cell.lower().strip() not in EXCEL_TEMPLATE:
if "notes" in cell.lower() and not metadata_on:
# Store at the root table level
nt = temp_sheet.cell_value(num_row, 1)
if nt not in EXCEL_TEMPLATE:
table_metadata["notes"] = nt
elif cell.lower().strip() in ALTS_MV:
# Store at the root table level and in our function
mv = temp_sheet.cell_value(num_row, 1)
# Add if not placeholder value
if mv not in EXCEL_TEMPLATE:
table_metadata["missingValue"] = mv
# Variable template header row
elif cell.lower() in EXCEL_HEADER and not metadata_on and not data_on:
# Grab the header line
row = temp_sheet.row(num_row)
header_keys = _get_header_keys(row)
# Turn on the marker
var_header_done = True
# Data section (bottom of sheet)
elif data_on:
# Parse the row, clean, and add to table_data
table_data = _parse_sheet_data_row(temp_sheet, num_row, col_total, table_data, filename, mv)
# Metadata section. (top)
elif metadata_on:
# Reached an empty cell while parsing metadata. Mark the end of the section.
if cell in EMPTY:
metadata_on = False
metadata_done = True
# Create a list of all the variable names found
for entry in column_metadata:
try:
# var keys is used as the variableName entry in each column's metadata
variable_keys.append(entry["variableName"].strip())
# var keys lower is used for comparing and finding the data header row
variable_keys_lower.append(entry["variableName"].lower().strip())
except KeyError:
# missing a variableName key
pass
# Not at the end of the section yet. Parse the metadata
else:
# Get the row data
row = temp_sheet.row(num_row)
# Get column metadata
col_tmp = _compile_column_metadata(row, header_keys, col_add_ct)
# Append to master list
column_metadata.append(col_tmp)
col_add_ct += 1
# Variable metadata, if variable header exists
elif var_header_done and not metadata_done:
# Start piecing column metadata together with their respective variable keys
metadata_on = True
# Get the row data
row = temp_sheet.row(num_row)
# Get column metadata
col_tmp = _compile_column_metadata(row, header_keys, col_add_ct)
# Append to master list
column_metadata.append(col_tmp)
col_add_ct += 1
# Variable metadata, if variable header does not exist
elif not var_header_done and not metadata_done and cell:
# LiPD Version 1.1 and earlier: Chronology sheets don't have variable headers
# We could blindly parse, but without a header row_num we wouldn't know where
# to save the metadata
# Play it safe and assume data for first column only: variable name
metadata_on = True
# Get the row data
row = temp_sheet.row(num_row)
# Get column metadata
col_tmp = _compile_column_metadata(row, header_keys, col_add_ct)
# Append to master list
column_metadata.append(col_tmp)
col_add_ct += 1
# Data variable header row. Column metadata exists and metadata_done marker is on.
# This is where we compare top section variableNames to bottom section variableNames to see if
# we need to start parsing the column values
else:
try:
# Clean up variable_keys_lower so we all variable names change from "age(yrs BP)" to "age"
# Units in parenthesis make it too difficult to compare variables. Remove them.
row = _rm_units_from_var_names_multi(row)
if metadata_done and any(i in row for i in variable_keys_lower):
data_on = True
# Take the difference of the two lists. If anything exists, then that's a problem
__compare_vars(row, variable_keys_lower, sheet["old_name"])
# Ensemble columns are counted differently.
if ensemble_on:
# Get the next row, and count the data cells.
col_total = len(temp_sheet.row(num_row+1))
# If there's an empty row, between, then try the next row.
if col_total < 2:
col_total = temp_sheet.row(num_row + 2)
try:
ens_cols = []
[ens_cols.append(i+1) for i in range(0, col_total-1)]
column_metadata[1]["number"] = ens_cols
except IndexError:
logger_excel.debug("excel: parse_sheet: unable to add ensemble 'number' key")
except KeyError:
logger_excel.debug("excel: parse_sheet: unable to add ensemble 'number' list at key")
# All other cass, columns are the length of column_metadata
else:
col_total = len(column_metadata)
except AttributeError:
pass
# cell is not a string, and lower() was not a valid call.
# If this is a numeric cell, 99% chance it's parsing the data columns.
elif isinstance(cell, float) or isinstance(cell, int):
if data_on or metadata_done:
# Parse the row, clean, and add to table_data
table_data = _parse_sheet_data_row(temp_sheet, num_row, col_total, table_data, filename, mv)
# Move on to the next row
num_row += 1
table_metadata["columns"] = column_metadata
except IndexError as e:
logger_excel.debug("parse_sheet: IndexError: sheet: {}, row_num: {}, col_num: {}, {}".format(sheet, num_row, num_col, e))
# If there isn't any data in this sheet, and nothing was parsed, don't let this
# move forward to final output.
if not table_data[filename]:
table_data = None
table_metadata = None
logger_excel.info("exit parse_sheet")
return table_metadata, table_data | [
"def",
"_parse_sheet",
"(",
"workbook",
",",
"sheet",
")",
":",
"logger_excel",
".",
"info",
"(",
"\"enter parse_sheet: {}\"",
".",
"format",
"(",
"sheet",
"[",
"\"old_name\"",
"]",
")",
")",
"# Markers to track where we are on the sheet",
"ensemble_on",
"=",
"False",
"var_header_done",
"=",
"False",
"metadata_on",
"=",
"False",
"metadata_done",
"=",
"False",
"data_on",
"=",
"False",
"notes",
"=",
"False",
"# Open the sheet from the workbook",
"temp_sheet",
"=",
"workbook",
".",
"sheet_by_name",
"(",
"sheet",
"[",
"\"old_name\"",
"]",
")",
"filename",
"=",
"sheet",
"[",
"\"filename\"",
"]",
"# Store table metadata and numeric data separately",
"table_name",
"=",
"\"{}DataTableName\"",
".",
"format",
"(",
"sheet",
"[",
"\"paleo_chron\"",
"]",
")",
"# Organize our root table data",
"table_metadata",
"=",
"OrderedDict",
"(",
")",
"table_metadata",
"[",
"table_name",
"]",
"=",
"sheet",
"[",
"\"new_name\"",
"]",
"table_metadata",
"[",
"'filename'",
"]",
"=",
"filename",
"table_metadata",
"[",
"'missingValue'",
"]",
"=",
"'nan'",
"if",
"\"ensemble\"",
"in",
"sheet",
"[",
"\"new_name\"",
"]",
":",
"ensemble_on",
"=",
"True",
"# Store all CSV in here by rows",
"table_data",
"=",
"{",
"filename",
":",
"[",
"]",
"}",
"# Master list of all column metadata",
"column_metadata",
"=",
"[",
"]",
"# Index tracks which cells are being parsed",
"num_col",
"=",
"0",
"num_row",
"=",
"0",
"nrows",
"=",
"temp_sheet",
".",
"nrows",
"col_total",
"=",
"0",
"# Tracks which \"number\" each metadata column is assigned",
"col_add_ct",
"=",
"1",
"header_keys",
"=",
"[",
"]",
"variable_keys",
"=",
"[",
"]",
"variable_keys_lower",
"=",
"[",
"]",
"mv",
"=",
"\"\"",
"try",
":",
"# Loop for every row in the sheet",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"nrows",
")",
":",
"# Hold the contents of the current cell",
"cell",
"=",
"temp_sheet",
".",
"cell_value",
"(",
"num_row",
",",
"num_col",
")",
"row",
"=",
"temp_sheet",
".",
"row",
"(",
"num_row",
")",
"# Skip all template lines",
"if",
"isinstance",
"(",
"cell",
",",
"str",
")",
":",
"# Note and missing value entries are rogue. They are not close to the other data entries.",
"if",
"cell",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"not",
"in",
"EXCEL_TEMPLATE",
":",
"if",
"\"notes\"",
"in",
"cell",
".",
"lower",
"(",
")",
"and",
"not",
"metadata_on",
":",
"# Store at the root table level",
"nt",
"=",
"temp_sheet",
".",
"cell_value",
"(",
"num_row",
",",
"1",
")",
"if",
"nt",
"not",
"in",
"EXCEL_TEMPLATE",
":",
"table_metadata",
"[",
"\"notes\"",
"]",
"=",
"nt",
"elif",
"cell",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"in",
"ALTS_MV",
":",
"# Store at the root table level and in our function",
"mv",
"=",
"temp_sheet",
".",
"cell_value",
"(",
"num_row",
",",
"1",
")",
"# Add if not placeholder value",
"if",
"mv",
"not",
"in",
"EXCEL_TEMPLATE",
":",
"table_metadata",
"[",
"\"missingValue\"",
"]",
"=",
"mv",
"# Variable template header row",
"elif",
"cell",
".",
"lower",
"(",
")",
"in",
"EXCEL_HEADER",
"and",
"not",
"metadata_on",
"and",
"not",
"data_on",
":",
"# Grab the header line",
"row",
"=",
"temp_sheet",
".",
"row",
"(",
"num_row",
")",
"header_keys",
"=",
"_get_header_keys",
"(",
"row",
")",
"# Turn on the marker",
"var_header_done",
"=",
"True",
"# Data section (bottom of sheet)",
"elif",
"data_on",
":",
"# Parse the row, clean, and add to table_data",
"table_data",
"=",
"_parse_sheet_data_row",
"(",
"temp_sheet",
",",
"num_row",
",",
"col_total",
",",
"table_data",
",",
"filename",
",",
"mv",
")",
"# Metadata section. (top)",
"elif",
"metadata_on",
":",
"# Reached an empty cell while parsing metadata. Mark the end of the section.",
"if",
"cell",
"in",
"EMPTY",
":",
"metadata_on",
"=",
"False",
"metadata_done",
"=",
"True",
"# Create a list of all the variable names found",
"for",
"entry",
"in",
"column_metadata",
":",
"try",
":",
"# var keys is used as the variableName entry in each column's metadata",
"variable_keys",
".",
"append",
"(",
"entry",
"[",
"\"variableName\"",
"]",
".",
"strip",
"(",
")",
")",
"# var keys lower is used for comparing and finding the data header row",
"variable_keys_lower",
".",
"append",
"(",
"entry",
"[",
"\"variableName\"",
"]",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
")",
"except",
"KeyError",
":",
"# missing a variableName key",
"pass",
"# Not at the end of the section yet. Parse the metadata",
"else",
":",
"# Get the row data",
"row",
"=",
"temp_sheet",
".",
"row",
"(",
"num_row",
")",
"# Get column metadata",
"col_tmp",
"=",
"_compile_column_metadata",
"(",
"row",
",",
"header_keys",
",",
"col_add_ct",
")",
"# Append to master list",
"column_metadata",
".",
"append",
"(",
"col_tmp",
")",
"col_add_ct",
"+=",
"1",
"# Variable metadata, if variable header exists",
"elif",
"var_header_done",
"and",
"not",
"metadata_done",
":",
"# Start piecing column metadata together with their respective variable keys",
"metadata_on",
"=",
"True",
"# Get the row data",
"row",
"=",
"temp_sheet",
".",
"row",
"(",
"num_row",
")",
"# Get column metadata",
"col_tmp",
"=",
"_compile_column_metadata",
"(",
"row",
",",
"header_keys",
",",
"col_add_ct",
")",
"# Append to master list",
"column_metadata",
".",
"append",
"(",
"col_tmp",
")",
"col_add_ct",
"+=",
"1",
"# Variable metadata, if variable header does not exist",
"elif",
"not",
"var_header_done",
"and",
"not",
"metadata_done",
"and",
"cell",
":",
"# LiPD Version 1.1 and earlier: Chronology sheets don't have variable headers",
"# We could blindly parse, but without a header row_num we wouldn't know where",
"# to save the metadata",
"# Play it safe and assume data for first column only: variable name",
"metadata_on",
"=",
"True",
"# Get the row data",
"row",
"=",
"temp_sheet",
".",
"row",
"(",
"num_row",
")",
"# Get column metadata",
"col_tmp",
"=",
"_compile_column_metadata",
"(",
"row",
",",
"header_keys",
",",
"col_add_ct",
")",
"# Append to master list",
"column_metadata",
".",
"append",
"(",
"col_tmp",
")",
"col_add_ct",
"+=",
"1",
"# Data variable header row. Column metadata exists and metadata_done marker is on.",
"# This is where we compare top section variableNames to bottom section variableNames to see if",
"# we need to start parsing the column values",
"else",
":",
"try",
":",
"# Clean up variable_keys_lower so we all variable names change from \"age(yrs BP)\" to \"age\"",
"# Units in parenthesis make it too difficult to compare variables. Remove them.",
"row",
"=",
"_rm_units_from_var_names_multi",
"(",
"row",
")",
"if",
"metadata_done",
"and",
"any",
"(",
"i",
"in",
"row",
"for",
"i",
"in",
"variable_keys_lower",
")",
":",
"data_on",
"=",
"True",
"# Take the difference of the two lists. If anything exists, then that's a problem",
"__compare_vars",
"(",
"row",
",",
"variable_keys_lower",
",",
"sheet",
"[",
"\"old_name\"",
"]",
")",
"# Ensemble columns are counted differently.",
"if",
"ensemble_on",
":",
"# Get the next row, and count the data cells.",
"col_total",
"=",
"len",
"(",
"temp_sheet",
".",
"row",
"(",
"num_row",
"+",
"1",
")",
")",
"# If there's an empty row, between, then try the next row.",
"if",
"col_total",
"<",
"2",
":",
"col_total",
"=",
"temp_sheet",
".",
"row",
"(",
"num_row",
"+",
"2",
")",
"try",
":",
"ens_cols",
"=",
"[",
"]",
"[",
"ens_cols",
".",
"append",
"(",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"col_total",
"-",
"1",
")",
"]",
"column_metadata",
"[",
"1",
"]",
"[",
"\"number\"",
"]",
"=",
"ens_cols",
"except",
"IndexError",
":",
"logger_excel",
".",
"debug",
"(",
"\"excel: parse_sheet: unable to add ensemble 'number' key\"",
")",
"except",
"KeyError",
":",
"logger_excel",
".",
"debug",
"(",
"\"excel: parse_sheet: unable to add ensemble 'number' list at key\"",
")",
"# All other cass, columns are the length of column_metadata",
"else",
":",
"col_total",
"=",
"len",
"(",
"column_metadata",
")",
"except",
"AttributeError",
":",
"pass",
"# cell is not a string, and lower() was not a valid call.",
"# If this is a numeric cell, 99% chance it's parsing the data columns.",
"elif",
"isinstance",
"(",
"cell",
",",
"float",
")",
"or",
"isinstance",
"(",
"cell",
",",
"int",
")",
":",
"if",
"data_on",
"or",
"metadata_done",
":",
"# Parse the row, clean, and add to table_data",
"table_data",
"=",
"_parse_sheet_data_row",
"(",
"temp_sheet",
",",
"num_row",
",",
"col_total",
",",
"table_data",
",",
"filename",
",",
"mv",
")",
"# Move on to the next row",
"num_row",
"+=",
"1",
"table_metadata",
"[",
"\"columns\"",
"]",
"=",
"column_metadata",
"except",
"IndexError",
"as",
"e",
":",
"logger_excel",
".",
"debug",
"(",
"\"parse_sheet: IndexError: sheet: {}, row_num: {}, col_num: {}, {}\"",
".",
"format",
"(",
"sheet",
",",
"num_row",
",",
"num_col",
",",
"e",
")",
")",
"# If there isn't any data in this sheet, and nothing was parsed, don't let this",
"# move forward to final output.",
"if",
"not",
"table_data",
"[",
"filename",
"]",
":",
"table_data",
"=",
"None",
"table_metadata",
"=",
"None",
"logger_excel",
".",
"info",
"(",
"\"exit parse_sheet\"",
")",
"return",
"table_metadata",
",",
"table_data"
] | 44.420091 | 25.917808 |
def _get_token():
'''
Get an auth token
'''
username = __opts__.get('rallydev', {}).get('username', None)
password = __opts__.get('rallydev', {}).get('password', None)
path = 'https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize'
result = salt.utils.http.query(
path,
decode=True,
decode_type='json',
text=True,
status=True,
username=username,
password=password,
cookies=True,
persist_session=True,
opts=__opts__,
)
if 'dict' not in result:
return None
return result['dict']['OperationResult']['SecurityToken'] | [
"def",
"_get_token",
"(",
")",
":",
"username",
"=",
"__opts__",
".",
"get",
"(",
"'rallydev'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'username'",
",",
"None",
")",
"password",
"=",
"__opts__",
".",
"get",
"(",
"'rallydev'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'password'",
",",
"None",
")",
"path",
"=",
"'https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize'",
"result",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"path",
",",
"decode",
"=",
"True",
",",
"decode_type",
"=",
"'json'",
",",
"text",
"=",
"True",
",",
"status",
"=",
"True",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"cookies",
"=",
"True",
",",
"persist_session",
"=",
"True",
",",
"opts",
"=",
"__opts__",
",",
")",
"if",
"'dict'",
"not",
"in",
"result",
":",
"return",
"None",
"return",
"result",
"[",
"'dict'",
"]",
"[",
"'OperationResult'",
"]",
"[",
"'SecurityToken'",
"]"
] | 27.434783 | 22.130435 |
def parse_tile_name(name):
"""
Parses and verifies tile name.
:param name: class input parameter `tile_name`
:type name: str
:return: parsed tile name
:rtype: str
"""
tile_name = name.lstrip('T0')
if len(tile_name) == 4:
tile_name = '0' + tile_name
if len(tile_name) != 5:
raise ValueError('Invalid tile name {}'.format(name))
return tile_name | [
"def",
"parse_tile_name",
"(",
"name",
")",
":",
"tile_name",
"=",
"name",
".",
"lstrip",
"(",
"'T0'",
")",
"if",
"len",
"(",
"tile_name",
")",
"==",
"4",
":",
"tile_name",
"=",
"'0'",
"+",
"tile_name",
"if",
"len",
"(",
"tile_name",
")",
"!=",
"5",
":",
"raise",
"ValueError",
"(",
"'Invalid tile name {}'",
".",
"format",
"(",
"name",
")",
")",
"return",
"tile_name"
] | 29.466667 | 11.866667 |
def data(self, index, role=QtCore.Qt.UserRole, mode=BuildMode):
"""Used by the view to determine data to present
See :qtdoc:`QAbstractItemModel<QAbstractItemModel.data>`,
and :qtdoc:`subclassing<qabstractitemmodel.subclassing>`
"""
if role == CursorRole:
if index.isValid():
if mode == BuildMode:
return cursors.openHand()
elif mode == AutoParamMode:
return cursors.pointyHand()
else:
raise ValueError("Invalid stimulus edit mode")
else:
return QtGui.QCursor(QtCore.Qt.ArrowCursor)
if not index.isValid():
return None
if role == QtCore.Qt.DisplayRole:
component = self._stim.component(index.row(),index.column())
return component.__class__.__name__
elif role == QtCore.Qt.SizeHintRole:
component = self._stim.component(index.row(),index.column())
return component.duration() #* PIXELS_PER_MS * 1000
elif role == QtCore.Qt.UserRole or role == QtCore.Qt.UserRole+1: #return the whole python object
if self._stim.columnCountForRow(index.row()) > index.column():
component = self._stim.component(index.row(),index.column())
if role == QtCore.Qt.UserRole:
component = wrapComponent(component)
else:
component = None
return component | [
"def",
"data",
"(",
"self",
",",
"index",
",",
"role",
"=",
"QtCore",
".",
"Qt",
".",
"UserRole",
",",
"mode",
"=",
"BuildMode",
")",
":",
"if",
"role",
"==",
"CursorRole",
":",
"if",
"index",
".",
"isValid",
"(",
")",
":",
"if",
"mode",
"==",
"BuildMode",
":",
"return",
"cursors",
".",
"openHand",
"(",
")",
"elif",
"mode",
"==",
"AutoParamMode",
":",
"return",
"cursors",
".",
"pointyHand",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid stimulus edit mode\"",
")",
"else",
":",
"return",
"QtGui",
".",
"QCursor",
"(",
"QtCore",
".",
"Qt",
".",
"ArrowCursor",
")",
"if",
"not",
"index",
".",
"isValid",
"(",
")",
":",
"return",
"None",
"if",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
":",
"component",
"=",
"self",
".",
"_stim",
".",
"component",
"(",
"index",
".",
"row",
"(",
")",
",",
"index",
".",
"column",
"(",
")",
")",
"return",
"component",
".",
"__class__",
".",
"__name__",
"elif",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"SizeHintRole",
":",
"component",
"=",
"self",
".",
"_stim",
".",
"component",
"(",
"index",
".",
"row",
"(",
")",
",",
"index",
".",
"column",
"(",
")",
")",
"return",
"component",
".",
"duration",
"(",
")",
"#* PIXELS_PER_MS * 1000",
"elif",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"UserRole",
"or",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"UserRole",
"+",
"1",
":",
"#return the whole python object",
"if",
"self",
".",
"_stim",
".",
"columnCountForRow",
"(",
"index",
".",
"row",
"(",
")",
")",
">",
"index",
".",
"column",
"(",
")",
":",
"component",
"=",
"self",
".",
"_stim",
".",
"component",
"(",
"index",
".",
"row",
"(",
")",
",",
"index",
".",
"column",
"(",
")",
")",
"if",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"UserRole",
":",
"component",
"=",
"wrapComponent",
"(",
"component",
")",
"else",
":",
"component",
"=",
"None",
"return",
"component"
] | 44.939394 | 18.242424 |
def feed_line(self, line):
"""Feeds one line of input into the reader machine. This method is
a generator that yields all top-level S-expressions that have been
recognized on this line (including multi-line expressions whose last
character is on this line).
"""
self.line += 1
pos = 0
while pos < len(line):
loc_start = TextLocationSingle(self.filename, self.line, pos + 1)
if self.state is State.NORMAL:
item_re = RE_TOKEN
thing = 'token'
elif self.state is State.STRING:
item_re = RE_STRING_ITEM
thing = 'escape sequence'
elif self.state is State.BINARRAY:
item_re = RE_BINARRAY_ITEM[self.binarray_base]
thing = 'binarray item'
else:
assert 0
match = item_re.match(line, pos)
if not match:
raise ReadError(f'{loc_start}: unknown {thing}')
pos = match.end()
loc_end = TextLocationSingle(self.filename, self.line, pos + 1)
loc = loc_start - loc_end
if match['ws_error'] is not None:
raise ReadError(f'{loc_end}: no whitespace after token')
if self.state is State.NORMAL:
# Normal state -- read tokens.
if match['lparen'] is not None:
self.stack.append(StackEntryList(loc_start, []))
elif match['rparen'] is not None:
if not self.stack:
raise ReadError(f'{loc}: unmatched closing paren')
top = self.stack.pop()
if not isinstance(top, StackEntryList):
top.raise_unclosed_error()
yield from self._feed_node(top.items, top.start - loc_end)
elif match['symbol'] is not None:
value = Symbol(match['symbol'])
yield from self._feed_node(value, loc)
elif match['sexpr_comment'] is not None:
self.stack.append(StackEntryComment(loc))
elif match['bool_value'] is not None:
value = match['bool_value'] == '@true'
yield from self._feed_node(value, loc)
elif match['nil_value'] is not None:
yield from self._feed_node(None, loc)
elif match['int_or_word'] is not None:
if match['number'] is not None:
value = int(match['number'], 0)
elif match['raw_char'] is not None:
value = ord(match['raw_char'])
elif match['simple_escape'] is not None:
value = ord(ESCAPE_TO_CHAR[match['simple_escape']])
elif match['hex_code'] is not None:
value = int(match['hex_code'], 16)
if value not in range(0x110000):
raise ReadError(
f'{loc}: not a valid unicode codepoint')
else:
assert 0
if match['word_width'] is not None:
width = int(match['word_width'])
if value < 0:
value += 1 << width
if value not in range(1 << width):
raise ReadError(f'{loc}: word value out of range')
value = BinWord(width, value)
yield from self._feed_node(value, loc)
elif match['array_width'] is not None:
self.binarray_base = {
'0b': 2,
'0o': 8,
None: 10,
'0x': 16,
}[match['array_base']]
self.binarray_data = []
self.binarray_width = int(match['array_width'])
self.token_start = loc_start
self.state = State.BINARRAY
elif match['start_quote'] is not None:
self.state = State.STRING
self.token_start = loc_start
self.string_buffer = StringIO()
if match['string_width'] is not None:
self.binarray_width = int(match['string_width'])
else:
self.binarray_width = None
elif self.state is State.STRING:
# Inside a string.
if match['end_quote'] is not None:
self.state = State.NORMAL
value = self.string_buffer.getvalue()
loc = self.token_start - loc_end
if self.binarray_width is not None:
vals = [ord(x) for x in value]
for x in vals:
if x not in range(1 << self.binarray_width):
raise ReadError(
f'{loc}: character code out of range')
value = BinArray(vals, width=self.binarray_width)
yield from self._feed_node(value, loc)
elif match['raw_chars'] is not None:
self.string_buffer.write(match['raw_chars'])
elif match['simple_escape'] is not None:
c = ESCAPE_TO_CHAR[match['simple_escape']]
self.string_buffer.write(c)
elif match['hex_code'] is not None:
code = int(match['hex_code'], 16)
if code not in range(0x110000):
raise ReadError(
f'{loc}: not a valid unicode codepoint')
self.string_buffer.write(chr(code))
else:
assert 0
elif self.state is State.BINARRAY:
# In a BinArray.
if match['rparen'] is not None:
self.state = State.NORMAL
value = BinArray(self.binarray_data,
width=self.binarray_width)
loc = self.token_start - loc_end
yield from self._feed_node(value, loc)
elif match['digits'] is not None:
value = int(match['digits'], self.binarray_base)
if value < 0:
value += 1 << self.binarray_width
if value not in range(1 << self.binarray_width):
raise ReadError(f'{loc}: word value out of range')
self.binarray_data.append(value)
else:
assert 0 | [
"def",
"feed_line",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"line",
"+=",
"1",
"pos",
"=",
"0",
"while",
"pos",
"<",
"len",
"(",
"line",
")",
":",
"loc_start",
"=",
"TextLocationSingle",
"(",
"self",
".",
"filename",
",",
"self",
".",
"line",
",",
"pos",
"+",
"1",
")",
"if",
"self",
".",
"state",
"is",
"State",
".",
"NORMAL",
":",
"item_re",
"=",
"RE_TOKEN",
"thing",
"=",
"'token'",
"elif",
"self",
".",
"state",
"is",
"State",
".",
"STRING",
":",
"item_re",
"=",
"RE_STRING_ITEM",
"thing",
"=",
"'escape sequence'",
"elif",
"self",
".",
"state",
"is",
"State",
".",
"BINARRAY",
":",
"item_re",
"=",
"RE_BINARRAY_ITEM",
"[",
"self",
".",
"binarray_base",
"]",
"thing",
"=",
"'binarray item'",
"else",
":",
"assert",
"0",
"match",
"=",
"item_re",
".",
"match",
"(",
"line",
",",
"pos",
")",
"if",
"not",
"match",
":",
"raise",
"ReadError",
"(",
"f'{loc_start}: unknown {thing}'",
")",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"loc_end",
"=",
"TextLocationSingle",
"(",
"self",
".",
"filename",
",",
"self",
".",
"line",
",",
"pos",
"+",
"1",
")",
"loc",
"=",
"loc_start",
"-",
"loc_end",
"if",
"match",
"[",
"'ws_error'",
"]",
"is",
"not",
"None",
":",
"raise",
"ReadError",
"(",
"f'{loc_end}: no whitespace after token'",
")",
"if",
"self",
".",
"state",
"is",
"State",
".",
"NORMAL",
":",
"# Normal state -- read tokens.",
"if",
"match",
"[",
"'lparen'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"stack",
".",
"append",
"(",
"StackEntryList",
"(",
"loc_start",
",",
"[",
"]",
")",
")",
"elif",
"match",
"[",
"'rparen'",
"]",
"is",
"not",
"None",
":",
"if",
"not",
"self",
".",
"stack",
":",
"raise",
"ReadError",
"(",
"f'{loc}: unmatched closing paren'",
")",
"top",
"=",
"self",
".",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"isinstance",
"(",
"top",
",",
"StackEntryList",
")",
":",
"top",
".",
"raise_unclosed_error",
"(",
")",
"yield",
"from",
"self",
".",
"_feed_node",
"(",
"top",
".",
"items",
",",
"top",
".",
"start",
"-",
"loc_end",
")",
"elif",
"match",
"[",
"'symbol'",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"Symbol",
"(",
"match",
"[",
"'symbol'",
"]",
")",
"yield",
"from",
"self",
".",
"_feed_node",
"(",
"value",
",",
"loc",
")",
"elif",
"match",
"[",
"'sexpr_comment'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"stack",
".",
"append",
"(",
"StackEntryComment",
"(",
"loc",
")",
")",
"elif",
"match",
"[",
"'bool_value'",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"match",
"[",
"'bool_value'",
"]",
"==",
"'@true'",
"yield",
"from",
"self",
".",
"_feed_node",
"(",
"value",
",",
"loc",
")",
"elif",
"match",
"[",
"'nil_value'",
"]",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"_feed_node",
"(",
"None",
",",
"loc",
")",
"elif",
"match",
"[",
"'int_or_word'",
"]",
"is",
"not",
"None",
":",
"if",
"match",
"[",
"'number'",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"int",
"(",
"match",
"[",
"'number'",
"]",
",",
"0",
")",
"elif",
"match",
"[",
"'raw_char'",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"ord",
"(",
"match",
"[",
"'raw_char'",
"]",
")",
"elif",
"match",
"[",
"'simple_escape'",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"ord",
"(",
"ESCAPE_TO_CHAR",
"[",
"match",
"[",
"'simple_escape'",
"]",
"]",
")",
"elif",
"match",
"[",
"'hex_code'",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"int",
"(",
"match",
"[",
"'hex_code'",
"]",
",",
"16",
")",
"if",
"value",
"not",
"in",
"range",
"(",
"0x110000",
")",
":",
"raise",
"ReadError",
"(",
"f'{loc}: not a valid unicode codepoint'",
")",
"else",
":",
"assert",
"0",
"if",
"match",
"[",
"'word_width'",
"]",
"is",
"not",
"None",
":",
"width",
"=",
"int",
"(",
"match",
"[",
"'word_width'",
"]",
")",
"if",
"value",
"<",
"0",
":",
"value",
"+=",
"1",
"<<",
"width",
"if",
"value",
"not",
"in",
"range",
"(",
"1",
"<<",
"width",
")",
":",
"raise",
"ReadError",
"(",
"f'{loc}: word value out of range'",
")",
"value",
"=",
"BinWord",
"(",
"width",
",",
"value",
")",
"yield",
"from",
"self",
".",
"_feed_node",
"(",
"value",
",",
"loc",
")",
"elif",
"match",
"[",
"'array_width'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"binarray_base",
"=",
"{",
"'0b'",
":",
"2",
",",
"'0o'",
":",
"8",
",",
"None",
":",
"10",
",",
"'0x'",
":",
"16",
",",
"}",
"[",
"match",
"[",
"'array_base'",
"]",
"]",
"self",
".",
"binarray_data",
"=",
"[",
"]",
"self",
".",
"binarray_width",
"=",
"int",
"(",
"match",
"[",
"'array_width'",
"]",
")",
"self",
".",
"token_start",
"=",
"loc_start",
"self",
".",
"state",
"=",
"State",
".",
"BINARRAY",
"elif",
"match",
"[",
"'start_quote'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"state",
"=",
"State",
".",
"STRING",
"self",
".",
"token_start",
"=",
"loc_start",
"self",
".",
"string_buffer",
"=",
"StringIO",
"(",
")",
"if",
"match",
"[",
"'string_width'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"binarray_width",
"=",
"int",
"(",
"match",
"[",
"'string_width'",
"]",
")",
"else",
":",
"self",
".",
"binarray_width",
"=",
"None",
"elif",
"self",
".",
"state",
"is",
"State",
".",
"STRING",
":",
"# Inside a string.",
"if",
"match",
"[",
"'end_quote'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"state",
"=",
"State",
".",
"NORMAL",
"value",
"=",
"self",
".",
"string_buffer",
".",
"getvalue",
"(",
")",
"loc",
"=",
"self",
".",
"token_start",
"-",
"loc_end",
"if",
"self",
".",
"binarray_width",
"is",
"not",
"None",
":",
"vals",
"=",
"[",
"ord",
"(",
"x",
")",
"for",
"x",
"in",
"value",
"]",
"for",
"x",
"in",
"vals",
":",
"if",
"x",
"not",
"in",
"range",
"(",
"1",
"<<",
"self",
".",
"binarray_width",
")",
":",
"raise",
"ReadError",
"(",
"f'{loc}: character code out of range'",
")",
"value",
"=",
"BinArray",
"(",
"vals",
",",
"width",
"=",
"self",
".",
"binarray_width",
")",
"yield",
"from",
"self",
".",
"_feed_node",
"(",
"value",
",",
"loc",
")",
"elif",
"match",
"[",
"'raw_chars'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"string_buffer",
".",
"write",
"(",
"match",
"[",
"'raw_chars'",
"]",
")",
"elif",
"match",
"[",
"'simple_escape'",
"]",
"is",
"not",
"None",
":",
"c",
"=",
"ESCAPE_TO_CHAR",
"[",
"match",
"[",
"'simple_escape'",
"]",
"]",
"self",
".",
"string_buffer",
".",
"write",
"(",
"c",
")",
"elif",
"match",
"[",
"'hex_code'",
"]",
"is",
"not",
"None",
":",
"code",
"=",
"int",
"(",
"match",
"[",
"'hex_code'",
"]",
",",
"16",
")",
"if",
"code",
"not",
"in",
"range",
"(",
"0x110000",
")",
":",
"raise",
"ReadError",
"(",
"f'{loc}: not a valid unicode codepoint'",
")",
"self",
".",
"string_buffer",
".",
"write",
"(",
"chr",
"(",
"code",
")",
")",
"else",
":",
"assert",
"0",
"elif",
"self",
".",
"state",
"is",
"State",
".",
"BINARRAY",
":",
"# In a BinArray.",
"if",
"match",
"[",
"'rparen'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"state",
"=",
"State",
".",
"NORMAL",
"value",
"=",
"BinArray",
"(",
"self",
".",
"binarray_data",
",",
"width",
"=",
"self",
".",
"binarray_width",
")",
"loc",
"=",
"self",
".",
"token_start",
"-",
"loc_end",
"yield",
"from",
"self",
".",
"_feed_node",
"(",
"value",
",",
"loc",
")",
"elif",
"match",
"[",
"'digits'",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"int",
"(",
"match",
"[",
"'digits'",
"]",
",",
"self",
".",
"binarray_base",
")",
"if",
"value",
"<",
"0",
":",
"value",
"+=",
"1",
"<<",
"self",
".",
"binarray_width",
"if",
"value",
"not",
"in",
"range",
"(",
"1",
"<<",
"self",
".",
"binarray_width",
")",
":",
"raise",
"ReadError",
"(",
"f'{loc}: word value out of range'",
")",
"self",
".",
"binarray_data",
".",
"append",
"(",
"value",
")",
"else",
":",
"assert",
"0"
] | 49.748148 | 14.474074 |
def valid_starter_settings(self):
    '''Check that the configured ignition and starter channels are usable.

    Returns True when both channels are in their valid ranges; otherwise
    prints a diagnostic and returns False.
    '''
    cfg = self.gasheli_settings
    # Ignition must be a main output channel (1..8).
    if cfg.ignition_chan <= 0 or cfg.ignition_chan > 8:
        print("Invalid ignition channel %d" % cfg.ignition_chan)
        return False
    # Starter may also live on an auxiliary output (1..14).
    if cfg.starter_chan <= 0 or cfg.starter_chan > 14:
        print("Invalid starter channel %d" % cfg.starter_chan)
        return False
    return True
"def",
"valid_starter_settings",
"(",
"self",
")",
":",
"if",
"self",
".",
"gasheli_settings",
".",
"ignition_chan",
"<=",
"0",
"or",
"self",
".",
"gasheli_settings",
".",
"ignition_chan",
">",
"8",
":",
"print",
"(",
"\"Invalid ignition channel %d\"",
"%",
"self",
".",
"gasheli_settings",
".",
"ignition_chan",
")",
"return",
"False",
"if",
"self",
".",
"gasheli_settings",
".",
"starter_chan",
"<=",
"0",
"or",
"self",
".",
"gasheli_settings",
".",
"starter_chan",
">",
"14",
":",
"print",
"(",
"\"Invalid starter channel %d\"",
"%",
"self",
".",
"gasheli_settings",
".",
"starter_chan",
")",
"return",
"False",
"return",
"True"
] | 55 | 29.222222 |
def export_debug(self, output_path):
    """
    Generate a debug map for the NEO debugger.

    Computes the MD5 digest of the compiled .avm file at *output_path*,
    builds the debug-info JSON via ``self.generate_debug_json`` and writes
    it next to the .avm file with a ``.debug.json`` suffix.

    :param output_path: path to the compiled .avm file
    """
    # Read the binary inside a context manager so the handle is always
    # closed (the original `open(...).read()` leaked the file object).
    with open(output_path, 'rb') as avm_file:
        file_hash = hashlib.md5(avm_file.read()).hexdigest()
    avm_name = os.path.splitext(os.path.basename(output_path))[0]
    json_data = self.generate_debug_json(avm_name, file_hash)
    mapfilename = output_path.replace('.avm', '.debug.json')
    with open(mapfilename, 'w+') as out_file:
        out_file.write(json_data)
"def",
"export_debug",
"(",
"self",
",",
"output_path",
")",
":",
"file_hash",
"=",
"hashlib",
".",
"md5",
"(",
"open",
"(",
"output_path",
",",
"'rb'",
")",
".",
"read",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"avm_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"output_path",
")",
")",
"[",
"0",
"]",
"json_data",
"=",
"self",
".",
"generate_debug_json",
"(",
"avm_name",
",",
"file_hash",
")",
"mapfilename",
"=",
"output_path",
".",
"replace",
"(",
"'.avm'",
",",
"'.debug.json'",
")",
"with",
"open",
"(",
"mapfilename",
",",
"'w+'",
")",
"as",
"out_file",
":",
"out_file",
".",
"write",
"(",
"json_data",
")"
] | 37.307692 | 21.307692 |
def __make_message(self, topic, content):
    """
    Build a message dict with a fresh random uid, the topic and the payload.
    """
    # uuid4().hex is the 32-char hex string without dashes, equivalent to
    # str(uuid4()).replace('-', ''); upper-case it for the wire format.
    uid = uuid.uuid4().hex.upper()
    return {"uid": uid,
            "topic": topic,
            "content": content}
"def",
"__make_message",
"(",
"self",
",",
"topic",
",",
"content",
")",
":",
"return",
"{",
"\"uid\"",
":",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
".",
"upper",
"(",
")",
",",
"\"topic\"",
":",
"topic",
",",
"\"content\"",
":",
"content",
"}"
] | 33 | 6.428571 |
def read_raid(self, raid_config=None):
    """Read the logical drives from the system.

    :param raid_config: None or a dictionary containing target raid
        configuration data. Expected shape:
        raid_config = {'logical_disks': [{'raid_level': 1,
        'size_gb': 100, 'physical_disks': ['6I:1:5'],
        'controller': 'HPE Smart Array P408i-a SR Gen10'}, ...]}
    :returns: A dictionary containing list of logical disks
    """
    self.check_smart_storage_config_ids()
    if raid_config:
        # Read after a create: the caller supplies the raid config.
        return self._post_create_read_raid(raid_config=raid_config)
    # Read after a delete: no input passed by the caller.
    return self._post_delete_read_raid()
"def",
"read_raid",
"(",
"self",
",",
"raid_config",
"=",
"None",
")",
":",
"self",
".",
"check_smart_storage_config_ids",
"(",
")",
"if",
"raid_config",
":",
"# When read called after create raid, user can pass raid config",
"# as a input",
"result",
"=",
"self",
".",
"_post_create_read_raid",
"(",
"raid_config",
"=",
"raid_config",
")",
"else",
":",
"# When read called after delete raid, there will be no input",
"# passed by user then",
"result",
"=",
"self",
".",
"_post_delete_read_raid",
"(",
")",
"return",
"result"
] | 48.227273 | 21.090909 |
def for_data_and_tracer(cls, lens_data, tracer, padded_tracer=None):
    """Choose and build the appropriate fit for the given lens data and tracer.

    The fit class is selected from the galaxies in the tracer: profile-only,
    inversion-only, or combined profile + inversion.

    Parameters
    -----------
    lens_data : lens_data.LensData or lens_data.LensDataHyper
        The lens-images that is fitted.
    tracer : ray_tracing.TracerNonStack
        The tracer, which describes the ray-tracing and strong lens configuration.
    padded_tracer : ray_tracing.Tracer or None
        A tracer with an identical strong lens configuration to the tracer above, but using the lens data's \
        padded grid_stack such that unmasked model-images can be computed.
    """
    has_light = tracer.has_light_profile
    has_pixelization = tracer.has_pixelization
    if has_light and not has_pixelization:
        return LensProfileFit(lens_data=lens_data, tracer=tracer,
                              padded_tracer=padded_tracer)
    if has_pixelization and not has_light:
        # NOTE(review): padded_tracer is deliberately dropped (None) for the
        # inversion fits, matching the original behaviour.
        return LensInversionFit(lens_data=lens_data, tracer=tracer,
                                padded_tracer=None)
    if has_light and has_pixelization:
        return LensProfileInversionFit(lens_data=lens_data, tracer=tracer,
                                       padded_tracer=None)
    raise exc.FittingException('The fit routine did not call a Fit class - check the '
                               'properties of the tracer')
"def",
"for_data_and_tracer",
"(",
"cls",
",",
"lens_data",
",",
"tracer",
",",
"padded_tracer",
"=",
"None",
")",
":",
"if",
"tracer",
".",
"has_light_profile",
"and",
"not",
"tracer",
".",
"has_pixelization",
":",
"return",
"LensProfileFit",
"(",
"lens_data",
"=",
"lens_data",
",",
"tracer",
"=",
"tracer",
",",
"padded_tracer",
"=",
"padded_tracer",
")",
"elif",
"not",
"tracer",
".",
"has_light_profile",
"and",
"tracer",
".",
"has_pixelization",
":",
"return",
"LensInversionFit",
"(",
"lens_data",
"=",
"lens_data",
",",
"tracer",
"=",
"tracer",
",",
"padded_tracer",
"=",
"None",
")",
"elif",
"tracer",
".",
"has_light_profile",
"and",
"tracer",
".",
"has_pixelization",
":",
"return",
"LensProfileInversionFit",
"(",
"lens_data",
"=",
"lens_data",
",",
"tracer",
"=",
"tracer",
",",
"padded_tracer",
"=",
"None",
")",
"else",
":",
"raise",
"exc",
".",
"FittingException",
"(",
"'The fit routine did not call a Fit class - check the '",
"'properties of the tracer'",
")"
] | 58.708333 | 29.833333 |
def _send_file(self, local, remote):
    """send a single file"""
    # Destination in scp's host:path form.
    destination = "%s:%s" % (self.location, remote)
    # Best-effort wait (up to ~10s) for the local file to appear before
    # sending; after that we attempt the copy regardless.
    for _attempt in range(10):
        if os.path.exists(local):
            break
        self.log.debug("waiting for %s" % local)
        time.sleep(1)
    self.log.info("sending %s to %s", local, destination)
    check_output(self.scp_cmd + [local, destination])
"def",
"_send_file",
"(",
"self",
",",
"local",
",",
"remote",
")",
":",
"remote",
"=",
"\"%s:%s\"",
"%",
"(",
"self",
".",
"location",
",",
"remote",
")",
"for",
"i",
"in",
"range",
"(",
"10",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"local",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"waiting for %s\"",
"%",
"local",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"else",
":",
"break",
"self",
".",
"log",
".",
"info",
"(",
"\"sending %s to %s\"",
",",
"local",
",",
"remote",
")",
"check_output",
"(",
"self",
".",
"scp_cmd",
"+",
"[",
"local",
",",
"remote",
"]",
")"
] | 37.909091 | 11.363636 |
def acquire(self):
    """
    Acquire the lock.
    If the lock can't be acquired immediately, retry a specified number of
    times, with a specified wait time.
    """
    # Mutable cell so the nested retry errback can update the count.
    retries = [0]
    self._acquire_start_seconds = self._reactor.seconds()
    def log_lock_acquired(result):
        # Record how long acquisition took, then pass the result through
        # unchanged so the Deferred callback chain is not disturbed.
        self._lock_acquired_seconds = self._reactor.seconds()
        seconds = self._lock_acquired_seconds - self._acquire_start_seconds
        self._log.msg('Acquired lock in {0} seconds'.format(seconds),
                      lock_acquire_time=seconds, **self._log_kwargs)
        return result
    def acquire_lock():
        # One attempt: write our claim, read back all claims, then verify
        # that ours won; on contention fall into lock_not_acquired.
        d = self._write_lock()
        d.addCallback(self._read_lock)
        d.addCallback(self._verify_lock)
        if self._log:
            d.addCallback(log_lock_acquired)
        d.addErrback(lock_not_acquired)
        return d
    def lock_not_acquired(failure):
        # Only retry on the two expected contention errors; anything else
        # propagates to the caller via trap().
        failure.trap(BusyLockError, NoLockClaimsError)
        retries[0] += 1
        if retries[0] <= self._max_retry:
            # Schedule another full attempt after the configured delay.
            return task.deferLater(self._reactor, self._retry_wait, acquire_lock)
        else:
            return failure
    def log_lock_acquire_failure(failure):
        if self._log:
            seconds = self._reactor.seconds() - self._acquire_start_seconds
            self._log.msg(
                'Could not acquire lock in {0} seconds due to {1}'.format(seconds, failure),
                lock_acquire_fail_time=seconds, reason=failure, **self._log_kwargs)
        # Re-raise by returning the failure so callers still see the error.
        return failure
    return acquire_lock().addErrback(log_lock_acquire_failure)
"def",
"acquire",
"(",
"self",
")",
":",
"retries",
"=",
"[",
"0",
"]",
"self",
".",
"_acquire_start_seconds",
"=",
"self",
".",
"_reactor",
".",
"seconds",
"(",
")",
"def",
"log_lock_acquired",
"(",
"result",
")",
":",
"self",
".",
"_lock_acquired_seconds",
"=",
"self",
".",
"_reactor",
".",
"seconds",
"(",
")",
"seconds",
"=",
"self",
".",
"_lock_acquired_seconds",
"-",
"self",
".",
"_acquire_start_seconds",
"self",
".",
"_log",
".",
"msg",
"(",
"'Acquired lock in {0} seconds'",
".",
"format",
"(",
"seconds",
")",
",",
"lock_acquire_time",
"=",
"seconds",
",",
"*",
"*",
"self",
".",
"_log_kwargs",
")",
"return",
"result",
"def",
"acquire_lock",
"(",
")",
":",
"d",
"=",
"self",
".",
"_write_lock",
"(",
")",
"d",
".",
"addCallback",
"(",
"self",
".",
"_read_lock",
")",
"d",
".",
"addCallback",
"(",
"self",
".",
"_verify_lock",
")",
"if",
"self",
".",
"_log",
":",
"d",
".",
"addCallback",
"(",
"log_lock_acquired",
")",
"d",
".",
"addErrback",
"(",
"lock_not_acquired",
")",
"return",
"d",
"def",
"lock_not_acquired",
"(",
"failure",
")",
":",
"failure",
".",
"trap",
"(",
"BusyLockError",
",",
"NoLockClaimsError",
")",
"retries",
"[",
"0",
"]",
"+=",
"1",
"if",
"retries",
"[",
"0",
"]",
"<=",
"self",
".",
"_max_retry",
":",
"return",
"task",
".",
"deferLater",
"(",
"self",
".",
"_reactor",
",",
"self",
".",
"_retry_wait",
",",
"acquire_lock",
")",
"else",
":",
"return",
"failure",
"def",
"log_lock_acquire_failure",
"(",
"failure",
")",
":",
"if",
"self",
".",
"_log",
":",
"seconds",
"=",
"self",
".",
"_reactor",
".",
"seconds",
"(",
")",
"-",
"self",
".",
"_acquire_start_seconds",
"self",
".",
"_log",
".",
"msg",
"(",
"'Could not acquire lock in {0} seconds due to {1}'",
".",
"format",
"(",
"seconds",
",",
"failure",
")",
",",
"lock_acquire_fail_time",
"=",
"seconds",
",",
"reason",
"=",
"failure",
",",
"*",
"*",
"self",
".",
"_log_kwargs",
")",
"return",
"failure",
"return",
"acquire_lock",
"(",
")",
".",
"addErrback",
"(",
"log_lock_acquire_failure",
")"
] | 38.55814 | 20.976744 |
def vtrees(self):
    """
    Get list of VTrees from ScaleIO cluster
    :return: List of VTree objects - Can be empty of no VTrees exist
    :rtype: VTree object
    """
    self.connection._check_login()
    endpoint = "{}/{}".format(self.connection._api_url, "types/VTree/instances")
    response = self.connection._do_get(endpoint).json()
    # Convert each raw dict from the REST response into a VTree object.
    return [SIO_Vtree.from_dict(raw_vtree) for raw_vtree in response]
"def",
"vtrees",
"(",
"self",
")",
":",
"self",
".",
"connection",
".",
"_check_login",
"(",
")",
"response",
"=",
"self",
".",
"connection",
".",
"_do_get",
"(",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"connection",
".",
"_api_url",
",",
"\"types/VTree/instances\"",
")",
")",
".",
"json",
"(",
")",
"all_vtrees",
"=",
"[",
"]",
"for",
"vtree",
"in",
"response",
":",
"all_vtrees",
".",
"append",
"(",
"SIO_Vtree",
".",
"from_dict",
"(",
"vtree",
")",
")",
"return",
"all_vtrees"
] | 35.928571 | 16.642857 |
def _normalize_branch_node(self, node):
    # sys.stderr.write('nbn\n')
    """Normalise a 17-item branch node after exactly one item changed.

    If more than one slot is populated the branch stays a branch; if only
    one slot remains the branch is collapsed into a key-value node (or the
    bare value node when only slot 16, the value slot, is populated).
    Returns the (re-encoded) replacement node.
    """
    not_blank_items_count = sum(1 for x in range(17) if node[x])
    # A branch with zero live entries should have been deleted upstream.
    assert not_blank_items_count >= 1
    if not_blank_items_count > 1:
        # Still a genuine branch: re-encode and keep as-is.
        self._encode_node(node)
        return node
    # now only one item is not blank
    not_blank_index = [i for i, item in enumerate(node) if item][0]
    # the value item is not blank
    if not_blank_index == 16:
        # Collapse to a leaf with an empty (terminated) key and this value.
        o = [pack_nibbles(with_terminator([])), node[16]]
        self._encode_node(o)
        return o
    # normal item is not blank
    sub_node = self._decode_to_node(node[not_blank_index])
    sub_node_type = self._get_node_type(sub_node)
    if is_key_value_type(sub_node_type):
        # collape subnode to this node, not this node will have same
        # terminator with the new sub node, and value does not change
        self._delete_node_storage(sub_node)
        # Prepend the branch slot's nibble to the child's key.
        new_key = [not_blank_index] + \
            unpack_to_nibbles(sub_node[0])
        o = [pack_nibbles(new_key), sub_node[1]]
        self._encode_node(o)
        return o
    if sub_node_type == NODE_TYPE_BRANCH:
        # Child is itself a branch: wrap it in a one-nibble extension node.
        o = [pack_nibbles([not_blank_index]),
             node[not_blank_index]]
        self._encode_node(o)
        return o
    # Unreachable: every decoded child is key-value or branch typed.
    assert False
"def",
"_normalize_branch_node",
"(",
"self",
",",
"node",
")",
":",
"# sys.stderr.write('nbn\\n')",
"not_blank_items_count",
"=",
"sum",
"(",
"1",
"for",
"x",
"in",
"range",
"(",
"17",
")",
"if",
"node",
"[",
"x",
"]",
")",
"assert",
"not_blank_items_count",
">=",
"1",
"if",
"not_blank_items_count",
">",
"1",
":",
"self",
".",
"_encode_node",
"(",
"node",
")",
"return",
"node",
"# now only one item is not blank",
"not_blank_index",
"=",
"[",
"i",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"node",
")",
"if",
"item",
"]",
"[",
"0",
"]",
"# the value item is not blank",
"if",
"not_blank_index",
"==",
"16",
":",
"o",
"=",
"[",
"pack_nibbles",
"(",
"with_terminator",
"(",
"[",
"]",
")",
")",
",",
"node",
"[",
"16",
"]",
"]",
"self",
".",
"_encode_node",
"(",
"o",
")",
"return",
"o",
"# normal item is not blank",
"sub_node",
"=",
"self",
".",
"_decode_to_node",
"(",
"node",
"[",
"not_blank_index",
"]",
")",
"sub_node_type",
"=",
"self",
".",
"_get_node_type",
"(",
"sub_node",
")",
"if",
"is_key_value_type",
"(",
"sub_node_type",
")",
":",
"# collape subnode to this node, not this node will have same",
"# terminator with the new sub node, and value does not change",
"self",
".",
"_delete_node_storage",
"(",
"sub_node",
")",
"new_key",
"=",
"[",
"not_blank_index",
"]",
"+",
"unpack_to_nibbles",
"(",
"sub_node",
"[",
"0",
"]",
")",
"o",
"=",
"[",
"pack_nibbles",
"(",
"new_key",
")",
",",
"sub_node",
"[",
"1",
"]",
"]",
"self",
".",
"_encode_node",
"(",
"o",
")",
"return",
"o",
"if",
"sub_node_type",
"==",
"NODE_TYPE_BRANCH",
":",
"o",
"=",
"[",
"pack_nibbles",
"(",
"[",
"not_blank_index",
"]",
")",
",",
"node",
"[",
"not_blank_index",
"]",
"]",
"self",
".",
"_encode_node",
"(",
"o",
")",
"return",
"o",
"assert",
"False"
] | 36.282051 | 14.846154 |
def store_file(self, remote_full_path, local_full_path):
    """
    Transfers a local file to the remote location.

    The file at ``local_full_path`` is read from disk and uploaded to
    ``remote_full_path`` over the existing connection.

    :param remote_full_path: full path to the remote file
    :type remote_full_path: str
    :param local_full_path: full path to the local file
    :type local_full_path: str
    """
    connection = self.get_conn()
    connection.put(local_full_path, remote_full_path)
"def",
"store_file",
"(",
"self",
",",
"remote_full_path",
",",
"local_full_path",
")",
":",
"conn",
"=",
"self",
".",
"get_conn",
"(",
")",
"conn",
".",
"put",
"(",
"local_full_path",
",",
"remote_full_path",
")"
] | 42 | 12.666667 |
def monitor(i):
    """Given an iterator, yields data from it
    but prints progress every 10,000 records"""
    for count, record in enumerate(i, start=1):
        if count % 10000 == 0:
            # Progress heartbeat; records are expected to carry an "idx" key.
            logger.info("%d records so far, current record is %s",
                        count, record["idx"])
        yield record
"def",
"monitor",
"(",
"i",
")",
":",
"count",
"=",
"0",
"for",
"x",
"in",
"i",
":",
"count",
"+=",
"1",
"if",
"count",
"%",
"10000",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"%d records so far, current record is %s\"",
",",
"count",
",",
"x",
"[",
"\"idx\"",
"]",
")",
"yield",
"x"
] | 29.4 | 17 |
def call_for_each_tower(
        towers, func, devices=None, use_vs=None):
    """
    Run `func` on all GPUs (towers) and return the results.
    Args:
        towers (list[int]): a list of GPU id.
        func: a lambda to be called inside each tower
        devices: a list of devices to be used. By default will use '/gpu:{tower}'
        use_vs (list[bool]): list of use_vs to passed to TowerContext
    Returns:
        List of outputs of ``func``, evaluated on each tower.
    """
    ret = []
    # Optional per-tower overrides must line up one-to-one with towers.
    if devices is not None:
        assert len(devices) == len(towers)
    if use_vs is not None:
        assert len(use_vs) == len(towers)
    tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]
    for idx, t in enumerate(towers):
        device = devices[idx] if devices is not None else '/gpu:{}'.format(t)
        usevs = use_vs[idx] if use_vs is not None else False
        # Without a per-tower variable scope, towers after the first reuse
        # the first tower's variables.
        reuse = not usevs and idx > 0
        with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(
                tower_names[idx],
                vs_name=tower_names[idx] if usevs else '',
                index=idx, total=len(towers)):
            if len(str(device)) < 10:     # a device function doesn't have good string description
                logger.info("Building graph for training tower {} on device {} ...".format(idx, device))
            else:
                logger.info("Building graph for training tower {} ...".format(idx))
            # When use_vs is True, use LOCAL_VARIABLES,
            # so these duplicated variables won't be saved by default.
            with override_to_local_variable(enable=usevs):
                ret.append(func())
    return ret
"def",
"call_for_each_tower",
"(",
"towers",
",",
"func",
",",
"devices",
"=",
"None",
",",
"use_vs",
"=",
"None",
")",
":",
"ret",
"=",
"[",
"]",
"if",
"devices",
"is",
"not",
"None",
":",
"assert",
"len",
"(",
"devices",
")",
"==",
"len",
"(",
"towers",
")",
"if",
"use_vs",
"is",
"not",
"None",
":",
"assert",
"len",
"(",
"use_vs",
")",
"==",
"len",
"(",
"towers",
")",
"tower_names",
"=",
"[",
"'tower{}'",
".",
"format",
"(",
"idx",
")",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"towers",
")",
")",
"]",
"for",
"idx",
",",
"t",
"in",
"enumerate",
"(",
"towers",
")",
":",
"device",
"=",
"devices",
"[",
"idx",
"]",
"if",
"devices",
"is",
"not",
"None",
"else",
"'/gpu:{}'",
".",
"format",
"(",
"t",
")",
"usevs",
"=",
"use_vs",
"[",
"idx",
"]",
"if",
"use_vs",
"is",
"not",
"None",
"else",
"False",
"reuse",
"=",
"not",
"usevs",
"and",
"idx",
">",
"0",
"with",
"tfv1",
".",
"device",
"(",
"device",
")",
",",
"_maybe_reuse_vs",
"(",
"reuse",
")",
",",
"TrainTowerContext",
"(",
"tower_names",
"[",
"idx",
"]",
",",
"vs_name",
"=",
"tower_names",
"[",
"idx",
"]",
"if",
"usevs",
"else",
"''",
",",
"index",
"=",
"idx",
",",
"total",
"=",
"len",
"(",
"towers",
")",
")",
":",
"if",
"len",
"(",
"str",
"(",
"device",
")",
")",
"<",
"10",
":",
"# a device function doesn't have good string description",
"logger",
".",
"info",
"(",
"\"Building graph for training tower {} on device {} ...\"",
".",
"format",
"(",
"idx",
",",
"device",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Building graph for training tower {} ...\"",
".",
"format",
"(",
"idx",
")",
")",
"# When use_vs is True, use LOCAL_VARIABLES,",
"# so these duplicated variables won't be saved by default.",
"with",
"override_to_local_variable",
"(",
"enable",
"=",
"usevs",
")",
":",
"ret",
".",
"append",
"(",
"func",
"(",
")",
")",
"return",
"ret"
] | 43.536585 | 24.268293 |
def decay_indexpointer(self):
    '''
    This private method provides decay indexpointers which allow to
    instantaneously decay an abundance vector.  It sets the following
    attributes:

    decay_idp : list
        points in the iso_to_plot (i.e. the undecayed abundance
        vector index space) to the decay target; -1 marks species
        whose target could not be determined and are ignored.
    idp_to_stables_in_isostoplot : list
        points to the stable isotopes in the undecayed abundance
        vector index space.

    Notes
    -----
    For an application example see ppn.py-abu_vector-_getcycle.
    '''
    a_iso_to_plot =self.a_iso_to_plot
    isotope_to_plot =self.isotope_to_plot
    z_iso_to_plot =self.z_iso_to_plot
    el_iso_to_plot =self.el_iso_to_plot
    abunds =self.abunds
    isom =self.isom
    z_db, a_db, el_db, stable_a_db,logic_db,charge_from_element_name=\
        self._read_isotopedatabase()
    # find out which species beta+ and which beta- decay:
    beta=np.sign(stable_a_db-a_db) # if a species is unstable and if beta < 0 => beta- decay
                                   # else beta > 0 => beta+ decay
    # now we need an index array on the scale of the abundance
    # distribution to be plotted that points to itself for stable species,
    # and to the stable element to which it decays in case of an unstable
    # species
    decay_index_pointer=np.zeros(len(isotope_to_plot), dtype='int')-1
    idp_to_stables_in_isostoplot=[]
    for i in range(len(isotope_to_plot)):
        # Isotope names look like "El-A", e.g. "He-4".
        element_name=isotope_to_plot[i].split('-')[0]
        try:
            stable_a=stable_a_db[np.where(el_db==element_name)][0] # 4th column for that element in isotopedatabase.txt
        except IndexError:
            print("Can't find element "+element_name+" in isotopedatabase.txt")
        if a_iso_to_plot[i] <= 209 and stable_a <=209: # Bi209 is last stable element
            stable_mass_numbers=self.stable_el[self.stable_names.index(element_name)][1:]
            iso_db_index_range_el=np.where(el_db==element_name)
            beta_for_this_species=\
                int(beta[iso_db_index_range_el][np.where(a_db[iso_db_index_range_el]==a_iso_to_plot[i])])
            if beta_for_this_species == 0: # if there are no stable species for an element (Tc,Pm) the cutoff specifies
                beta_for_this_species = -1 # the lowest mass beta- isotope
            if a_iso_to_plot[i] in stable_mass_numbers:
                # print isotope_to_plot[i]+" is stable"
                decay_index_pointer[i]=i
                idp_to_stables_in_isostoplot.append(i)
            elif a_iso_to_plot[i]==8: # Be8 -> He4
                decay_index_pointer[i]=isotope_to_plot.index('He-4')
            else: # beta decay
                # Walk along the isobar (fixed A) in the beta-decay
                # direction until a stable isotope with this mass is found.
                found_decay_target=False
                i_search=-1*beta_for_this_species
                while not found_decay_target:
                    try:
                        try_target_el=self.stable_names[charge_from_element_name[element_name]+i_search]
                    except TypeError:
                        print("Maybe information about species "+isotope_to_plot[i]+" is not available in isotopedatabase.txt")
                        decay_index_pointer[i]=-1
                        break
                    # print try_target_el
                    try:
                        stable_mass_numbers=self.stable_el[self.stable_names.index(try_target_el)][1:]
                    except ValueError:
                        print("Can not find decay target for "+isotope_to_plot[i])
                    if a_iso_to_plot[i] in stable_mass_numbers:
                        ind_range=np.where(np.array(el_iso_to_plot)==try_target_el)[0]
                        if a_iso_to_plot[i] in np.array(a_iso_to_plot)[ind_range]:
                            this_ind=\
                                ind_range[np.where(np.array(a_iso_to_plot)[ind_range]==a_iso_to_plot[i])[0]]
                            # print isotope_to_plot[i]+" is unstable and decays to "+isotope_to_plot[this_ind]
                            decay_index_pointer[i]=this_ind
                        else:
                            print("It seems unstable species "+isotope_to_plot[i]+" wants to decay to " \
                                +try_target_el+"-"+str(a_iso_to_plot[i])+", however this species is not in this run." \
                                +" This points to an inconsistency in the network build. Here we will ignore the abundance of " \
                                +isotope_to_plot[i]+'.')
                            decay_index_pointer[i]=-1
                        found_decay_target=True
                    else:
                        # Keep stepping in the same beta-decay direction.
                        i_search += -1*beta_for_this_species
    if self.debug:
        print("Decay rules:")
        for i in range(len(isotope_to_plot)):
            if decay_index_pointer[i]>= 0:
                print(isotope_to_plot[i]+" -> "+isotope_to_plot[decay_index_pointer[i]])
    ind_tmp=idp_to_stables_in_isostoplot
    #ind_tmp=utils.strictly_monotonic(decay_index_pointer) # this would do the same, but the method above is more straight forward
    self.decay_idp=decay_index_pointer
    self.idp_to_stables_in_isostoplot=ind_tmp
"def",
"decay_indexpointer",
"(",
"self",
")",
":",
"a_iso_to_plot",
"=",
"self",
".",
"a_iso_to_plot",
"isotope_to_plot",
"=",
"self",
".",
"isotope_to_plot",
"z_iso_to_plot",
"=",
"self",
".",
"z_iso_to_plot",
"el_iso_to_plot",
"=",
"self",
".",
"el_iso_to_plot",
"abunds",
"=",
"self",
".",
"abunds",
"isom",
"=",
"self",
".",
"isom",
"z_db",
",",
"a_db",
",",
"el_db",
",",
"stable_a_db",
",",
"logic_db",
",",
"charge_from_element_name",
"=",
"self",
".",
"_read_isotopedatabase",
"(",
")",
"# find out which species beta+ and which beta- decay:",
"beta",
"=",
"np",
".",
"sign",
"(",
"stable_a_db",
"-",
"a_db",
")",
"# if a species is unstable and if beta < 0 => beta- decay",
"# else beta > 0 => beta+ decay",
"# now we need an index array on the scale of the abundance",
"# distribution to be plotted that points to itself for stable species,",
"# and to the stable element to which it decays in case of an unstable",
"# species",
"decay_index_pointer",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"isotope_to_plot",
")",
",",
"dtype",
"=",
"'int'",
")",
"-",
"1",
"idp_to_stables_in_isostoplot",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"isotope_to_plot",
")",
")",
":",
"element_name",
"=",
"isotope_to_plot",
"[",
"i",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"try",
":",
"stable_a",
"=",
"stable_a_db",
"[",
"np",
".",
"where",
"(",
"el_db",
"==",
"element_name",
")",
"]",
"[",
"0",
"]",
"# 4th column for that element in isotopedatabase.txt",
"except",
"IndexError",
":",
"print",
"(",
"\"Can't find element \"",
"+",
"element_name",
"+",
"\" in isotopedatabase.txt\"",
")",
"if",
"a_iso_to_plot",
"[",
"i",
"]",
"<=",
"209",
"and",
"stable_a",
"<=",
"209",
":",
"# Bi209 is last stable element",
"stable_mass_numbers",
"=",
"self",
".",
"stable_el",
"[",
"self",
".",
"stable_names",
".",
"index",
"(",
"element_name",
")",
"]",
"[",
"1",
":",
"]",
"iso_db_index_range_el",
"=",
"np",
".",
"where",
"(",
"el_db",
"==",
"element_name",
")",
"beta_for_this_species",
"=",
"int",
"(",
"beta",
"[",
"iso_db_index_range_el",
"]",
"[",
"np",
".",
"where",
"(",
"a_db",
"[",
"iso_db_index_range_el",
"]",
"==",
"a_iso_to_plot",
"[",
"i",
"]",
")",
"]",
")",
"if",
"beta_for_this_species",
"==",
"0",
":",
"# if there are no stable species for an element (Tc,Pm) the cutoff specifies",
"beta_for_this_species",
"=",
"-",
"1",
"# the lowest mass beta- isotope",
"if",
"a_iso_to_plot",
"[",
"i",
"]",
"in",
"stable_mass_numbers",
":",
"# print isotope_to_plot[i]+\" is stable\"",
"decay_index_pointer",
"[",
"i",
"]",
"=",
"i",
"idp_to_stables_in_isostoplot",
".",
"append",
"(",
"i",
")",
"elif",
"a_iso_to_plot",
"[",
"i",
"]",
"==",
"8",
":",
"# Be8 -> He4",
"decay_index_pointer",
"[",
"i",
"]",
"=",
"isotope_to_plot",
".",
"index",
"(",
"'He-4'",
")",
"else",
":",
"# beta decay",
"found_decay_target",
"=",
"False",
"i_search",
"=",
"-",
"1",
"*",
"beta_for_this_species",
"while",
"not",
"found_decay_target",
":",
"try",
":",
"try_target_el",
"=",
"self",
".",
"stable_names",
"[",
"charge_from_element_name",
"[",
"element_name",
"]",
"+",
"i_search",
"]",
"except",
"TypeError",
":",
"print",
"(",
"\"Maybe information about species \"",
"+",
"isotope_to_plot",
"[",
"i",
"]",
"+",
"\" is not available in isotopedatabase.txt\"",
")",
"decay_index_pointer",
"[",
"i",
"]",
"=",
"-",
"1",
"break",
"# print try_target_el",
"try",
":",
"stable_mass_numbers",
"=",
"self",
".",
"stable_el",
"[",
"self",
".",
"stable_names",
".",
"index",
"(",
"try_target_el",
")",
"]",
"[",
"1",
":",
"]",
"except",
"ValueError",
":",
"print",
"(",
"\"Can not find decay target for \"",
"+",
"isotope_to_plot",
"[",
"i",
"]",
")",
"if",
"a_iso_to_plot",
"[",
"i",
"]",
"in",
"stable_mass_numbers",
":",
"ind_range",
"=",
"np",
".",
"where",
"(",
"np",
".",
"array",
"(",
"el_iso_to_plot",
")",
"==",
"try_target_el",
")",
"[",
"0",
"]",
"if",
"a_iso_to_plot",
"[",
"i",
"]",
"in",
"np",
".",
"array",
"(",
"a_iso_to_plot",
")",
"[",
"ind_range",
"]",
":",
"this_ind",
"=",
"ind_range",
"[",
"np",
".",
"where",
"(",
"np",
".",
"array",
"(",
"a_iso_to_plot",
")",
"[",
"ind_range",
"]",
"==",
"a_iso_to_plot",
"[",
"i",
"]",
")",
"[",
"0",
"]",
"]",
"# print isotope_to_plot[i]+\" is unstable and decays to \"+isotope_to_plot[this_ind]",
"decay_index_pointer",
"[",
"i",
"]",
"=",
"this_ind",
"else",
":",
"print",
"(",
"\"It seems unstable species \"",
"+",
"isotope_to_plot",
"[",
"i",
"]",
"+",
"\" wants to decay to \"",
"+",
"try_target_el",
"+",
"\"-\"",
"+",
"str",
"(",
"a_iso_to_plot",
"[",
"i",
"]",
")",
"+",
"\", however this species is not in this run.\"",
"+",
"\" This points to an inconsistency in the network build. Here we will ignore the abundance of \"",
"+",
"isotope_to_plot",
"[",
"i",
"]",
"+",
"'.'",
")",
"decay_index_pointer",
"[",
"i",
"]",
"=",
"-",
"1",
"found_decay_target",
"=",
"True",
"else",
":",
"i_search",
"+=",
"-",
"1",
"*",
"beta_for_this_species",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"\"Decay rules:\"",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"isotope_to_plot",
")",
")",
":",
"if",
"decay_index_pointer",
"[",
"i",
"]",
">=",
"0",
":",
"print",
"(",
"isotope_to_plot",
"[",
"i",
"]",
"+",
"\" -> \"",
"+",
"isotope_to_plot",
"[",
"decay_index_pointer",
"[",
"i",
"]",
"]",
")",
"ind_tmp",
"=",
"idp_to_stables_in_isostoplot",
"#ind_tmp=utils.strictly_monotonic(decay_index_pointer) # this would do the same, but the method above is more straight forward",
"self",
".",
"decay_idp",
"=",
"decay_index_pointer",
"self",
".",
"idp_to_stables_in_isostoplot",
"=",
"ind_tmp"
] | 54.75 | 27.83 |
def set_l2cap_options (sock, options):
    """set_l2cap_options (sock, options)
    Sets L2CAP options for the specified L2CAP socket.
    The option list must be in the same format supplied by
    get_l2cap_options().
    """
    # TODO this should be in the C module, because it depends
    # directly on struct l2cap_options layout.
    packed = struct.pack("HHHBBBH", *options)
    sock.setsockopt(SOL_L2CAP, L2CAP_OPTIONS, packed)
"def",
"set_l2cap_options",
"(",
"sock",
",",
"options",
")",
":",
"# TODO this should be in the C module, because it depends",
"# directly on struct l2cap_options layout.",
"s",
"=",
"struct",
".",
"pack",
"(",
"\"HHHBBBH\"",
",",
"*",
"options",
")",
"sock",
".",
"setsockopt",
"(",
"SOL_L2CAP",
",",
"L2CAP_OPTIONS",
",",
"s",
")"
] | 38 | 11.545455 |
def _verify_jws(self, payload, key):
    """Verify the given JWS payload with the given key and return the payload"""
    jws = JWS.from_compact(payload)
    try:
        alg = jws.signature.combined.alg.name
    except KeyError:
        raise SuspiciousOperation('No alg value found in header')
    # The token's algorithm must match the one this client is configured for.
    if alg != self.OIDC_RP_SIGN_ALGO:
        raise SuspiciousOperation(
            "The provider algorithm {!r} does not match the client's "
            "OIDC_RP_SIGN_ALGO.".format(alg))
    if isinstance(key, six.string_types):
        # Use smart_bytes here since the key string comes from settings.
        jwk = JWK.load(smart_bytes(key))
    else:
        # The key is a json returned from the IDP JWKS endpoint.
        jwk = JWK.from_json(key)
    if not jws.verify(jwk):
        raise SuspiciousOperation('JWS token verification failed.')
    return jws.payload
"def",
"_verify_jws",
"(",
"self",
",",
"payload",
",",
"key",
")",
":",
"jws",
"=",
"JWS",
".",
"from_compact",
"(",
"payload",
")",
"try",
":",
"alg",
"=",
"jws",
".",
"signature",
".",
"combined",
".",
"alg",
".",
"name",
"except",
"KeyError",
":",
"msg",
"=",
"'No alg value found in header'",
"raise",
"SuspiciousOperation",
"(",
"msg",
")",
"if",
"alg",
"!=",
"self",
".",
"OIDC_RP_SIGN_ALGO",
":",
"msg",
"=",
"\"The provider algorithm {!r} does not match the client's \"",
"\"OIDC_RP_SIGN_ALGO.\"",
".",
"format",
"(",
"alg",
")",
"raise",
"SuspiciousOperation",
"(",
"msg",
")",
"if",
"isinstance",
"(",
"key",
",",
"six",
".",
"string_types",
")",
":",
"# Use smart_bytes here since the key string comes from settings.",
"jwk",
"=",
"JWK",
".",
"load",
"(",
"smart_bytes",
"(",
"key",
")",
")",
"else",
":",
"# The key is a json returned from the IDP JWKS endpoint.",
"jwk",
"=",
"JWK",
".",
"from_json",
"(",
"key",
")",
"if",
"not",
"jws",
".",
"verify",
"(",
"jwk",
")",
":",
"msg",
"=",
"'JWS token verification failed.'",
"raise",
"SuspiciousOperation",
"(",
"msg",
")",
"return",
"jws",
".",
"payload"
] | 36.148148 | 16.962963 |
def path_fraction_point(points, fraction):
    '''Return the 3D point located at the given fraction of the path length.

    The path is the piecewise linear curve through *points* (indexable
    objects whose indices 0, 1, 2 are cartesian coordinates);
    ``0 <= fraction <= 1``.
    '''
    # Locate the segment containing the target point and the relative
    # offset within it, then interpolate between the segment endpoints.
    segment, rel_offset = path_fraction_id_offset(points, fraction,
                                                  relative_offset=True)
    start, end = points[segment], points[segment + 1]
    return linear_interpolate(start, end, rel_offset)
"def",
"path_fraction_point",
"(",
"points",
",",
"fraction",
")",
":",
"seg_id",
",",
"offset",
"=",
"path_fraction_id_offset",
"(",
"points",
",",
"fraction",
",",
"relative_offset",
"=",
"True",
")",
"return",
"linear_interpolate",
"(",
"points",
"[",
"seg_id",
"]",
",",
"points",
"[",
"seg_id",
"+",
"1",
"]",
",",
"offset",
")"
] | 41.266667 | 24.2 |
def get_bbox(self, primitive):
    """Return the bounding box of the mesh as a ``(min, max)`` corner pair.

    The corners come straight from the POSITION accessor's declared
    ``min``/``max`` values, so no vertex data is scanned.
    """
    position_accessor = primitive.attributes.get('POSITION')
    return position_accessor.min, position_accessor.max
"def",
"get_bbox",
"(",
"self",
",",
"primitive",
")",
":",
"accessor",
"=",
"primitive",
".",
"attributes",
".",
"get",
"(",
"'POSITION'",
")",
"return",
"accessor",
".",
"min",
",",
"accessor",
".",
"max"
] | 43.25 | 6.5 |
def hessian(pars, x, y):
    """
    Create a hessian matrix corresponding to the source model 'pars'
    Only parameters that vary will contribute to the hessian.
    Thus there will be a total of nvar x nvar entries, each of which is a
    len(x) x len(y) array.

    Parameters
    ----------
    pars : lmfit.Parameters
        The model
    x, y : list
        locations at which to evaluate the Hessian

    Returns
    -------
    h : np.array
        Hessian. Shape will be (nvar, nvar, len(x), len(y))

    See Also
    --------
    :func:`AegeanTools.fitting.emp_hessian`
    """
    j = 0  # keeping track of the number of variable parameters
    # total number of variable parameters
    ntvar = np.sum([pars[k].vary for k in pars.keys() if k != 'components'])
    # construct an empty matrix of the correct size
    hmat = np.zeros((ntvar, ntvar, x.shape[0], x.shape[1]))
    npvar = 0
    for i in range(pars['components'].value):
        prefix = "c{0}_".format(i)
        amp = pars[prefix + 'amp'].value
        xo = pars[prefix + 'xo'].value
        yo = pars[prefix + 'yo'].value
        sx = pars[prefix + 'sx'].value
        sy = pars[prefix + 'sy'].value
        theta = pars[prefix + 'theta'].value

        amp_var = pars[prefix + 'amp'].vary
        xo_var = pars[prefix + 'xo'].vary
        yo_var = pars[prefix + 'yo'].vary
        sx_var = pars[prefix + 'sx'].vary
        sy_var = pars[prefix + 'sy'].vary
        theta_var = pars[prefix + 'theta'].vary

        # precomputed for speed
        model = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta)
        sint = np.sin(np.radians(theta))
        sin2t = np.sin(np.radians(2*theta))
        cost = np.cos(np.radians(theta))
        cos2t = np.cos(np.radians(2*theta))
        sx2 = sx**2
        sy2 = sy**2
        xxo = x-xo
        yyo = y-yo
        xcos, ycos = xxo*cost, yyo*cost
        xsin, ysin = xxo*sint, yyo*sint

        if amp_var:
            k = npvar  # second round of keeping track of variable params
            # H(amp,amp)/G = 0
            hmat[j][k] = 0
            k += 1

            if xo_var:
                # H(amp,xo)/G = 1.0*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))/(amp*sx**2*sy**2)
                hmat[j][k] = (xsin - ycos)*sint/sy2 + (xcos + ysin)*cost/sx2
                hmat[j][k] *= model
                k += 1

            if yo_var:
                # H(amp,yo)/G = 1.0*(-sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))/(amp*sx**2*sy**2)
                hmat[j][k] = -(xsin - ycos)*cost/sy2 + (xcos + ysin)*sint/sx2
                hmat[j][k] *= model/amp
                k += 1

            if sx_var:
                # H(amp,sx)/G = 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(amp*sx**3)
                hmat[j][k] = (xcos + ysin)**2
                hmat[j][k] *= model/(amp*sx**3)
                k += 1

            if sy_var:
                # H(amp,sy) = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/(amp*sy**3)
                hmat[j][k] = (xsin - ycos)**2
                hmat[j][k] *= model/(amp*sy**3)
                k += 1

            if theta_var:
                # H(amp,t) = (-1.0*sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(amp*sx**2*sy**2)
                hmat[j][k] = (xsin - ycos)*(xcos + ysin)
                hmat[j][k] *= sy2-sx2
                hmat[j][k] *= model/(amp*sx2*sy2)
                # k += 1
            j += 1

        if xo_var:
            k = npvar
            if amp_var:
                # H(xo,amp)/G = H(amp,xo)
                hmat[j][k] = hmat[k][j]
                k += 1

            # if xo_var:
            # H(xo,xo)/G = 1.0*(-sx**2*sy**2*(sx**2*sin(t)**2 + sy**2*cos(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))**2)/(sx**4*sy**4)
            hmat[j][k] = -sx2*sy2*(sx2*sint**2 + sy2*cost**2)
            hmat[j][k] += (sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)**2
            hmat[j][k] *= model/(sx2**2*sy2**2)
            k += 1

            if yo_var:
                # H(xo,yo)/G = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*sin(2*t)/2 - (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4)
                hmat[j][k] = sx2*sy2*(sx2 - sy2)*sin2t/2
                hmat[j][k] -= (sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)*(sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint)
                hmat[j][k] *= model / (sx**4*sy**4)
                k += 1

            if sx_var:
                # H(xo,sx) = ((x - xo)*cos(t) + (y - yo)*sin(t))*(-2.0*sx**2*sy**2*cos(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**5*sy**2)
                hmat[j][k] = (xcos + ysin)
                hmat[j][k] *= -2*sx2*sy2*cost + (xcos + ysin)*(sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)
                hmat[j][k] *= model / (sx**5*sy2)
                k += 1

            if sy_var:
                # H(xo,sy) = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(-2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx2*sy**5)
                hmat[j][k] = (xsin - ycos)
                hmat[j][k] *= -2*sx2*sy2*sint + (xsin - ycos)*(sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)
                hmat[j][k] *= model/(sx2*sy**5)
                k += 1

            if theta_var:
                # H(xo,t) = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*(x*sin(2*t) - xo*sin(2*t) - y*cos(2*t) + yo*cos(2*t)) + (-sx**2 + 1.0*sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**4*sy**4)
                # second part
                hmat[j][k] = (sy2-sx2)*(xsin - ycos)*(xcos + ysin)
                hmat[j][k] *= sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost
                # first part
                hmat[j][k] += sx2*sy2*(sx2 - sy2)*(xxo*sin2t - yyo*cos2t)
                hmat[j][k] *= model/(sx**4*sy**4)
                # k += 1
            j += 1

        if yo_var:
            k = npvar
            if amp_var:
                # H(yo,amp)/G = H(amp,yo)/G -- the Hessian is symmetric.
                # BUGFIX: this used the hard-coded entry hmat[0][2], which is
                # only correct when all parameters of the first component
                # vary; use the symmetric entry instead (as every other
                # branch does).
                hmat[j][k] = hmat[k][j]
                k += 1

            if xo_var:
                # H(yo,xo)/G = H(xo,yo)/G
                # BUGFIX: previously the hard-coded entry hmat[1][2].
                hmat[j][k] = hmat[k][j]
                k += 1

            # if yo_var:
            # H(yo,yo)/G = 1.0*(-sx**2*sy**2*(sx**2*cos(t)**2 + sy**2*sin(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))**2)/(sx**4*sy**4)
            hmat[j][k] = (sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint)**2 / (sx2**2*sy2**2)
            hmat[j][k] -= cost**2/sy2 + sint**2/sx2
            hmat[j][k] *= model
            k += 1

            if sx_var:
                # H(yo,sx)/G = -((x - xo)*cos(t) + (y - yo)*sin(t))*(2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) - (y - yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**5*sy**2)
                hmat[j][k] = -1*(xcos + ysin)
                hmat[j][k] *= 2*sx2*sy2*sint + (xcos + ysin)*(sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint)
                hmat[j][k] *= model/(sx**5*sy2)
                k += 1

            if sy_var:
                # H(yo,sy)/G = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(2.0*sx**2*sy**2*cos(t) - 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**2*sy**5)
                hmat[j][k] = (xsin - ycos)
                hmat[j][k] *= 2*sx2*sy2*cost - (xsin - ycos)*(sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint)
                hmat[j][k] *= model/(sx2*sy**5)
                k += 1

            if theta_var:
                # H(yo,t)/G = 1.0*(sx**2*sy**2*(sx**2*(-x*cos(2*t) + xo*cos(2*t) - y*sin(2*t) + yo*sin(2*t)) + sy**2*(x*cos(2*t) - xo*cos(2*t) + y*sin(2*t) - yo*sin(2*t))) + (1.0*sx**2 - sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4)
                hmat[j][k] = (sx2 - sy2)*(xsin - ycos)*(xcos + ysin)
                hmat[j][k] *= (sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint)
                hmat[j][k] += sx2*sy2*(sx2-sy2)*(-x*cos2t + xo*cos2t - y*sin2t + yo*sin2t)
                hmat[j][k] *= model/(sx**4*sy**4)
                # k += 1
            j += 1

        if sx_var:
            k = npvar
            if amp_var:
                # H(sx,amp)/G = H(amp,sx)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if xo_var:
                # H(sx,xo)/G = H(xo,sx)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if yo_var:
                # H(sx,yo)/G = H(yo,sx)/G
                hmat[j][k] = hmat[k][j]
                k += 1

            # if sx_var:
            # H(sx,sx)/G = (-3.0*sx**2 + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2/sx**6
            hmat[j][k] = -3*sx2 + (xcos + ysin)**2
            hmat[j][k] *= (xcos + ysin)**2
            hmat[j][k] *= model/sx**6
            k += 1

            if sy_var:
                # H(sx,sy)/G = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(sx**3*sy**3)
                hmat[j][k] = (xsin - ycos)**2 * (xcos + ysin)**2
                hmat[j][k] *= model/(sx**3*sy**3)
                k += 1

            if theta_var:
                # H(sx,t)/G = (-2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**5*sy**2)
                hmat[j][k] = -2*sx2*sy2 + (sy2 - sx2)*(xcos + ysin)**2
                hmat[j][k] *= (xsin - ycos)*(xcos + ysin)
                hmat[j][k] *= model/(sx**5*sy**2)
                # k += 1
            j += 1

        if sy_var:
            k = npvar
            if amp_var:
                # H(sy,amp)/G = H(amp,sy)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if xo_var:
                # H(sy,xo)/G = H(xo,sy)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if yo_var:
                # H(sy,yo)/G = H(yo,sy)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if sx_var:
                # H(sy,sx)/G = H(sx,sy)/G
                hmat[j][k] = hmat[k][j]
                k += 1

            # if sy_var:
            # H(sy,sy)/G = (-3.0*sy**2 + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/sy**6
            hmat[j][k] = -3*sy2 + (xsin - ycos)**2
            hmat[j][k] *= (xsin - ycos)**2
            hmat[j][k] *= model/sy**6
            k += 1

            if theta_var:
                # H(sy,t)/G = (2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**2*sy**5)
                hmat[j][k] = 2*sx2*sy2 + (sy2 - sx2)*(xsin - ycos)**2
                hmat[j][k] *= (xsin - ycos)*(xcos + ysin)
                hmat[j][k] *= model/(sx**2*sy**5)
                # k += 1
            j += 1

        if theta_var:
            k = npvar
            if amp_var:
                # H(t,amp)/G = H(amp,t)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if xo_var:
                # H(t,xo)/G = H(xo,t)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if yo_var:
                # H(t,yo)/G = H(yo,t)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if sx_var:
                # H(t,sx)/G = H(sx,t)/G
                hmat[j][k] = hmat[k][j]
                k += 1
            if sy_var:
                # H(t,sy)/G = H(sy,t)/G
                hmat[j][k] = hmat[k][j]
                k += 1

            # if theta_var:
            # H(t,t)/G = (sx**2*sy**2*(sx**2*(((x - xo)*sin(t) + (-y + yo)*cos(t))**2 - 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2) + sy**2*(-1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2 + ((x - xo)*cos(t) + (y - yo)*sin(t))**2)) + (sx**2 - 1.0*sy**2)**2*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2)/(sx**4*sy**4)
            hmat[j][k] = sx2*sy2
            hmat[j][k] *= sx2*((xsin - ycos)**2 - (xcos + ysin)**2) + sy2*((xcos + ysin)**2 - (xsin - ycos)**2)
            hmat[j][k] += (sx2 - sy2)**2*(xsin - ycos)**2*(xcos + ysin)**2
            hmat[j][k] *= model/(sx**4*sy**4)
            # j += 1

        # save the number of variables for the next iteration
        # as we need to start our indexing at this number
        npvar = k
    return np.array(hmat)
"def",
"hessian",
"(",
"pars",
",",
"x",
",",
"y",
")",
":",
"j",
"=",
"0",
"# keeping track of the number of variable parameters",
"# total number of variable parameters",
"ntvar",
"=",
"np",
".",
"sum",
"(",
"[",
"pars",
"[",
"k",
"]",
".",
"vary",
"for",
"k",
"in",
"pars",
".",
"keys",
"(",
")",
"if",
"k",
"!=",
"'components'",
"]",
")",
"# construct an empty matrix of the correct size",
"hmat",
"=",
"np",
".",
"zeros",
"(",
"(",
"ntvar",
",",
"ntvar",
",",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"x",
".",
"shape",
"[",
"1",
"]",
")",
")",
"npvar",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"pars",
"[",
"'components'",
"]",
".",
"value",
")",
":",
"prefix",
"=",
"\"c{0}_\"",
".",
"format",
"(",
"i",
")",
"amp",
"=",
"pars",
"[",
"prefix",
"+",
"'amp'",
"]",
".",
"value",
"xo",
"=",
"pars",
"[",
"prefix",
"+",
"'xo'",
"]",
".",
"value",
"yo",
"=",
"pars",
"[",
"prefix",
"+",
"'yo'",
"]",
".",
"value",
"sx",
"=",
"pars",
"[",
"prefix",
"+",
"'sx'",
"]",
".",
"value",
"sy",
"=",
"pars",
"[",
"prefix",
"+",
"'sy'",
"]",
".",
"value",
"theta",
"=",
"pars",
"[",
"prefix",
"+",
"'theta'",
"]",
".",
"value",
"amp_var",
"=",
"pars",
"[",
"prefix",
"+",
"'amp'",
"]",
".",
"vary",
"xo_var",
"=",
"pars",
"[",
"prefix",
"+",
"'xo'",
"]",
".",
"vary",
"yo_var",
"=",
"pars",
"[",
"prefix",
"+",
"'yo'",
"]",
".",
"vary",
"sx_var",
"=",
"pars",
"[",
"prefix",
"+",
"'sx'",
"]",
".",
"vary",
"sy_var",
"=",
"pars",
"[",
"prefix",
"+",
"'sy'",
"]",
".",
"vary",
"theta_var",
"=",
"pars",
"[",
"prefix",
"+",
"'theta'",
"]",
".",
"vary",
"# precomputed for speed",
"model",
"=",
"elliptical_gaussian",
"(",
"x",
",",
"y",
",",
"amp",
",",
"xo",
",",
"yo",
",",
"sx",
",",
"sy",
",",
"theta",
")",
"sint",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"theta",
")",
")",
"sin2t",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"2",
"*",
"theta",
")",
")",
"cost",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"theta",
")",
")",
"cos2t",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"2",
"*",
"theta",
")",
")",
"sx2",
"=",
"sx",
"**",
"2",
"sy2",
"=",
"sy",
"**",
"2",
"xxo",
"=",
"x",
"-",
"xo",
"yyo",
"=",
"y",
"-",
"yo",
"xcos",
",",
"ycos",
"=",
"xxo",
"*",
"cost",
",",
"yyo",
"*",
"cost",
"xsin",
",",
"ysin",
"=",
"xxo",
"*",
"sint",
",",
"yyo",
"*",
"sint",
"if",
"amp_var",
":",
"k",
"=",
"npvar",
"# second round of keeping track of variable params",
"# H(amp,amp)/G = 0",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"0",
"k",
"+=",
"1",
"if",
"xo_var",
":",
"# H(amp,xo)/G = 1.0*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))/(amp*sx**2*sy**2)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"sint",
"/",
"sy2",
"+",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"cost",
"/",
"sx2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"k",
"+=",
"1",
"if",
"yo_var",
":",
"# H(amp,yo)/G = 1.0*(-sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))/(amp*sx**2*sy**2)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"-",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"cost",
"/",
"sy2",
"+",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"sint",
"/",
"sx2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"amp",
"k",
"+=",
"1",
"if",
"sx_var",
":",
"# H(amp,sx)/G = 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(amp*sx**3)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"amp",
"*",
"sx",
"**",
"3",
")",
"k",
"+=",
"1",
"if",
"sy_var",
":",
"# H(amp,sy) = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/(amp*sy**3)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"amp",
"*",
"sy",
"**",
"3",
")",
"k",
"+=",
"1",
"if",
"theta_var",
":",
"# H(amp,t) = (-1.0*sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(amp*sx**2*sy**2)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"sy2",
"-",
"sx2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"amp",
"*",
"sx2",
"*",
"sy2",
")",
"# k += 1",
"j",
"+=",
"1",
"if",
"xo_var",
":",
"k",
"=",
"npvar",
"if",
"amp_var",
":",
"# H(xo,amp)/G = H(amp,xo)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"# if xo_var:",
"# H(xo,xo)/G = 1.0*(-sx**2*sy**2*(sx**2*sin(t)**2 + sy**2*cos(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))**2)/(sx**4*sy**4)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"-",
"sx2",
"*",
"sy2",
"*",
"(",
"sx2",
"*",
"sint",
"**",
"2",
"+",
"sy2",
"*",
"cost",
"**",
"2",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"+=",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"sint",
"+",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"cost",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx2",
"**",
"2",
"*",
"sy2",
"**",
"2",
")",
"k",
"+=",
"1",
"if",
"yo_var",
":",
"# H(xo,yo)/G = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*sin(2*t)/2 - (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"sx2",
"*",
"sy2",
"*",
"(",
"sx2",
"-",
"sy2",
")",
"*",
"sin2t",
"/",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"-=",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"sint",
"+",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"cost",
")",
"*",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"cost",
"-",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"sint",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"4",
"*",
"sy",
"**",
"4",
")",
"k",
"+=",
"1",
"if",
"sx_var",
":",
"# H(xo,sx) = ((x - xo)*cos(t) + (y - yo)*sin(t))*(-2.0*sx**2*sy**2*cos(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**5*sy**2)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xcos",
"+",
"ysin",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"-",
"2",
"*",
"sx2",
"*",
"sy2",
"*",
"cost",
"+",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"sint",
"+",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"cost",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"5",
"*",
"sy2",
")",
"k",
"+=",
"1",
"if",
"sy_var",
":",
"# H(xo,sy) = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(-2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx2*sy**5)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xsin",
"-",
"ycos",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"-",
"2",
"*",
"sx2",
"*",
"sy2",
"*",
"sint",
"+",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"sint",
"+",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"cost",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx2",
"*",
"sy",
"**",
"5",
")",
"k",
"+=",
"1",
"if",
"theta_var",
":",
"# H(xo,t) = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*(x*sin(2*t) - xo*sin(2*t) - y*cos(2*t) + yo*cos(2*t)) + (-sx**2 + 1.0*sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**4*sy**4)",
"# second part",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"sy2",
"-",
"sx2",
")",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"sint",
"+",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"cost",
"# first part",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"+=",
"sx2",
"*",
"sy2",
"*",
"(",
"sx2",
"-",
"sy2",
")",
"*",
"(",
"xxo",
"*",
"sin2t",
"-",
"yyo",
"*",
"cos2t",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"4",
"*",
"sy",
"**",
"4",
")",
"# k += 1",
"j",
"+=",
"1",
"if",
"yo_var",
":",
"k",
"=",
"npvar",
"if",
"amp_var",
":",
"# H(yo,amp)/G = H(amp,yo)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"0",
"]",
"[",
"2",
"]",
"k",
"+=",
"1",
"if",
"xo_var",
":",
"# H(yo,xo)/G = H(xo,yo)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"1",
"]",
"[",
"2",
"]",
"k",
"+=",
"1",
"# if yo_var:",
"# H(yo,yo)/G = 1.0*(-sx**2*sy**2*(sx**2*cos(t)**2 + sy**2*sin(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))**2)/(sx**4*sy**4)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"cost",
"-",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"sint",
")",
"**",
"2",
"/",
"(",
"sx2",
"**",
"2",
"*",
"sy2",
"**",
"2",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"-=",
"cost",
"**",
"2",
"/",
"sy2",
"+",
"sint",
"**",
"2",
"/",
"sx2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"k",
"+=",
"1",
"if",
"sx_var",
":",
"# H(yo,sx)/G = -((x - xo)*cos(t) + (y - yo)*sin(t))*(2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) - (y - yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**5*sy**2)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"-",
"1",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"2",
"*",
"sx2",
"*",
"sy2",
"*",
"sint",
"+",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"cost",
"-",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"sint",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"5",
"*",
"sy2",
")",
"k",
"+=",
"1",
"if",
"sy_var",
":",
"# H(yo,sy)/G = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(2.0*sx**2*sy**2*cos(t) - 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**2*sy**5)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xsin",
"-",
"ycos",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"2",
"*",
"sx2",
"*",
"sy2",
"*",
"cost",
"-",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"cost",
"-",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"sint",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx2",
"*",
"sy",
"**",
"5",
")",
"k",
"+=",
"1",
"if",
"theta_var",
":",
"# H(yo,t)/G = 1.0*(sx**2*sy**2*(sx**2*(-x*cos(2*t) + xo*cos(2*t) - y*sin(2*t) + yo*sin(2*t)) + sy**2*(x*cos(2*t) - xo*cos(2*t) + y*sin(2*t) - yo*sin(2*t))) + (1.0*sx**2 - sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"sx2",
"-",
"sy2",
")",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"(",
"sx2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"cost",
"-",
"sy2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"*",
"sint",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"+=",
"sx2",
"*",
"sy2",
"*",
"(",
"sx2",
"-",
"sy2",
")",
"*",
"(",
"-",
"x",
"*",
"cos2t",
"+",
"xo",
"*",
"cos2t",
"-",
"y",
"*",
"sin2t",
"+",
"yo",
"*",
"sin2t",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"4",
"*",
"sy",
"**",
"4",
")",
"# k += 1",
"j",
"+=",
"1",
"if",
"sx_var",
":",
"k",
"=",
"npvar",
"if",
"amp_var",
":",
"# H(sx,amp)/G = H(amp,sx)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"xo_var",
":",
"# H(sx,xo)/G = H(xo,sx)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"yo_var",
":",
"# H(sx,yo)/G = H(yo/sx)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"# if sx_var:",
"# H(sx,sx)/G = (-3.0*sx**2 + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2/sx**6",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"-",
"3",
"*",
"sx2",
"+",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"sx",
"**",
"6",
"k",
"+=",
"1",
"if",
"sy_var",
":",
"# H(sx,sy)/G = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(sx**3*sy**3)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"3",
"*",
"sy",
"**",
"3",
")",
"k",
"+=",
"1",
"if",
"theta_var",
":",
"# H(sx,t)/G = (-2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**5*sy**2)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"-",
"2",
"*",
"sx2",
"*",
"sy2",
"+",
"(",
"sy2",
"-",
"sx2",
")",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"5",
"*",
"sy",
"**",
"2",
")",
"# k += 1",
"j",
"+=",
"1",
"if",
"sy_var",
":",
"k",
"=",
"npvar",
"if",
"amp_var",
":",
"# H(sy,amp)/G = H(amp,sy)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"xo_var",
":",
"# H(sy,xo)/G = H(xo,sy)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"yo_var",
":",
"# H(sy,yo)/G = H(yo/sy)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"sx_var",
":",
"# H(sy,sx)/G = H(sx,sy)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"# if sy_var:",
"# H(sy,sy)/G = (-3.0*sy**2 + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/sy**6",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"-",
"3",
"*",
"sy2",
"+",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"sy",
"**",
"6",
"k",
"+=",
"1",
"if",
"theta_var",
":",
"# H(sy,t)/G = (2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**2*sy**5)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"2",
"*",
"sx2",
"*",
"sy2",
"+",
"(",
"sy2",
"-",
"sx2",
")",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"(",
"xsin",
"-",
"ycos",
")",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"2",
"*",
"sy",
"**",
"5",
")",
"# k += 1",
"j",
"+=",
"1",
"if",
"theta_var",
":",
"k",
"=",
"npvar",
"if",
"amp_var",
":",
"# H(t,amp)/G = H(amp,t)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"xo_var",
":",
"# H(t,xo)/G = H(xo,t)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"yo_var",
":",
"# H(t,yo)/G = H(yo/t)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"sx_var",
":",
"# H(t,sx)/G = H(sx,t)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"if",
"sy_var",
":",
"# H(t,sy)/G = H(sy,t)/G",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"hmat",
"[",
"k",
"]",
"[",
"j",
"]",
"k",
"+=",
"1",
"# if theta_var:",
"# H(t,t)/G = (sx**2*sy**2*(sx**2*(((x - xo)*sin(t) + (-y + yo)*cos(t))**2 - 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2) + sy**2*(-1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2 + ((x - xo)*cos(t) + (y - yo)*sin(t))**2)) + (sx**2 - 1.0*sy**2)**2*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2)/(sx**4*sy**4)",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"sx2",
"*",
"sy2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"sx2",
"*",
"(",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
"-",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
")",
"+",
"sy2",
"*",
"(",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
"-",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
")",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"+=",
"(",
"sx2",
"-",
"sy2",
")",
"**",
"2",
"*",
"(",
"xsin",
"-",
"ycos",
")",
"**",
"2",
"*",
"(",
"xcos",
"+",
"ysin",
")",
"**",
"2",
"hmat",
"[",
"j",
"]",
"[",
"k",
"]",
"*=",
"model",
"/",
"(",
"sx",
"**",
"4",
"*",
"sy",
"**",
"4",
")",
"# j += 1",
"# save the number of variables for the next iteration",
"# as we need to start our indexing at this number",
"npvar",
"=",
"k",
"return",
"np",
".",
"array",
"(",
"hmat",
")"
] | 44.539519 | 29.130584 |
def VxLANTunnelState_originator_switch_info_switchIpV4Address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
VxLANTunnelState = ET.SubElement(config, "VxLANTunnelState", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(VxLANTunnelState, "originator-switch-info")
switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address")
switchIpV4Address.text = kwargs.pop('switchIpV4Address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"VxLANTunnelState_originator_switch_info_switchIpV4Address",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"VxLANTunnelState",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"VxLANTunnelState\"",
",",
"xmlns",
"=",
"\"http://brocade.com/ns/brocade-notification-stream\"",
")",
"originator_switch_info",
"=",
"ET",
".",
"SubElement",
"(",
"VxLANTunnelState",
",",
"\"originator-switch-info\"",
")",
"switchIpV4Address",
"=",
"ET",
".",
"SubElement",
"(",
"originator_switch_info",
",",
"\"switchIpV4Address\"",
")",
"switchIpV4Address",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'switchIpV4Address'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 55.545455 | 28.545455 |
def logger(filter='WARN'):
    """Set up CASA to write log messages to standard output.

    filter
      The log level filter: less urgent messages will not be shown. Valid values
      are strings: "DEBUG1", "INFO5", ... "INFO1", "INFO", "WARN", "SEVERE".

    This function creates and returns a CASA "log sink" object that is
    configured to write to standard output. The default CASA implementation
    would *always* create a file named ``casapy.log`` in the current
    directory; this function safely prevents such a file from being left
    around. This is particularly important if you don't have write permissions
    to the current directory.
    """
    import errno, os, shutil, tempfile

    cwd = os.getcwd()
    tempdir = None

    try:
        # Create the sink while chdir'ed into a scratch directory so the
        # unavoidable casapy.log lands there instead of the user's cwd.
        tempdir = tempfile.mkdtemp(prefix='casautil')

        try:
            os.chdir(tempdir)
            sink = tools.logsink()
            sink.setlogfile(sanitize_unicode(os.devnull))
            try:
                os.unlink('casapy.log')
            except OSError as e:
                # ENOENT means the stray log file was never created; anything
                # else is a real failure and must propagate.
                if e.errno != errno.ENOENT:
                    raise
        finally:
            os.chdir(cwd)
    finally:
        if tempdir is not None:
            shutil.rmtree(tempdir, onerror=_rmtree_error)

    sink.showconsole(True)
    sink.setglobal(True)
    sink.filter(sanitize_unicode(filter.upper()))
    return sink
"def",
"logger",
"(",
"filter",
"=",
"'WARN'",
")",
":",
"import",
"os",
",",
"shutil",
",",
"tempfile",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"tempdir",
"=",
"None",
"try",
":",
"tempdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"'casautil'",
")",
"try",
":",
"os",
".",
"chdir",
"(",
"tempdir",
")",
"sink",
"=",
"tools",
".",
"logsink",
"(",
")",
"sink",
".",
"setlogfile",
"(",
"sanitize_unicode",
"(",
"os",
".",
"devnull",
")",
")",
"try",
":",
"os",
".",
"unlink",
"(",
"'casapy.log'",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"2",
":",
"raise",
"# otherwise, it's a ENOENT, in which case, no worries.",
"finally",
":",
"os",
".",
"chdir",
"(",
"cwd",
")",
"finally",
":",
"if",
"tempdir",
"is",
"not",
"None",
":",
"shutil",
".",
"rmtree",
"(",
"tempdir",
",",
"onerror",
"=",
"_rmtree_error",
")",
"sink",
".",
"showconsole",
"(",
"True",
")",
"sink",
".",
"setglobal",
"(",
"True",
")",
"sink",
".",
"filter",
"(",
"sanitize_unicode",
"(",
"filter",
".",
"upper",
"(",
")",
")",
")",
"return",
"sink"
] | 32.162791 | 22.651163 |
def asdict(
    inst,
    recurse=True,
    filter=None,
    dict_factory=dict,
    retain_collection_types=False,
):
    """
    Return the ``attrs`` attribute values of *inst* as a dict.
    Optionally recurse into other ``attrs``-decorated classes.
    :param inst: Instance of an ``attrs``-decorated class.
    :param bool recurse: Recurse into classes that are also
        ``attrs``-decorated.
    :param callable filter: A callable whose return code determines whether an
        attribute or element is included (``True``) or dropped (``False``). Is
        called with the :class:`attr.Attribute` as the first argument and the
        value as the second argument.
    :param callable dict_factory: A callable to produce dictionaries from. For
        example, to produce ordered dictionaries instead of normal Python
        dictionaries, pass in ``collections.OrderedDict``.
    :param bool retain_collection_types: Do not convert to ``list`` when
        encountering an attribute whose type is ``tuple`` or ``set``. Only
        meaningful if ``recurse`` is ``True``.
    :rtype: return type of *dict_factory*
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.
    .. versionadded:: 16.0.0 *dict_factory*
    .. versionadded:: 16.1.0 *retain_collection_types*
    """
    attrs = fields(inst.__class__)
    rv = dict_factory()
    for a in attrs:
        v = getattr(inst, a.name)
        # Skip attributes the caller's filter rejects.
        if filter is not None and not filter(a, v):
            continue
        if recurse is True:
            if has(v.__class__):
                # Nested attrs instance: serialize it with the same settings.
                rv[a.name] = asdict(
                    v, True, filter, dict_factory, retain_collection_types
                )
            elif isinstance(v, (tuple, list, set)):
                # Collection of values: convert each element, optionally
                # keeping the original collection type instead of list.
                cf = v.__class__ if retain_collection_types is True else list
                rv[a.name] = cf(
                    [
                        _asdict_anything(
                            i, filter, dict_factory, retain_collection_types
                        )
                        for i in v
                    ]
                )
            elif isinstance(v, dict):
                # Mapping: convert both keys and values.
                df = dict_factory
                rv[a.name] = df(
                    (
                        _asdict_anything(
                            kk, filter, df, retain_collection_types
                        ),
                        _asdict_anything(
                            vv, filter, df, retain_collection_types
                        ),
                    )
                    for kk, vv in iteritems(v)
                )
            else:
                # Scalar (or unrecognized) value: store as-is.
                rv[a.name] = v
        else:
            # Non-recursive mode: store every value verbatim.
            rv[a.name] = v
    return rv
"def",
"asdict",
"(",
"inst",
",",
"recurse",
"=",
"True",
",",
"filter",
"=",
"None",
",",
"dict_factory",
"=",
"dict",
",",
"retain_collection_types",
"=",
"False",
",",
")",
":",
"attrs",
"=",
"fields",
"(",
"inst",
".",
"__class__",
")",
"rv",
"=",
"dict_factory",
"(",
")",
"for",
"a",
"in",
"attrs",
":",
"v",
"=",
"getattr",
"(",
"inst",
",",
"a",
".",
"name",
")",
"if",
"filter",
"is",
"not",
"None",
"and",
"not",
"filter",
"(",
"a",
",",
"v",
")",
":",
"continue",
"if",
"recurse",
"is",
"True",
":",
"if",
"has",
"(",
"v",
".",
"__class__",
")",
":",
"rv",
"[",
"a",
".",
"name",
"]",
"=",
"asdict",
"(",
"v",
",",
"True",
",",
"filter",
",",
"dict_factory",
",",
"retain_collection_types",
")",
"elif",
"isinstance",
"(",
"v",
",",
"(",
"tuple",
",",
"list",
",",
"set",
")",
")",
":",
"cf",
"=",
"v",
".",
"__class__",
"if",
"retain_collection_types",
"is",
"True",
"else",
"list",
"rv",
"[",
"a",
".",
"name",
"]",
"=",
"cf",
"(",
"[",
"_asdict_anything",
"(",
"i",
",",
"filter",
",",
"dict_factory",
",",
"retain_collection_types",
")",
"for",
"i",
"in",
"v",
"]",
")",
"elif",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"df",
"=",
"dict_factory",
"rv",
"[",
"a",
".",
"name",
"]",
"=",
"df",
"(",
"(",
"_asdict_anything",
"(",
"kk",
",",
"filter",
",",
"df",
",",
"retain_collection_types",
")",
",",
"_asdict_anything",
"(",
"vv",
",",
"filter",
",",
"df",
",",
"retain_collection_types",
")",
",",
")",
"for",
"kk",
",",
"vv",
"in",
"iteritems",
"(",
"v",
")",
")",
"else",
":",
"rv",
"[",
"a",
".",
"name",
"]",
"=",
"v",
"else",
":",
"rv",
"[",
"a",
".",
"name",
"]",
"=",
"v",
"return",
"rv"
] | 36.082192 | 19.643836 |
def get_term_pillar(filter_name,
                    term_name,
                    pillar_key='acl',
                    pillarenv=None,
                    saltenv=None):
    '''
    Helper usable inside a state SLS to fetch a term's configuration,
    given its name, under the filter uniquely identified by its name.
    filter_name
        The name of the filter.
    term_name
        The name of the term.
    pillar_key: ``acl``
        The root key of the whole policy config. Default: ``acl``.
    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.
    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
    '''
    # Delegate directly to the execution module implementation.
    lookup_opts = {
        'pillar_key': pillar_key,
        'pillarenv': pillarenv,
        'saltenv': saltenv,
    }
    return __salt__['capirca.get_term_pillar'](filter_name, term_name,
                                               **lookup_opts)
"def",
"get_term_pillar",
"(",
"filter_name",
",",
"term_name",
",",
"pillar_key",
"=",
"'acl'",
",",
"pillarenv",
"=",
"None",
",",
"saltenv",
"=",
"None",
")",
":",
"return",
"__salt__",
"[",
"'capirca.get_term_pillar'",
"]",
"(",
"filter_name",
",",
"term_name",
",",
"pillar_key",
"=",
"pillar_key",
",",
"pillarenv",
"=",
"pillarenv",
",",
"saltenv",
"=",
"saltenv",
")"
] | 34.46875 | 22.15625 |
def get_bundle(self, bundle_id=None):
    # type: (Union[Bundle, int]) -> Bundle
    """
    Retrieves the :class:`~pelix.framework.Bundle` object for the bundle
    matching the given ID (int). If no ID is given (None), the bundle
    associated to this context is returned.
    :param bundle_id: A bundle ID (optional)
    :return: The requested :class:`~pelix.framework.Bundle` object
    :raise BundleException: The given ID doesn't exist or is invalid
    """
    if bundle_id is None:
        # No ID given: return the bundle backing this context
        return self.__bundle

    if isinstance(bundle_id, Bundle):
        # Accept a Bundle object directly (compatibility with older
        # install_bundle() which returned the bundle itself)
        bundle_id = bundle_id.get_bundle_id()

    return self.__framework.get_bundle_by_id(bundle_id)
"def",
"get_bundle",
"(",
"self",
",",
"bundle_id",
"=",
"None",
")",
":",
"# type: (Union[Bundle, int]) -> Bundle",
"if",
"bundle_id",
"is",
"None",
":",
"# Current bundle",
"return",
"self",
".",
"__bundle",
"elif",
"isinstance",
"(",
"bundle_id",
",",
"Bundle",
")",
":",
"# Got a bundle (compatibility with older install_bundle())",
"bundle_id",
"=",
"bundle_id",
".",
"get_bundle_id",
"(",
")",
"return",
"self",
".",
"__framework",
".",
"get_bundle_by_id",
"(",
"bundle_id",
")"
] | 42.157895 | 17.210526 |
def read_configuration(
        filepath, find_others=False, ignore_option_errors=False):
    """Read given configuration file and returns options from it as a dict.
    :param str|unicode filepath: Path to configuration file
        to get options from.
    :param bool find_others: Whether to search for other configuration files
        which could be on in various places.
    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.
    :rtype: dict
    """
    from setuptools.dist import Distribution, _Distribution

    config_path = os.path.abspath(filepath)
    if not os.path.isfile(config_path):
        raise DistutilsFileError(
            'Configuration file %s does not exist.' % config_path)

    # Relative references inside the config (file:, attr:) are resolved
    # against the config's own directory, so chdir there for the duration.
    previous_cwd = os.getcwd()
    os.chdir(os.path.dirname(config_path))
    try:
        dist = Distribution()
        config_files = dist.find_config_files() if find_others else []
        if config_path not in config_files:
            config_files.append(config_path)
        _Distribution.parse_config_files(dist, filenames=config_files)
        handlers = parse_configuration(
            dist, dist.command_options,
            ignore_option_errors=ignore_option_errors)
    finally:
        os.chdir(previous_cwd)
    return configuration_to_dict(handlers)
"def",
"read_configuration",
"(",
"filepath",
",",
"find_others",
"=",
"False",
",",
"ignore_option_errors",
"=",
"False",
")",
":",
"from",
"setuptools",
".",
"dist",
"import",
"Distribution",
",",
"_Distribution",
"filepath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filepath",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"raise",
"DistutilsFileError",
"(",
"'Configuration file %s does not exist.'",
"%",
"filepath",
")",
"current_directory",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filepath",
")",
")",
"try",
":",
"dist",
"=",
"Distribution",
"(",
")",
"filenames",
"=",
"dist",
".",
"find_config_files",
"(",
")",
"if",
"find_others",
"else",
"[",
"]",
"if",
"filepath",
"not",
"in",
"filenames",
":",
"filenames",
".",
"append",
"(",
"filepath",
")",
"_Distribution",
".",
"parse_config_files",
"(",
"dist",
",",
"filenames",
"=",
"filenames",
")",
"handlers",
"=",
"parse_configuration",
"(",
"dist",
",",
"dist",
".",
"command_options",
",",
"ignore_option_errors",
"=",
"ignore_option_errors",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"current_directory",
")",
"return",
"configuration_to_dict",
"(",
"handlers",
")"
] | 31.377778 | 21.244444 |
def _copy_calibration(self, calibration):
"""Copy another ``StereoCalibration`` object's values."""
for key, item in calibration.__dict__.items():
self.__dict__[key] = item | [
"def",
"_copy_calibration",
"(",
"self",
",",
"calibration",
")",
":",
"for",
"key",
",",
"item",
"in",
"calibration",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"self",
".",
"__dict__",
"[",
"key",
"]",
"=",
"item"
] | 49.25 | 4.5 |
def _pre_request(self, url, method = u"get", data = None, headers=None, **kwargs):
"""
hook for manipulating the _pre request data
"""
header = {
u"Content-Type": u"application/json",
u"User-Agent": u"salesking_api_py_v1",
}
if headers:
headers.update(header)
else:
headers = header
if url.find(self.base_url) !=0:
url = u"%s%s" %(self.base_url, url)
return (url, method, data, headers, kwargs) | [
"def",
"_pre_request",
"(",
"self",
",",
"url",
",",
"method",
"=",
"u\"get\"",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"header",
"=",
"{",
"u\"Content-Type\"",
":",
"u\"application/json\"",
",",
"u\"User-Agent\"",
":",
"u\"salesking_api_py_v1\"",
",",
"}",
"if",
"headers",
":",
"headers",
".",
"update",
"(",
"header",
")",
"else",
":",
"headers",
"=",
"header",
"if",
"url",
".",
"find",
"(",
"self",
".",
"base_url",
")",
"!=",
"0",
":",
"url",
"=",
"u\"%s%s\"",
"%",
"(",
"self",
".",
"base_url",
",",
"url",
")",
"return",
"(",
"url",
",",
"method",
",",
"data",
",",
"headers",
",",
"kwargs",
")"
] | 34.133333 | 14 |
def visit_list(self, node):
    """return an astroid.List node as string"""
    rendered_items = (child.accept(self) for child in node.elts)
    return "[{}]".format(", ".join(rendered_items))
"def",
"visit_list",
"(",
"self",
",",
"node",
")",
":",
"return",
"\"[%s]\"",
"%",
"\", \"",
".",
"join",
"(",
"child",
".",
"accept",
"(",
"self",
")",
"for",
"child",
"in",
"node",
".",
"elts",
")"
] | 51.333333 | 16.333333 |
def check_encoding_chars(encoding_chars):
    """
    Validate the given encoding chars
    :type encoding_chars: ``dict``
    :param encoding_chars: the encoding chars (see :func:`hl7apy.set_default_encoding_chars`)
    :raises: :exc:`hl7apy.exceptions.InvalidEncodingChars` if the given encoding chars are not valid
    """
    # ``collections.MutableMapping`` was a deprecated alias removed in
    # Python 3.10; the ABC lives in ``collections.abc`` (since 3.3).
    from collections.abc import MutableMapping
    if not isinstance(encoding_chars, MutableMapping):
        raise InvalidEncodingChars
    required = {'FIELD', 'COMPONENT', 'SUBCOMPONENT', 'REPETITION', 'ESCAPE'}
    missing = required - set(encoding_chars.keys())
    if missing:
        raise InvalidEncodingChars('Missing required encoding chars')
    # Only the required keys are checked for duplicates; extra custom
    # keys may reuse a character without triggering an error.
    values = [v for k, v in encoding_chars.items() if k in required]
    if len(values) > len(set(values)):
        raise InvalidEncodingChars('Found duplicate encoding chars')
"def",
"check_encoding_chars",
"(",
"encoding_chars",
")",
":",
"if",
"not",
"isinstance",
"(",
"encoding_chars",
",",
"collections",
".",
"MutableMapping",
")",
":",
"raise",
"InvalidEncodingChars",
"required",
"=",
"{",
"'FIELD'",
",",
"'COMPONENT'",
",",
"'SUBCOMPONENT'",
",",
"'REPETITION'",
",",
"'ESCAPE'",
"}",
"missing",
"=",
"required",
"-",
"set",
"(",
"encoding_chars",
".",
"keys",
"(",
")",
")",
"if",
"missing",
":",
"raise",
"InvalidEncodingChars",
"(",
"'Missing required encoding chars'",
")",
"values",
"=",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"encoding_chars",
".",
"items",
"(",
")",
"if",
"k",
"in",
"required",
"]",
"if",
"len",
"(",
"values",
")",
">",
"len",
"(",
"set",
"(",
"values",
")",
")",
":",
"raise",
"InvalidEncodingChars",
"(",
"'Found duplicate encoding chars'",
")"
] | 44.722222 | 21.944444 |
async def _process_access_form(self, html: str) -> (str, str):
    """
    Handle the intermediate access-rights page: parse the form it
    contains and submit it, following the redirect.
    :param html: html page containing the access form
    :return: url and html of the page reached after submitting the form
    """
    parser = AccessPageParser()
    parser.feed(html)
    parser.close()
    # Re-submit the parsed input fields to the form's target URL.
    return await self.driver.post_text(parser.url, dict(parser.inputs))
"async",
"def",
"_process_access_form",
"(",
"self",
",",
"html",
":",
"str",
")",
"->",
"(",
"str",
",",
"str",
")",
":",
"# Parse page",
"p",
"=",
"AccessPageParser",
"(",
")",
"p",
".",
"feed",
"(",
"html",
")",
"p",
".",
"close",
"(",
")",
"form_url",
"=",
"p",
".",
"url",
"form_data",
"=",
"dict",
"(",
"p",
".",
"inputs",
")",
"# Send request",
"url",
",",
"html",
"=",
"await",
"self",
".",
"driver",
".",
"post_text",
"(",
"form_url",
",",
"form_data",
")",
"return",
"url",
",",
"html"
] | 25.722222 | 17.833333 |
def get_summary_dict(self):
    """
    Returns a dict with a summary of the computed properties.
    """
    summary = defaultdict(list)
    # Scalar / precomputed quantities.
    summary["pressure"] = self.pressure
    summary["poisson"] = self.poisson
    summary["mass"] = self.mass
    summary["natoms"] = int(self.natoms)
    summary["bulk_modulus"] = self.bulk_modulus
    summary["gibbs_free_energy"] = self.gibbs_free_energy
    summary["temperatures"] = self.temperatures
    summary["optimum_volumes"] = self.optimum_volumes
    # Temperature-dependent quantities, evaluated at each
    # (optimum volume, temperature) pair.
    for volume, temperature in zip(self.optimum_volumes, self.temperatures):
        summary["debye_temperature"].append(self.debye_temperature(volume))
        summary["gruneisen_parameter"].append(
            self.gruneisen_parameter(temperature, volume))
        summary["thermal_conductivity"].append(
            self.thermal_conductivity(temperature, volume))
    return summary
"def",
"get_summary_dict",
"(",
"self",
")",
":",
"d",
"=",
"defaultdict",
"(",
"list",
")",
"d",
"[",
"\"pressure\"",
"]",
"=",
"self",
".",
"pressure",
"d",
"[",
"\"poisson\"",
"]",
"=",
"self",
".",
"poisson",
"d",
"[",
"\"mass\"",
"]",
"=",
"self",
".",
"mass",
"d",
"[",
"\"natoms\"",
"]",
"=",
"int",
"(",
"self",
".",
"natoms",
")",
"d",
"[",
"\"bulk_modulus\"",
"]",
"=",
"self",
".",
"bulk_modulus",
"d",
"[",
"\"gibbs_free_energy\"",
"]",
"=",
"self",
".",
"gibbs_free_energy",
"d",
"[",
"\"temperatures\"",
"]",
"=",
"self",
".",
"temperatures",
"d",
"[",
"\"optimum_volumes\"",
"]",
"=",
"self",
".",
"optimum_volumes",
"for",
"v",
",",
"t",
"in",
"zip",
"(",
"self",
".",
"optimum_volumes",
",",
"self",
".",
"temperatures",
")",
":",
"d",
"[",
"\"debye_temperature\"",
"]",
".",
"append",
"(",
"self",
".",
"debye_temperature",
"(",
"v",
")",
")",
"d",
"[",
"\"gruneisen_parameter\"",
"]",
".",
"append",
"(",
"self",
".",
"gruneisen_parameter",
"(",
"t",
",",
"v",
")",
")",
"d",
"[",
"\"thermal_conductivity\"",
"]",
".",
"append",
"(",
"self",
".",
"thermal_conductivity",
"(",
"t",
",",
"v",
")",
")",
"return",
"d"
] | 43.277778 | 14.166667 |
def refresh(self, refresh_binary=True):
    '''
    Performs GET request and refreshes RDF information for resource.
    Args:
        refresh_binary (bool): when True and the resource is a
            NonRDFSource, also refresh its binary payload
    Returns:
        None
    '''
    updated_self = self.repo.get_resource(self.uri)
    # if resource type of updated_self != self, raise exception
    if not isinstance(self, type(updated_self)):
        raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self)) )
    if updated_self:
        # update attributes
        self.status_code = updated_self.status_code
        self.rdf.data = updated_self.rdf.data
        self.headers = updated_self.headers
        self.exists = updated_self.exists
        # update graph if RDFSource
        if type(self) != NonRDFSource:
            self._parse_graph()
        # empty versions
        self.versions = SimpleNamespace()
        # if NonRDF, set binary attributes
        if type(updated_self) == NonRDFSource and refresh_binary:
            self.binary.refresh(updated_self)
        # fire resource._post_create hook if exists
        if hasattr(self,'_post_refresh'):
            self._post_refresh()
        # cleanup
        del(updated_self)
    else:
        # BUGFIX: the '%s' placeholder previously had no argument, so the
        # literal '%s' was logged; pass the resource URI lazily.
        logger.debug('resource %s not found, dumping values', self.uri)
        self._empty_resource_attributes()
"def",
"refresh",
"(",
"self",
",",
"refresh_binary",
"=",
"True",
")",
":",
"updated_self",
"=",
"self",
".",
"repo",
".",
"get_resource",
"(",
"self",
".",
"uri",
")",
"# if resource type of updated_self != self, raise exception",
"if",
"not",
"isinstance",
"(",
"self",
",",
"type",
"(",
"updated_self",
")",
")",
":",
"raise",
"Exception",
"(",
"'Instantiated %s, but repository reports this resource is %s'",
"%",
"(",
"type",
"(",
"updated_self",
")",
",",
"type",
"(",
"self",
")",
")",
")",
"if",
"updated_self",
":",
"# update attributes",
"self",
".",
"status_code",
"=",
"updated_self",
".",
"status_code",
"self",
".",
"rdf",
".",
"data",
"=",
"updated_self",
".",
"rdf",
".",
"data",
"self",
".",
"headers",
"=",
"updated_self",
".",
"headers",
"self",
".",
"exists",
"=",
"updated_self",
".",
"exists",
"# update graph if RDFSource",
"if",
"type",
"(",
"self",
")",
"!=",
"NonRDFSource",
":",
"self",
".",
"_parse_graph",
"(",
")",
"# empty versions",
"self",
".",
"versions",
"=",
"SimpleNamespace",
"(",
")",
"# if NonRDF, set binary attributes",
"if",
"type",
"(",
"updated_self",
")",
"==",
"NonRDFSource",
"and",
"refresh_binary",
":",
"self",
".",
"binary",
".",
"refresh",
"(",
"updated_self",
")",
"# fire resource._post_create hook if exists",
"if",
"hasattr",
"(",
"self",
",",
"'_post_refresh'",
")",
":",
"self",
".",
"_post_refresh",
"(",
")",
"# cleanup",
"del",
"(",
"updated_self",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'resource %s not found, dumping values'",
")",
"self",
".",
"_empty_resource_attributes",
"(",
")"
] | 24.06383 | 23.893617 |
def find_hba(self, all_atoms):
    """Find all possible hydrogen bond acceptors"""
    data = namedtuple('hbondacceptor', 'a a_orig_atom a_orig_idx type')
    halogens = [9, 17, 35, 53]
    acceptors = []
    for atom in all_atoms:
        if not atom.OBAtom.IsHbondAcceptor():
            continue
        # Halogen atoms are excluded; so are alternate-conformer atoms.
        if atom.atomicnum in halogens or atom.idx in self.altconf:
            continue
        orig_idx = self.Mapper.mapid(atom.idx, mtype=self.mtype, bsid=self.bsid)
        acceptors.append(data(a=atom,
                              a_orig_atom=self.Mapper.id_to_atom(orig_idx),
                              a_orig_idx=orig_idx,
                              type='regular'))
    return acceptors
"def",
"find_hba",
"(",
"self",
",",
"all_atoms",
")",
":",
"data",
"=",
"namedtuple",
"(",
"'hbondacceptor'",
",",
"'a a_orig_atom a_orig_idx type'",
")",
"a_set",
"=",
"[",
"]",
"for",
"atom",
"in",
"filter",
"(",
"lambda",
"at",
":",
"at",
".",
"OBAtom",
".",
"IsHbondAcceptor",
"(",
")",
",",
"all_atoms",
")",
":",
"if",
"atom",
".",
"atomicnum",
"not",
"in",
"[",
"9",
",",
"17",
",",
"35",
",",
"53",
"]",
"and",
"atom",
".",
"idx",
"not",
"in",
"self",
".",
"altconf",
":",
"# Exclude halogen atoms",
"a_orig_idx",
"=",
"self",
".",
"Mapper",
".",
"mapid",
"(",
"atom",
".",
"idx",
",",
"mtype",
"=",
"self",
".",
"mtype",
",",
"bsid",
"=",
"self",
".",
"bsid",
")",
"a_orig_atom",
"=",
"self",
".",
"Mapper",
".",
"id_to_atom",
"(",
"a_orig_idx",
")",
"a_set",
".",
"append",
"(",
"data",
"(",
"a",
"=",
"atom",
",",
"a_orig_atom",
"=",
"a_orig_atom",
",",
"a_orig_idx",
"=",
"a_orig_idx",
",",
"type",
"=",
"'regular'",
")",
")",
"return",
"a_set"
] | 64.7 | 33.6 |
def get_db_filename(impl, working_dir):
    """
    Get the absolute path to the last-block file.
    """
    # The database file is named after the implementation's virtual chain.
    return os.path.join(working_dir, impl.get_virtual_chain_name() + ".db")
"def",
"get_db_filename",
"(",
"impl",
",",
"working_dir",
")",
":",
"db_filename",
"=",
"impl",
".",
"get_virtual_chain_name",
"(",
")",
"+",
"\".db\"",
"return",
"os",
".",
"path",
".",
"join",
"(",
"working_dir",
",",
"db_filename",
")"
] | 34.333333 | 5.666667 |
def separate_scalar_factor(element):
    """Construct a monomial with the coefficient separated
    from an element in a polynomial.

    :param element: a SymPy expression, or a plain int/float/complex.
    :returns: tuple ``(monomial, coeff)`` where ``coeff`` is a Python
        float or complex and ``monomial`` is a SymPy expression.
    """
    coeff = 1.0
    monomial = S.One
    if isinstance(element, (int, float, complex)):
        # Plain numbers have no symbolic part; monomial stays S.One.
        coeff *= element
        return monomial, coeff
    for var in element.as_coeff_mul()[1]:
        if not (var.is_Number or var.is_imaginary):
            # Symbolic factor: fold it into the monomial.
            monomial = monomial * var
        else:
            if var.is_Number:
                # NOTE(review): this overwrites (does not multiply) any
                # previously seen numeric factor -- assumes at most one
                # Number appears among the factors; confirm with callers.
                coeff = float(var)
            # If not, then it is imaginary
            else:
                coeff = 1j * coeff
    # Fold in the leading coefficient extracted by as_coeff_mul().
    coeff = float(element.as_coeff_mul()[0]) * coeff
    return monomial, coeff
"def",
"separate_scalar_factor",
"(",
"element",
")",
":",
"coeff",
"=",
"1.0",
"monomial",
"=",
"S",
".",
"One",
"if",
"isinstance",
"(",
"element",
",",
"(",
"int",
",",
"float",
",",
"complex",
")",
")",
":",
"coeff",
"*=",
"element",
"return",
"monomial",
",",
"coeff",
"for",
"var",
"in",
"element",
".",
"as_coeff_mul",
"(",
")",
"[",
"1",
"]",
":",
"if",
"not",
"(",
"var",
".",
"is_Number",
"or",
"var",
".",
"is_imaginary",
")",
":",
"monomial",
"=",
"monomial",
"*",
"var",
"else",
":",
"if",
"var",
".",
"is_Number",
":",
"coeff",
"=",
"float",
"(",
"var",
")",
"# If not, then it is imaginary",
"else",
":",
"coeff",
"=",
"1j",
"*",
"coeff",
"coeff",
"=",
"float",
"(",
"element",
".",
"as_coeff_mul",
"(",
")",
"[",
"0",
"]",
")",
"*",
"coeff",
"return",
"monomial",
",",
"coeff"
] | 32.6 | 10.25 |
def authenticator(function, challenges=()):
    """Wraps authentication logic, verify_user through to the authentication function.
    The verify_user function passed in should accept an API key and return a user object to
    store in the request context if authentication succeeded.
    """
    challenges = challenges or ('{} realm="simple"'.format(function.__name__), )

    def wrapper(verify_user):
        def authenticate(request, response, **kwargs):
            result = function(request, response, verify_user, **kwargs)
            if result is None or result is False:
                # Human-readable authenticator name: first docstring line,
                # falling back to the function name.
                try:
                    cred_name = function.__doc__.splitlines()[0]
                except AttributeError:
                    cred_name = function.__name__
                if result is None:
                    raise HTTPUnauthorized(
                        'Authentication Required',
                        'Please provide valid {0} credentials'.format(cred_name),
                        challenges=challenges)
                raise HTTPUnauthorized(
                    'Invalid Authentication',
                    'Provided {0} credentials were invalid'.format(cred_name),
                    challenges=challenges)
            request.context['user'] = result
            return True

        authenticate.__doc__ = function.__doc__
        return authenticate
    return wrapper
"def",
"authenticator",
"(",
"function",
",",
"challenges",
"=",
"(",
")",
")",
":",
"challenges",
"=",
"challenges",
"or",
"(",
"'{} realm=\"simple\"'",
".",
"format",
"(",
"function",
".",
"__name__",
")",
",",
")",
"def",
"wrapper",
"(",
"verify_user",
")",
":",
"def",
"authenticate",
"(",
"request",
",",
"response",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"function",
"(",
"request",
",",
"response",
",",
"verify_user",
",",
"*",
"*",
"kwargs",
")",
"def",
"authenticator_name",
"(",
")",
":",
"try",
":",
"return",
"function",
".",
"__doc__",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"except",
"AttributeError",
":",
"return",
"function",
".",
"__name__",
"if",
"result",
"is",
"None",
":",
"raise",
"HTTPUnauthorized",
"(",
"'Authentication Required'",
",",
"'Please provide valid {0} credentials'",
".",
"format",
"(",
"authenticator_name",
"(",
")",
")",
",",
"challenges",
"=",
"challenges",
")",
"if",
"result",
"is",
"False",
":",
"raise",
"HTTPUnauthorized",
"(",
"'Invalid Authentication'",
",",
"'Provided {0} credentials were invalid'",
".",
"format",
"(",
"authenticator_name",
"(",
")",
")",
",",
"challenges",
"=",
"challenges",
")",
"request",
".",
"context",
"[",
"'user'",
"]",
"=",
"result",
"return",
"True",
"authenticate",
".",
"__doc__",
"=",
"function",
".",
"__doc__",
"return",
"authenticate",
"return",
"wrapper"
] | 40.171429 | 24.2 |
def eval(self, key, default=None, loc=None, correct_key=True):
    """Evaluates and sets the specified option value in
    environment `loc`. Many options need ``N`` to be defined in
    `loc`, some need `popsize`.
    Details
    -------
    Keys that contain 'filename' are not evaluated.
    For `loc` is None, the self-dict is used as environment
    :See: `evalall()`, `__call__`
    """
    # TODO: try: loc['dim'] = loc['N'] etc
    if correct_key:
        # Normalize a possibly abbreviated/misspelled option key first.
        key = self.corrected_key(key)
    # __call__ performs the actual evaluation; cache the result under key.
    self[key] = self(key, default, loc)
    return self[key]
"def",
"eval",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
",",
"loc",
"=",
"None",
",",
"correct_key",
"=",
"True",
")",
":",
"# TODO: try: loc['dim'] = loc['N'] etc",
"if",
"correct_key",
":",
"# in_key = key # for debugging only",
"key",
"=",
"self",
".",
"corrected_key",
"(",
"key",
")",
"self",
"[",
"key",
"]",
"=",
"self",
"(",
"key",
",",
"default",
",",
"loc",
")",
"return",
"self",
"[",
"key",
"]"
] | 33.894737 | 16.631579 |
def compact_view(self, merged_data, selected_meta, reference_no):
    """
    Creates and returns the compact view where the index of the dataframe is a
    multi index of the selected metadata.
    Side effect: Alters the merged_data parameter (its index is replaced).
    :param merged_data: The merged data that is to be used to create the compact view
    :param selected_meta: The selected metadata (iterable of column names) for the multi index
    :param reference_no: The index level whose values reference rows of the metadata table
    :return: Returns the multi-indexed dataframe w.r.t. the selected metadata
    """
    meta_names = list(selected_meta)
    # BUGFIX: DataFrame.ix was removed in pandas 1.0; .loc is the
    # label-based equivalent for this row lookup.
    ref_rows = self.all_meta_data.loc[
        merged_data.index.get_level_values(reference_no)]
    meta_index = np.asarray([ref_rows[name].values for name in meta_names])
    merged_data.index = pd.MultiIndex.from_arrays(meta_index, names=meta_names)
    return merged_data
"def",
"compact_view",
"(",
"self",
",",
"merged_data",
",",
"selected_meta",
",",
"reference_no",
")",
":",
"meta_names",
"=",
"list",
"(",
"selected_meta",
")",
"meta_index",
"=",
"[",
"]",
"for",
"x",
"in",
"meta_names",
":",
"meta_index",
".",
"append",
"(",
"self",
".",
"all_meta_data",
".",
"ix",
"[",
"merged_data",
".",
"index",
".",
"get_level_values",
"(",
"reference_no",
")",
"]",
"[",
"x",
"]",
".",
"values",
")",
"meta_index",
"=",
"np",
".",
"asarray",
"(",
"meta_index",
")",
"multi_meta_index",
"=",
"pd",
".",
"MultiIndex",
".",
"from_arrays",
"(",
"meta_index",
",",
"names",
"=",
"meta_names",
")",
"merged_data",
".",
"index",
"=",
"multi_meta_index",
"return",
"merged_data"
] | 49.7 | 28.9 |
def find(self, *args, **kwargs):
    """Find and return the files collection documents that match ``filter``.
    Returns a cursor that iterates across files matching
    arbitrary queries on the files collection. Can be combined
    with other modifiers for additional control.
    For example::
        cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
        while (yield cursor.fetch_next):
            grid_out = cursor.next_object()
            data = yield grid_out.read()
    This iterates through all versions of "lisa.txt" stored in GridFS.
    Note that setting no_cursor_timeout to True may be important to
    prevent the cursor from timing out during long multi-file processing
    work.
    As another example, the call::
        most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
    would return a cursor to the three most recently uploaded files
    in GridFS.
    Follows a similar interface to
    :meth:`~motor.MotorCollection.find`
    in :class:`~motor.MotorCollection`.
    :Parameters:
      - `filter`: Search query.
      - `batch_size` (optional): The number of documents to return per
        batch.
      - `limit` (optional): The maximum number of documents to return.
      - `no_cursor_timeout` (optional): The server normally times out idle
        cursors after an inactivity period (10 minutes) to prevent excess
        memory use. Set this option to True prevent that.
      - `skip` (optional): The number of documents to skip before
        returning.
      - `sort` (optional): The order by which to sort results. Defaults to
        None.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`, created with
        :meth:`~MotorClient.start_session`.
      If a :class:`~pymongo.client_session.ClientSession` is passed to
      :meth:`find`, all returned :class:`MotorGridOut` instances
      are associated with that session.
    .. versionchanged:: 1.2
       Added session parameter.
    """
    # Build the framework-specific cursor class, then wrap the raw
    # delegate cursor in it.
    cursor_class = create_class_with_framework(
        AgnosticGridOutCursor, self._framework, self.__module__)
    raw_cursor = self.delegate.find(*args, **kwargs)
    return cursor_class(raw_cursor, self.collection)
"def",
"find",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cursor",
"=",
"self",
".",
"delegate",
".",
"find",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"grid_out_cursor",
"=",
"create_class_with_framework",
"(",
"AgnosticGridOutCursor",
",",
"self",
".",
"_framework",
",",
"self",
".",
"__module__",
")",
"return",
"grid_out_cursor",
"(",
"cursor",
",",
"self",
".",
"collection",
")"
] | 40.310345 | 23.12069 |
def __IpsToServerIds(self):
    """ Get list of mapping of ip address into a server id"""
    master_instance = self.__GetMasterInstance()
    assert(master_instance)
    retval, response = self.__RunMaprCli('node list -columns id')
    ip_to_id = {}
    # Each data row yields three whitespace-separated fields where the
    # first is the node id and the third is the ip; the header row is
    # recognized by its first field being the literal column name 'id'.
    for line in response.split('\n'):
        tokens = line.split()
        if len(tokens) == 3 and tokens[0] != 'id':
            ip_to_id[tokens[2]] = tokens[0]
    return ip_to_id
"def",
"__IpsToServerIds",
"(",
"self",
")",
":",
"master_instance",
"=",
"self",
".",
"__GetMasterInstance",
"(",
")",
"assert",
"(",
"master_instance",
")",
"retval",
",",
"response",
"=",
"self",
".",
"__RunMaprCli",
"(",
"'node list -columns id'",
")",
"ip_to_id",
"=",
"{",
"}",
"for",
"line_num",
",",
"line",
"in",
"enumerate",
"(",
"response",
".",
"split",
"(",
"'\\n'",
")",
")",
":",
"tokens",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"tokens",
")",
"==",
"3",
"and",
"tokens",
"[",
"0",
"]",
"!=",
"'id'",
":",
"instance_id",
"=",
"tokens",
"[",
"0",
"]",
"ip",
"=",
"tokens",
"[",
"2",
"]",
"ip_to_id",
"[",
"ip",
"]",
"=",
"instance_id",
"return",
"ip_to_id"
] | 37.846154 | 14.076923 |
def _maybe_strip_i18n_prefix_and_normalize(number, possible_idd_prefix):
    """Strips any international prefix (such as +, 00, 011) present in the
    number provided, normalizes the resulting number, and indicates if an
    international prefix was present.
    Arguments:
    number -- The non-normalized telephone number that we wish to strip any international
              dialing prefix from.
    possible_idd_prefix -- The international direct dialing prefix from the region we
              think this number may be dialed in.
    Returns a 2-tuple containing:
      - The corresponding CountryCodeSource if an international dialing prefix
        could be removed from the number, otherwise
        CountryCodeSource.FROM_DEFAULT_COUNTRY if the number did not seem to
        be in international format.
      - The number with the prefix stripped.
    """
    if not number:
        return (CountryCodeSource.FROM_DEFAULT_COUNTRY, number)
    # A leading run of plus signs marks explicit international format.
    plus_match = _PLUS_CHARS_PATTERN.match(number)
    if plus_match is not None:
        # Normalize only the remainder, the "+" run having been consumed.
        remainder = _normalize(number[plus_match.end():])
        return (CountryCodeSource.FROM_NUMBER_WITH_PLUS_SIGN, remainder)
    # Otherwise, try to consume the region's IDD prefix from the start.
    idd_pattern = re.compile(possible_idd_prefix)
    normalized = _normalize(number)
    stripped, normalized = _parse_prefix_as_idd(idd_pattern, normalized)
    source = (CountryCodeSource.FROM_NUMBER_WITH_IDD if stripped
              else CountryCodeSource.FROM_DEFAULT_COUNTRY)
    return (source, normalized)
"def",
"_maybe_strip_i18n_prefix_and_normalize",
"(",
"number",
",",
"possible_idd_prefix",
")",
":",
"if",
"len",
"(",
"number",
")",
"==",
"0",
":",
"return",
"(",
"CountryCodeSource",
".",
"FROM_DEFAULT_COUNTRY",
",",
"number",
")",
"# Check to see if the number begins with one or more plus signs.",
"m",
"=",
"_PLUS_CHARS_PATTERN",
".",
"match",
"(",
"number",
")",
"if",
"m",
":",
"number",
"=",
"number",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"# Can now normalize the rest of the number since we've consumed the",
"# \"+\" sign at the start.",
"return",
"(",
"CountryCodeSource",
".",
"FROM_NUMBER_WITH_PLUS_SIGN",
",",
"_normalize",
"(",
"number",
")",
")",
"# Attempt to parse the first digits as an international prefix.",
"idd_pattern",
"=",
"re",
".",
"compile",
"(",
"possible_idd_prefix",
")",
"number",
"=",
"_normalize",
"(",
"number",
")",
"stripped",
",",
"number",
"=",
"_parse_prefix_as_idd",
"(",
"idd_pattern",
",",
"number",
")",
"if",
"stripped",
":",
"return",
"(",
"CountryCodeSource",
".",
"FROM_NUMBER_WITH_IDD",
",",
"number",
")",
"else",
":",
"return",
"(",
"CountryCodeSource",
".",
"FROM_DEFAULT_COUNTRY",
",",
"number",
")"
] | 44.702703 | 20.783784 |
def get_suite_token(self, suite_id, suite_secret, suite_ticket):
        """Fetch the third-party application (suite) access token.

        See https://work.weixin.qq.com/api/doc#90001/90143/9060

        :param suite_id: suite id starting with ``ww``/``wx`` (replaces the
            legacy ``tj``-prefixed suite id)
        :param suite_secret: secret of the suite
        :param suite_ticket: ticket pushed by the WeChat Work backend
        :return: parsed JSON response
        """
        payload = {
            'suite_id': suite_id,
            'suite_secret': suite_secret,
            'suite_ticket': suite_ticket,
        }
        return self._post('service/get_suite_token', data=payload)
"def",
"get_suite_token",
"(",
"self",
",",
"suite_id",
",",
"suite_secret",
",",
"suite_ticket",
")",
":",
"return",
"self",
".",
"_post",
"(",
"'service/get_suite_token'",
",",
"data",
"=",
"{",
"'suite_id'",
":",
"suite_id",
",",
"'suite_secret'",
":",
"suite_secret",
",",
"'suite_ticket'",
":",
"suite_ticket",
"}",
")"
] | 29.315789 | 15 |
def vertical_gradient(self, x0, y0, x1, y1, start, end):
    """Fill the rectangle (x0, y0)-(x1, y1) with a top-to-bottom gradient.

    ``start`` and ``end`` are the colours at the top and bottom edges.
    """
    x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
    # One colour per row.  NOTE(review): gradient_list is asked for
    # y1 - y0 entries while py spans y1 - y0 + 1 rows -- confirm it
    # returns an inclusive ramp.
    colours = gradient_list(start, end, y1 - y0)
    for px in range(x0, x1 + 1):
        for py in range(y0, y1 + 1):
            self.point(px, py, colours[py - y0])
"def",
"vertical_gradient",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
",",
"start",
",",
"end",
")",
":",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
"=",
"self",
".",
"rect_helper",
"(",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
")",
"grad",
"=",
"gradient_list",
"(",
"start",
",",
"end",
",",
"y1",
"-",
"y0",
")",
"for",
"x",
"in",
"range",
"(",
"x0",
",",
"x1",
"+",
"1",
")",
":",
"for",
"y",
"in",
"range",
"(",
"y0",
",",
"y1",
"+",
"1",
")",
":",
"self",
".",
"point",
"(",
"x",
",",
"y",
",",
"grad",
"[",
"y",
"-",
"y0",
"]",
")"
] | 45.714286 | 7.714286 |
def preserve_view(*predicates):
    """Decorator factory raising ViewNotMatched for inapposite requests.

    Each predicate is a callable taking ``(request, context)`` and returning
    True when the request is apposite.  The wrapped view runs only when every
    predicate passes; otherwise ``ViewNotMatched`` is raised.  All predicates
    are evaluated before the decision is made.
    """
    def decorate(view_callable):
        def guarded_view(self, request, context, *args, **kwargs):
            checks = [predicate(request, context) for predicate in predicates]
            if not all(checks):
                raise ViewNotMatched
            return view_callable(self, request, context, *args, **kwargs)
        return guarded_view
    return decorate
"def",
"preserve_view",
"(",
"*",
"predicates",
")",
":",
"def",
"wrapper",
"(",
"view_callable",
")",
":",
"def",
"_wrapped",
"(",
"self",
",",
"request",
",",
"context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"all",
"(",
"[",
"predicate",
"(",
"request",
",",
"context",
")",
"for",
"predicate",
"in",
"predicates",
"]",
")",
":",
"return",
"view_callable",
"(",
"self",
",",
"request",
",",
"context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"ViewNotMatched",
"return",
"_wrapped",
"return",
"wrapper"
] | 40.75 | 19.85 |
def get_absolute_url(self):
        """Return the page to redirect back to after editing this configuration."""
        if self.stage:
            # Stage-scoped configuration: back to that stage's detail view.
            return reverse('projects_stage_view', args=(self.project.pk, self.stage.pk))
        # Project-scoped configuration: back to the project page.
        return self.project.get_absolute_url()
"def",
"get_absolute_url",
"(",
"self",
")",
":",
"# Determine if this configuration is on a stage",
"if",
"self",
".",
"stage",
":",
"# Stage specific configurations go back to the stage view",
"url",
"=",
"reverse",
"(",
"'projects_stage_view'",
",",
"args",
"=",
"(",
"self",
".",
"project",
".",
"pk",
",",
"self",
".",
"stage",
".",
"pk",
")",
")",
"else",
":",
"# Project specific configurations go back to the project page",
"url",
"=",
"self",
".",
"project",
".",
"get_absolute_url",
"(",
")",
"return",
"url"
] | 40 | 24.416667 |
def groups_to_display(self, value):
        """Set the unsubscribe groups shown on the preferences page.

        :param value: up to 25 unsubscribe group ids to display, or ``None``
        :type value: array(int)
        :raises ValueError: if more than 25 groups are supplied
        """
        if value is not None:
            if len(value) > 25:
                raise ValueError("New groups_to_display exceeds max length of 25.")
        self._groups_to_display = value
"def",
"groups_to_display",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
"and",
"len",
"(",
"value",
")",
">",
"25",
":",
"raise",
"ValueError",
"(",
"\"New groups_to_display exceeds max length of 25.\"",
")",
"self",
".",
"_groups_to_display",
"=",
"value"
] | 46.333333 | 14.833333 |
def _init_ssh(self):
        """ Configure SSH client options

        Reads per-check settings from ``self.config`` with fallback to the
        global ``self.tensor.config``, validates that credentials are
        present, then either reuses a cached SSH client for identical
        connection parameters or constructs (and caches) a new one.

        :raises Exception: if none of ssh_key/ssh_keyfile/ssh_password is
            set, or if ssh_username is missing
        """
        def conf(key, default=None):
            # Per-check config wins; otherwise fall back to the global config.
            return self.config.get(key, self.tensor.config.get(key, default))

        self.ssh_host = self.config.get('ssh_host', self.hostname)
        self.known_hosts = conf('ssh_knownhosts_file')
        self.ssh_keyfile = conf('ssh_keyfile')
        self.ssh_key = conf('ssh_key')
        # Not sure why you'd bother but maybe you've got a weird policy
        self.ssh_keypass = conf('ssh_keypass')
        self.ssh_user = conf('ssh_username')
        self.ssh_password = conf('ssh_password')
        self.ssh_port = conf('ssh_port', 22)
        # Verify config to see if we're good to go
        if not (self.ssh_key or self.ssh_keyfile or self.ssh_password):
            raise Exception("To use SSH you must specify *one* of ssh_key,"
                            " ssh_keyfile or ssh_password for this source"
                            " check or globally")
        if not self.ssh_user:
            raise Exception("ssh_username must be set")
        self.ssh_keydb = []
        # Cache key: one shared client per distinct set of connection
        # parameters across all checks on this tensor.
        cHash = hashlib.sha1(':'.join((
            self.ssh_host, self.ssh_user, str(self.ssh_port),
            str(self.ssh_password), str(self.ssh_key),
            str(self.ssh_keyfile)
        )).encode()).hexdigest()
        if cHash in self.tensor.hostConnectorCache:
            # Reuse the cached client; another check owns the connection.
            self.ssh_client = self.tensor.hostConnectorCache.get(cHash)
            self.ssh_connector = False
        else:
            self.ssh_connector = True
            self.ssh_client = ssh.SSHClient(self.ssh_host, self.ssh_user,
                self.ssh_port, password=self.ssh_password,
                knownhosts=self.known_hosts)
            if self.ssh_keyfile:
                self.ssh_client.addKeyFile(self.ssh_keyfile, self.ssh_keypass)
            if self.ssh_key:
                self.ssh_client.addKeyString(self.ssh_key, self.ssh_keypass)
            self.tensor.hostConnectorCache[cHash] = self.ssh_client
"def",
"_init_ssh",
"(",
"self",
")",
":",
"self",
".",
"ssh_host",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_host'",
",",
"self",
".",
"hostname",
")",
"self",
".",
"known_hosts",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_knownhosts_file'",
",",
"self",
".",
"tensor",
".",
"config",
".",
"get",
"(",
"'ssh_knownhosts_file'",
",",
"None",
")",
")",
"self",
".",
"ssh_keyfile",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_keyfile'",
",",
"self",
".",
"tensor",
".",
"config",
".",
"get",
"(",
"'ssh_keyfile'",
",",
"None",
")",
")",
"self",
".",
"ssh_key",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_key'",
",",
"self",
".",
"tensor",
".",
"config",
".",
"get",
"(",
"'ssh_key'",
",",
"None",
")",
")",
"# Not sure why you'd bother but maybe you've got a weird policy",
"self",
".",
"ssh_keypass",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_keypass'",
",",
"self",
".",
"tensor",
".",
"config",
".",
"get",
"(",
"'ssh_keypass'",
",",
"None",
")",
")",
"self",
".",
"ssh_user",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_username'",
",",
"self",
".",
"tensor",
".",
"config",
".",
"get",
"(",
"'ssh_username'",
",",
"None",
")",
")",
"self",
".",
"ssh_password",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_password'",
",",
"self",
".",
"tensor",
".",
"config",
".",
"get",
"(",
"'ssh_password'",
",",
"None",
")",
")",
"self",
".",
"ssh_port",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ssh_port'",
",",
"self",
".",
"tensor",
".",
"config",
".",
"get",
"(",
"'ssh_port'",
",",
"22",
")",
")",
"# Verify config to see if we're good to go",
"if",
"not",
"(",
"self",
".",
"ssh_key",
"or",
"self",
".",
"ssh_keyfile",
"or",
"self",
".",
"ssh_password",
")",
":",
"raise",
"Exception",
"(",
"\"To use SSH you must specify *one* of ssh_key,\"",
"\" ssh_keyfile or ssh_password for this source\"",
"\" check or globally\"",
")",
"if",
"not",
"self",
".",
"ssh_user",
":",
"raise",
"Exception",
"(",
"\"ssh_username must be set\"",
")",
"self",
".",
"ssh_keydb",
"=",
"[",
"]",
"cHash",
"=",
"hashlib",
".",
"sha1",
"(",
"':'",
".",
"join",
"(",
"(",
"self",
".",
"ssh_host",
",",
"self",
".",
"ssh_user",
",",
"str",
"(",
"self",
".",
"ssh_port",
")",
",",
"str",
"(",
"self",
".",
"ssh_password",
")",
",",
"str",
"(",
"self",
".",
"ssh_key",
")",
",",
"str",
"(",
"self",
".",
"ssh_keyfile",
")",
")",
")",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"if",
"cHash",
"in",
"self",
".",
"tensor",
".",
"hostConnectorCache",
":",
"self",
".",
"ssh_client",
"=",
"self",
".",
"tensor",
".",
"hostConnectorCache",
".",
"get",
"(",
"cHash",
")",
"self",
".",
"ssh_connector",
"=",
"False",
"else",
":",
"self",
".",
"ssh_connector",
"=",
"True",
"self",
".",
"ssh_client",
"=",
"ssh",
".",
"SSHClient",
"(",
"self",
".",
"ssh_host",
",",
"self",
".",
"ssh_user",
",",
"self",
".",
"ssh_port",
",",
"password",
"=",
"self",
".",
"ssh_password",
",",
"knownhosts",
"=",
"self",
".",
"known_hosts",
")",
"if",
"self",
".",
"ssh_keyfile",
":",
"self",
".",
"ssh_client",
".",
"addKeyFile",
"(",
"self",
".",
"ssh_keyfile",
",",
"self",
".",
"ssh_keypass",
")",
"if",
"self",
".",
"ssh_key",
":",
"self",
".",
"ssh_client",
".",
"addKeyString",
"(",
"self",
".",
"ssh_key",
",",
"self",
".",
"ssh_keypass",
")",
"self",
".",
"tensor",
".",
"hostConnectorCache",
"[",
"cHash",
"]",
"=",
"self",
".",
"ssh_client"
] | 38.145161 | 23.677419 |
def auto_detect_serial_unix(preferred_list=('*',)):
    '''try to auto-detect serial ports on unix

    Scans the usual /dev device paths for serial ports.  If any detected
    port matches an fnmatch-style pattern in ``preferred_list``, only the
    matching ports are returned; otherwise all detected ports are returned.

    :param preferred_list: fnmatch patterns to prioritise (immutable default
        avoids the shared-mutable-default-argument pitfall)
    :return: list of SerialPort objects
    '''
    import glob
    patterns = ('/dev/ttyS*', '/dev/ttyUSB*', '/dev/ttyACM*',
                '/dev/serial/by-id/*')
    glist = [dev for pattern in patterns for dev in glob.glob(pattern)]
    ret = []
    others = []
    # try preferred ones first
    for d in glist:
        if any(fnmatch.fnmatch(d, preferred) for preferred in preferred_list):
            ret.append(SerialPort(d))
        else:
            others.append(SerialPort(d))
    if ret:
        return ret
    ret.extend(others)
    return ret
"def",
"auto_detect_serial_unix",
"(",
"preferred_list",
"=",
"[",
"'*'",
"]",
")",
":",
"import",
"glob",
"glist",
"=",
"glob",
".",
"glob",
"(",
"'/dev/ttyS*'",
")",
"+",
"glob",
".",
"glob",
"(",
"'/dev/ttyUSB*'",
")",
"+",
"glob",
".",
"glob",
"(",
"'/dev/ttyACM*'",
")",
"+",
"glob",
".",
"glob",
"(",
"'/dev/serial/by-id/*'",
")",
"ret",
"=",
"[",
"]",
"others",
"=",
"[",
"]",
"# try preferred ones first",
"for",
"d",
"in",
"glist",
":",
"matches",
"=",
"False",
"for",
"preferred",
"in",
"preferred_list",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"d",
",",
"preferred",
")",
":",
"matches",
"=",
"True",
"if",
"matches",
":",
"ret",
".",
"append",
"(",
"SerialPort",
"(",
"d",
")",
")",
"else",
":",
"others",
".",
"append",
"(",
"SerialPort",
"(",
"d",
")",
")",
"if",
"len",
"(",
"ret",
")",
">",
"0",
":",
"return",
"ret",
"ret",
".",
"extend",
"(",
"others",
")",
"return",
"ret"
] | 31.85 | 19.15 |
def use_json(self, *paths):
        """Register static DictProvider-s loaded from JSON files.

        Args:
            *paths (str | unicode): Candidate file paths; falsy entries and
                non-existent files are silently skipped.
        """
        for path in paths:
            if not path:
                continue
            fpath = os.path.expanduser(path)
            if not os.path.exists(fpath):
                continue
            with open(fpath) as fh:
                self.add(DictProvider(json.load(fh), name=path))
"def",
"use_json",
"(",
"self",
",",
"*",
"paths",
")",
":",
"for",
"path",
"in",
"paths",
":",
"if",
"path",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fpath",
")",
":",
"with",
"open",
"(",
"fpath",
")",
"as",
"fh",
":",
"provider",
"=",
"DictProvider",
"(",
"json",
".",
"load",
"(",
"fh",
")",
",",
"name",
"=",
"path",
")",
"self",
".",
"add",
"(",
"provider",
")"
] | 39 | 16.166667 |
def restore_default_button_clicked(self, classification):
        """Reset every threshold spin box pair to the classification defaults.

        :param classification: The classification being edited.
        :type classification: dict
        """
        # Map class key -> (default min, default max).
        defaults = {
            the_class['key']: (
                the_class['numeric_default_min'],
                the_class['numeric_default_max'],
            )
            for the_class in classification.get('classes')
        }
        # Apply the defaults to each (min, max) spin box pair.
        for key, spin_boxes in list(self.threshold_classes.items()):
            minimum, maximum = defaults[key]
            spin_boxes[0].setValue(minimum)
            spin_boxes[1].setValue(maximum)
"def",
"restore_default_button_clicked",
"(",
"self",
",",
"classification",
")",
":",
"# Obtain default value",
"class_dict",
"=",
"{",
"}",
"for",
"the_class",
"in",
"classification",
".",
"get",
"(",
"'classes'",
")",
":",
"class_dict",
"[",
"the_class",
"[",
"'key'",
"]",
"]",
"=",
"{",
"'numeric_default_min'",
":",
"the_class",
"[",
"'numeric_default_min'",
"]",
",",
"'numeric_default_max'",
":",
"the_class",
"[",
"'numeric_default_max'",
"]",
",",
"}",
"# Set for all threshold",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"self",
".",
"threshold_classes",
".",
"items",
"(",
")",
")",
":",
"value",
"[",
"0",
"]",
".",
"setValue",
"(",
"class_dict",
"[",
"key",
"]",
"[",
"'numeric_default_min'",
"]",
")",
"value",
"[",
"1",
"]",
".",
"setValue",
"(",
"class_dict",
"[",
"key",
"]",
"[",
"'numeric_default_max'",
"]",
")"
] | 43 | 19.526316 |
def replace_units(self, units, copy=True):
        """Change the unit system of this potential.

        Parameters
        ----------
        units : `~gala.units.UnitSystem`
            Set of non-reducable units that specify (at minimum) the
            length, mass, time, and angle units.
        copy : bool (optional)
            If True, returns a copy; if False, changes this object in place.
        """
        saved_lock = self.lock
        pots = self.__class__() if copy else self
        pots._units = None
        # Temporarily unlock so the per-component assignments are permitted.
        pots.lock = False
        for name, potential in self.items():
            pots[name] = potential.replace_units(units, copy=copy)
        pots.lock = saved_lock
        return pots
"def",
"replace_units",
"(",
"self",
",",
"units",
",",
"copy",
"=",
"True",
")",
":",
"_lock",
"=",
"self",
".",
"lock",
"if",
"copy",
":",
"pots",
"=",
"self",
".",
"__class__",
"(",
")",
"else",
":",
"pots",
"=",
"self",
"pots",
".",
"_units",
"=",
"None",
"pots",
".",
"lock",
"=",
"False",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
":",
"pots",
"[",
"k",
"]",
"=",
"v",
".",
"replace_units",
"(",
"units",
",",
"copy",
"=",
"copy",
")",
"pots",
".",
"lock",
"=",
"_lock",
"return",
"pots"
] | 27.56 | 18.16 |
def fmt_delta(timestamp):
    """Format a UNIX timestamp as a human-readable delta relative to now.

    Returns a right-justified "N/A" of matching width when *timestamp*
    cannot be converted to a float.
    """
    try:
        return fmt.human_duration(float(timestamp), precision=2, short=True)
    except (ValueError, TypeError):
        # Pad "N/A" to the same width a real duration would occupy.
        width = len(fmt.human_duration(0, precision=2, short=True))
        return "N/A".rjust(width)
"def",
"fmt_delta",
"(",
"timestamp",
")",
":",
"try",
":",
"return",
"fmt",
".",
"human_duration",
"(",
"float",
"(",
"timestamp",
")",
",",
"precision",
"=",
"2",
",",
"short",
"=",
"True",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"\"N/A\"",
".",
"rjust",
"(",
"len",
"(",
"fmt",
".",
"human_duration",
"(",
"0",
",",
"precision",
"=",
"2",
",",
"short",
"=",
"True",
")",
")",
")"
] | 41.571429 | 18.142857 |
def format(self):
        """PixelFormat: raw format used for pixel transfers.

        The texture's effective format may differ, but uploads and
        downloads go through this one.
        """
        raw_format = ffi.new('Uint32 *')
        rc = lib.SDL_QueryTexture(self._ptr, raw_format, ffi.NULL, ffi.NULL, ffi.NULL)
        check_int_err(rc)
        return PixelFormat(raw_format[0])
"def",
"format",
"(",
"self",
")",
":",
"fmt",
"=",
"ffi",
".",
"new",
"(",
"'Uint32 *'",
")",
"check_int_err",
"(",
"lib",
".",
"SDL_QueryTexture",
"(",
"self",
".",
"_ptr",
",",
"fmt",
",",
"ffi",
".",
"NULL",
",",
"ffi",
".",
"NULL",
",",
"ffi",
".",
"NULL",
")",
")",
"return",
"PixelFormat",
"(",
"fmt",
"[",
"0",
"]",
")"
] | 47.571429 | 13.428571 |
def __check_response(self, msg):
        """ Search general errors in server response and raise exceptions when found.

        :keyword msg: result message (string or list of lines)
        :raises NotAllowed: operation was called with insufficient privileges
        :raises AuthorizationError: credentials are invalid or missing
        :raises APISyntaxError: syntax error
        :raises BadRequest: malformed request
        """
        lines = msg if isinstance(msg, list) else msg.split("\n")
        patterns = self.RE_PATTERNS
        if len(lines) > 2 and patterns['not_allowed_pattern'].match(lines[2]):
            raise NotAllowed(lines[2][2:])
        if patterns['credentials_required_pattern'].match(lines[0]):
            raise AuthorizationError('Credentials required.')
        if patterns['syntax_error_pattern'].match(lines[0]):
            raise APISyntaxError(lines[2][2:] if len(lines) > 2 else 'Syntax error.')
        if patterns['bad_request_pattern'].match(lines[0]):
            raise BadRequest(lines[3] if len(lines) > 2 else 'Bad request.')
"def",
"__check_response",
"(",
"self",
",",
"msg",
")",
":",
"if",
"not",
"isinstance",
"(",
"msg",
",",
"list",
")",
":",
"msg",
"=",
"msg",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"(",
"len",
"(",
"msg",
")",
">",
"2",
")",
"and",
"self",
".",
"RE_PATTERNS",
"[",
"'not_allowed_pattern'",
"]",
".",
"match",
"(",
"msg",
"[",
"2",
"]",
")",
":",
"raise",
"NotAllowed",
"(",
"msg",
"[",
"2",
"]",
"[",
"2",
":",
"]",
")",
"if",
"self",
".",
"RE_PATTERNS",
"[",
"'credentials_required_pattern'",
"]",
".",
"match",
"(",
"msg",
"[",
"0",
"]",
")",
":",
"raise",
"AuthorizationError",
"(",
"'Credentials required.'",
")",
"if",
"self",
".",
"RE_PATTERNS",
"[",
"'syntax_error_pattern'",
"]",
".",
"match",
"(",
"msg",
"[",
"0",
"]",
")",
":",
"raise",
"APISyntaxError",
"(",
"msg",
"[",
"2",
"]",
"[",
"2",
":",
"]",
"if",
"len",
"(",
"msg",
")",
">",
"2",
"else",
"'Syntax error.'",
")",
"if",
"self",
".",
"RE_PATTERNS",
"[",
"'bad_request_pattern'",
"]",
".",
"match",
"(",
"msg",
"[",
"0",
"]",
")",
":",
"raise",
"BadRequest",
"(",
"msg",
"[",
"3",
"]",
"if",
"len",
"(",
"msg",
")",
">",
"2",
"else",
"'Bad request.'",
")"
] | 53.526316 | 19.210526 |
def splitEkmDate(dateint):
    """Break out a date from Omnimeter read.

    Note a corrupt date will raise an exception when you
    convert it to int to hand to this method.

    Args:
        dateint (int): Omnimeter datetime as int (14 decimal digits,
            two per field).

    Returns:
        EkmDate namedtuple instance with fields:

        ========== =====================
        yy         Last 2 digits of year
        mm         Month 1-12
        dd         Day 1-31
        weekday    Zero based weekday
        hh         Hour 0-23
        minutes    Minutes 0-59
        ss         Seconds 0-59
        ========== =====================

        All fields are zero when ``dateint`` does not have exactly
        14 digits.
    """
    # The original implementation set attributes on the namedtuple *class*
    # and returned the class object; returning a real instance keeps field
    # access identical while producing a proper immutable tuple.
    EkmDate = namedtuple('EkmDate', ['yy', 'mm', 'dd', 'weekday', 'hh', 'minutes', 'ss'])
    date_str = str(dateint)
    if len(date_str) != 14:
        # Wrong-width reads signal corruption: return an all-zero date
        # rather than raising, matching historical behaviour.
        return EkmDate(0, 0, 0, 0, 0, 0, 0)
    # Fixed-width 2-digit slices: yy mm dd weekday hh minutes ss.
    yy, mm, dd, weekday, hh, minutes, ss = (
        int(date_str[i:i + 2]) for i in range(0, 14, 2))
    return EkmDate(yy, mm, dd, weekday, hh, minutes, ss)
"def",
"splitEkmDate",
"(",
"dateint",
")",
":",
"date_str",
"=",
"str",
"(",
"dateint",
")",
"dt",
"=",
"namedtuple",
"(",
"'EkmDate'",
",",
"[",
"'yy'",
",",
"'mm'",
",",
"'dd'",
",",
"'weekday'",
",",
"'hh'",
",",
"'minutes'",
",",
"'ss'",
"]",
")",
"if",
"len",
"(",
"date_str",
")",
"!=",
"14",
":",
"dt",
".",
"yy",
"=",
"dt",
".",
"mm",
"=",
"dt",
".",
"dd",
"=",
"dt",
".",
"weekday",
"=",
"dt",
".",
"hh",
"=",
"dt",
".",
"minutes",
"=",
"dt",
".",
"ss",
"=",
"0",
"return",
"dt",
"dt",
".",
"yy",
"=",
"int",
"(",
"date_str",
"[",
"0",
":",
"2",
"]",
")",
"dt",
".",
"mm",
"=",
"int",
"(",
"date_str",
"[",
"2",
":",
"4",
"]",
")",
"dt",
".",
"dd",
"=",
"int",
"(",
"date_str",
"[",
"4",
":",
"6",
"]",
")",
"dt",
".",
"weekday",
"=",
"int",
"(",
"date_str",
"[",
"6",
":",
"8",
"]",
")",
"dt",
".",
"hh",
"=",
"int",
"(",
"date_str",
"[",
"8",
":",
"10",
"]",
")",
"dt",
".",
"minutes",
"=",
"int",
"(",
"date_str",
"[",
"10",
":",
"12",
"]",
")",
"dt",
".",
"ss",
"=",
"int",
"(",
"date_str",
"[",
"12",
":",
"14",
"]",
")",
"return",
"dt"
] | 31.473684 | 16.605263 |
def create_build_configuration_process(repository, revision, **kwargs):
    """
    Create a new BuildConfiguration. BuildConfigurations represent the settings and configuration required to run a build of a specific version of the associated Project's source code.
    If a ProductVersion ID is provided, the BuildConfiguration will have access to artifacts which were produced for that version, but may not have been released yet.

    :param repository: SCM repository URL for the new configuration
    :param revision: SCM revision to build
    :return BPM Task ID of the new BuildConfiguration creation
    """
    # Normalise optional list arguments so downstream code can iterate them.
    if not kwargs.get("dependency_ids"):
        kwargs["dependency_ids"] = []
    if not kwargs.get("build_configuration_set_ids"):
        kwargs["build_configuration_set_ids"] = []
    # generic_parameters arrives as a string; parse it into a Python literal.
    # NOTE(review): assumes a literal-eval-able (dict-like) string -- confirm
    # the expected input format from the CLI.
    if kwargs.get("generic_parameters"):
        kwargs["generic_parameters"] = ast.literal_eval(kwargs.get("generic_parameters"))
    # Resolve project/environment objects from their ids when not supplied.
    if not kwargs.get("project"):
        kwargs["project"] = pnc_api.projects.get_specific(kwargs.get("project_id")).content
    if not kwargs.get("environment"):
        kwargs["environment"] = pnc_api.environments.get_specific(kwargs.get("build_environment_id")).content
    build_configuration = create_build_conf_object(scm_revision=revision, **kwargs)
    # Bundle the repository URL with the configuration for the BPM task.
    repo_creation = swagger_client.RepositoryCreationUrlAutoRest()
    repo_creation.scm_url = repository
    repo_creation.build_configuration_rest = build_configuration
    response = utils.checked_api_call(
        pnc_api.bpm, 'start_r_creation_task_with_single_url', body=repo_creation)
    # checked_api_call yields a falsy value on failure; only propagate a
    # real response (implicitly returns None otherwise).
    if response:
        return response
"def",
"create_build_configuration_process",
"(",
"repository",
",",
"revision",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"kwargs",
".",
"get",
"(",
"\"dependency_ids\"",
")",
":",
"kwargs",
"[",
"\"dependency_ids\"",
"]",
"=",
"[",
"]",
"if",
"not",
"kwargs",
".",
"get",
"(",
"\"build_configuration_set_ids\"",
")",
":",
"kwargs",
"[",
"\"build_configuration_set_ids\"",
"]",
"=",
"[",
"]",
"if",
"kwargs",
".",
"get",
"(",
"\"generic_parameters\"",
")",
":",
"kwargs",
"[",
"\"generic_parameters\"",
"]",
"=",
"ast",
".",
"literal_eval",
"(",
"kwargs",
".",
"get",
"(",
"\"generic_parameters\"",
")",
")",
"if",
"not",
"kwargs",
".",
"get",
"(",
"\"project\"",
")",
":",
"kwargs",
"[",
"\"project\"",
"]",
"=",
"pnc_api",
".",
"projects",
".",
"get_specific",
"(",
"kwargs",
".",
"get",
"(",
"\"project_id\"",
")",
")",
".",
"content",
"if",
"not",
"kwargs",
".",
"get",
"(",
"\"environment\"",
")",
":",
"kwargs",
"[",
"\"environment\"",
"]",
"=",
"pnc_api",
".",
"environments",
".",
"get_specific",
"(",
"kwargs",
".",
"get",
"(",
"\"build_environment_id\"",
")",
")",
".",
"content",
"build_configuration",
"=",
"create_build_conf_object",
"(",
"scm_revision",
"=",
"revision",
",",
"*",
"*",
"kwargs",
")",
"repo_creation",
"=",
"swagger_client",
".",
"RepositoryCreationUrlAutoRest",
"(",
")",
"repo_creation",
".",
"scm_url",
"=",
"repository",
"repo_creation",
".",
"build_configuration_rest",
"=",
"build_configuration",
"response",
"=",
"utils",
".",
"checked_api_call",
"(",
"pnc_api",
".",
"bpm",
",",
"'start_r_creation_task_with_single_url'",
",",
"body",
"=",
"repo_creation",
")",
"if",
"response",
":",
"return",
"response"
] | 47.903226 | 31.83871 |
def requires_login(func, *args, **kwargs):
    """Guard ensuring the client is logged in before calling *func*.

    The first positional argument is the client instance; when its
    ``session_token`` is unset, `exceptions.NotLoggedIn` is raised.
    """
    client = args[0]
    if not client.session_token:
        raise exceptions.NotLoggedIn()
    return func(*args, **kwargs)
"def",
"requires_login",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
"=",
"args",
"[",
"0",
"]",
"if",
"self",
".",
"session_token",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"raise",
"exceptions",
".",
"NotLoggedIn",
"(",
")"
] | 36.125 | 7.375 |
def pdb(self):
        """Compiles the PDB strings for each state into a single file.

        Each contained state becomes one MODEL terminated by an ENDMDL
        record, preceded by a HEADER line carrying this ensemble's id and
        an EXPDTA line marking the file as an ISAMBARD model.

        Returns
        -------
        merged_pdb : str
            Multi-state PDB file contents.
        """
        # PDB records are fixed-width: pad header lines to 80 columns.
        header_title = '{:<80}\n'.format('HEADER {}'.format(self.id))
        data_type = '{:<80}\n'.format('EXPDTA ISAMBARD Model')
        pdb_strs = []
        for ampal in self:
            if isinstance(ampal, Assembly):
                # Assemblies carry their own framing; suppress it so only
                # this merged file's header/footer remains.
                pdb_str = ampal.make_pdb(header=False, footer=False)
            else:
                pdb_str = ampal.make_pdb()
            pdb_strs.append(pdb_str)
        # Every state, including the last, is terminated by ENDMDL.
        merged_strs = 'ENDMDL\n'.join(pdb_strs) + 'ENDMDL\n'
        merged_pdb = ''.join([header_title, data_type, merged_strs])
        return merged_pdb
"def",
"pdb",
"(",
"self",
")",
":",
"header_title",
"=",
"'{:<80}\\n'",
".",
"format",
"(",
"'HEADER {}'",
".",
"format",
"(",
"self",
".",
"id",
")",
")",
"data_type",
"=",
"'{:<80}\\n'",
".",
"format",
"(",
"'EXPDTA ISAMBARD Model'",
")",
"pdb_strs",
"=",
"[",
"]",
"for",
"ampal",
"in",
"self",
":",
"if",
"isinstance",
"(",
"ampal",
",",
"Assembly",
")",
":",
"pdb_str",
"=",
"ampal",
".",
"make_pdb",
"(",
"header",
"=",
"False",
",",
"footer",
"=",
"False",
")",
"else",
":",
"pdb_str",
"=",
"ampal",
".",
"make_pdb",
"(",
")",
"pdb_strs",
".",
"append",
"(",
"pdb_str",
")",
"merged_strs",
"=",
"'ENDMDL\\n'",
".",
"join",
"(",
"pdb_strs",
")",
"+",
"'ENDMDL\\n'",
"merged_pdb",
"=",
"''",
".",
"join",
"(",
"[",
"header_title",
",",
"data_type",
",",
"merged_strs",
"]",
")",
"return",
"merged_pdb"
] | 45 | 17.071429 |
def get_index_from_filename(self, filename):
        """Return the tab-bar position of ``filename`` in this editor stack.

        Raises ValueError (via ``list.index``) when no open file has that
        name.
        """
        return [finfo.filename for finfo in self.data].index(filename)
"def",
"get_index_from_filename",
"(",
"self",
",",
"filename",
")",
":",
"filenames",
"=",
"[",
"d",
".",
"filename",
"for",
"d",
"in",
"self",
".",
"data",
"]",
"return",
"filenames",
".",
"index",
"(",
"filename",
")"
] | 37.428571 | 10.285714 |
def deploy_service(ctx, path, name, regions, disabled):
    """Deploys a new service JSON to multiple accounts. NAME is the service name you wish to deploy."""
    # The --disabled flag inverts the per-region enabled status.
    enabled = False if disabled else True
    swag = create_swag_from_ctx(ctx)
    # `path` is a JMESPath expression selecting the target accounts.
    accounts = swag.get_all(search_filter=path)
    log.debug('Searching for accounts. Found: {} JMESPath: `{}`'.format(len(accounts), path))
    for a in accounts:
        try:
            # Only add the service to accounts that don't already have it.
            if not swag.get_service(name, search_filter="[?id=='{id}']".format(id=a['id'])):
                log.info('Found an account to update. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
                # One status entry per requested region, all sharing the
                # same enabled flag.
                status = []
                for region in regions:
                    status.append(
                        {
                            'enabled': enabled,
                            'region': region
                        }
                    )
                a['services'].append(
                    {
                        'name': name,
                        'status': status
                    }
                )
                swag.update(a, dry_run=ctx.dry_run)
        except InvalidSWAGDataException as e:
            # Best-effort: log malformed accounts and keep processing the
            # rest.  NOTE(review): `e` is captured but its detail is not
            # included in the warning.
            log.warning('Found a data quality issue. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
    log.info('Service has been deployed to all matching accounts.')
"def",
"deploy_service",
"(",
"ctx",
",",
"path",
",",
"name",
",",
"regions",
",",
"disabled",
")",
":",
"enabled",
"=",
"False",
"if",
"disabled",
"else",
"True",
"swag",
"=",
"create_swag_from_ctx",
"(",
"ctx",
")",
"accounts",
"=",
"swag",
".",
"get_all",
"(",
"search_filter",
"=",
"path",
")",
"log",
".",
"debug",
"(",
"'Searching for accounts. Found: {} JMESPath: `{}`'",
".",
"format",
"(",
"len",
"(",
"accounts",
")",
",",
"path",
")",
")",
"for",
"a",
"in",
"accounts",
":",
"try",
":",
"if",
"not",
"swag",
".",
"get_service",
"(",
"name",
",",
"search_filter",
"=",
"\"[?id=='{id}']\"",
".",
"format",
"(",
"id",
"=",
"a",
"[",
"'id'",
"]",
")",
")",
":",
"log",
".",
"info",
"(",
"'Found an account to update. AccountName: {name} AccountNumber: {number}'",
".",
"format",
"(",
"name",
"=",
"a",
"[",
"'name'",
"]",
",",
"number",
"=",
"a",
"[",
"'id'",
"]",
")",
")",
"status",
"=",
"[",
"]",
"for",
"region",
"in",
"regions",
":",
"status",
".",
"append",
"(",
"{",
"'enabled'",
":",
"enabled",
",",
"'region'",
":",
"region",
"}",
")",
"a",
"[",
"'services'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"name",
",",
"'status'",
":",
"status",
"}",
")",
"swag",
".",
"update",
"(",
"a",
",",
"dry_run",
"=",
"ctx",
".",
"dry_run",
")",
"except",
"InvalidSWAGDataException",
"as",
"e",
":",
"log",
".",
"warning",
"(",
"'Found a data quality issue. AccountName: {name} AccountNumber: {number}'",
".",
"format",
"(",
"name",
"=",
"a",
"[",
"'name'",
"]",
",",
"number",
"=",
"a",
"[",
"'id'",
"]",
")",
")",
"log",
".",
"info",
"(",
"'Service has been deployed to all matching accounts.'",
")"
] | 42.96875 | 22.5625 |
def to_new(self, data, perplexity=None, return_distances=False):
        """Compute the affinities of new samples to the initial samples.

        This is necessary for embedding new data points into an existing
        embedding.  Please see the :ref:`parameter-guide` for more
        information.

        Parameters
        ----------
        data: np.ndarray
            The data points to be added to the existing embedding.
        perplexity: float
            Effective number of neighbours whose distances t-SNE attempts
            to preserve; defaults to this object's perplexity.
        return_distances: bool
            When True, also return the nearest-neighbour indices and
            distances.

        Returns
        -------
        P: array_like
            An :math:`N \\times M` affinity matrix between the :math:`N`
            new points and the :math:`M` initial samples.
        indices: np.ndarray
            Only if ``return_distances=True``: nearest-neighbour indices
            in the existing embedding for every new point.
        distances: np.ndarray
            Only if ``return_distances=True``: corresponding distances.
        """
        if perplexity is None:
            perplexity = self.perplexity
        perplexity = self.check_perplexity(perplexity)
        # Standard t-SNE heuristic: 3 * perplexity neighbours, capped by
        # the number of reference samples.
        n_neighbors = min(self.n_samples - 1, int(3 * perplexity))
        indices, dists = self.knn_index.query(data, n_neighbors)
        P = joint_probabilities_nn(
            indices,
            dists,
            [perplexity],
            symmetrize=False,
            normalization="point-wise",
            n_reference_samples=self.n_samples,
            n_jobs=self.n_jobs,
        )
        if return_distances:
            return P, indices, dists
        return P
"def",
"to_new",
"(",
"self",
",",
"data",
",",
"perplexity",
"=",
"None",
",",
"return_distances",
"=",
"False",
")",
":",
"perplexity",
"=",
"perplexity",
"if",
"perplexity",
"is",
"not",
"None",
"else",
"self",
".",
"perplexity",
"perplexity",
"=",
"self",
".",
"check_perplexity",
"(",
"perplexity",
")",
"k_neighbors",
"=",
"min",
"(",
"self",
".",
"n_samples",
"-",
"1",
",",
"int",
"(",
"3",
"*",
"perplexity",
")",
")",
"neighbors",
",",
"distances",
"=",
"self",
".",
"knn_index",
".",
"query",
"(",
"data",
",",
"k_neighbors",
")",
"P",
"=",
"joint_probabilities_nn",
"(",
"neighbors",
",",
"distances",
",",
"[",
"perplexity",
"]",
",",
"symmetrize",
"=",
"False",
",",
"normalization",
"=",
"\"point-wise\"",
",",
"n_reference_samples",
"=",
"self",
".",
"n_samples",
",",
"n_jobs",
"=",
"self",
".",
"n_jobs",
",",
")",
"if",
"return_distances",
":",
"return",
"P",
",",
"neighbors",
",",
"distances",
"return",
"P"
] | 33.583333 | 24.9 |
async def paginate(self):
        """Actually paginate the entries and run the interactive loop if necessary.

        Shows the first page, then loops handling reaction-based navigation
        until a 120 s inactivity timeout ends the pagination.
        """
        await self.show_page(1, first=True)
        while self.paginating:
            react = await self.bot.wait_for_reaction(message=self.message, check=self.react_check, timeout=120.0)
            if react is None:
                # Timed out waiting for input: stop and tidy up reactions.
                self.paginating = False
                try:
                    await self.bot.clear_reactions(self.message)
                except:
                    # Best-effort cleanup (e.g. missing permissions).
                    pass
                finally:
                    break
            try:
                # Remove the user's reaction so the control stays "pressed
                # once"; harmless if we lack permission to do so.
                await self.bot.remove_reaction(self.message, react.reaction.emoji, react.user)
            except:
                pass # can't remove it so don't bother doing so
            await self.match()
"async",
"def",
"paginate",
"(",
"self",
")",
":",
"await",
"self",
".",
"show_page",
"(",
"1",
",",
"first",
"=",
"True",
")",
"while",
"self",
".",
"paginating",
":",
"react",
"=",
"await",
"self",
".",
"bot",
".",
"wait_for_reaction",
"(",
"message",
"=",
"self",
".",
"message",
",",
"check",
"=",
"self",
".",
"react_check",
",",
"timeout",
"=",
"120.0",
")",
"if",
"react",
"is",
"None",
":",
"self",
".",
"paginating",
"=",
"False",
"try",
":",
"await",
"self",
".",
"bot",
".",
"clear_reactions",
"(",
"self",
".",
"message",
")",
"except",
":",
"pass",
"finally",
":",
"break",
"try",
":",
"await",
"self",
".",
"bot",
".",
"remove_reaction",
"(",
"self",
".",
"message",
",",
"react",
".",
"reaction",
".",
"emoji",
",",
"react",
".",
"user",
")",
"except",
":",
"pass",
"# can't remove it so don't bother doing so",
"await",
"self",
".",
"match",
"(",
")"
] | 36.52381 | 22.52381 |
def matchOnlyAtCol(n):
    """Build a parse action that fails unless the match starts at column ``n``.

    Useful for grammars where token position within the line is significant.
    """
    def checkColumn(strg, locn, toks):
        # col() converts the absolute location into a 1-based column number.
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return checkColumn
"def",
"matchOnlyAtCol",
"(",
"n",
")",
":",
"def",
"verifyCol",
"(",
"strg",
",",
"locn",
",",
"toks",
")",
":",
"if",
"col",
"(",
"locn",
",",
"strg",
")",
"!=",
"n",
":",
"raise",
"ParseException",
"(",
"strg",
",",
"locn",
",",
"\"matched token not at column %d\"",
"%",
"n",
")",
"return",
"verifyCol"
] | 38.25 | 11.625 |
def _seconds_or_timedelta(duration):
"""Returns `datetime.timedelta` object for the passed duration.
Keyword Arguments:
duration -- `datetime.timedelta` object or seconds in `int` format.
"""
if isinstance(duration, int):
dt_timedelta = timedelta(seconds=duration)
elif isinstance(duration, timedelta):
dt_timedelta = duration
else:
raise TypeError(
'Expects argument as `datetime.timedelta` object '
'or seconds in `int` format'
)
return dt_timedelta | [
"def",
"_seconds_or_timedelta",
"(",
"duration",
")",
":",
"if",
"isinstance",
"(",
"duration",
",",
"int",
")",
":",
"dt_timedelta",
"=",
"timedelta",
"(",
"seconds",
"=",
"duration",
")",
"elif",
"isinstance",
"(",
"duration",
",",
"timedelta",
")",
":",
"dt_timedelta",
"=",
"duration",
"else",
":",
"raise",
"TypeError",
"(",
"'Expects argument as `datetime.timedelta` object '",
"'or seconds in `int` format'",
")",
"return",
"dt_timedelta"
] | 31.117647 | 16.529412 |
def _add_vector(self, hash_name, bucket_key, v, data, redis_object):
'''
Store vector and JSON-serializable data in bucket with specified key.
'''
redis_key = self._format_redis_key(hash_name, bucket_key)
val_dict = {}
# Depending on type (sparse or not) fill value dict
if scipy.sparse.issparse(v):
# Make sure that we are using COO format (easy to handle)
if not scipy.sparse.isspmatrix_coo(v):
v = scipy.sparse.coo_matrix(v)
# Construct list of [index, value] items,
# one for each non-zero element of the sparse vector
encoded_values = []
for k in range(v.data.size):
row_index = v.row[k]
value = v.data[k]
encoded_values.append([int(row_index), value])
val_dict['sparse'] = 1
val_dict['nonzeros'] = encoded_values
val_dict['dim'] = v.shape[0]
else:
# Make sure it is a 1d vector
v = numpy.reshape(v, v.shape[0])
val_dict['vector'] = v.tostring()
val_dict['dtype'] = v.dtype.name
# Add data if set
if data is not None:
val_dict['data'] = data
# Push JSON representation of dict to end of bucket list
self.redis_object.rpush(redis_key, pickle.dumps(val_dict, protocol=2)) | [
"def",
"_add_vector",
"(",
"self",
",",
"hash_name",
",",
"bucket_key",
",",
"v",
",",
"data",
",",
"redis_object",
")",
":",
"redis_key",
"=",
"self",
".",
"_format_redis_key",
"(",
"hash_name",
",",
"bucket_key",
")",
"val_dict",
"=",
"{",
"}",
"# Depending on type (sparse or not) fill value dict",
"if",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"v",
")",
":",
"# Make sure that we are using COO format (easy to handle)",
"if",
"not",
"scipy",
".",
"sparse",
".",
"isspmatrix_coo",
"(",
"v",
")",
":",
"v",
"=",
"scipy",
".",
"sparse",
".",
"coo_matrix",
"(",
"v",
")",
"# Construct list of [index, value] items,",
"# one for each non-zero element of the sparse vector",
"encoded_values",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"v",
".",
"data",
".",
"size",
")",
":",
"row_index",
"=",
"v",
".",
"row",
"[",
"k",
"]",
"value",
"=",
"v",
".",
"data",
"[",
"k",
"]",
"encoded_values",
".",
"append",
"(",
"[",
"int",
"(",
"row_index",
")",
",",
"value",
"]",
")",
"val_dict",
"[",
"'sparse'",
"]",
"=",
"1",
"val_dict",
"[",
"'nonzeros'",
"]",
"=",
"encoded_values",
"val_dict",
"[",
"'dim'",
"]",
"=",
"v",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"# Make sure it is a 1d vector",
"v",
"=",
"numpy",
".",
"reshape",
"(",
"v",
",",
"v",
".",
"shape",
"[",
"0",
"]",
")",
"val_dict",
"[",
"'vector'",
"]",
"=",
"v",
".",
"tostring",
"(",
")",
"val_dict",
"[",
"'dtype'",
"]",
"=",
"v",
".",
"dtype",
".",
"name",
"# Add data if set",
"if",
"data",
"is",
"not",
"None",
":",
"val_dict",
"[",
"'data'",
"]",
"=",
"data",
"# Push JSON representation of dict to end of bucket list",
"self",
".",
"redis_object",
".",
"rpush",
"(",
"redis_key",
",",
"pickle",
".",
"dumps",
"(",
"val_dict",
",",
"protocol",
"=",
"2",
")",
")"
] | 35.076923 | 20 |
def path_regex(self):
        """Return the regex for the path to the build folder."""
        substitutions = {
            'PREFIX': self.candidate_build_list_regex,
            'BUILD': self.builds[self.build_index],
            'LOCALE': self.locale,
            'PLATFORM': self.platform_regex,
        }
        return r'%(PREFIX)s%(BUILD)s/%(PLATFORM)s/%(LOCALE)s/' % substitutions
"def",
"path_regex",
"(",
"self",
")",
":",
"regex",
"=",
"r'%(PREFIX)s%(BUILD)s/%(PLATFORM)s/%(LOCALE)s/'",
"return",
"regex",
"%",
"{",
"'PREFIX'",
":",
"self",
".",
"candidate_build_list_regex",
",",
"'BUILD'",
":",
"self",
".",
"builds",
"[",
"self",
".",
"build_index",
"]",
",",
"'LOCALE'",
":",
"self",
".",
"locale",
",",
"'PLATFORM'",
":",
"self",
".",
"platform_regex",
"}"
] | 54.142857 | 16.142857 |
def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)
        sedfile = args.sed_file
        sed_dir = os.path.dirname(sedfile)
        # Fall back to the default config file next to the SED file.
        config_name = args.config if is_not_null(args.config) else 'config.yaml'
        configfile = os.path.join(sed_dir, config_name)
        nbins = _get_enum_bins(configfile)
        # One input file per simulation seed, args.nsims files in total.
        seeds = range(args.seed, args.seed + args.nsims)
        flist = [sedfile.replace("_SEED.fits", "_%06i.fits" % seed)
                 for seed in seeds]
        outtable = fill_output_table(
            flist, "SED", CollectSED.collist, nbins=nbins)
        if is_not_null(args.outfile):
            outtable.write(args.outfile)
        if is_not_null(args.summaryfile):
            summary = summarize_sed_results(outtable)
            summary.write(args.summaryfile)
"def",
"run_analysis",
"(",
"self",
",",
"argv",
")",
":",
"args",
"=",
"self",
".",
"_parser",
".",
"parse_args",
"(",
"argv",
")",
"sedfile",
"=",
"args",
".",
"sed_file",
"if",
"is_not_null",
"(",
"args",
".",
"config",
")",
":",
"configfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sedfile",
")",
",",
"args",
".",
"config",
")",
"else",
":",
"configfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sedfile",
")",
",",
"'config.yaml'",
")",
"nbins",
"=",
"_get_enum_bins",
"(",
"configfile",
")",
"first",
"=",
"args",
".",
"seed",
"last",
"=",
"first",
"+",
"args",
".",
"nsims",
"flist",
"=",
"[",
"sedfile",
".",
"replace",
"(",
"\"_SEED.fits\"",
",",
"\"_%06i.fits\"",
"%",
"seed",
")",
"for",
"seed",
"in",
"range",
"(",
"first",
",",
"last",
")",
"]",
"outfile",
"=",
"args",
".",
"outfile",
"summaryfile",
"=",
"args",
".",
"summaryfile",
"outtable",
"=",
"fill_output_table",
"(",
"flist",
",",
"\"SED\"",
",",
"CollectSED",
".",
"collist",
",",
"nbins",
"=",
"nbins",
")",
"if",
"is_not_null",
"(",
"outfile",
")",
":",
"outtable",
".",
"write",
"(",
"outfile",
")",
"if",
"is_not_null",
"(",
"summaryfile",
")",
":",
"summary",
"=",
"summarize_sed_results",
"(",
"outtable",
")",
"summary",
".",
"write",
"(",
"summaryfile",
")"
] | 29.354839 | 19.774194 |
def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):
  """Get a list of key_ranges.KeyRanges objects, one for each shard.
  This method uses scatter index to split each namespace into pieces
  and assign those pieces to shards.
  Args:
    app: app_id in str.
    namespaces: a list of namespaces in str.
    shard_count: number of shards to split.
    query_spec: model.QuerySpec.
  Returns:
    a list of key_ranges.KeyRanges objects.
  """
  key_ranges_by_ns = []
  # Split each ns into n splits. If a ns doesn't have enough scatter to
  # split into n, the last few splits are None.
  for namespace in namespaces:
    ranges = cls._split_ns_by_scatter(
        shard_count,
        namespace,
        query_spec.entity_kind,
        app)
    # The nth split of each ns will be assigned to the nth shard.
    # Shuffle so that None are not all by the end.
    random.shuffle(ranges)
    key_ranges_by_ns.append(ranges)
  # KeyRanges from different namespaces might be very different in size.
  # Use round robin to make sure each shard can have at most one split
  # or a None from a ns.
  # ranges_by_shard[i] collects the i-th (shuffled) split of every namespace.
  ranges_by_shard = [[] for _ in range(shard_count)]
  for ranges in key_ranges_by_ns:
    for i, k_range in enumerate(ranges):
      if k_range:
        ranges_by_shard[i].append(k_range)
  # Shards that received no ranges at all are dropped, so the result may
  # contain fewer than shard_count entries.
  key_ranges_by_shard = []
  for ranges in ranges_by_shard:
    if ranges:
      key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(
          ranges))
  return key_ranges_by_shard
"def",
"_to_key_ranges_by_shard",
"(",
"cls",
",",
"app",
",",
"namespaces",
",",
"shard_count",
",",
"query_spec",
")",
":",
"key_ranges_by_ns",
"=",
"[",
"]",
"# Split each ns into n splits. If a ns doesn't have enough scatter to",
"# split into n, the last few splits are None.",
"for",
"namespace",
"in",
"namespaces",
":",
"ranges",
"=",
"cls",
".",
"_split_ns_by_scatter",
"(",
"shard_count",
",",
"namespace",
",",
"query_spec",
".",
"entity_kind",
",",
"app",
")",
"# The nth split of each ns will be assigned to the nth shard.",
"# Shuffle so that None are not all by the end.",
"random",
".",
"shuffle",
"(",
"ranges",
")",
"key_ranges_by_ns",
".",
"append",
"(",
"ranges",
")",
"# KeyRanges from different namespaces might be very different in size.",
"# Use round robin to make sure each shard can have at most one split",
"# or a None from a ns.",
"ranges_by_shard",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"shard_count",
")",
"]",
"for",
"ranges",
"in",
"key_ranges_by_ns",
":",
"for",
"i",
",",
"k_range",
"in",
"enumerate",
"(",
"ranges",
")",
":",
"if",
"k_range",
":",
"ranges_by_shard",
"[",
"i",
"]",
".",
"append",
"(",
"k_range",
")",
"key_ranges_by_shard",
"=",
"[",
"]",
"for",
"ranges",
"in",
"ranges_by_shard",
":",
"if",
"ranges",
":",
"key_ranges_by_shard",
".",
"append",
"(",
"key_ranges",
".",
"KeyRangesFactory",
".",
"create_from_list",
"(",
"ranges",
")",
")",
"return",
"key_ranges_by_shard"
] | 35.045455 | 17.977273 |
def _get_schema():
    """Load and return the scheduling block JSON schema used for validation.

    The schema file is resolved relative to this module's location.
    """
    schema_path = os.path.join(os.path.dirname(__file__),
                               'schema', 'scheduling_block_schema.json')
    # json.load reads and parses in one step -- no need to slurp the raw
    # text into memory first.
    with open(schema_path, 'r') as file:
        return json.load(file)
"def",
"_get_schema",
"(",
")",
":",
"schema_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'schema'",
",",
"'scheduling_block_schema.json'",
")",
"with",
"open",
"(",
"schema_path",
",",
"'r'",
")",
"as",
"file",
":",
"schema_data",
"=",
"file",
".",
"read",
"(",
")",
"schema",
"=",
"json",
".",
"loads",
"(",
"schema_data",
")",
"return",
"schema"
] | 42.5 | 13.125 |
def record_messages(self):
        """Capture all messages sent while the context is active.

        Yields a list that grows as messages are dispatched::

            with mail.record_messages() as outbox:
                response = app.test_client.get("/email-sending-view/")
                assert len(outbox) == 1
                assert outbox[0].subject == "testing"

        Requires blinker, since capturing hooks into the
        ``email_dispatched`` signal.
        :versionadded: 0.4
        """
        if not email_dispatched:
            raise RuntimeError("blinker must be installed")
        captured = []
        def _capture(message, mail):
            captured.append(message)
        email_dispatched.connect(_capture)
        try:
            yield captured
        finally:
            # Always detach, even if the caller's block raised.
            email_dispatched.disconnect(_capture)
"def",
"record_messages",
"(",
"self",
")",
":",
"if",
"not",
"email_dispatched",
":",
"raise",
"RuntimeError",
"(",
"\"blinker must be installed\"",
")",
"outbox",
"=",
"[",
"]",
"def",
"_record",
"(",
"message",
",",
"mail",
")",
":",
"outbox",
".",
"append",
"(",
"message",
")",
"email_dispatched",
".",
"connect",
"(",
"_record",
")",
"try",
":",
"yield",
"outbox",
"finally",
":",
"email_dispatched",
".",
"disconnect",
"(",
"_record",
")"
] | 28 | 20.269231 |
def view_conflicts(L, normalize=True, colorbar=True):
    """Plot the [m, m] matrix of labeling-function conflicts."""
    # Densify sparse label matrices before computing conflicts.
    if sparse.issparse(L):
        L = L.todense()
    conflicts = _get_conflicts_matrix(L, normalize=normalize)
    plt.imshow(conflicts, aspect="auto")
    plt.title("Conflicts")
    if colorbar:
        plt.colorbar()
    plt.show()
"def",
"view_conflicts",
"(",
"L",
",",
"normalize",
"=",
"True",
",",
"colorbar",
"=",
"True",
")",
":",
"L",
"=",
"L",
".",
"todense",
"(",
")",
"if",
"sparse",
".",
"issparse",
"(",
"L",
")",
"else",
"L",
"C",
"=",
"_get_conflicts_matrix",
"(",
"L",
",",
"normalize",
"=",
"normalize",
")",
"plt",
".",
"imshow",
"(",
"C",
",",
"aspect",
"=",
"\"auto\"",
")",
"plt",
".",
"title",
"(",
"\"Conflicts\"",
")",
"if",
"colorbar",
":",
"plt",
".",
"colorbar",
"(",
")",
"plt",
".",
"show",
"(",
")"
] | 34.555556 | 13.777778 |
def remove_transcript_preferences(course_id):
    """
    Delete the course-wide transcript preferences for a course, if any.

    Arguments:
        course_id(str): course id
    """
    try:
        TranscriptPreference.objects.get(course_id=course_id).delete()
    except TranscriptPreference.DoesNotExist:
        # Nothing stored for this course -- nothing to remove.
        pass
"def",
"remove_transcript_preferences",
"(",
"course_id",
")",
":",
"try",
":",
"transcript_preference",
"=",
"TranscriptPreference",
".",
"objects",
".",
"get",
"(",
"course_id",
"=",
"course_id",
")",
"transcript_preference",
".",
"delete",
"(",
")",
"except",
"TranscriptPreference",
".",
"DoesNotExist",
":",
"pass"
] | 28.416667 | 16.416667 |
def _index(array, item, key=None):
"""
Array search function.
Written, because ``.index()`` method for array doesn't have `key` parameter
and raises `ValueError`, if the item is not found.
Args:
array (list): List of items, which will be searched.
item (whatever): Item, which will be matched to elements in `array`.
key (function, default None): Function, which will be used for lookup
into each element in `array`.
Return:
Index of `item` in `array`, if the `item` is in `array`, else `-1`.
"""
for i, el in enumerate(array):
resolved_el = key(el) if key else el
if resolved_el == item:
return i
return -1 | [
"def",
"_index",
"(",
"array",
",",
"item",
",",
"key",
"=",
"None",
")",
":",
"for",
"i",
",",
"el",
"in",
"enumerate",
"(",
"array",
")",
":",
"resolved_el",
"=",
"key",
"(",
"el",
")",
"if",
"key",
"else",
"el",
"if",
"resolved_el",
"==",
"item",
":",
"return",
"i",
"return",
"-",
"1"
] | 31.478261 | 24.086957 |
def get_pull_command(self, remote=None, revision=None):
        """
        Build the command to pull changes from a remote repository.

        To emulate Mercurial's behavior (pull remote change sets into a
        same-named local branch, merge later) rather than git's default
        (merge into the currently checked out branch), a given `revision`
        is treated as a branch name and mapped onto itself with a
        ``branch:branch`` refspec.

        .. warning:: This will undoubtedly break when `revision` is given
                     but is not a branch name.
        """
        branch_spec = '%s:%s' % (revision, revision) if revision else None
        if self.bare:
            return [
                'git', 'fetch',
                remote or 'origin',
                # http://stackoverflow.com/a/10697486
                branch_spec or '+refs/heads/*:refs/heads/*',
            ]
        command = ['git', 'pull']
        if remote or branch_spec:
            command.append(remote or 'origin')
        if branch_spec:
            command.append(branch_spec)
        return command
"def",
"get_pull_command",
"(",
"self",
",",
"remote",
"=",
"None",
",",
"revision",
"=",
"None",
")",
":",
"if",
"revision",
":",
"revision",
"=",
"'%s:%s'",
"%",
"(",
"revision",
",",
"revision",
")",
"if",
"self",
".",
"bare",
":",
"return",
"[",
"'git'",
",",
"'fetch'",
",",
"remote",
"or",
"'origin'",
",",
"# http://stackoverflow.com/a/10697486",
"revision",
"or",
"'+refs/heads/*:refs/heads/*'",
",",
"]",
"else",
":",
"command",
"=",
"[",
"'git'",
",",
"'pull'",
"]",
"if",
"remote",
"or",
"revision",
":",
"command",
".",
"append",
"(",
"remote",
"or",
"'origin'",
")",
"if",
"revision",
":",
"command",
".",
"append",
"(",
"revision",
")",
"return",
"command"
] | 44.125 | 23.875 |
def _got_addresses(self, name, port, addrs):
        """Handler DNS address record lookup result.
        :Parameters:
            - `name`: the name requested
            - `port`: port number to connect to
            - `addrs`: list of (family, address) tuples
        """
        with self.lock:
            if not addrs:
                # Resolution failed for this name.
                if self._dst_nameports:
                    # Other name/port candidates remain -- go back and
                    # resolve the next one.
                    self._set_state("resolve-hostname")
                    return
                else:
                    # Nothing left to try: abort the connection attempt.
                    self._dst_addrs = []
                    self._set_state("aborted")
                    raise DNSError("Could not resolve address record for {0!r}"
                                   .format(name))
            # Pair every resolved address with the requested port, in the
            # (family, address-tuple) shape consumed by the connect step.
            self._dst_addrs = [ (family, (addr, port)) for (family, addr)
                                in addrs ]
            self._set_state("connect")
"def",
"_got_addresses",
"(",
"self",
",",
"name",
",",
"port",
",",
"addrs",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"not",
"addrs",
":",
"if",
"self",
".",
"_dst_nameports",
":",
"self",
".",
"_set_state",
"(",
"\"resolve-hostname\"",
")",
"return",
"else",
":",
"self",
".",
"_dst_addrs",
"=",
"[",
"]",
"self",
".",
"_set_state",
"(",
"\"aborted\"",
")",
"raise",
"DNSError",
"(",
"\"Could not resolve address record for {0!r}\"",
".",
"format",
"(",
"name",
")",
")",
"self",
".",
"_dst_addrs",
"=",
"[",
"(",
"family",
",",
"(",
"addr",
",",
"port",
")",
")",
"for",
"(",
"family",
",",
"addr",
")",
"in",
"addrs",
"]",
"self",
".",
"_set_state",
"(",
"\"connect\"",
")"
] | 42.380952 | 15.380952 |
def paga_compare(
    adata,
    basis=None,
    edges=False,
    color=None,
    alpha=None,
    groups=None,
    components=None,
    projection='2d',
    legend_loc='on data',
    legend_fontsize=None,
    legend_fontweight='bold',
    color_map=None,
    palette=None,
    frameon=False,
    size=None,
    title=None,
    right_margin=None,
    left_margin=0.05,
    show=None,
    save=None,
    title_graph=None,
    groups_graph=None,
    **paga_graph_params):
    """Scatter and PAGA graph side-by-side.
    Consists in a scatter plot and the abstracted graph. See
    :func:`~scanpy.api.pl.paga` for all related parameters.
    See :func:`~scanpy.api.pl.paga_path` for visualizing gene changes along paths
    through the abstracted graph.
    Additional parameters are as follows.
    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    kwds_scatter : `dict`
        Keywords for :func:`~scanpy.api.pl.scatter`.
    kwds_paga : `dict`
        Keywords for :func:`~scanpy.api.pl.paga`.
    Returns
    -------
    A list of `matplotlib.axes.Axes` if `show` is `False`.
    """
    # NOTE(review): `projection` and `left_margin` are accepted but unused
    # in this body -- confirm whether they should be forwarded.
    # Two side-by-side panels: axs[0] holds the embedding scatter,
    # axs[1] the abstracted PAGA graph.
    axs, _, _, _ = utils.setup_axes(panels=[0, 1],
                                    right_margin=right_margin)
    if color is None:
        color = adata.uns['paga']['groups']
    suptitle = None  # common title for entire figure
    if title_graph is None:
        suptitle = color if title is None else title
        title, title_graph = '', ''
    # Pick an embedding basis in order of preference if none was requested.
    if basis is None:
        if 'X_draw_graph_fa' in adata.obsm.keys():
            basis = 'draw_graph_fa'
        elif 'X_umap' in adata.obsm.keys():
            basis = 'umap'
        elif 'X_tsne' in adata.obsm.keys():
            basis = 'tsne'
        elif 'X_draw_graph_fr' in adata.obsm.keys():
            basis = 'draw_graph_fr'
        else:
            basis = 'umap'
    from .scatterplots import plot_scatter
    plot_scatter(
        adata,
        ax=axs[0],
        basis=basis,
        color=color,
        edges=edges,
        alpha=alpha,
        groups=groups,
        components=components,
        legend_loc=legend_loc,
        legend_fontsize=legend_fontsize,
        legend_fontweight=legend_fontweight,
        color_map=color_map,
        palette=palette,
        frameon=frameon,
        size=size,
        title=title,
        show=False,
        save=False)
    # Choose node positions for the graph panel: reuse the cluster positions
    # computed by the scatter when coloring by the PAGA groups, otherwise
    # fall back to the stored layout.
    if 'pos' not in paga_graph_params:
        if color == adata.uns['paga']['groups']:
            paga_graph_params['pos'] = utils._tmp_cluster_pos
        else:
            paga_graph_params['pos'] = adata.uns['paga']['pos']
    # Let the graph panel share the scatter panel's coordinate range.
    xlim, ylim = axs[0].get_xlim(), axs[0].get_ylim()
    axs[1].set_xlim(xlim)
    axs[1].set_ylim(ylim)
    if 'labels' in paga_graph_params:
        labels = paga_graph_params.pop('labels')
    else:
        labels = groups_graph
    paga(
        adata,
        ax=axs[1],
        show=False,
        save=False,
        title=title_graph,
        labels=labels,
        colors=color,
        frameon=frameon,
        **paga_graph_params)
    if suptitle is not None: pl.suptitle(suptitle)
    utils.savefig_or_show('paga_compare', show=show, save=save)
    if show == False: return axs
"def",
"paga_compare",
"(",
"adata",
",",
"basis",
"=",
"None",
",",
"edges",
"=",
"False",
",",
"color",
"=",
"None",
",",
"alpha",
"=",
"None",
",",
"groups",
"=",
"None",
",",
"components",
"=",
"None",
",",
"projection",
"=",
"'2d'",
",",
"legend_loc",
"=",
"'on data'",
",",
"legend_fontsize",
"=",
"None",
",",
"legend_fontweight",
"=",
"'bold'",
",",
"color_map",
"=",
"None",
",",
"palette",
"=",
"None",
",",
"frameon",
"=",
"False",
",",
"size",
"=",
"None",
",",
"title",
"=",
"None",
",",
"right_margin",
"=",
"None",
",",
"left_margin",
"=",
"0.05",
",",
"show",
"=",
"None",
",",
"save",
"=",
"None",
",",
"title_graph",
"=",
"None",
",",
"groups_graph",
"=",
"None",
",",
"*",
"*",
"paga_graph_params",
")",
":",
"axs",
",",
"_",
",",
"_",
",",
"_",
"=",
"utils",
".",
"setup_axes",
"(",
"panels",
"=",
"[",
"0",
",",
"1",
"]",
",",
"right_margin",
"=",
"right_margin",
")",
"if",
"color",
"is",
"None",
":",
"color",
"=",
"adata",
".",
"uns",
"[",
"'paga'",
"]",
"[",
"'groups'",
"]",
"suptitle",
"=",
"None",
"# common title for entire figure",
"if",
"title_graph",
"is",
"None",
":",
"suptitle",
"=",
"color",
"if",
"title",
"is",
"None",
"else",
"title",
"title",
",",
"title_graph",
"=",
"''",
",",
"''",
"if",
"basis",
"is",
"None",
":",
"if",
"'X_draw_graph_fa'",
"in",
"adata",
".",
"obsm",
".",
"keys",
"(",
")",
":",
"basis",
"=",
"'draw_graph_fa'",
"elif",
"'X_umap'",
"in",
"adata",
".",
"obsm",
".",
"keys",
"(",
")",
":",
"basis",
"=",
"'umap'",
"elif",
"'X_tsne'",
"in",
"adata",
".",
"obsm",
".",
"keys",
"(",
")",
":",
"basis",
"=",
"'tsne'",
"elif",
"'X_draw_graph_fr'",
"in",
"adata",
".",
"obsm",
".",
"keys",
"(",
")",
":",
"basis",
"=",
"'draw_graph_fr'",
"else",
":",
"basis",
"=",
"'umap'",
"from",
".",
"scatterplots",
"import",
"plot_scatter",
"plot_scatter",
"(",
"adata",
",",
"ax",
"=",
"axs",
"[",
"0",
"]",
",",
"basis",
"=",
"basis",
",",
"color",
"=",
"color",
",",
"edges",
"=",
"edges",
",",
"alpha",
"=",
"alpha",
",",
"groups",
"=",
"groups",
",",
"components",
"=",
"components",
",",
"legend_loc",
"=",
"legend_loc",
",",
"legend_fontsize",
"=",
"legend_fontsize",
",",
"legend_fontweight",
"=",
"legend_fontweight",
",",
"color_map",
"=",
"color_map",
",",
"palette",
"=",
"palette",
",",
"frameon",
"=",
"frameon",
",",
"size",
"=",
"size",
",",
"title",
"=",
"title",
",",
"show",
"=",
"False",
",",
"save",
"=",
"False",
")",
"if",
"'pos'",
"not",
"in",
"paga_graph_params",
":",
"if",
"color",
"==",
"adata",
".",
"uns",
"[",
"'paga'",
"]",
"[",
"'groups'",
"]",
":",
"paga_graph_params",
"[",
"'pos'",
"]",
"=",
"utils",
".",
"_tmp_cluster_pos",
"else",
":",
"paga_graph_params",
"[",
"'pos'",
"]",
"=",
"adata",
".",
"uns",
"[",
"'paga'",
"]",
"[",
"'pos'",
"]",
"xlim",
",",
"ylim",
"=",
"axs",
"[",
"0",
"]",
".",
"get_xlim",
"(",
")",
",",
"axs",
"[",
"0",
"]",
".",
"get_ylim",
"(",
")",
"axs",
"[",
"1",
"]",
".",
"set_xlim",
"(",
"xlim",
")",
"axs",
"[",
"1",
"]",
".",
"set_ylim",
"(",
"ylim",
")",
"if",
"'labels'",
"in",
"paga_graph_params",
":",
"labels",
"=",
"paga_graph_params",
".",
"pop",
"(",
"'labels'",
")",
"else",
":",
"labels",
"=",
"groups_graph",
"paga",
"(",
"adata",
",",
"ax",
"=",
"axs",
"[",
"1",
"]",
",",
"show",
"=",
"False",
",",
"save",
"=",
"False",
",",
"title",
"=",
"title_graph",
",",
"labels",
"=",
"labels",
",",
"colors",
"=",
"color",
",",
"frameon",
"=",
"frameon",
",",
"*",
"*",
"paga_graph_params",
")",
"if",
"suptitle",
"is",
"not",
"None",
":",
"pl",
".",
"suptitle",
"(",
"suptitle",
")",
"utils",
".",
"savefig_or_show",
"(",
"'paga_compare'",
",",
"show",
"=",
"show",
",",
"save",
"=",
"save",
")",
"if",
"show",
"==",
"False",
":",
"return",
"axs"
] | 28.648649 | 16.846847 |
def handle_backend_error(self, exception):
        """
        See super class satosa.frontends.base.FrontendModule
        :type exception: satosa.exception.SATOSAError
        :rtype: oic.utils.http_util.Response
        """
        auth_req = self._get_authn_request_from_state(exception.state)
        # Build the error response; if the client sent us a state parameter,
        # we should reflect it back according to the spec.
        error_kwargs = {
            "error": "access_denied",
            "error_description": exception.message,
        }
        if 'state' in auth_req:
            error_kwargs["state"] = auth_req['state']
        error_resp = AuthorizationErrorResponse(**error_kwargs)
        satosa_logging(logger, logging.DEBUG, exception.message, exception.state)
        return SeeOther(error_resp.request(auth_req["redirect_uri"], should_fragment_encode(auth_req)))
"def",
"handle_backend_error",
"(",
"self",
",",
"exception",
")",
":",
"auth_req",
"=",
"self",
".",
"_get_authn_request_from_state",
"(",
"exception",
".",
"state",
")",
"# If the client sent us a state parameter, we should reflect it back according to the spec",
"if",
"'state'",
"in",
"auth_req",
":",
"error_resp",
"=",
"AuthorizationErrorResponse",
"(",
"error",
"=",
"\"access_denied\"",
",",
"error_description",
"=",
"exception",
".",
"message",
",",
"state",
"=",
"auth_req",
"[",
"'state'",
"]",
")",
"else",
":",
"error_resp",
"=",
"AuthorizationErrorResponse",
"(",
"error",
"=",
"\"access_denied\"",
",",
"error_description",
"=",
"exception",
".",
"message",
")",
"satosa_logging",
"(",
"logger",
",",
"logging",
".",
"DEBUG",
",",
"exception",
".",
"message",
",",
"exception",
".",
"state",
")",
"return",
"SeeOther",
"(",
"error_resp",
".",
"request",
"(",
"auth_req",
"[",
"\"redirect_uri\"",
"]",
",",
"should_fragment_encode",
"(",
"auth_req",
")",
")",
")"
] | 59.823529 | 27.470588 |
def get(self, url):
        '''Get the entity that corresponds to URL.'''
        key = Robots.robots_url(url)
        entry = self.cache.get(key)
        if entry is None:
            # First request for this robots.txt: create a lazily-refreshed
            # entry bound to its URL.
            entry = ExpiringObject(partial(self.factory, key))
            self.cache[key] = entry
        return entry.get()
"def",
"get",
"(",
"self",
",",
"url",
")",
":",
"robots_url",
"=",
"Robots",
".",
"robots_url",
"(",
"url",
")",
"if",
"robots_url",
"not",
"in",
"self",
".",
"cache",
":",
"self",
".",
"cache",
"[",
"robots_url",
"]",
"=",
"ExpiringObject",
"(",
"partial",
"(",
"self",
".",
"factory",
",",
"robots_url",
")",
")",
"return",
"self",
".",
"cache",
"[",
"robots_url",
"]",
".",
"get",
"(",
")"
] | 47.333333 | 14.333333 |
def list(self):
        """Retrieve all reports for parent app
        Returns:
            :class:`list` of :class:`~swimlane.core.resources.report.Report`: List of all returned reports
        """
        response = self._swimlane.request('get', "reports?appId={}".format(self._app.id))
        reports = []
        for raw_report in response.json():
            # Ignore StatsReports for now
            if raw_report['$type'] == Report._type:
                reports.append(Report(self._app, raw_report))
        return reports
"def",
"list",
"(",
"self",
")",
":",
"raw_reports",
"=",
"self",
".",
"_swimlane",
".",
"request",
"(",
"'get'",
",",
"\"reports?appId={}\"",
".",
"format",
"(",
"self",
".",
"_app",
".",
"id",
")",
")",
".",
"json",
"(",
")",
"# Ignore StatsReports for now",
"return",
"[",
"Report",
"(",
"self",
".",
"_app",
",",
"raw_report",
")",
"for",
"raw_report",
"in",
"raw_reports",
"if",
"raw_report",
"[",
"'$type'",
"]",
"==",
"Report",
".",
"_type",
"]"
] | 49.444444 | 32.444444 |
def get_splitting_stream(self, input_plate_value):
        """
        Get the splitting stream
        :param input_plate_value: The input plate value
        :return: The splitting stream
        """
        if not self.splitting_node:
            return None
        if len(self.splitting_node.plates) == 0:
            # Use global plate value
            return self.splitting_node.streams[None]
        if len(self.splitting_node.plates) > 1:
            raise ValueError("Splitting node cannot live on multiple plates for factor {}"
                             .format(self.factor_id))
        # now len(self.splitting_node.plates) == 1:
        if not self.input_plate and len(self.splitting_node.plates) > 0:
            raise ValueError("Splitting node cannot live on a plate if there is no input plate")
        splitting_plate = self.splitting_node.plates[0]
        if self.input_plate == splitting_plate:
            # Use matching plate value
            splitting_stream = self.splitting_node.streams[input_plate_value]
        else:
            # First check if it's a direct child
            if splitting_plate.is_child(self.input_plate):
                # NOTE(review): len() on the result of filter() only works on
                # Python 2 (where filter returns a list); on Python 3 this
                # raises TypeError -- confirm the target Python version.
                ppv = filter(lambda x: all(p in input_plate_value for p in x), self.input_plate.parent.values)
                if len(ppv) != 1:
                    raise ValueError("Parent plate value not found")
                splitting_stream = self.splitting_node.streams[ppv[0]]
            # Then more generally if it's a descendant
            elif splitting_plate.is_descendant(self.input_plate):
                # Here we need to find the splitting plate value that is valid for the
                # current input plate value
                # TODO: This needs checking - is the logic still the same as for the case above?
                ppv = filter(lambda x: all(p in input_plate_value for p in x), self.input_plate.parent.values)
                if len(ppv) != 1:
                    raise ValueError("Parent plate value not found")
                # NOTE(review): this branch indexes streams with the whole ppv
                # list, while the branch above uses ppv[0] -- looks
                # inconsistent; verify which is intended.
                splitting_stream = self.splitting_node.streams[ppv]
            else:
                raise IncompatiblePlatesError(
                    "Splitting node plate {} does not match input plate {} for factor {}"
                    .format(self.input_plate, self.splitting_node.plates[0], self.factor_id))
        return splitting_stream
"def",
"get_splitting_stream",
"(",
"self",
",",
"input_plate_value",
")",
":",
"if",
"not",
"self",
".",
"splitting_node",
":",
"return",
"None",
"if",
"len",
"(",
"self",
".",
"splitting_node",
".",
"plates",
")",
"==",
"0",
":",
"# Use global plate value",
"return",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"None",
"]",
"if",
"len",
"(",
"self",
".",
"splitting_node",
".",
"plates",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Splitting node cannot live on multiple plates for factor {}\"",
".",
"format",
"(",
"self",
".",
"factor_id",
")",
")",
"# now len(self.splitting_node.plates) == 1:",
"if",
"not",
"self",
".",
"input_plate",
"and",
"len",
"(",
"self",
".",
"splitting_node",
".",
"plates",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Splitting node cannot live on a plate if there is no input plate\"",
")",
"splitting_plate",
"=",
"self",
".",
"splitting_node",
".",
"plates",
"[",
"0",
"]",
"if",
"self",
".",
"input_plate",
"==",
"splitting_plate",
":",
"# Use matching plate value",
"splitting_stream",
"=",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"input_plate_value",
"]",
"else",
":",
"# First check if it's a direct child",
"if",
"splitting_plate",
".",
"is_child",
"(",
"self",
".",
"input_plate",
")",
":",
"ppv",
"=",
"filter",
"(",
"lambda",
"x",
":",
"all",
"(",
"p",
"in",
"input_plate_value",
"for",
"p",
"in",
"x",
")",
",",
"self",
".",
"input_plate",
".",
"parent",
".",
"values",
")",
"if",
"len",
"(",
"ppv",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Parent plate value not found\"",
")",
"splitting_stream",
"=",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"ppv",
"[",
"0",
"]",
"]",
"# Then more generally if it's a descendant",
"elif",
"splitting_plate",
".",
"is_descendant",
"(",
"self",
".",
"input_plate",
")",
":",
"# Here we need to find the splitting plate value that is valid for the",
"# current input plate value",
"# TODO: This needs checking - is the logic still the same as for the case above?",
"ppv",
"=",
"filter",
"(",
"lambda",
"x",
":",
"all",
"(",
"p",
"in",
"input_plate_value",
"for",
"p",
"in",
"x",
")",
",",
"self",
".",
"input_plate",
".",
"parent",
".",
"values",
")",
"if",
"len",
"(",
"ppv",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Parent plate value not found\"",
")",
"splitting_stream",
"=",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"ppv",
"]",
"else",
":",
"raise",
"IncompatiblePlatesError",
"(",
"\"Splitting node plate {} does not match input plate {} for factor {}\"",
".",
"format",
"(",
"self",
".",
"input_plate",
",",
"self",
".",
"splitting_node",
".",
"plates",
"[",
"0",
"]",
",",
"self",
".",
"factor_id",
")",
")",
"return",
"splitting_stream"
] | 49.234043 | 23.744681 |
def urlunparse(parts):
"""Unparse and encode parts of a URI."""
scheme, netloc, path, params, query, fragment = parts
# Avoid encoding the windows drive letter colon
if RE_DRIVE_LETTER_PATH.match(path):
quoted_path = path[:3] + parse.quote(path[3:])
else:
quoted_path = parse.quote(path)
return parse.urlunparse((
parse.quote(scheme),
parse.quote(netloc),
quoted_path,
parse.quote(params),
parse.quote(query),
parse.quote(fragment)
)) | [
"def",
"urlunparse",
"(",
"parts",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"params",
",",
"query",
",",
"fragment",
"=",
"parts",
"# Avoid encoding the windows drive letter colon",
"if",
"RE_DRIVE_LETTER_PATH",
".",
"match",
"(",
"path",
")",
":",
"quoted_path",
"=",
"path",
"[",
":",
"3",
"]",
"+",
"parse",
".",
"quote",
"(",
"path",
"[",
"3",
":",
"]",
")",
"else",
":",
"quoted_path",
"=",
"parse",
".",
"quote",
"(",
"path",
")",
"return",
"parse",
".",
"urlunparse",
"(",
"(",
"parse",
".",
"quote",
"(",
"scheme",
")",
",",
"parse",
".",
"quote",
"(",
"netloc",
")",
",",
"quoted_path",
",",
"parse",
".",
"quote",
"(",
"params",
")",
",",
"parse",
".",
"quote",
"(",
"query",
")",
",",
"parse",
".",
"quote",
"(",
"fragment",
")",
")",
")"
] | 28.388889 | 16.5 |
def _scheduleUpgrade(self,
ev_data: UpgradeLogData,
failTimeout) -> None:
"""
Schedules node upgrade to a newer version
:param ev_data: upgrade event parameters
"""
logger.info(
"{}'s upgrader processing upgrade for version {}={}"
.format(self, ev_data.pkg_name, ev_data.version))
now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
self._notifier.sendMessageUponNodeUpgradeScheduled(
"Upgrade of package {} on node '{}' to version {} "
"has been scheduled on {}"
.format(ev_data.pkg_name, self.nodeName,
ev_data.version, ev_data.when))
self._actionLog.append_scheduled(ev_data)
callAgent = partial(self._callUpgradeAgent, ev_data, failTimeout)
delay = 0
if now < ev_data.when:
delay = (ev_data.when - now).total_seconds()
self.scheduledAction = ev_data
self._schedule(callAgent, delay) | [
"def",
"_scheduleUpgrade",
"(",
"self",
",",
"ev_data",
":",
"UpgradeLogData",
",",
"failTimeout",
")",
"->",
"None",
":",
"logger",
".",
"info",
"(",
"\"{}'s upgrader processing upgrade for version {}={}\"",
".",
"format",
"(",
"self",
",",
"ev_data",
".",
"pkg_name",
",",
"ev_data",
".",
"version",
")",
")",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"dateutil",
".",
"tz",
".",
"tzutc",
"(",
")",
")",
"self",
".",
"_notifier",
".",
"sendMessageUponNodeUpgradeScheduled",
"(",
"\"Upgrade of package {} on node '{}' to version {} \"",
"\"has been scheduled on {}\"",
".",
"format",
"(",
"ev_data",
".",
"pkg_name",
",",
"self",
".",
"nodeName",
",",
"ev_data",
".",
"version",
",",
"ev_data",
".",
"when",
")",
")",
"self",
".",
"_actionLog",
".",
"append_scheduled",
"(",
"ev_data",
")",
"callAgent",
"=",
"partial",
"(",
"self",
".",
"_callUpgradeAgent",
",",
"ev_data",
",",
"failTimeout",
")",
"delay",
"=",
"0",
"if",
"now",
"<",
"ev_data",
".",
"when",
":",
"delay",
"=",
"(",
"ev_data",
".",
"when",
"-",
"now",
")",
".",
"total_seconds",
"(",
")",
"self",
".",
"scheduledAction",
"=",
"ev_data",
"self",
".",
"_schedule",
"(",
"callAgent",
",",
"delay",
")"
] | 39.153846 | 16.076923 |
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref)
for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ()) | [
"def",
"resolve_egg_link",
"(",
"path",
")",
":",
"referenced_paths",
"=",
"non_empty_lines",
"(",
"path",
")",
"resolved_paths",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"ref",
")",
"for",
"ref",
"in",
"referenced_paths",
")",
"dist_groups",
"=",
"map",
"(",
"find_distributions",
",",
"resolved_paths",
")",
"return",
"next",
"(",
"dist_groups",
",",
"(",
")",
")"
] | 31.166667 | 10.666667 |
def order_market_sell(self, **params):
"""Send in a new market sell order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.order_market(**params) | [
"def",
"order_market_sell",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"params",
".",
"update",
"(",
"{",
"'side'",
":",
"self",
".",
"SIDE_SELL",
"}",
")",
"return",
"self",
".",
"order_market",
"(",
"*",
"*",
"params",
")"
] | 40.8 | 27.96 |
def _parse_json(self, page, exactly_one):
"""Returns location, (latitude, longitude) from json feed."""
if not page.get('success'):
return None
latitude = page['latitude']
longitude = page['longitude']
place = page.get('place')
address = ", ".join([place['city'], place['countryCode']])
result = Location(address, (latitude, longitude), page)
if exactly_one:
return result
else:
return [result] | [
"def",
"_parse_json",
"(",
"self",
",",
"page",
",",
"exactly_one",
")",
":",
"if",
"not",
"page",
".",
"get",
"(",
"'success'",
")",
":",
"return",
"None",
"latitude",
"=",
"page",
"[",
"'latitude'",
"]",
"longitude",
"=",
"page",
"[",
"'longitude'",
"]",
"place",
"=",
"page",
".",
"get",
"(",
"'place'",
")",
"address",
"=",
"\", \"",
".",
"join",
"(",
"[",
"place",
"[",
"'city'",
"]",
",",
"place",
"[",
"'countryCode'",
"]",
"]",
")",
"result",
"=",
"Location",
"(",
"address",
",",
"(",
"latitude",
",",
"longitude",
")",
",",
"page",
")",
"if",
"exactly_one",
":",
"return",
"result",
"else",
":",
"return",
"[",
"result",
"]"
] | 30.625 | 17.4375 |
def set_zero_config(self):
"""Set config such that radiative forcing and temperature output will be zero
This method is intended as a convenience only, it does not handle everything in
an obvious way. Adjusting the parameter settings still requires great care and
may behave unepexctedly.
"""
# zero_emissions is imported from scenarios module
zero_emissions.write(join(self.run_dir, self._scen_file_name), self.version)
time = zero_emissions.filter(variable="Emissions|CH4", region="World")[
"time"
].values
no_timesteps = len(time)
# value doesn't actually matter as calculations are done from difference but
# chose sensible value nonetheless
ch4_conc_pi = 722
ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
ch4_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CH4",
"unit": "ppb",
"todo": "SET",
"region": "World",
"value": ch4_conc,
}
)
ch4_conc_writer = MAGICCData(ch4_conc_df)
ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
ch4_conc_writer.metadata = {
"header": "Constant pre-industrial CH4 concentrations"
}
ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename), self.version)
fgas_conc_pi = 0
fgas_conc = fgas_conc_pi * np.ones(no_timesteps)
# MAGICC6 doesn't read this so not a problem, for MAGICC7 we might have to
# write each file separately
varname = "FGAS_CONC"
fgas_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": varname,
"unit": "ppt",
"todo": "SET",
"region": "World",
"value": fgas_conc,
}
)
fgas_conc_writer = MAGICCData(fgas_conc_df)
fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
fgas_conc_writer.metadata = {"header": "Zero concentrations"}
fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename), self.version)
emis_config = self._fix_any_backwards_emissions_scen_key_in_config(
{"file_emissionscenario": self._scen_file_name}
)
self.set_config(
**emis_config,
rf_initialization_method="ZEROSTARTSHIFT",
rf_total_constantafteryr=10000,
file_co2i_emis="",
file_co2b_emis="",
co2_switchfromconc2emis_year=1750,
file_ch4i_emis="",
file_ch4b_emis="",
file_ch4n_emis="",
file_ch4_conc=ch4_conc_filename,
ch4_switchfromconc2emis_year=10000,
file_n2oi_emis="",
file_n2ob_emis="",
file_n2on_emis="",
file_n2o_conc="",
n2o_switchfromconc2emis_year=1750,
file_noxi_emis="",
file_noxb_emis="",
file_noxi_ot="",
file_noxb_ot="",
file_noxt_rf="",
file_soxnb_ot="",
file_soxi_ot="",
file_soxt_rf="",
file_soxi_emis="",
file_soxb_emis="",
file_soxn_emis="",
file_oci_emis="",
file_ocb_emis="",
file_oci_ot="",
file_ocb_ot="",
file_oci_rf="",
file_ocb_rf="",
file_bci_emis="",
file_bcb_emis="",
file_bci_ot="",
file_bcb_ot="",
file_bci_rf="",
file_bcb_rf="",
bcoc_switchfromrf2emis_year=1750,
file_nh3i_emis="",
file_nh3b_emis="",
file_nmvoci_emis="",
file_nmvocb_emis="",
file_coi_emis="",
file_cob_emis="",
file_mineraldust_rf="",
file_landuse_rf="",
file_bcsnow_rf="",
# rf_fgassum_scale=0, # this appears to do nothing, hence the next two lines
file_fgas_conc=[fgas_conc_filename] * 12,
fgas_switchfromconc2emis_year=10000,
rf_mhalosum_scale=0,
mhalo_switch_conc2emis_yr=1750,
stratoz_o3scale=0,
rf_volcanic_scale=0,
rf_solar_scale=0,
) | [
"def",
"set_zero_config",
"(",
"self",
")",
":",
"# zero_emissions is imported from scenarios module",
"zero_emissions",
".",
"write",
"(",
"join",
"(",
"self",
".",
"run_dir",
",",
"self",
".",
"_scen_file_name",
")",
",",
"self",
".",
"version",
")",
"time",
"=",
"zero_emissions",
".",
"filter",
"(",
"variable",
"=",
"\"Emissions|CH4\"",
",",
"region",
"=",
"\"World\"",
")",
"[",
"\"time\"",
"]",
".",
"values",
"no_timesteps",
"=",
"len",
"(",
"time",
")",
"# value doesn't actually matter as calculations are done from difference but",
"# chose sensible value nonetheless",
"ch4_conc_pi",
"=",
"722",
"ch4_conc",
"=",
"ch4_conc_pi",
"*",
"np",
".",
"ones",
"(",
"no_timesteps",
")",
"ch4_conc_df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"time\"",
":",
"time",
",",
"\"scenario\"",
":",
"\"idealised\"",
",",
"\"model\"",
":",
"\"unspecified\"",
",",
"\"climate_model\"",
":",
"\"unspecified\"",
",",
"\"variable\"",
":",
"\"Atmospheric Concentrations|CH4\"",
",",
"\"unit\"",
":",
"\"ppb\"",
",",
"\"todo\"",
":",
"\"SET\"",
",",
"\"region\"",
":",
"\"World\"",
",",
"\"value\"",
":",
"ch4_conc",
",",
"}",
")",
"ch4_conc_writer",
"=",
"MAGICCData",
"(",
"ch4_conc_df",
")",
"ch4_conc_filename",
"=",
"\"HIST_CONSTANT_CH4_CONC.IN\"",
"ch4_conc_writer",
".",
"metadata",
"=",
"{",
"\"header\"",
":",
"\"Constant pre-industrial CH4 concentrations\"",
"}",
"ch4_conc_writer",
".",
"write",
"(",
"join",
"(",
"self",
".",
"run_dir",
",",
"ch4_conc_filename",
")",
",",
"self",
".",
"version",
")",
"fgas_conc_pi",
"=",
"0",
"fgas_conc",
"=",
"fgas_conc_pi",
"*",
"np",
".",
"ones",
"(",
"no_timesteps",
")",
"# MAGICC6 doesn't read this so not a problem, for MAGICC7 we might have to",
"# write each file separately",
"varname",
"=",
"\"FGAS_CONC\"",
"fgas_conc_df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"time\"",
":",
"time",
",",
"\"scenario\"",
":",
"\"idealised\"",
",",
"\"model\"",
":",
"\"unspecified\"",
",",
"\"climate_model\"",
":",
"\"unspecified\"",
",",
"\"variable\"",
":",
"varname",
",",
"\"unit\"",
":",
"\"ppt\"",
",",
"\"todo\"",
":",
"\"SET\"",
",",
"\"region\"",
":",
"\"World\"",
",",
"\"value\"",
":",
"fgas_conc",
",",
"}",
")",
"fgas_conc_writer",
"=",
"MAGICCData",
"(",
"fgas_conc_df",
")",
"fgas_conc_filename",
"=",
"\"HIST_ZERO_{}.IN\"",
".",
"format",
"(",
"varname",
")",
"fgas_conc_writer",
".",
"metadata",
"=",
"{",
"\"header\"",
":",
"\"Zero concentrations\"",
"}",
"fgas_conc_writer",
".",
"write",
"(",
"join",
"(",
"self",
".",
"run_dir",
",",
"fgas_conc_filename",
")",
",",
"self",
".",
"version",
")",
"emis_config",
"=",
"self",
".",
"_fix_any_backwards_emissions_scen_key_in_config",
"(",
"{",
"\"file_emissionscenario\"",
":",
"self",
".",
"_scen_file_name",
"}",
")",
"self",
".",
"set_config",
"(",
"*",
"*",
"emis_config",
",",
"rf_initialization_method",
"=",
"\"ZEROSTARTSHIFT\"",
",",
"rf_total_constantafteryr",
"=",
"10000",
",",
"file_co2i_emis",
"=",
"\"\"",
",",
"file_co2b_emis",
"=",
"\"\"",
",",
"co2_switchfromconc2emis_year",
"=",
"1750",
",",
"file_ch4i_emis",
"=",
"\"\"",
",",
"file_ch4b_emis",
"=",
"\"\"",
",",
"file_ch4n_emis",
"=",
"\"\"",
",",
"file_ch4_conc",
"=",
"ch4_conc_filename",
",",
"ch4_switchfromconc2emis_year",
"=",
"10000",
",",
"file_n2oi_emis",
"=",
"\"\"",
",",
"file_n2ob_emis",
"=",
"\"\"",
",",
"file_n2on_emis",
"=",
"\"\"",
",",
"file_n2o_conc",
"=",
"\"\"",
",",
"n2o_switchfromconc2emis_year",
"=",
"1750",
",",
"file_noxi_emis",
"=",
"\"\"",
",",
"file_noxb_emis",
"=",
"\"\"",
",",
"file_noxi_ot",
"=",
"\"\"",
",",
"file_noxb_ot",
"=",
"\"\"",
",",
"file_noxt_rf",
"=",
"\"\"",
",",
"file_soxnb_ot",
"=",
"\"\"",
",",
"file_soxi_ot",
"=",
"\"\"",
",",
"file_soxt_rf",
"=",
"\"\"",
",",
"file_soxi_emis",
"=",
"\"\"",
",",
"file_soxb_emis",
"=",
"\"\"",
",",
"file_soxn_emis",
"=",
"\"\"",
",",
"file_oci_emis",
"=",
"\"\"",
",",
"file_ocb_emis",
"=",
"\"\"",
",",
"file_oci_ot",
"=",
"\"\"",
",",
"file_ocb_ot",
"=",
"\"\"",
",",
"file_oci_rf",
"=",
"\"\"",
",",
"file_ocb_rf",
"=",
"\"\"",
",",
"file_bci_emis",
"=",
"\"\"",
",",
"file_bcb_emis",
"=",
"\"\"",
",",
"file_bci_ot",
"=",
"\"\"",
",",
"file_bcb_ot",
"=",
"\"\"",
",",
"file_bci_rf",
"=",
"\"\"",
",",
"file_bcb_rf",
"=",
"\"\"",
",",
"bcoc_switchfromrf2emis_year",
"=",
"1750",
",",
"file_nh3i_emis",
"=",
"\"\"",
",",
"file_nh3b_emis",
"=",
"\"\"",
",",
"file_nmvoci_emis",
"=",
"\"\"",
",",
"file_nmvocb_emis",
"=",
"\"\"",
",",
"file_coi_emis",
"=",
"\"\"",
",",
"file_cob_emis",
"=",
"\"\"",
",",
"file_mineraldust_rf",
"=",
"\"\"",
",",
"file_landuse_rf",
"=",
"\"\"",
",",
"file_bcsnow_rf",
"=",
"\"\"",
",",
"# rf_fgassum_scale=0, # this appears to do nothing, hence the next two lines",
"file_fgas_conc",
"=",
"[",
"fgas_conc_filename",
"]",
"*",
"12",
",",
"fgas_switchfromconc2emis_year",
"=",
"10000",
",",
"rf_mhalosum_scale",
"=",
"0",
",",
"mhalo_switch_conc2emis_yr",
"=",
"1750",
",",
"stratoz_o3scale",
"=",
"0",
",",
"rf_volcanic_scale",
"=",
"0",
",",
"rf_solar_scale",
"=",
"0",
",",
")"
] | 36.544715 | 15.243902 |
def tcp_traceflow(packet):
"""Trace packet flow for TCP."""
if 'TCP' in packet:
ip = packet.ip if 'IP' in packet else packet.ipv6
tcp = packet.tcp
data = dict(
protocol=LINKTYPE.get(packet.layers[0].layer_name.upper()), # data link type from global header
index=int(packet.number), # frame number
frame=packet2dict(packet), # extracted packet
syn=bool(int(tcp.flags_syn)), # TCP synchronise (SYN) flag
fin=bool(int(tcp.flags_fin)), # TCP finish (FIN) flag
src=ipaddress.ip_address(ip.src), # source IP
dst=ipaddress.ip_address(ip.dst), # destination IP
srcport=int(tcp.srcport), # TCP source port
dstport=int(tcp.dstport), # TCP destination port
timestamp=packet.frame_info.time_epoch, # timestamp
)
return True, data
return False, None | [
"def",
"tcp_traceflow",
"(",
"packet",
")",
":",
"if",
"'TCP'",
"in",
"packet",
":",
"ip",
"=",
"packet",
".",
"ip",
"if",
"'IP'",
"in",
"packet",
"else",
"packet",
".",
"ipv6",
"tcp",
"=",
"packet",
".",
"tcp",
"data",
"=",
"dict",
"(",
"protocol",
"=",
"LINKTYPE",
".",
"get",
"(",
"packet",
".",
"layers",
"[",
"0",
"]",
".",
"layer_name",
".",
"upper",
"(",
")",
")",
",",
"# data link type from global header",
"index",
"=",
"int",
"(",
"packet",
".",
"number",
")",
",",
"# frame number",
"frame",
"=",
"packet2dict",
"(",
"packet",
")",
",",
"# extracted packet",
"syn",
"=",
"bool",
"(",
"int",
"(",
"tcp",
".",
"flags_syn",
")",
")",
",",
"# TCP synchronise (SYN) flag",
"fin",
"=",
"bool",
"(",
"int",
"(",
"tcp",
".",
"flags_fin",
")",
")",
",",
"# TCP finish (FIN) flag",
"src",
"=",
"ipaddress",
".",
"ip_address",
"(",
"ip",
".",
"src",
")",
",",
"# source IP",
"dst",
"=",
"ipaddress",
".",
"ip_address",
"(",
"ip",
".",
"dst",
")",
",",
"# destination IP",
"srcport",
"=",
"int",
"(",
"tcp",
".",
"srcport",
")",
",",
"# TCP source port",
"dstport",
"=",
"int",
"(",
"tcp",
".",
"dstport",
")",
",",
"# TCP destination port",
"timestamp",
"=",
"packet",
".",
"frame_info",
".",
"time_epoch",
",",
"# timestamp",
")",
"return",
"True",
",",
"data",
"return",
"False",
",",
"None"
] | 63 | 37 |
def is_legal_sequence(self, packet: DataPacket) -> bool:
"""
Check if the Sequence number of the DataPacket is legal.
For more information see page 17 of http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf.
:param packet: the packet to check
:return: true if the sequence is legal. False if the sequence number is bad
"""
# if the sequence of the packet is smaller than the last received sequence, return false
# therefore calculate the difference between the two values:
try: # try, because self.lastSequence might not been initialized
diff = packet.sequence - self.lastSequence[packet.universe]
# if diff is between ]-20,0], return False for a bad packet sequence
if 0 >= diff > -20:
return False
except:
pass
# if the sequence is good, return True and refresh the list with the new value
self.lastSequence[packet.universe] = packet.sequence
return True | [
"def",
"is_legal_sequence",
"(",
"self",
",",
"packet",
":",
"DataPacket",
")",
"->",
"bool",
":",
"# if the sequence of the packet is smaller than the last received sequence, return false",
"# therefore calculate the difference between the two values:",
"try",
":",
"# try, because self.lastSequence might not been initialized",
"diff",
"=",
"packet",
".",
"sequence",
"-",
"self",
".",
"lastSequence",
"[",
"packet",
".",
"universe",
"]",
"# if diff is between ]-20,0], return False for a bad packet sequence",
"if",
"0",
">=",
"diff",
">",
"-",
"20",
":",
"return",
"False",
"except",
":",
"pass",
"# if the sequence is good, return True and refresh the list with the new value",
"self",
".",
"lastSequence",
"[",
"packet",
".",
"universe",
"]",
"=",
"packet",
".",
"sequence",
"return",
"True"
] | 53.052632 | 25.684211 |
def _microcanonical_average_spanning_cluster(has_spanning_cluster, alpha):
r'''
Compute the average number of runs that have a spanning cluster
Helper function for :func:`microcanonical_averages`
Parameters
----------
has_spanning_cluster : 1-D :py:class:`numpy.ndarray` of bool
Each entry is the ``has_spanning_cluster`` field of the output of
:func:`sample_states`:
An entry is ``True`` if there is a spanning cluster in that respective
run, and ``False`` otherwise.
alpha : float
Significance level.
Returns
-------
ret : dict
Spanning cluster statistics
ret['spanning_cluster'] : float
The average relative number (Binomial proportion) of runs that have a
spanning cluster.
This is the Bayesian point estimate of the posterior mean, with a
uniform prior.
ret['spanning_cluster_ci'] : 1-D :py:class:`numpy.ndarray` of float, size 2
The lower and upper bounds of the Binomial proportion confidence
interval with uniform prior.
See Also
--------
sample_states : spanning cluster detection
microcanonical_averages : spanning cluster statistics
Notes
-----
Averages and confidence intervals for Binomial proportions
As Cameron [8]_ puts it, the normal approximation to the confidence
interval for a Binomial proportion :math:`p` "suffers a *systematic*
decline in performance (...) towards extreme values of :math:`p` near
:math:`0` and :math:`1`, generating binomial [confidence intervals]
with effective coverage far below the desired level." (see also
References [6]_ and [7]_).
A different approach to quantifying uncertainty is Bayesian inference.
[5]_
For :math:`n` independent Bernoulli trails with common success
probability :math:`p`, the *likelihood* to have :math:`k` successes
given :math:`p` is the binomial distribution
.. math::
P(k|p) = \binom{n}{k} p^k (1-p)^{n-k} \equiv B(a,b),
where :math:`B(a, b)` is the *Beta distribution* with parameters
:math:`a = k + 1` and :math:`b = n - k + 1`.
Assuming a uniform prior :math:`P(p) = 1`, the *posterior* is [5]_
.. math::
P(p|k) = P(k|p)=B(a,b).
A point estimate is the posterior mean
.. math::
\bar{p} = \frac{k+1}{n+2}
with the :math:`1 - \alpha` credible interval :math:`(p_l, p_u)` given
by
.. math::
\int_0^{p_l} dp B(a,b) = \int_{p_u}^1 dp B(a,b) = \frac{\alpha}{2}.
References
----------
.. [5] Wasserman, L. All of Statistics (Springer New York, 2004),
`doi:10.1007/978-0-387-21736-9 <http://dx.doi.org/10.1007/978-0-387-21736-9>`_.
.. [6] DasGupta, A., Cai, T. T. & Brown, L. D. Interval Estimation for a
Binomial Proportion. Statistical Science 16, 101-133 (2001).
`doi:10.1214/ss/1009213286 <http://dx.doi.org/10.1214/ss/1009213286>`_.
.. [7] Agresti, A. & Coull, B. A. Approximate is Better than "Exact" for
Interval Estimation of Binomial Proportions. The American Statistician
52, 119-126 (1998),
`doi:10.2307/2685469 <http://dx.doi.org/10.2307/2685469>`_.
.. [8] Cameron, E. On the Estimation of Confidence Intervals for Binomial
Population Proportions in Astronomy: The Simplicity and Superiority of
the Bayesian Approach. Publications of the Astronomical Society of
Australia 28, 128-139 (2011),
`doi:10.1071/as10046 <http://dx.doi.org/10.1071/as10046>`_.
'''
ret = dict()
runs = has_spanning_cluster.size
# Bayesian posterior mean for Binomial proportion (uniform prior)
k = has_spanning_cluster.sum(dtype=np.float)
ret['spanning_cluster'] = (
(k + 1) / (runs + 2)
)
# Bayesian credible interval for Binomial proportion (uniform
# prior)
ret['spanning_cluster_ci'] = scipy.stats.beta.ppf(
[alpha / 2, 1 - alpha / 2], k + 1, runs - k + 1
)
return ret | [
"def",
"_microcanonical_average_spanning_cluster",
"(",
"has_spanning_cluster",
",",
"alpha",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"runs",
"=",
"has_spanning_cluster",
".",
"size",
"# Bayesian posterior mean for Binomial proportion (uniform prior)",
"k",
"=",
"has_spanning_cluster",
".",
"sum",
"(",
"dtype",
"=",
"np",
".",
"float",
")",
"ret",
"[",
"'spanning_cluster'",
"]",
"=",
"(",
"(",
"k",
"+",
"1",
")",
"/",
"(",
"runs",
"+",
"2",
")",
")",
"# Bayesian credible interval for Binomial proportion (uniform",
"# prior)",
"ret",
"[",
"'spanning_cluster_ci'",
"]",
"=",
"scipy",
".",
"stats",
".",
"beta",
".",
"ppf",
"(",
"[",
"alpha",
"/",
"2",
",",
"1",
"-",
"alpha",
"/",
"2",
"]",
",",
"k",
"+",
"1",
",",
"runs",
"-",
"k",
"+",
"1",
")",
"return",
"ret"
] | 31.609756 | 28.97561 |
def can_route(self, endpoint, method=None, **kwargs):
"""Make sure we can route to the given endpoint or url.
This checks for `http.get` permission (or other methods) on the ACL of
route functions, attached via the `ACL` decorator.
:param endpoint: A URL or endpoint to check for permission to access.
:param method: The HTTP method to check; defaults to `'GET'`.
:param **kwargs: The context to pass to predicates.
"""
view = flask.current_app.view_functions.get(endpoint)
if not view:
endpoint, args = flask._request_ctx.top.match(endpoint)
view = flask.current_app.view_functions.get(endpoint)
if not view:
return False
return self.can('http.' + (method or 'GET').lower(), view, **kwargs) | [
"def",
"can_route",
"(",
"self",
",",
"endpoint",
",",
"method",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"view",
"=",
"flask",
".",
"current_app",
".",
"view_functions",
".",
"get",
"(",
"endpoint",
")",
"if",
"not",
"view",
":",
"endpoint",
",",
"args",
"=",
"flask",
".",
"_request_ctx",
".",
"top",
".",
"match",
"(",
"endpoint",
")",
"view",
"=",
"flask",
".",
"current_app",
".",
"view_functions",
".",
"get",
"(",
"endpoint",
")",
"if",
"not",
"view",
":",
"return",
"False",
"return",
"self",
".",
"can",
"(",
"'http.'",
"+",
"(",
"method",
"or",
"'GET'",
")",
".",
"lower",
"(",
")",
",",
"view",
",",
"*",
"*",
"kwargs",
")"
] | 40.05 | 25.95 |
def drive(self, speed, rotation_speed, tm_diff):
"""Call this from your :func:`PhysicsEngine.update_sim` function.
Will update the robot's position on the simulation field.
You can either calculate the speed & rotation manually, or you
can use the predefined functions in :mod:`pyfrc.physics.drivetrains`.
The outputs of the `drivetrains.*` functions should be passed
to this function.
.. note:: The simulator currently only allows 2D motion
:param speed: Speed of robot in ft/s
:param rotation_speed: Clockwise rotational speed in radians/s
:param tm_diff: Amount of time speed was traveled (this is the
same value that was passed to update_sim)
"""
# if the robot is disabled, don't do anything
if not self.robot_enabled:
return
distance = speed * tm_diff
angle = rotation_speed * tm_diff
x = distance * math.cos(angle)
y = distance * math.sin(angle)
self.distance_drive(x, y, angle) | [
"def",
"drive",
"(",
"self",
",",
"speed",
",",
"rotation_speed",
",",
"tm_diff",
")",
":",
"# if the robot is disabled, don't do anything",
"if",
"not",
"self",
".",
"robot_enabled",
":",
"return",
"distance",
"=",
"speed",
"*",
"tm_diff",
"angle",
"=",
"rotation_speed",
"*",
"tm_diff",
"x",
"=",
"distance",
"*",
"math",
".",
"cos",
"(",
"angle",
")",
"y",
"=",
"distance",
"*",
"math",
".",
"sin",
"(",
"angle",
")",
"self",
".",
"distance_drive",
"(",
"x",
",",
"y",
",",
"angle",
")"
] | 38.758621 | 22.62069 |
def create_host_template(resource_root, name, cluster_name):
"""
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
"""
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, data=[apitemplate], api_version=3)[0] | [
"def",
"create_host_template",
"(",
"resource_root",
",",
"name",
",",
"cluster_name",
")",
":",
"apitemplate",
"=",
"ApiHostTemplate",
"(",
"resource_root",
",",
"name",
",",
"[",
"]",
")",
"return",
"call",
"(",
"resource_root",
".",
"post",
",",
"HOST_TEMPLATES_PATH",
"%",
"(",
"cluster_name",
",",
")",
",",
"ApiHostTemplate",
",",
"True",
",",
"data",
"=",
"[",
"apitemplate",
"]",
",",
"api_version",
"=",
"3",
")",
"[",
"0",
"]"
] | 38 | 12.307692 |
def _print_daily_stats(self, conn):
"""
Prints a Today/Last 24 hour stats section.
"""
stats = conn.get_send_statistics()
stats = stats['GetSendStatisticsResponse']['GetSendStatisticsResult']
stats = stats['SendDataPoints']
today = datetime.date.today()
current_day = {'HeaderName': 'Current Day: %s/%s' % (today.month,
today.day)}
prev_day = {'HeaderName': 'Past two weeks'}
for data_point in stats:
if self._is_data_from_today(data_point):
day_dict = current_day
else:
day_dict = prev_day
self._update_day_dict(data_point, day_dict)
for day in [current_day, prev_day]:
print "--- %s ---" % day.get('HeaderName', 0)
print " Delivery attempts: %s" % day.get('DeliveryAttempts', 0)
print " Bounces: %s" % day.get('Bounces', 0)
print " Rejects: %s" % day.get('Rejects', 0)
print " Complaints: %s" % day.get('Complaints', 0) | [
"def",
"_print_daily_stats",
"(",
"self",
",",
"conn",
")",
":",
"stats",
"=",
"conn",
".",
"get_send_statistics",
"(",
")",
"stats",
"=",
"stats",
"[",
"'GetSendStatisticsResponse'",
"]",
"[",
"'GetSendStatisticsResult'",
"]",
"stats",
"=",
"stats",
"[",
"'SendDataPoints'",
"]",
"today",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"current_day",
"=",
"{",
"'HeaderName'",
":",
"'Current Day: %s/%s'",
"%",
"(",
"today",
".",
"month",
",",
"today",
".",
"day",
")",
"}",
"prev_day",
"=",
"{",
"'HeaderName'",
":",
"'Past two weeks'",
"}",
"for",
"data_point",
"in",
"stats",
":",
"if",
"self",
".",
"_is_data_from_today",
"(",
"data_point",
")",
":",
"day_dict",
"=",
"current_day",
"else",
":",
"day_dict",
"=",
"prev_day",
"self",
".",
"_update_day_dict",
"(",
"data_point",
",",
"day_dict",
")",
"for",
"day",
"in",
"[",
"current_day",
",",
"prev_day",
"]",
":",
"print",
"\"--- %s ---\"",
"%",
"day",
".",
"get",
"(",
"'HeaderName'",
",",
"0",
")",
"print",
"\" Delivery attempts: %s\"",
"%",
"day",
".",
"get",
"(",
"'DeliveryAttempts'",
",",
"0",
")",
"print",
"\" Bounces: %s\"",
"%",
"day",
".",
"get",
"(",
"'Bounces'",
",",
"0",
")",
"print",
"\" Rejects: %s\"",
"%",
"day",
".",
"get",
"(",
"'Rejects'",
",",
"0",
")",
"print",
"\" Complaints: %s\"",
"%",
"day",
".",
"get",
"(",
"'Complaints'",
",",
"0",
")"
] | 41.444444 | 16.555556 |
def ephemeral(cls):
"""
Creates a new ephemeral key constructed using a raw 32-byte string from urandom.
Ephemeral keys are used once for each encryption task and are then discarded;
they are not intended for long-term or repeat use.
"""
private_key = nacl.public.PrivateKey(os.urandom(32))
return cls(private_key.public_key, private_key) | [
"def",
"ephemeral",
"(",
"cls",
")",
":",
"private_key",
"=",
"nacl",
".",
"public",
".",
"PrivateKey",
"(",
"os",
".",
"urandom",
"(",
"32",
")",
")",
"return",
"cls",
"(",
"private_key",
".",
"public_key",
",",
"private_key",
")"
] | 48.375 | 20.875 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.