text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def setSr(self, fs):
"""Sets the samplerate of the input operation being plotted"""
self.tracePlot.setSr(fs)
self.stimPlot.setSr(fs)
|
[
"def",
"setSr",
"(",
"self",
",",
"fs",
")",
":",
"self",
".",
"tracePlot",
".",
"setSr",
"(",
"fs",
")",
"self",
".",
"stimPlot",
".",
"setSr",
"(",
"fs",
")"
] | 38.25
| 9.25
|
def money_flow_index(close_data, high_data, low_data, volume, period):
"""
Money Flow Index.
Formula:
MFI = 100 - (100 / (1 + PMF / NMF))
"""
catch_errors.check_for_input_len_diff(
close_data, high_data, low_data, volume
)
catch_errors.check_for_period_error(close_data, period)
mf = money_flow(close_data, high_data, low_data, volume)
tp = typical_price(close_data, high_data, low_data)
flow = [tp[idx] > tp[idx-1] for idx in range(1, len(tp))]
pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))]
nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))]
pmf = [sum(pf[idx+1-period:idx+1]) for idx in range(period-1, len(pf))]
nmf = [sum(nf[idx+1-period:idx+1]) for idx in range(period-1, len(nf))]
# Dividing by 0 is not an issue, it turns the value into NaN which we would
# want in that case
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
money_ratio = np.array(pmf) / np.array(nmf)
mfi = 100 - (100 / (1 + money_ratio))
mfi = fill_for_noncomputable_vals(close_data, mfi)
return mfi
|
[
"def",
"money_flow_index",
"(",
"close_data",
",",
"high_data",
",",
"low_data",
",",
"volume",
",",
"period",
")",
":",
"catch_errors",
".",
"check_for_input_len_diff",
"(",
"close_data",
",",
"high_data",
",",
"low_data",
",",
"volume",
")",
"catch_errors",
".",
"check_for_period_error",
"(",
"close_data",
",",
"period",
")",
"mf",
"=",
"money_flow",
"(",
"close_data",
",",
"high_data",
",",
"low_data",
",",
"volume",
")",
"tp",
"=",
"typical_price",
"(",
"close_data",
",",
"high_data",
",",
"low_data",
")",
"flow",
"=",
"[",
"tp",
"[",
"idx",
"]",
">",
"tp",
"[",
"idx",
"-",
"1",
"]",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"tp",
")",
")",
"]",
"pf",
"=",
"[",
"mf",
"[",
"idx",
"]",
"if",
"flow",
"[",
"idx",
"]",
"else",
"0",
"for",
"idx",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"flow",
")",
")",
"]",
"nf",
"=",
"[",
"mf",
"[",
"idx",
"]",
"if",
"not",
"flow",
"[",
"idx",
"]",
"else",
"0",
"for",
"idx",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"flow",
")",
")",
"]",
"pmf",
"=",
"[",
"sum",
"(",
"pf",
"[",
"idx",
"+",
"1",
"-",
"period",
":",
"idx",
"+",
"1",
"]",
")",
"for",
"idx",
"in",
"range",
"(",
"period",
"-",
"1",
",",
"len",
"(",
"pf",
")",
")",
"]",
"nmf",
"=",
"[",
"sum",
"(",
"nf",
"[",
"idx",
"+",
"1",
"-",
"period",
":",
"idx",
"+",
"1",
"]",
")",
"for",
"idx",
"in",
"range",
"(",
"period",
"-",
"1",
",",
"len",
"(",
"nf",
")",
")",
"]",
"# Dividing by 0 is not an issue, it turns the value into NaN which we would",
"# want in that case",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"category",
"=",
"RuntimeWarning",
")",
"money_ratio",
"=",
"np",
".",
"array",
"(",
"pmf",
")",
"/",
"np",
".",
"array",
"(",
"nmf",
")",
"mfi",
"=",
"100",
"-",
"(",
"100",
"/",
"(",
"1",
"+",
"money_ratio",
")",
")",
"mfi",
"=",
"fill_for_noncomputable_vals",
"(",
"close_data",
",",
"mfi",
")",
"return",
"mfi"
] | 34.606061
| 23.69697
|
async def connect(self):
"""
Perform ICE handshake.
This coroutine returns if a candidate pair was successfuly nominated
and raises an exception otherwise.
"""
if not self._local_candidates_end:
raise ConnectionError('Local candidates gathering was not performed')
if (self.remote_username is None or
self.remote_password is None):
raise ConnectionError('Remote username or password is missing')
# 5.7.1. Forming Candidate Pairs
for remote_candidate in self._remote_candidates:
for protocol in self._protocols:
if (protocol.local_candidate.can_pair_with(remote_candidate) and
not self._find_pair(protocol, remote_candidate)):
pair = CandidatePair(protocol, remote_candidate)
self._check_list.append(pair)
self.sort_check_list()
self._unfreeze_initial()
# handle early checks
for check in self._early_checks:
self.check_incoming(*check)
self._early_checks = []
# perform checks
while True:
if not self.check_periodic():
break
await asyncio.sleep(0.02)
# wait for completion
if self._check_list:
res = await self._check_list_state.get()
else:
res = ICE_FAILED
# cancel remaining checks
for check in self._check_list:
if check.handle:
check.handle.cancel()
if res != ICE_COMPLETED:
raise ConnectionError('ICE negotiation failed')
# start consent freshness tests
self._query_consent_handle = asyncio.ensure_future(self.query_consent())
|
[
"async",
"def",
"connect",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_local_candidates_end",
":",
"raise",
"ConnectionError",
"(",
"'Local candidates gathering was not performed'",
")",
"if",
"(",
"self",
".",
"remote_username",
"is",
"None",
"or",
"self",
".",
"remote_password",
"is",
"None",
")",
":",
"raise",
"ConnectionError",
"(",
"'Remote username or password is missing'",
")",
"# 5.7.1. Forming Candidate Pairs",
"for",
"remote_candidate",
"in",
"self",
".",
"_remote_candidates",
":",
"for",
"protocol",
"in",
"self",
".",
"_protocols",
":",
"if",
"(",
"protocol",
".",
"local_candidate",
".",
"can_pair_with",
"(",
"remote_candidate",
")",
"and",
"not",
"self",
".",
"_find_pair",
"(",
"protocol",
",",
"remote_candidate",
")",
")",
":",
"pair",
"=",
"CandidatePair",
"(",
"protocol",
",",
"remote_candidate",
")",
"self",
".",
"_check_list",
".",
"append",
"(",
"pair",
")",
"self",
".",
"sort_check_list",
"(",
")",
"self",
".",
"_unfreeze_initial",
"(",
")",
"# handle early checks",
"for",
"check",
"in",
"self",
".",
"_early_checks",
":",
"self",
".",
"check_incoming",
"(",
"*",
"check",
")",
"self",
".",
"_early_checks",
"=",
"[",
"]",
"# perform checks",
"while",
"True",
":",
"if",
"not",
"self",
".",
"check_periodic",
"(",
")",
":",
"break",
"await",
"asyncio",
".",
"sleep",
"(",
"0.02",
")",
"# wait for completion",
"if",
"self",
".",
"_check_list",
":",
"res",
"=",
"await",
"self",
".",
"_check_list_state",
".",
"get",
"(",
")",
"else",
":",
"res",
"=",
"ICE_FAILED",
"# cancel remaining checks",
"for",
"check",
"in",
"self",
".",
"_check_list",
":",
"if",
"check",
".",
"handle",
":",
"check",
".",
"handle",
".",
"cancel",
"(",
")",
"if",
"res",
"!=",
"ICE_COMPLETED",
":",
"raise",
"ConnectionError",
"(",
"'ICE negotiation failed'",
")",
"# start consent freshness tests",
"self",
".",
"_query_consent_handle",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"query_consent",
"(",
")",
")"
] | 33.076923
| 18
|
def _setup_xy(self, p):
"""
Produce pattern coordinate matrices from the bounds and
density (or rows and cols), and transforms them according to
x, y, and orientation.
"""
self.debug("bounds=%s, xdensity=%s, ydensity=%s, x=%s, y=%s, orientation=%s",p.bounds, p.xdensity, p.ydensity, p.x, p.y, p.orientation)
x_points,y_points = SheetCoordinateSystem(p.bounds, p.xdensity, p.ydensity).sheetcoordinates_of_matrixidx()
self.pattern_x, self.pattern_y = self._create_and_rotate_coordinate_arrays(x_points-p.x, y_points-p.y, p)
|
[
"def",
"_setup_xy",
"(",
"self",
",",
"p",
")",
":",
"self",
".",
"debug",
"(",
"\"bounds=%s, xdensity=%s, ydensity=%s, x=%s, y=%s, orientation=%s\"",
",",
"p",
".",
"bounds",
",",
"p",
".",
"xdensity",
",",
"p",
".",
"ydensity",
",",
"p",
".",
"x",
",",
"p",
".",
"y",
",",
"p",
".",
"orientation",
")",
"x_points",
",",
"y_points",
"=",
"SheetCoordinateSystem",
"(",
"p",
".",
"bounds",
",",
"p",
".",
"xdensity",
",",
"p",
".",
"ydensity",
")",
".",
"sheetcoordinates_of_matrixidx",
"(",
")",
"self",
".",
"pattern_x",
",",
"self",
".",
"pattern_y",
"=",
"self",
".",
"_create_and_rotate_coordinate_arrays",
"(",
"x_points",
"-",
"p",
".",
"x",
",",
"y_points",
"-",
"p",
".",
"y",
",",
"p",
")"
] | 52.454545
| 37.181818
|
def find_debugged_frame(frame):
"""Find the first frame that is a debugged frame. We do this
Generally we want traceback information without polluting it with
debugger frames. We can tell these because those are frames on the
top which don't have f_trace set. So we'll look back from the top
to find the fist frame where f_trace is set.
"""
f_prev = f = frame
while f is not None and f.f_trace is None:
f_prev = f
f = f.f_back
pass
if f_prev:
val = f_prev.f_locals.get('tracer_func_frame')
if val == f_prev:
if f_prev.f_back:
f_prev = f_prev.f_back
pass
pass
pass
else:
return frame
return f_prev
|
[
"def",
"find_debugged_frame",
"(",
"frame",
")",
":",
"f_prev",
"=",
"f",
"=",
"frame",
"while",
"f",
"is",
"not",
"None",
"and",
"f",
".",
"f_trace",
"is",
"None",
":",
"f_prev",
"=",
"f",
"f",
"=",
"f",
".",
"f_back",
"pass",
"if",
"f_prev",
":",
"val",
"=",
"f_prev",
".",
"f_locals",
".",
"get",
"(",
"'tracer_func_frame'",
")",
"if",
"val",
"==",
"f_prev",
":",
"if",
"f_prev",
".",
"f_back",
":",
"f_prev",
"=",
"f_prev",
".",
"f_back",
"pass",
"pass",
"pass",
"else",
":",
"return",
"frame",
"return",
"f_prev"
] | 31.73913
| 17.956522
|
def immutable(function):
'''Add the instance internal state as the second parameter
of the decorated function.'''
def wrapper(self, *args, **kwargs):
state = freeze(self._get_state())
return function(self, state, *args, **kwargs)
return wrapper
|
[
"def",
"immutable",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"state",
"=",
"freeze",
"(",
"self",
".",
"_get_state",
"(",
")",
")",
"return",
"function",
"(",
"self",
",",
"state",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | 30
| 18
|
def stochastic_choice(machines):
"""Stochastically choose one random machine distributed along their
scores."""
if len(machines) < 4:
return random.choice(machines)
r = random.random()
if r < 0.5:
return random.choice(machines[:len(machines)/4])
elif r < 0.75:
return random.choice(machines[:len(machines)/2])
else:
return random.choice(machines)
|
[
"def",
"stochastic_choice",
"(",
"machines",
")",
":",
"if",
"len",
"(",
"machines",
")",
"<",
"4",
":",
"return",
"random",
".",
"choice",
"(",
"machines",
")",
"r",
"=",
"random",
".",
"random",
"(",
")",
"if",
"r",
"<",
"0.5",
":",
"return",
"random",
".",
"choice",
"(",
"machines",
"[",
":",
"len",
"(",
"machines",
")",
"/",
"4",
"]",
")",
"elif",
"r",
"<",
"0.75",
":",
"return",
"random",
".",
"choice",
"(",
"machines",
"[",
":",
"len",
"(",
"machines",
")",
"/",
"2",
"]",
")",
"else",
":",
"return",
"random",
".",
"choice",
"(",
"machines",
")"
] | 30.384615
| 14.923077
|
def stop(self, nowait=False):
"""Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then thread will handle remaining items in queue and
stop.
If nowait is True then thread will be stopped even if the queue still
contains items.
"""
self._stop.set()
if nowait:
self._stop_nowait.set()
self.queue.put_nowait(self._sentinel_item)
if (self._thread.isAlive() and
self._thread is not threading.currentThread()):
self._thread.join()
self._thread = None
|
[
"def",
"stop",
"(",
"self",
",",
"nowait",
"=",
"False",
")",
":",
"self",
".",
"_stop",
".",
"set",
"(",
")",
"if",
"nowait",
":",
"self",
".",
"_stop_nowait",
".",
"set",
"(",
")",
"self",
".",
"queue",
".",
"put_nowait",
"(",
"self",
".",
"_sentinel_item",
")",
"if",
"(",
"self",
".",
"_thread",
".",
"isAlive",
"(",
")",
"and",
"self",
".",
"_thread",
"is",
"not",
"threading",
".",
"currentThread",
"(",
")",
")",
":",
"self",
".",
"_thread",
".",
"join",
"(",
")",
"self",
".",
"_thread",
"=",
"None"
] | 40.842105
| 20
|
def update_history(self, it, j=0, M=None, **kwargs):
"""Add the current state for all kwargs to the history
"""
# Create a new entry in the history for new variables (if they don't exist)
if not np.any([k in self.history[j] for k in kwargs]):
for k in kwargs:
if M is None or M == 0:
self.history[j][k] = [[]]
else:
self.history[j][k] = [[] for m in range(M)]
"""
# Check that the variables have been updated once per iteration
elif np.any([[len(h)!=it+self.offset for h in self.history[j][k]] for k in kwargs.keys()]):
for k in kwargs.keys():
for n,h in enumerate(self.history[j][k]):
if len(h) != it+self.offset:
err_str = "At iteration {0}, {1}[{2}] already has {3} entries"
raise Exception(err_str.format(it, k, n, len(h)-self.offset))
"""
# Add the variables to the history
for k,v in kwargs.items():
if M is None or M == 0:
self._store_variable(j, k, 0, v)
else:
for m in range(M):
self._store_variable(j, k, m, v[m])
|
[
"def",
"update_history",
"(",
"self",
",",
"it",
",",
"j",
"=",
"0",
",",
"M",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Create a new entry in the history for new variables (if they don't exist)",
"if",
"not",
"np",
".",
"any",
"(",
"[",
"k",
"in",
"self",
".",
"history",
"[",
"j",
"]",
"for",
"k",
"in",
"kwargs",
"]",
")",
":",
"for",
"k",
"in",
"kwargs",
":",
"if",
"M",
"is",
"None",
"or",
"M",
"==",
"0",
":",
"self",
".",
"history",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"[",
"[",
"]",
"]",
"else",
":",
"self",
".",
"history",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"[",
"[",
"]",
"for",
"m",
"in",
"range",
"(",
"M",
")",
"]",
"\"\"\"\n # Check that the variables have been updated once per iteration\n elif np.any([[len(h)!=it+self.offset for h in self.history[j][k]] for k in kwargs.keys()]):\n for k in kwargs.keys():\n for n,h in enumerate(self.history[j][k]):\n if len(h) != it+self.offset:\n err_str = \"At iteration {0}, {1}[{2}] already has {3} entries\"\n raise Exception(err_str.format(it, k, n, len(h)-self.offset))\n \"\"\"",
"# Add the variables to the history",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"M",
"is",
"None",
"or",
"M",
"==",
"0",
":",
"self",
".",
"_store_variable",
"(",
"j",
",",
"k",
",",
"0",
",",
"v",
")",
"else",
":",
"for",
"m",
"in",
"range",
"(",
"M",
")",
":",
"self",
".",
"_store_variable",
"(",
"j",
",",
"k",
",",
"m",
",",
"v",
"[",
"m",
"]",
")"
] | 47.461538
| 15.884615
|
def versionString(version):
"""Create version string.
For a sequence containing version information such as (2, 0, 0, 'pre'),
this returns a printable string such as '2.0pre'.
The micro version number is only excluded from the string if it is zero.
"""
ver = list(map(str, version))
numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:]
return '.'.join(numbers) + '-'.join(rest)
|
[
"def",
"versionString",
"(",
"version",
")",
":",
"ver",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"version",
")",
")",
"numbers",
",",
"rest",
"=",
"ver",
"[",
":",
"2",
"if",
"ver",
"[",
"2",
"]",
"==",
"'0'",
"else",
"3",
"]",
",",
"ver",
"[",
"3",
":",
"]",
"return",
"'.'",
".",
"join",
"(",
"numbers",
")",
"+",
"'-'",
".",
"join",
"(",
"rest",
")"
] | 36.818182
| 19
|
def wordnet_annotations(self):
"""The list of wordnet annotations of ``words`` layer."""
if not self.is_tagged(WORDNET):
self.tag_wordnet()
return [[a[WORDNET] for a in analysis] for analysis in self.analysis]
|
[
"def",
"wordnet_annotations",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_tagged",
"(",
"WORDNET",
")",
":",
"self",
".",
"tag_wordnet",
"(",
")",
"return",
"[",
"[",
"a",
"[",
"WORDNET",
"]",
"for",
"a",
"in",
"analysis",
"]",
"for",
"analysis",
"in",
"self",
".",
"analysis",
"]"
] | 48.2
| 11.6
|
def write_table(self, table, rows, append=False, gzip=False):
"""
Encode and write out *table* to the profile directory.
Args:
table: The name of the table to write
rows: The rows to write to the table
append: If `True`, append the encoded rows to any existing
data.
gzip: If `True`, compress the resulting table with `gzip`.
The table's filename will have `.gz` appended.
"""
_write_table(self.root,
table,
rows,
self.table_relations(table),
append=append,
gzip=gzip,
encoding=self.encoding)
|
[
"def",
"write_table",
"(",
"self",
",",
"table",
",",
"rows",
",",
"append",
"=",
"False",
",",
"gzip",
"=",
"False",
")",
":",
"_write_table",
"(",
"self",
".",
"root",
",",
"table",
",",
"rows",
",",
"self",
".",
"table_relations",
"(",
"table",
")",
",",
"append",
"=",
"append",
",",
"gzip",
"=",
"gzip",
",",
"encoding",
"=",
"self",
".",
"encoding",
")"
] | 37.947368
| 15.315789
|
def create(cls, files):
"""Creates file group and returns ``FileGroup`` instance.
It expects iterable object that contains ``File`` instances, e.g.::
>>> file_1 = File('6c5e9526-b0fe-4739-8975-72e8d5ee6342')
>>> file_2 = File('a771f854-c2cb-408a-8c36-71af77811f3b')
>>> FileGroup.create((file_1, file_2))
<uploadcare.FileGroup 0513dda0-6666-447d-846f-096e5df9e2bb~2>
"""
data = {}
for index, file_ in enumerate(files):
if isinstance(file_, File):
file_index = 'files[{index}]'.format(index=index)
data[file_index] = six.text_type(file_)
else:
raise InvalidParamError(
'all items have to be ``File`` instance'
)
if not data:
raise InvalidParamError('set of files is empty')
group_info = uploading_request('POST', 'group/', data=data)
group = cls.construct_from(group_info)
return group
|
[
"def",
"create",
"(",
"cls",
",",
"files",
")",
":",
"data",
"=",
"{",
"}",
"for",
"index",
",",
"file_",
"in",
"enumerate",
"(",
"files",
")",
":",
"if",
"isinstance",
"(",
"file_",
",",
"File",
")",
":",
"file_index",
"=",
"'files[{index}]'",
".",
"format",
"(",
"index",
"=",
"index",
")",
"data",
"[",
"file_index",
"]",
"=",
"six",
".",
"text_type",
"(",
"file_",
")",
"else",
":",
"raise",
"InvalidParamError",
"(",
"'all items have to be ``File`` instance'",
")",
"if",
"not",
"data",
":",
"raise",
"InvalidParamError",
"(",
"'set of files is empty'",
")",
"group_info",
"=",
"uploading_request",
"(",
"'POST'",
",",
"'group/'",
",",
"data",
"=",
"data",
")",
"group",
"=",
"cls",
".",
"construct_from",
"(",
"group_info",
")",
"return",
"group"
] | 37.148148
| 21.518519
|
def find_clusters(struct, connected_list):
"""
Finds bonded clusters of atoms in the structure with periodic boundary conditions.
If there are atoms that are not bonded to anything, returns [0,1,0].(For faster computation time in FindDimension())
Args:
struct (Structure): Input structure
connected_list: Must be made from the same structure with FindConnected() function.
An array of shape (number of bonded pairs, 2); each row of is of the form [atomi, atomj].
Returns:
max_cluster: the size of the largest cluster in the crystal structure
min_cluster: the size of the smallest cluster in the crystal structure
clusters: list of bonded clusters found here, clusters are formatted as sets of indices of atoms
"""
n_atoms = len(struct.species)
if len(np.unique(connected_list)) != n_atoms:
return [0, 1, 0]
if n_atoms == 0:
return [0, 0, 0]
cluster_sizes = []
clusters = []
for atom in range(n_atoms):
connected_inds = np.where(connected_list == atom)[0]
atom_cluster = np.unique(connected_list[connected_inds])
atom_cluster = set(atom_cluster)
if len(clusters) == 0:
new_clusters = [atom_cluster]
new_cluster_sizes = [len(atom_cluster)]
else:
clusters_w_atom = [atom_cluster]
clusters_noatom = []
clusters_noatom_sizes = []
for cluster in clusters:
if len(cluster.intersection(atom_cluster)) > 0:
clusters_w_atom.append(cluster)
else:
clusters_noatom.append(cluster)
clusters_noatom_sizes.append(len(cluster))
if len(clusters_w_atom) > 1:
clusters_w_atom = [set.union(*clusters_w_atom)]
new_clusters = clusters_noatom + clusters_w_atom
new_cluster_sizes = clusters_noatom_sizes + [len(clusters_w_atom[0])]
clusters = list(new_clusters)
cluster_sizes = list(new_cluster_sizes)
if n_atoms in cluster_sizes:
break
max_cluster = max(cluster_sizes)
min_cluster = min(cluster_sizes)
return [max_cluster, min_cluster, clusters]
|
[
"def",
"find_clusters",
"(",
"struct",
",",
"connected_list",
")",
":",
"n_atoms",
"=",
"len",
"(",
"struct",
".",
"species",
")",
"if",
"len",
"(",
"np",
".",
"unique",
"(",
"connected_list",
")",
")",
"!=",
"n_atoms",
":",
"return",
"[",
"0",
",",
"1",
",",
"0",
"]",
"if",
"n_atoms",
"==",
"0",
":",
"return",
"[",
"0",
",",
"0",
",",
"0",
"]",
"cluster_sizes",
"=",
"[",
"]",
"clusters",
"=",
"[",
"]",
"for",
"atom",
"in",
"range",
"(",
"n_atoms",
")",
":",
"connected_inds",
"=",
"np",
".",
"where",
"(",
"connected_list",
"==",
"atom",
")",
"[",
"0",
"]",
"atom_cluster",
"=",
"np",
".",
"unique",
"(",
"connected_list",
"[",
"connected_inds",
"]",
")",
"atom_cluster",
"=",
"set",
"(",
"atom_cluster",
")",
"if",
"len",
"(",
"clusters",
")",
"==",
"0",
":",
"new_clusters",
"=",
"[",
"atom_cluster",
"]",
"new_cluster_sizes",
"=",
"[",
"len",
"(",
"atom_cluster",
")",
"]",
"else",
":",
"clusters_w_atom",
"=",
"[",
"atom_cluster",
"]",
"clusters_noatom",
"=",
"[",
"]",
"clusters_noatom_sizes",
"=",
"[",
"]",
"for",
"cluster",
"in",
"clusters",
":",
"if",
"len",
"(",
"cluster",
".",
"intersection",
"(",
"atom_cluster",
")",
")",
">",
"0",
":",
"clusters_w_atom",
".",
"append",
"(",
"cluster",
")",
"else",
":",
"clusters_noatom",
".",
"append",
"(",
"cluster",
")",
"clusters_noatom_sizes",
".",
"append",
"(",
"len",
"(",
"cluster",
")",
")",
"if",
"len",
"(",
"clusters_w_atom",
")",
">",
"1",
":",
"clusters_w_atom",
"=",
"[",
"set",
".",
"union",
"(",
"*",
"clusters_w_atom",
")",
"]",
"new_clusters",
"=",
"clusters_noatom",
"+",
"clusters_w_atom",
"new_cluster_sizes",
"=",
"clusters_noatom_sizes",
"+",
"[",
"len",
"(",
"clusters_w_atom",
"[",
"0",
"]",
")",
"]",
"clusters",
"=",
"list",
"(",
"new_clusters",
")",
"cluster_sizes",
"=",
"list",
"(",
"new_cluster_sizes",
")",
"if",
"n_atoms",
"in",
"cluster_sizes",
":",
"break",
"max_cluster",
"=",
"max",
"(",
"cluster_sizes",
")",
"min_cluster",
"=",
"min",
"(",
"cluster_sizes",
")",
"return",
"[",
"max_cluster",
",",
"min_cluster",
",",
"clusters",
"]"
] | 43.88
| 19.44
|
def __calculate_always_decrease_rw_values(
table_name, read_units, provisioned_reads,
write_units, provisioned_writes):
""" Calculate values for always-decrease-rw-together
This will only return reads and writes decreases if both reads and writes
are lower than the current provisioning
:type table_name: str
:param table_name: Name of the DynamoDB table
:type read_units: int
:param read_units: New read unit provisioning
:type provisioned_reads: int
:param provisioned_reads: Currently provisioned reads
:type write_units: int
:param write_units: New write unit provisioning
:type provisioned_writes: int
:param provisioned_writes: Currently provisioned writes
:returns: (int, int) -- (reads, writes)
"""
if read_units <= provisioned_reads and write_units <= provisioned_writes:
return (read_units, write_units)
if read_units < provisioned_reads:
logger.info(
'{0} - Reads could be decreased, but we are waiting for '
'writes to get lower than the threshold before '
'scaling down'.format(table_name))
read_units = provisioned_reads
elif write_units < provisioned_writes:
logger.info(
'{0} - Writes could be decreased, but we are waiting for '
'reads to get lower than the threshold before '
'scaling down'.format(table_name))
write_units = provisioned_writes
return (read_units, write_units)
|
[
"def",
"__calculate_always_decrease_rw_values",
"(",
"table_name",
",",
"read_units",
",",
"provisioned_reads",
",",
"write_units",
",",
"provisioned_writes",
")",
":",
"if",
"read_units",
"<=",
"provisioned_reads",
"and",
"write_units",
"<=",
"provisioned_writes",
":",
"return",
"(",
"read_units",
",",
"write_units",
")",
"if",
"read_units",
"<",
"provisioned_reads",
":",
"logger",
".",
"info",
"(",
"'{0} - Reads could be decreased, but we are waiting for '",
"'writes to get lower than the threshold before '",
"'scaling down'",
".",
"format",
"(",
"table_name",
")",
")",
"read_units",
"=",
"provisioned_reads",
"elif",
"write_units",
"<",
"provisioned_writes",
":",
"logger",
".",
"info",
"(",
"'{0} - Writes could be decreased, but we are waiting for '",
"'reads to get lower than the threshold before '",
"'scaling down'",
".",
"format",
"(",
"table_name",
")",
")",
"write_units",
"=",
"provisioned_writes",
"return",
"(",
"read_units",
",",
"write_units",
")"
] | 35.756098
| 17
|
def find_key(debug=False, skip=0):
"""
Locate a connected YubiKey. Throws an exception if none is found.
This function is supposed to be possible to extend if any other YubiKeys
appear in the future.
Attributes :
skip -- number of YubiKeys to skip
debug -- True or False
"""
try:
hid_device = YubiKeyHIDDevice(debug, skip)
yk_version = hid_device.status().ykver()
if (2, 1, 4) <= yk_version <= (2, 1, 9):
return YubiKeyNEO_USBHID(debug, skip, hid_device)
if yk_version < (3, 0, 0):
return YubiKeyUSBHID(debug, skip, hid_device)
if yk_version < (4, 0, 0):
return YubiKeyNEO_USBHID(debug, skip, hid_device)
return YubiKey4_USBHID(debug, skip, hid_device)
except YubiKeyUSBHIDError as inst:
if 'No USB YubiKey found' in str(inst):
# generalize this error
raise YubiKeyError('No YubiKey found')
else:
raise
|
[
"def",
"find_key",
"(",
"debug",
"=",
"False",
",",
"skip",
"=",
"0",
")",
":",
"try",
":",
"hid_device",
"=",
"YubiKeyHIDDevice",
"(",
"debug",
",",
"skip",
")",
"yk_version",
"=",
"hid_device",
".",
"status",
"(",
")",
".",
"ykver",
"(",
")",
"if",
"(",
"2",
",",
"1",
",",
"4",
")",
"<=",
"yk_version",
"<=",
"(",
"2",
",",
"1",
",",
"9",
")",
":",
"return",
"YubiKeyNEO_USBHID",
"(",
"debug",
",",
"skip",
",",
"hid_device",
")",
"if",
"yk_version",
"<",
"(",
"3",
",",
"0",
",",
"0",
")",
":",
"return",
"YubiKeyUSBHID",
"(",
"debug",
",",
"skip",
",",
"hid_device",
")",
"if",
"yk_version",
"<",
"(",
"4",
",",
"0",
",",
"0",
")",
":",
"return",
"YubiKeyNEO_USBHID",
"(",
"debug",
",",
"skip",
",",
"hid_device",
")",
"return",
"YubiKey4_USBHID",
"(",
"debug",
",",
"skip",
",",
"hid_device",
")",
"except",
"YubiKeyUSBHIDError",
"as",
"inst",
":",
"if",
"'No USB YubiKey found'",
"in",
"str",
"(",
"inst",
")",
":",
"# generalize this error",
"raise",
"YubiKeyError",
"(",
"'No YubiKey found'",
")",
"else",
":",
"raise"
] | 35.666667
| 15.592593
|
def server_show(endpoint_id, server_id):
"""
Executor for `globus endpoint server show`
"""
client = get_client()
server_doc = client.get_endpoint_server(endpoint_id, server_id)
if not server_doc["uri"]: # GCP endpoint server
fields = (("ID", "id"),)
text_epilog = dedent(
"""
This server is for a Globus Connect Personal installation.
For its connection status, try:
globus endpoint show {}
""".format(
endpoint_id
)
)
else:
def advertised_port_summary(server):
def get_range_summary(start, end):
return (
"unspecified"
if not start and not end
else "unrestricted"
if start == 1024 and end == 65535
else "{}-{}".format(start, end)
)
return "incoming {}, outgoing {}".format(
get_range_summary(
server["incoming_data_port_start"], server["incoming_data_port_end"]
),
get_range_summary(
server["outgoing_data_port_start"], server["outgoing_data_port_end"]
),
)
fields = (
("ID", "id"),
("URI", "uri"),
("Subject", "subject"),
("Data Ports", advertised_port_summary),
)
text_epilog = None
formatted_print(
server_doc,
text_format=FORMAT_TEXT_RECORD,
fields=fields,
text_epilog=text_epilog,
)
|
[
"def",
"server_show",
"(",
"endpoint_id",
",",
"server_id",
")",
":",
"client",
"=",
"get_client",
"(",
")",
"server_doc",
"=",
"client",
".",
"get_endpoint_server",
"(",
"endpoint_id",
",",
"server_id",
")",
"if",
"not",
"server_doc",
"[",
"\"uri\"",
"]",
":",
"# GCP endpoint server",
"fields",
"=",
"(",
"(",
"\"ID\"",
",",
"\"id\"",
")",
",",
")",
"text_epilog",
"=",
"dedent",
"(",
"\"\"\"\n This server is for a Globus Connect Personal installation.\n\n For its connection status, try:\n globus endpoint show {}\n \"\"\"",
".",
"format",
"(",
"endpoint_id",
")",
")",
"else",
":",
"def",
"advertised_port_summary",
"(",
"server",
")",
":",
"def",
"get_range_summary",
"(",
"start",
",",
"end",
")",
":",
"return",
"(",
"\"unspecified\"",
"if",
"not",
"start",
"and",
"not",
"end",
"else",
"\"unrestricted\"",
"if",
"start",
"==",
"1024",
"and",
"end",
"==",
"65535",
"else",
"\"{}-{}\"",
".",
"format",
"(",
"start",
",",
"end",
")",
")",
"return",
"\"incoming {}, outgoing {}\"",
".",
"format",
"(",
"get_range_summary",
"(",
"server",
"[",
"\"incoming_data_port_start\"",
"]",
",",
"server",
"[",
"\"incoming_data_port_end\"",
"]",
")",
",",
"get_range_summary",
"(",
"server",
"[",
"\"outgoing_data_port_start\"",
"]",
",",
"server",
"[",
"\"outgoing_data_port_end\"",
"]",
")",
",",
")",
"fields",
"=",
"(",
"(",
"\"ID\"",
",",
"\"id\"",
")",
",",
"(",
"\"URI\"",
",",
"\"uri\"",
")",
",",
"(",
"\"Subject\"",
",",
"\"subject\"",
")",
",",
"(",
"\"Data Ports\"",
",",
"advertised_port_summary",
")",
",",
")",
"text_epilog",
"=",
"None",
"formatted_print",
"(",
"server_doc",
",",
"text_format",
"=",
"FORMAT_TEXT_RECORD",
",",
"fields",
"=",
"fields",
",",
"text_epilog",
"=",
"text_epilog",
",",
")"
] | 29.092593
| 17.611111
|
def _case_insensitive_rpartition(input_string: str, separator: str) -> typing.Tuple[str, str, str]:
"""Same as str.rpartition(), except that the partitioning is done case insensitive."""
lowered_input_string = input_string.lower()
lowered_separator = separator.lower()
try:
split_index = lowered_input_string.rindex(lowered_separator)
except ValueError:
# Did not find the separator in the input_string.
# Follow https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str
# str.rpartition documentation and return the tuple ("", "", unmodified_input) in this case
return "", "", input_string
else:
split_index_2 = split_index+len(separator)
return input_string[:split_index], input_string[split_index: split_index_2], input_string[split_index_2:]
|
[
"def",
"_case_insensitive_rpartition",
"(",
"input_string",
":",
"str",
",",
"separator",
":",
"str",
")",
"->",
"typing",
".",
"Tuple",
"[",
"str",
",",
"str",
",",
"str",
"]",
":",
"lowered_input_string",
"=",
"input_string",
".",
"lower",
"(",
")",
"lowered_separator",
"=",
"separator",
".",
"lower",
"(",
")",
"try",
":",
"split_index",
"=",
"lowered_input_string",
".",
"rindex",
"(",
"lowered_separator",
")",
"except",
"ValueError",
":",
"# Did not find the separator in the input_string.",
"# Follow https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str",
"# str.rpartition documentation and return the tuple (\"\", \"\", unmodified_input) in this case",
"return",
"\"\"",
",",
"\"\"",
",",
"input_string",
"else",
":",
"split_index_2",
"=",
"split_index",
"+",
"len",
"(",
"separator",
")",
"return",
"input_string",
"[",
":",
"split_index",
"]",
",",
"input_string",
"[",
"split_index",
":",
"split_index_2",
"]",
",",
"input_string",
"[",
"split_index_2",
":",
"]"
] | 62.642857
| 28.785714
|
def get_model_at_related_field(model, attr):
"""
Looks up ``attr`` as a field of ``model`` and returns the related model class. If ``attr`` is
not a relationship field, ``ValueError`` is raised.
"""
field = model._meta.get_field(attr)
if hasattr(field, 'related_model'):
return field.related_model
raise ValueError("{model}.{attr} ({klass}) is not a relationship field.".format(**{
'model': model.__name__,
'attr': attr,
'klass': field.__class__.__name__,
}))
|
[
"def",
"get_model_at_related_field",
"(",
"model",
",",
"attr",
")",
":",
"field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"attr",
")",
"if",
"hasattr",
"(",
"field",
",",
"'related_model'",
")",
":",
"return",
"field",
".",
"related_model",
"raise",
"ValueError",
"(",
"\"{model}.{attr} ({klass}) is not a relationship field.\"",
".",
"format",
"(",
"*",
"*",
"{",
"'model'",
":",
"model",
".",
"__name__",
",",
"'attr'",
":",
"attr",
",",
"'klass'",
":",
"field",
".",
"__class__",
".",
"__name__",
",",
"}",
")",
")"
] | 30.117647
| 20.823529
|
def new_dot(self, vd, parent, seqnum, rock_ridge, log_block_size, xa,
            file_mode):
    # type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, int) -> None
    '''
    Initialize this object as a new 'dot' Directory Record (the record
    whose identifier is the single byte 0x00).

    Parameters:
     vd - The Volume Descriptor this record belongs to.
     parent - The parent directory record.
     seqnum - The sequence number for this record.
     rock_ridge - Rock Ridge version string; falsy to skip Rock Ridge.
     log_block_size - The logical block size to use.
     xa - True when this is an Extended Attribute record.
     file_mode - POSIX file mode for the directory (Rock Ridge only).
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
    # A 'dot' entry is identified by the single NUL byte; it is always a
    # directory, hence the True flag passed to _new.
    self._new(vd, b'\x00', parent, seqnum, True, log_block_size, xa)
    if rock_ridge:
        self._rr_new(rock_ridge, b'', b'', False, False, False, file_mode)
|
[
"def",
"new_dot",
"(",
"self",
",",
"vd",
",",
"parent",
",",
"seqnum",
",",
"rock_ridge",
",",
"log_block_size",
",",
"xa",
",",
"file_mode",
")",
":",
"# type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, int) -> None",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Directory Record already initialized'",
")",
"self",
".",
"_new",
"(",
"vd",
",",
"b'\\x00'",
",",
"parent",
",",
"seqnum",
",",
"True",
",",
"log_block_size",
",",
"xa",
")",
"if",
"rock_ridge",
":",
"self",
".",
"_rr_new",
"(",
"rock_ridge",
",",
"b''",
",",
"b''",
",",
"False",
",",
"False",
",",
"False",
",",
"file_mode",
")"
] | 45.391304
| 26.956522
|
def nelson_aalen_estimator(event, time):
    """Nelson-Aalen estimator of the cumulative hazard function.

    Parameters
    ----------
    event : array-like, shape = (n_samples,)
        Binary event indicators.
    time : array-like, shape = (n_samples,)
        Event/censoring times.

    Returns
    -------
    time : array, shape = (n_times,)
        Unique times.
    cum_hazard : array, shape = (n_times,)
        Cumulative hazard at each unique time point.

    References
    ----------
    .. [1] Nelson, W., "Theory and applications of hazard plotting for censored failure data",
           Technometrics, vol. 14, pp. 945-965, 1972.
    .. [2] Aalen, O. O., "Nonparametric inference for a family of counting processes",
           Annals of Statistics, vol. 6, pp. 701–726, 1978.
    """
    event, time = check_y_survival(event, time)
    check_consistent_length(event, time)
    uniq_times, n_events, n_at_risk = _compute_counts(event, time)
    # Hazard increment at each unique time is d_i / Y_i; the running sum
    # gives the cumulative hazard H(t).
    cum_hazard = numpy.cumsum(n_events / n_at_risk)
    return uniq_times, cum_hazard
|
[
"def",
"nelson_aalen_estimator",
"(",
"event",
",",
"time",
")",
":",
"event",
",",
"time",
"=",
"check_y_survival",
"(",
"event",
",",
"time",
")",
"check_consistent_length",
"(",
"event",
",",
"time",
")",
"uniq_times",
",",
"n_events",
",",
"n_at_risk",
"=",
"_compute_counts",
"(",
"event",
",",
"time",
")",
"y",
"=",
"numpy",
".",
"cumsum",
"(",
"n_events",
"/",
"n_at_risk",
")",
"return",
"uniq_times",
",",
"y"
] | 29.823529
| 20.911765
|
def wait(self):
    """ Block until the pool is fully stopped (its watch greenlet is gone). """
    while self.greenlet_watch:
        # Poll more frequently while a stop is already in progress.
        if self.stopping:
            gevent.sleep(0.1)
        else:
            gevent.sleep(1)
|
[
"def",
"wait",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"not",
"self",
".",
"greenlet_watch",
":",
"break",
"if",
"self",
".",
"stopping",
":",
"gevent",
".",
"sleep",
"(",
"0.1",
")",
"else",
":",
"gevent",
".",
"sleep",
"(",
"1",
")"
] | 23.454545
| 17.818182
|
def get_events(self, event_title, regex=False):
    """
    Search for events with the provided title.

    Args:
        event_title: The title of the event.
        regex: When True, treat ``event_title`` as a regular expression.

    Returns:
        An event JSON object returned from the server with the following:

        {
            "meta": {
                "limit": 20, "next": null, "offset": 0,
                "previous": null, "total_count": 3
            },
            "objects": [{}, {}, etc]
        }

        or None if an error occurred.
    """
    regex_val = 1 if regex else 0
    # Bug fix: the query string previously contained the mojibake
    # character '®' ("®ex=") where the '&regex=' parameter was intended,
    # so the server never received the regex flag.
    r = requests.get('{0}/events/?api_key={1}&username={2}&c-title='
                     '{3}&regex={4}'.format(self.url, self.api_key,
                                            self.username, event_title,
                                            regex_val), verify=self.verify)
    if r.status_code == 200:
        json_obj = json.loads(r.text)
        return json_obj
    else:
        log.error('Non-200 status code from get_event: '
                  '{}'.format(r.status_code))
        return None
|
[
"def",
"get_events",
"(",
"self",
",",
"event_title",
",",
"regex",
"=",
"False",
")",
":",
"regex_val",
"=",
"0",
"if",
"regex",
":",
"regex_val",
"=",
"1",
"r",
"=",
"requests",
".",
"get",
"(",
"'{0}/events/?api_key={1}&username={2}&c-title='",
"'{3}®ex={4}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"self",
".",
"api_key",
",",
"self",
".",
"username",
",",
"event_title",
",",
"regex_val",
")",
",",
"verify",
"=",
"self",
".",
"verify",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"json_obj",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"return",
"json_obj",
"else",
":",
"log",
".",
"error",
"(",
"'Non-200 status code from get_event: '",
"'{}'",
".",
"format",
"(",
"r",
".",
"status_code",
")",
")",
"return",
"None"
] | 37.612903
| 18.129032
|
def remove_node(self, node):
    """Remove a node from the scheduler.

    This should be called either when the node crashed or at shutdown time.
    In the former case any pending items assigned to the node will be
    re-scheduled.

    Called by the hooks:

    - ``DSession.worker_workerfinished``.
    - ``DSession.worker_errordown``.

    Return the item being executed while the node crashed or None if the
    node has no more pending items.
    """
    workload = self.assigned_work.pop(node)
    if not self._pending_of(workload):
        return None
    # The node crashed, identify test that crashed
    # Find the first uncompleted test. The nested for/else construct
    # breaks out of BOTH loops as soon as one is found: the inner
    # `else: continue` runs only when the inner loop finished without
    # a break (i.e. every item in that work unit completed), and the
    # following bare `break` exits the outer loop on a hit.
    for work_unit in workload.values():
        for nodeid, completed in work_unit.items():
            if not completed:
                crashitem = nodeid
                break
        else:
            continue
        break
    else:
        # _pending_of() reported pending work but none was found:
        # internal bookkeeping is inconsistent.
        raise RuntimeError(
            "Unable to identify crashitem on a workload with pending items"
        )
    # Made uncompleted work unit available again
    self.workqueue.update(workload)
    for node in self.assigned_work:
        self._reschedule(node)
    return crashitem
|
[
"def",
"remove_node",
"(",
"self",
",",
"node",
")",
":",
"workload",
"=",
"self",
".",
"assigned_work",
".",
"pop",
"(",
"node",
")",
"if",
"not",
"self",
".",
"_pending_of",
"(",
"workload",
")",
":",
"return",
"None",
"# The node crashed, identify test that crashed",
"for",
"work_unit",
"in",
"workload",
".",
"values",
"(",
")",
":",
"for",
"nodeid",
",",
"completed",
"in",
"work_unit",
".",
"items",
"(",
")",
":",
"if",
"not",
"completed",
":",
"crashitem",
"=",
"nodeid",
"break",
"else",
":",
"continue",
"break",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unable to identify crashitem on a workload with pending items\"",
")",
"# Made uncompleted work unit available again",
"self",
".",
"workqueue",
".",
"update",
"(",
"workload",
")",
"for",
"node",
"in",
"self",
".",
"assigned_work",
":",
"self",
".",
"_reschedule",
"(",
"node",
")",
"return",
"crashitem"
] | 30.65
| 18.975
|
def contains_empty(features):
    """Check features data are not empty

    :param features: The features data to check.
    :type features: list of numpy arrays.
    :return: True if the list itself or any contained array is empty,
        False otherwise.
    """
    if not features:
        return True
    return any(feature.shape[0] == 0 for feature in features)
|
[
"def",
"contains_empty",
"(",
"features",
")",
":",
"if",
"not",
"features",
":",
"return",
"True",
"for",
"feature",
"in",
"features",
":",
"if",
"feature",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"return",
"True",
"return",
"False"
] | 24.2
| 17.333333
|
def games(self, platform):
    """ Return the list of :class:`Game` objects for the given platform
    *alias* (usually the game name with "-" instead of white spaces).
    """
    raw = self.db.get_data(self.games_path, platform=platform.lower())
    entries = (raw.get('Data') or {}).get('Game') or {}
    return [Game(self.db.game, **entry) for entry in entries]
|
[
"def",
"games",
"(",
"self",
",",
"platform",
")",
":",
"platform",
"=",
"platform",
".",
"lower",
"(",
")",
"data_list",
"=",
"self",
".",
"db",
".",
"get_data",
"(",
"self",
".",
"games_path",
",",
"platform",
"=",
"platform",
")",
"data_list",
"=",
"data_list",
".",
"get",
"(",
"'Data'",
")",
"or",
"{",
"}",
"return",
"[",
"Game",
"(",
"self",
".",
"db",
".",
"game",
",",
"*",
"*",
"i",
")",
"for",
"i",
"in",
"data_list",
".",
"get",
"(",
"'Game'",
")",
"or",
"{",
"}",
"]"
] | 51.125
| 14.875
|
def noNsProp(self, name):
    """Look up the value of the attribute ``name`` on this node,
    accepting only attributes with no namespace. Entity substitution
    is performed, and unless DTD use has been turned off the DTD
    attribute declarations are consulted for #FIXED or default
    declaration values. """
    return libxml2mod.xmlGetNoNsProp(self._o, name)
|
[
"def",
"noNsProp",
"(",
"self",
",",
"name",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlGetNoNsProp",
"(",
"self",
".",
"_o",
",",
"name",
")",
"return",
"ret"
] | 52.888889
| 17.111111
|
def dereference_object(object_type, object_uuid, status):
    """Show linked persistent identifier(s) for the given object."""
    from .models import PersistentIdentifier
    query = PersistentIdentifier.query.filter_by(object_type=object_type,
                                                 object_uuid=object_uuid)
    # Optionally narrow the result to a single PID status.
    if status:
        query = query.filter_by(status=status)
    line = '{0.pid_type} {0.pid_value} {0.pid_provider}'
    for found_pid in query.all():
        click.echo(line.format(found_pid))
|
[
"def",
"dereference_object",
"(",
"object_type",
",",
"object_uuid",
",",
"status",
")",
":",
"from",
".",
"models",
"import",
"PersistentIdentifier",
"pids",
"=",
"PersistentIdentifier",
".",
"query",
".",
"filter_by",
"(",
"object_type",
"=",
"object_type",
",",
"object_uuid",
"=",
"object_uuid",
")",
"if",
"status",
":",
"pids",
"=",
"pids",
".",
"filter_by",
"(",
"status",
"=",
"status",
")",
"for",
"found_pid",
"in",
"pids",
".",
"all",
"(",
")",
":",
"click",
".",
"echo",
"(",
"'{0.pid_type} {0.pid_value} {0.pid_provider}'",
".",
"format",
"(",
"found_pid",
")",
")"
] | 32.142857
| 20.357143
|
def _set_error_response_with_body(self, bucket_name=None):
    """
    Populate the error-response fields from the XML response body.

    Raises :exc:`ValueError` if invoked on a zero length body and
    :exc:`InvalidXMLError` when the body cannot be parsed as XML.

    :param bucket_name: Optional bucket name resource at which error
        occurred.
    """
    if len(self._response.data) == 0:
        raise ValueError('response data has no body.')
    try:
        root = cElementTree.fromstring(self._response.data)
    except _ETREE_EXCEPTIONS as error:
        raise InvalidXMLError('"Error" XML is not parsable. '
                              'Message: {0}'.format(error.message))
    # XML tag -> attribute on self holding that tag's text. Unknown
    # tags are ignored, matching the original elif chain.
    tag_to_attr = {
        'Code': 'code',
        'BucketName': 'bucket_name',
        'Key': 'object_name',
        'Message': 'message',
        'RequestId': 'request_id',
        'HostId': 'host_id',
    }
    for attribute in root:
        target = tag_to_attr.get(attribute.tag)
        if target is not None:
            setattr(self, target, attribute.text)
    # Set amz headers.
    self._set_amz_headers()
|
[
"def",
"_set_error_response_with_body",
"(",
"self",
",",
"bucket_name",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"_response",
".",
"data",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'response data has no body.'",
")",
"try",
":",
"root",
"=",
"cElementTree",
".",
"fromstring",
"(",
"self",
".",
"_response",
".",
"data",
")",
"except",
"_ETREE_EXCEPTIONS",
"as",
"error",
":",
"raise",
"InvalidXMLError",
"(",
"'\"Error\" XML is not parsable. '",
"'Message: {0}'",
".",
"format",
"(",
"error",
".",
"message",
")",
")",
"for",
"attribute",
"in",
"root",
":",
"if",
"attribute",
".",
"tag",
"==",
"'Code'",
":",
"self",
".",
"code",
"=",
"attribute",
".",
"text",
"elif",
"attribute",
".",
"tag",
"==",
"'BucketName'",
":",
"self",
".",
"bucket_name",
"=",
"attribute",
".",
"text",
"elif",
"attribute",
".",
"tag",
"==",
"'Key'",
":",
"self",
".",
"object_name",
"=",
"attribute",
".",
"text",
"elif",
"attribute",
".",
"tag",
"==",
"'Message'",
":",
"self",
".",
"message",
"=",
"attribute",
".",
"text",
"elif",
"attribute",
".",
"tag",
"==",
"'RequestId'",
":",
"self",
".",
"request_id",
"=",
"attribute",
".",
"text",
"elif",
"attribute",
".",
"tag",
"==",
"'HostId'",
":",
"self",
".",
"host_id",
"=",
"attribute",
".",
"text",
"# Set amz headers.",
"self",
".",
"_set_amz_headers",
"(",
")"
] | 43.03125
| 13.71875
|
def to_python(self, value, resource):
    """Recursively convert a dict (or list) into ad-hoc Python objects
    whose attributes mirror the dict keys, with ``self.aliases``
    applied to each key."""
    if isinstance(value, dict):
        attrs = {}
        for key, item in six.iteritems(value):
            attr_name = self.aliases.get(key, key)
            if isinstance(item, (dict, list)):
                attrs[attr_name] = self.to_python(item, resource)
            else:
                attrs[attr_name] = item
        return type(self.class_name, (), attrs)
    if isinstance(value, list):
        converted = []
        for item in value:
            if isinstance(item, (dict, list)):
                converted.append(self.to_python(item, resource))
            else:
                converted.append(item)
        return converted
    return value
|
[
"def",
"to_python",
"(",
"self",
",",
"value",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"d",
"=",
"{",
"self",
".",
"aliases",
".",
"get",
"(",
"k",
",",
"k",
")",
":",
"self",
".",
"to_python",
"(",
"v",
",",
"resource",
")",
"if",
"isinstance",
"(",
"v",
",",
"(",
"dict",
",",
"list",
")",
")",
"else",
"v",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"value",
")",
"}",
"return",
"type",
"(",
"self",
".",
"class_name",
",",
"(",
")",
",",
"d",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"[",
"self",
".",
"to_python",
"(",
"x",
",",
"resource",
")",
"if",
"isinstance",
"(",
"x",
",",
"(",
"dict",
",",
"list",
")",
")",
"else",
"x",
"for",
"x",
"in",
"value",
"]",
"else",
":",
"return",
"value"
] | 39.846154
| 21.923077
|
def suggestTitle(self, title):
    """Propose *title* for the entry.

    The suggestion is applied only when the title has not been manually
    edited or the title field is currently empty; otherwise it is
    ignored."""
    manually_edited = self._title_changed and bool(str(self.wtitle.text()))
    if not manually_edited:
        self.wtitle.setText(title)
        self._title_changed = False
|
[
"def",
"suggestTitle",
"(",
"self",
",",
"title",
")",
":",
"if",
"not",
"self",
".",
"_title_changed",
"or",
"not",
"str",
"(",
"self",
".",
"wtitle",
".",
"text",
"(",
")",
")",
":",
"self",
".",
"wtitle",
".",
"setText",
"(",
"title",
")",
"self",
".",
"_title_changed",
"=",
"False"
] | 47.166667
| 6.5
|
def create_tenant(
    self,
    parent,
    tenant,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Creates a new tenant entity.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>> client = talent_v4beta1.TenantServiceClient()
        >>> parent = client.project_path('[PROJECT]')
        >>> tenant = {}
        >>> response = client.create_tenant(parent, tenant)

    Args:
        parent (str): Required. Resource name of the project under which the
            tenant is created, in the form "projects/{project\_id}", e.g.
            "projects/api-test-project".
        tenant (Union[dict, ~google.cloud.talent_v4beta1.types.Tenant]):
            Required. The tenant to be created. A dict must have the same
            form as the :class:`~google.cloud.talent_v4beta1.types.Tenant`
            protobuf message.
        retry (Optional[google.api_core.retry.Retry]): Retry policy for the
            request; ``None`` disables retries.
        timeout (Optional[float]): Timeout in seconds; applies per attempt
            when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily build and memoize the transport wrapper, seeding it with the
    # retry/timeout defaults from the client's method configuration.
    if "create_tenant" not in self._inner_api_calls:
        method_config = self._method_configs["CreateTenant"]
        self._inner_api_calls["create_tenant"] = (
            google.api_core.gapic_v1.method.wrap_method(
                self.transport.create_tenant,
                default_retry=method_config.retry,
                default_timeout=method_config.timeout,
                client_info=self._client_info,
            )
        )
    request = tenant_service_pb2.CreateTenantRequest(parent=parent, tenant=tenant)
    return self._inner_api_calls["create_tenant"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
|
[
"def",
"create_tenant",
"(",
"self",
",",
"parent",
",",
"tenant",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"create_tenant\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"create_tenant\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"create_tenant",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"CreateTenant\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"CreateTenant\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"tenant_service_pb2",
".",
"CreateTenantRequest",
"(",
"parent",
"=",
"parent",
",",
"tenant",
"=",
"tenant",
")",
"return",
"self",
".",
"_inner_api_calls",
"[",
"\"create_tenant\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
] | 40.428571
| 24.657143
|
def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, keep_range=False,
                         include_z_start=False):
    """
    ray-tracing through parts of the coin, starting with (x,y) and angles (alpha_x, alpha_y) at redshift z_start
    and then backwards to redshift z_stop

    :param x: co-moving position [Mpc]
    :param y: co-moving position [Mpc]
    :param alpha_x: ray angle at z_start [arcsec]
    :param alpha_y: ray angle at z_start [arcsec]
    :param z_start: redshift of start of computation
    :param z_stop: redshift where output is computed
    :param kwargs_lens: lens model keyword argument list
    :param keep_range: bool, if True, only computes the angular diameter ratio between the first and last step once
        and caches it on the instance. NOTE(review): the cached values are never invalidated, so this assumes
        z_start/z_stop and the first lens plane are identical across calls -- confirm before reusing with
        different redshifts.
    :param include_z_start: bool, if True a deflector located exactly at z_start is included in the ray tracing
        (the exact cut is delegated to self._start_condition)
    :return: co-moving position and angles at redshift z_stop
    """
    z_lens_last = z_start
    first_deflector = True
    # Walk the lens planes in ascending redshift order, propagating the
    # ray plane-to-plane and adding each plane's deflection.
    for i, idex in enumerate(self._sorted_redshift_index):
        z_lens = self._redshift_list[idex]
        if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:
            #if z_lens > z_start and z_lens <= z_stop:
            if first_deflector is True:
                # Transverse distance from z_start to the first lens plane:
                # either cached on the instance (keep_range) or recomputed.
                if keep_range is True:
                    if not hasattr(self, '_cosmo_bkg_T_start'):
                        self._cosmo_bkg_T_start = self._cosmo_bkg.T_xy(z_start, z_lens)
                    delta_T = self._cosmo_bkg_T_start
                else:
                    delta_T = self._cosmo_bkg.T_xy(z_start, z_lens)
                first_deflector = False
            else:
                # Precomputed plane-to-plane transverse distances.
                delta_T = self._T_ij_list[i]
            x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
            alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
            z_lens_last = z_lens
    # Final free propagation from the last crossed lens plane to z_stop.
    if keep_range is True:
        if not hasattr(self, '_cosmo_bkg_T_stop'):
            self._cosmo_bkg_T_stop = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
        delta_T = self._cosmo_bkg_T_stop
    else:
        delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
    x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
    return x, y, alpha_x, alpha_y
|
[
"def",
"ray_shooting_partial",
"(",
"self",
",",
"x",
",",
"y",
",",
"alpha_x",
",",
"alpha_y",
",",
"z_start",
",",
"z_stop",
",",
"kwargs_lens",
",",
"keep_range",
"=",
"False",
",",
"include_z_start",
"=",
"False",
")",
":",
"z_lens_last",
"=",
"z_start",
"first_deflector",
"=",
"True",
"for",
"i",
",",
"idex",
"in",
"enumerate",
"(",
"self",
".",
"_sorted_redshift_index",
")",
":",
"z_lens",
"=",
"self",
".",
"_redshift_list",
"[",
"idex",
"]",
"if",
"self",
".",
"_start_condition",
"(",
"include_z_start",
",",
"z_lens",
",",
"z_start",
")",
"and",
"z_lens",
"<=",
"z_stop",
":",
"#if z_lens > z_start and z_lens <= z_stop:",
"if",
"first_deflector",
"is",
"True",
":",
"if",
"keep_range",
"is",
"True",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_cosmo_bkg_T_start'",
")",
":",
"self",
".",
"_cosmo_bkg_T_start",
"=",
"self",
".",
"_cosmo_bkg",
".",
"T_xy",
"(",
"z_start",
",",
"z_lens",
")",
"delta_T",
"=",
"self",
".",
"_cosmo_bkg_T_start",
"else",
":",
"delta_T",
"=",
"self",
".",
"_cosmo_bkg",
".",
"T_xy",
"(",
"z_start",
",",
"z_lens",
")",
"first_deflector",
"=",
"False",
"else",
":",
"delta_T",
"=",
"self",
".",
"_T_ij_list",
"[",
"i",
"]",
"x",
",",
"y",
"=",
"self",
".",
"_ray_step",
"(",
"x",
",",
"y",
",",
"alpha_x",
",",
"alpha_y",
",",
"delta_T",
")",
"alpha_x",
",",
"alpha_y",
"=",
"self",
".",
"_add_deflection",
"(",
"x",
",",
"y",
",",
"alpha_x",
",",
"alpha_y",
",",
"kwargs_lens",
",",
"i",
")",
"z_lens_last",
"=",
"z_lens",
"if",
"keep_range",
"is",
"True",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_cosmo_bkg_T_stop'",
")",
":",
"self",
".",
"_cosmo_bkg_T_stop",
"=",
"self",
".",
"_cosmo_bkg",
".",
"T_xy",
"(",
"z_lens_last",
",",
"z_stop",
")",
"delta_T",
"=",
"self",
".",
"_cosmo_bkg_T_stop",
"else",
":",
"delta_T",
"=",
"self",
".",
"_cosmo_bkg",
".",
"T_xy",
"(",
"z_lens_last",
",",
"z_stop",
")",
"x",
",",
"y",
"=",
"self",
".",
"_ray_step",
"(",
"x",
",",
"y",
",",
"alpha_x",
",",
"alpha_y",
",",
"delta_T",
")",
"return",
"x",
",",
"y",
",",
"alpha_x",
",",
"alpha_y"
] | 53.255814
| 21.069767
|
def find_nearest_dist_node(self, point, distance, retdistance = False):
    """!
    @brief Find the nearest neighbor within radius ``distance`` of ``point``.

    @param[in] point (list): Coordinates around which neighbors are searched.
    @param[in] distance (double): Maximum distance where neighbors are searched.
    @param[in] retdistance (bool): If True - returns the neighbor together with its distance, otherwise only the neighbor is returned.

    @return (node|list) Nearest neighbor if 'retdistance' is False and list with two elements [distance, node] if 'retdistance' is True;
             None when no neighbor lies within the radius.
    """
    candidates = self.find_nearest_dist_nodes(point, distance)
    if candidates == []:
        return None
    nearest = min(candidates, key=lambda pair: pair[0])
    return nearest if retdistance is True else nearest[1]
|
[
"def",
"find_nearest_dist_node",
"(",
"self",
",",
"point",
",",
"distance",
",",
"retdistance",
"=",
"False",
")",
":",
"best_nodes",
"=",
"self",
".",
"find_nearest_dist_nodes",
"(",
"point",
",",
"distance",
")",
"if",
"best_nodes",
"==",
"[",
"]",
":",
"return",
"None",
"nearest",
"=",
"min",
"(",
"best_nodes",
",",
"key",
"=",
"lambda",
"item",
":",
"item",
"[",
"0",
"]",
")",
"if",
"retdistance",
"is",
"True",
":",
"return",
"nearest",
"else",
":",
"return",
"nearest",
"[",
"1",
"]"
] | 43.916667
| 31.041667
|
def _align_heavy_atoms(mol1, mol2, vmol1, vmol2, ilabel1, ilabel2,
                       eq_atoms):
    """
    Align the label of topologically identical atoms of second molecule
    towards first molecule

    Args:
        mol1: First molecule. OpenBabel OBMol object
        mol2: Second molecule. OpenBabel OBMol object
        vmol1: First virtual molecule constructed by centroids. OpenBabel
            OBMol object
        vmol2: Second virtual molecule constructed by centroids. OpenBabel
            OBMol object
        ilabel1: inchi label map of the first molecule
        ilabel2: inchi label map of the second molecule
        eq_atoms: equivalent atom labels

    Return:
        corrected inchi labels of heavy atoms of the second molecule
    """
    nvirtual = vmol1.NumAtoms()
    nheavy = len(ilabel1)
    for i in ilabel2:  # add all heavy atoms
        a1 = vmol1.NewAtom()
        a1.SetAtomicNum(1)
        a1.SetVector(0.0, 0.0, 0.0)  # useless, just to pair with vmol2
        oa2 = mol2.GetAtom(i)
        a2 = vmol2.NewAtom()
        a2.SetAtomicNum(1)
        # align using the virtual atoms, these atoms are not
        # used to align, but match by positions
        a2.SetVector(oa2.GetVector())
    aligner = ob.OBAlign(False, False)
    aligner.SetRefMol(vmol1)
    aligner.SetTargetMol(vmol2)
    aligner.Align()
    aligner.UpdateCoords(vmol2)
    # Rebuild mol1's heavy atoms (in inchi-label order) as the reference
    # geometry for the distance matching below.
    canon_mol1 = ob.OBMol()
    for i in ilabel1:
        oa1 = mol1.GetAtom(i)
        a1 = canon_mol1.NewAtom()
        a1.SetAtomicNum(oa1.GetAtomicNum())
        a1.SetVector(oa1.GetVector())
    # Extract mol2's heavy atoms out of the aligned virtual molecule; they
    # occupy indices nvirtual+1 .. nvirtual+nheavy (OpenBabel is 1-based).
    aligned_mol2 = ob.OBMol()
    for i in range(nvirtual + 1, nvirtual + nheavy + 1):
        oa2 = vmol2.GetAtom(i)
        a2 = aligned_mol2.NewAtom()
        a2.SetAtomicNum(oa2.GetAtomicNum())
        a2.SetVector(oa2.GetVector())
    canon_label2 = list(range(1, nheavy+1))
    # Mark symmetry-equivalent atoms as unresolved (-1); their labels are
    # re-assigned below by nearest-distance matching.
    for symm in eq_atoms:
        for i in symm:
            canon_label2[i-1] = -1
    for symm in eq_atoms:
        candidates1 = list(symm)
        candidates2 = list(symm)
        for c2 in candidates2:
            distance = 99999.0
            canon_idx = candidates1[0]
            a2 = aligned_mol2.GetAtom(c2)
            # Greedily pick the closest still-unassigned reference atom.
            for c1 in candidates1:
                a1 = canon_mol1.GetAtom(c1)
                d = a1.GetDistance(a2)
                if d < distance:
                    distance = d
                    canon_idx = c1
            canon_label2[c2-1] = canon_idx
            # Each reference atom may be matched only once.
            candidates1.remove(canon_idx)
    # Sort mol2's original inchi labels by their corrected canonical label
    # so they line up with mol1's ordering.
    canon_inchi_orig_map2 = [(canon, inchi, orig)
                             for canon, inchi, orig in
                             zip(canon_label2, list(range(1, nheavy + 1)),
                                 ilabel2)]
    canon_inchi_orig_map2.sort(key=lambda m: m[0])
    heavy_atom_indices2 = tuple([x[2] for x in canon_inchi_orig_map2])
    return heavy_atom_indices2
|
[
"def",
"_align_heavy_atoms",
"(",
"mol1",
",",
"mol2",
",",
"vmol1",
",",
"vmol2",
",",
"ilabel1",
",",
"ilabel2",
",",
"eq_atoms",
")",
":",
"nvirtual",
"=",
"vmol1",
".",
"NumAtoms",
"(",
")",
"nheavy",
"=",
"len",
"(",
"ilabel1",
")",
"for",
"i",
"in",
"ilabel2",
":",
"# add all heavy atoms",
"a1",
"=",
"vmol1",
".",
"NewAtom",
"(",
")",
"a1",
".",
"SetAtomicNum",
"(",
"1",
")",
"a1",
".",
"SetVector",
"(",
"0.0",
",",
"0.0",
",",
"0.0",
")",
"# useless, just to pair with vmol2",
"oa2",
"=",
"mol2",
".",
"GetAtom",
"(",
"i",
")",
"a2",
"=",
"vmol2",
".",
"NewAtom",
"(",
")",
"a2",
".",
"SetAtomicNum",
"(",
"1",
")",
"# align using the virtual atoms, these atoms are not",
"# used to align, but match by positions",
"a2",
".",
"SetVector",
"(",
"oa2",
".",
"GetVector",
"(",
")",
")",
"aligner",
"=",
"ob",
".",
"OBAlign",
"(",
"False",
",",
"False",
")",
"aligner",
".",
"SetRefMol",
"(",
"vmol1",
")",
"aligner",
".",
"SetTargetMol",
"(",
"vmol2",
")",
"aligner",
".",
"Align",
"(",
")",
"aligner",
".",
"UpdateCoords",
"(",
"vmol2",
")",
"canon_mol1",
"=",
"ob",
".",
"OBMol",
"(",
")",
"for",
"i",
"in",
"ilabel1",
":",
"oa1",
"=",
"mol1",
".",
"GetAtom",
"(",
"i",
")",
"a1",
"=",
"canon_mol1",
".",
"NewAtom",
"(",
")",
"a1",
".",
"SetAtomicNum",
"(",
"oa1",
".",
"GetAtomicNum",
"(",
")",
")",
"a1",
".",
"SetVector",
"(",
"oa1",
".",
"GetVector",
"(",
")",
")",
"aligned_mol2",
"=",
"ob",
".",
"OBMol",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"nvirtual",
"+",
"1",
",",
"nvirtual",
"+",
"nheavy",
"+",
"1",
")",
":",
"oa2",
"=",
"vmol2",
".",
"GetAtom",
"(",
"i",
")",
"a2",
"=",
"aligned_mol2",
".",
"NewAtom",
"(",
")",
"a2",
".",
"SetAtomicNum",
"(",
"oa2",
".",
"GetAtomicNum",
"(",
")",
")",
"a2",
".",
"SetVector",
"(",
"oa2",
".",
"GetVector",
"(",
")",
")",
"canon_label2",
"=",
"list",
"(",
"range",
"(",
"1",
",",
"nheavy",
"+",
"1",
")",
")",
"for",
"symm",
"in",
"eq_atoms",
":",
"for",
"i",
"in",
"symm",
":",
"canon_label2",
"[",
"i",
"-",
"1",
"]",
"=",
"-",
"1",
"for",
"symm",
"in",
"eq_atoms",
":",
"candidates1",
"=",
"list",
"(",
"symm",
")",
"candidates2",
"=",
"list",
"(",
"symm",
")",
"for",
"c2",
"in",
"candidates2",
":",
"distance",
"=",
"99999.0",
"canon_idx",
"=",
"candidates1",
"[",
"0",
"]",
"a2",
"=",
"aligned_mol2",
".",
"GetAtom",
"(",
"c2",
")",
"for",
"c1",
"in",
"candidates1",
":",
"a1",
"=",
"canon_mol1",
".",
"GetAtom",
"(",
"c1",
")",
"d",
"=",
"a1",
".",
"GetDistance",
"(",
"a2",
")",
"if",
"d",
"<",
"distance",
":",
"distance",
"=",
"d",
"canon_idx",
"=",
"c1",
"canon_label2",
"[",
"c2",
"-",
"1",
"]",
"=",
"canon_idx",
"candidates1",
".",
"remove",
"(",
"canon_idx",
")",
"canon_inchi_orig_map2",
"=",
"[",
"(",
"canon",
",",
"inchi",
",",
"orig",
")",
"for",
"canon",
",",
"inchi",
",",
"orig",
"in",
"zip",
"(",
"canon_label2",
",",
"list",
"(",
"range",
"(",
"1",
",",
"nheavy",
"+",
"1",
")",
")",
",",
"ilabel2",
")",
"]",
"canon_inchi_orig_map2",
".",
"sort",
"(",
"key",
"=",
"lambda",
"m",
":",
"m",
"[",
"0",
"]",
")",
"heavy_atom_indices2",
"=",
"tuple",
"(",
"[",
"x",
"[",
"2",
"]",
"for",
"x",
"in",
"canon_inchi_orig_map2",
"]",
")",
"return",
"heavy_atom_indices2"
] | 37.439024
| 14.414634
|
def jrepr(value):
    '''customized `repr()`: objects whose class only inherits the default
    ``object.__repr__`` are shown as ``object <ClassName>`` instead of the
    address-bearing default.'''
    if value is None:
        return repr(value)
    cls = type(value)
    if cls.__repr__ is object.__repr__:
        return 'object ' + cls.__name__
    return repr(value)
|
[
"def",
"jrepr",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"repr",
"(",
"value",
")",
"t",
"=",
"type",
"(",
"value",
")",
"if",
"t",
".",
"__repr__",
"is",
"not",
"object",
".",
"__repr__",
":",
"return",
"repr",
"(",
"value",
")",
"return",
"'object '",
"+",
"t",
".",
"__name__"
] | 26.625
| 13.625
|
def create_todo_list(self, project_id, milestone_id=None, private=None,
        tracked=False, name=None, description=None, template_id=None):
    """
    This will create a new, empty list. You can create the list
    explicitly, or by giving it a list template id to base the new list
    off of.

    Bug fixes: every ``ET.SubElement`` call was missing its mandatory
    parent element (``SubElement(parent, tag)``), so this method always
    raised ``TypeError``; the sub-elements are now attached to ``req``.
    The ``description`` element is also emitted only when a description
    is supplied, so the request no longer carries the literal string
    "None".
    """
    path = '/projects/%u/todos/create_list' % project_id
    req = ET.Element('request')
    if milestone_id is not None:
        ET.SubElement(req, 'milestone-id').text = str(milestone_id)
    if private is not None:
        ET.SubElement(req, 'private').text = str(bool(private)).lower()
    ET.SubElement(req, 'tracked').text = str(bool(tracked)).lower()
    if name is not None:
        ET.SubElement(req, 'name').text = str(name)
    if description is not None:
        ET.SubElement(req, 'description').text = str(description)
    if template_id is not None:
        ET.SubElement(req, 'use-template').text = 'true'
        ET.SubElement(req, 'template-id').text = str(int(template_id))
    return self._request(path, req)
|
[
"def",
"create_todo_list",
"(",
"self",
",",
"project_id",
",",
"milestone_id",
"=",
"None",
",",
"private",
"=",
"None",
",",
"tracked",
"=",
"False",
",",
"name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"template_id",
"=",
"None",
")",
":",
"path",
"=",
"'/projects/%u/todos/create_list'",
"%",
"project_id",
"req",
"=",
"ET",
".",
"Element",
"(",
"'request'",
")",
"if",
"milestone_id",
"is",
"not",
"None",
":",
"ET",
".",
"SubElement",
"(",
"'milestone-id'",
")",
".",
"text",
"=",
"str",
"(",
"milestone_id",
")",
"if",
"private",
"is",
"not",
"None",
":",
"ET",
".",
"SubElement",
"(",
"'private'",
")",
".",
"text",
"=",
"str",
"(",
"bool",
"(",
"private",
")",
")",
".",
"lower",
"(",
")",
"ET",
".",
"SubElement",
"(",
"'tracked'",
")",
".",
"text",
"=",
"str",
"(",
"bool",
"(",
"tracked",
")",
")",
".",
"lower",
"(",
")",
"if",
"name",
"is",
"not",
"None",
":",
"ET",
".",
"SubElement",
"(",
"'name'",
")",
".",
"text",
"=",
"str",
"(",
"name",
")",
"ET",
".",
"SubElement",
"(",
"'description'",
")",
".",
"text",
"=",
"str",
"(",
"description",
")",
"if",
"template_id",
"is",
"not",
"None",
":",
"ET",
".",
"SubElement",
"(",
"'use-template'",
")",
".",
"text",
"=",
"'true'",
"ET",
".",
"SubElement",
"(",
"'template-id'",
")",
".",
"text",
"=",
"str",
"(",
"int",
"(",
"template_id",
")",
")",
"return",
"self",
".",
"_request",
"(",
"path",
",",
"req",
")"
] | 48.761905
| 17.333333
|
def _verify_create_args(module_name, class_name, static):
    """ Verifies a subset of the arguments to create().

    Raises:
        InvalidServiceConfiguration: when no module is given, or a
            non-static service omits its class.
    """
    # Every service configuration must name a module.
    if module_name is None:
        raise InvalidServiceConfiguration(
            'Service configurations must define a module'
        )
    # Only static services may omit the class.
    if class_name is None and not static:
        msg = ('Non-static service configurations must define a class: '
               'module is %s' % module_name)
        raise InvalidServiceConfiguration(msg)
|
[
"def",
"_verify_create_args",
"(",
"module_name",
",",
"class_name",
",",
"static",
")",
":",
"# Verify module name is provided",
"if",
"module_name",
"is",
"None",
":",
"raise",
"InvalidServiceConfiguration",
"(",
"'Service configurations must define a module'",
")",
"# Non-static services must define a class",
"if",
"not",
"static",
"and",
"class_name",
"is",
"None",
":",
"tmpl0",
"=",
"'Non-static service configurations must define a class: '",
"tmpl1",
"=",
"'module is %s'",
"raise",
"InvalidServiceConfiguration",
"(",
"(",
"tmpl0",
"+",
"tmpl1",
")",
"%",
"module_name",
")"
] | 42.076923
| 15.769231
|
def get_key(cls, k, default=None):
    """Look up a value previously stored via ``set_key``.

    The key is namespaced with the class name before the session lookup;
    ``default`` is returned when the namespaced key is absent.
    """
    namespaced = "%s__%s" % (cls.__name__, k)
    if namespaced not in session:
        return default
    return session[namespaced]
|
[
"def",
"get_key",
"(",
"cls",
",",
"k",
",",
"default",
"=",
"None",
")",
":",
"k",
"=",
"cls",
".",
"__name__",
"+",
"\"__\"",
"+",
"k",
"if",
"k",
"in",
"session",
":",
"return",
"session",
"[",
"k",
"]",
"else",
":",
"return",
"default"
] | 27.25
| 9.875
|
def get_modules(folder):
    """Find all valid modules in the given folder, which must be
    in the same directory as this loader.py module. A valid module
    has a .py extension, and is importable.
    @param folder: subpackage directory name to scan
    @return: all loaded valid modules
    @rtype: iterator of module
    """
    if is_frozen():
        # find modules in library.zip filename
        # (frozen build: sources are bundled in a zip two levels above this file)
        zipname = os.path.dirname(os.path.dirname(__file__))
        parentmodule = os.path.basename(os.path.dirname(__file__))
        with zipfile.ZipFile(zipname, 'r') as f:
            prefix = "%s/%s/" % (parentmodule, folder)
            # strip the package prefix and file extension; skip __init__ entries
            modnames = [os.path.splitext(n[len(prefix):])[0]
                        for n in f.namelist()
                        if n.startswith(prefix) and "__init__" not in n]
    else:
        dirname = os.path.join(os.path.dirname(__file__), folder)
        modnames = get_importable_modules(dirname)
    for modname in modnames:
        try:
            # import relative to the parent package of this loader module
            name ="..%s.%s" % (folder, modname)
            yield importlib.import_module(name, __name__)
        except ImportError as msg:
            # a broken module is reported but does not abort the iteration
            out.error("could not load module %s: %s" % (modname, msg))
|
[
"def",
"get_modules",
"(",
"folder",
")",
":",
"if",
"is_frozen",
"(",
")",
":",
"# find modules in library.zip filename",
"zipname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"parentmodule",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zipname",
",",
"'r'",
")",
"as",
"f",
":",
"prefix",
"=",
"\"%s/%s/\"",
"%",
"(",
"parentmodule",
",",
"folder",
")",
"modnames",
"=",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"n",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
")",
"[",
"0",
"]",
"for",
"n",
"in",
"f",
".",
"namelist",
"(",
")",
"if",
"n",
".",
"startswith",
"(",
"prefix",
")",
"and",
"\"__init__\"",
"not",
"in",
"n",
"]",
"else",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"folder",
")",
"modnames",
"=",
"get_importable_modules",
"(",
"dirname",
")",
"for",
"modname",
"in",
"modnames",
":",
"try",
":",
"name",
"=",
"\"..%s.%s\"",
"%",
"(",
"folder",
",",
"modname",
")",
"yield",
"importlib",
".",
"import_module",
"(",
"name",
",",
"__name__",
")",
"except",
"ImportError",
"as",
"msg",
":",
"out",
".",
"error",
"(",
"\"could not load module %s: %s\"",
"%",
"(",
"modname",
",",
"msg",
")",
")"
] | 43.8
| 14.64
|
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta):
    """
    Read the data matrix out of an open HDF5 dataset, subsetting if needed.

    Input:
        -data_dset (h5py dset): HDF5 dataset from which to read data_df
        -ridx (list): row indexes to keep (all of them when not subsetting)
        -cidx (list): col indexes to keep (all of them when not subsetting)
        -row_meta (pandas DataFrame): the parsed in row metadata
        -col_meta (pandas DataFrame): the parsed in col metadata
    """
    full_rows = len(ridx) == len(row_meta.index)
    full_cols = len(cidx) == len(col_meta.index)
    if full_rows and full_cols:
        # no subsetting: bulk-read the entire dataset, then transpose
        data_array = np.empty(data_dset.shape, dtype=np.float32)
        data_dset.read_direct(data_array)
        data_array = data_array.transpose()
    elif len(ridx) <= len(cidx):
        # subset the smaller axis first to minimize the intermediate copy
        first_subset = data_dset[:, ridx].astype(np.float32)
        data_array = first_subset[cidx, :].transpose()
    else:
        first_subset = data_dset[cidx, :].astype(np.float32)
        data_array = first_subset[:, ridx].transpose()
    # build the DataFrame with the matching metadata labels
    return pd.DataFrame(data_array,
                        index=row_meta.index[ridx],
                        columns=col_meta.index[cidx])
|
[
"def",
"parse_data_df",
"(",
"data_dset",
",",
"ridx",
",",
"cidx",
",",
"row_meta",
",",
"col_meta",
")",
":",
"if",
"len",
"(",
"ridx",
")",
"==",
"len",
"(",
"row_meta",
".",
"index",
")",
"and",
"len",
"(",
"cidx",
")",
"==",
"len",
"(",
"col_meta",
".",
"index",
")",
":",
"# no subset",
"data_array",
"=",
"np",
".",
"empty",
"(",
"data_dset",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"data_dset",
".",
"read_direct",
"(",
"data_array",
")",
"data_array",
"=",
"data_array",
".",
"transpose",
"(",
")",
"elif",
"len",
"(",
"ridx",
")",
"<=",
"len",
"(",
"cidx",
")",
":",
"first_subset",
"=",
"data_dset",
"[",
":",
",",
"ridx",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"data_array",
"=",
"first_subset",
"[",
"cidx",
",",
":",
"]",
".",
"transpose",
"(",
")",
"elif",
"len",
"(",
"cidx",
")",
"<",
"len",
"(",
"ridx",
")",
":",
"first_subset",
"=",
"data_dset",
"[",
"cidx",
",",
":",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"data_array",
"=",
"first_subset",
"[",
":",
",",
"ridx",
"]",
".",
"transpose",
"(",
")",
"# make DataFrame instance",
"data_df",
"=",
"pd",
".",
"DataFrame",
"(",
"data_array",
",",
"index",
"=",
"row_meta",
".",
"index",
"[",
"ridx",
"]",
",",
"columns",
"=",
"col_meta",
".",
"index",
"[",
"cidx",
"]",
")",
"return",
"data_df"
] | 47.346154
| 19.115385
|
def acquire_lock(path: str, blocking: bool) -> Generator[Optional[int], None, None]:
    """Yield a locked file descriptor for *path*, or None when the path is absent.

    An OSError propagates when the lock can't be acquired (e.g. a
    non-blocking attempt on an already-locked file). The lock is released
    after the caller resumes the generator.
    """
    try:
        with open(path, "w+") as lockfile:
            # LOCK_NB turns the exclusive lock into a non-blocking attempt
            flags = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB
            fd = lockfile.fileno()
            fcntl.lockf(fd, flags)
            yield fd
            fcntl.lockf(fd, fcntl.LOCK_UN)
    except FileNotFoundError:
        # path's directory doesn't exist: signal "no lock" with a bare yield
        yield
|
[
"def",
"acquire_lock",
"(",
"path",
":",
"str",
",",
"blocking",
":",
"bool",
")",
"->",
"Generator",
"[",
"Optional",
"[",
"int",
"]",
",",
"None",
",",
"None",
"]",
":",
"try",
":",
"with",
"open",
"(",
"path",
",",
"\"w+\"",
")",
"as",
"lockfile",
":",
"if",
"not",
"blocking",
":",
"lock_command",
"=",
"fcntl",
".",
"LOCK_EX",
"|",
"fcntl",
".",
"LOCK_NB",
"else",
":",
"lock_command",
"=",
"fcntl",
".",
"LOCK_EX",
"fcntl",
".",
"lockf",
"(",
"lockfile",
".",
"fileno",
"(",
")",
",",
"lock_command",
")",
"yield",
"lockfile",
".",
"fileno",
"(",
")",
"fcntl",
".",
"lockf",
"(",
"lockfile",
".",
"fileno",
"(",
")",
",",
"fcntl",
".",
"LOCK_UN",
")",
"except",
"FileNotFoundError",
":",
"yield"
] | 35.333333
| 19.533333
|
def request(self, type, command_list):
    '''
    Send NX-API JSON request to the NX-OS device.

    type: NX-API message type, forwarded unchanged to the request builder.
    command_list: commands to execute, forwarded unchanged.

    Returns the parsed response for ``command_list``.
    '''
    # Build url/payload/headers (and opts) for this message type.
    req = self._build_request(type, command_list)
    if self.nxargs['connect_over_uds']:
        # Unix-domain-socket transport: http.client-style connection object.
        self.connection.request('POST', req['url'], req['payload'], req['headers'])
        response = self.connection.getresponse()
    else:
        # Network transport: self.connection is invoked as an HTTP helper callable.
        response = self.connection(req['url'],
                                   method='POST',
                                   opts=req['opts'],
                                   data=req['payload'],
                                   header_dict=req['headers'],
                                   decode=True,
                                   decode_type='json',
                                   **self.nxargs)
    return self.parse_response(response, command_list)
|
[
"def",
"request",
"(",
"self",
",",
"type",
",",
"command_list",
")",
":",
"req",
"=",
"self",
".",
"_build_request",
"(",
"type",
",",
"command_list",
")",
"if",
"self",
".",
"nxargs",
"[",
"'connect_over_uds'",
"]",
":",
"self",
".",
"connection",
".",
"request",
"(",
"'POST'",
",",
"req",
"[",
"'url'",
"]",
",",
"req",
"[",
"'payload'",
"]",
",",
"req",
"[",
"'headers'",
"]",
")",
"response",
"=",
"self",
".",
"connection",
".",
"getresponse",
"(",
")",
"else",
":",
"response",
"=",
"self",
".",
"connection",
"(",
"req",
"[",
"'url'",
"]",
",",
"method",
"=",
"'POST'",
",",
"opts",
"=",
"req",
"[",
"'opts'",
"]",
",",
"data",
"=",
"req",
"[",
"'payload'",
"]",
",",
"header_dict",
"=",
"req",
"[",
"'headers'",
"]",
",",
"decode",
"=",
"True",
",",
"decode_type",
"=",
"'json'",
",",
"*",
"*",
"self",
".",
"nxargs",
")",
"return",
"self",
".",
"parse_response",
"(",
"response",
",",
"command_list",
")"
] | 45.526316
| 18.894737
|
def _align(self, axis):
    """
    Move the given axes into the keys of this spark bolt array.

    Validates ``axis`` against the array shape, then swaps key/value axes
    so that subsequent functional operators iterate over the requested
    records. Returns ``self`` unchanged when no swap is required.

    Parameters
    ----------
    axis: tuple[int]
        One or more axes that wil be iterated over by a functional operator

    Returns
    -------
    BoltArraySpark
    """
    # ensure that the specified axes are valid
    inshape(self.shape, axis)
    split = self.split
    # value axes (>= split) requested for iteration move into the keys
    tokeys = [a - split for a in axis if a >= split]
    # key axes not requested for iteration move into the values
    tovalues = [a for a in range(split) if a not in axis]
    if not tokeys and not tovalues:
        return self
    return self.swap(tovalues, tokeys)
|
[
"def",
"_align",
"(",
"self",
",",
"axis",
")",
":",
"# ensure that the specified axes are valid",
"inshape",
"(",
"self",
".",
"shape",
",",
"axis",
")",
"# find the value axes that should be moved into the keys (axis >= split)",
"tokeys",
"=",
"[",
"(",
"a",
"-",
"self",
".",
"split",
")",
"for",
"a",
"in",
"axis",
"if",
"a",
">=",
"self",
".",
"split",
"]",
"# find the key axes that should be moved into the values (axis < split)",
"tovalues",
"=",
"[",
"a",
"for",
"a",
"in",
"range",
"(",
"self",
".",
"split",
")",
"if",
"a",
"not",
"in",
"axis",
"]",
"if",
"tokeys",
"or",
"tovalues",
":",
"return",
"self",
".",
"swap",
"(",
"tovalues",
",",
"tokeys",
")",
"else",
":",
"return",
"self"
] | 33.129032
| 24.225806
|
def _create_justification_button(self):
    """Creates horizontal justification button and adds it to this toolbar.

    Builds a BitmapToggleButton with one bitmap per justification state
    (left/center/right) and binds clicks to OnJustification.
    """
    iconnames = ["JustifyLeft", "JustifyCenter", "JustifyRight"]
    # one bitmap per justification state, looked up in the shared icon registry
    bmplist = [icons[iconname] for iconname in iconnames]
    self.justify_tb = _widgets.BitmapToggleButton(self, bmplist)
    self.justify_tb.SetToolTipString(_(u"Justification"))
    self.Bind(wx.EVT_BUTTON, self.OnJustification, self.justify_tb)
    self.AddControl(self.justify_tb)
|
[
"def",
"_create_justification_button",
"(",
"self",
")",
":",
"iconnames",
"=",
"[",
"\"JustifyLeft\"",
",",
"\"JustifyCenter\"",
",",
"\"JustifyRight\"",
"]",
"bmplist",
"=",
"[",
"icons",
"[",
"iconname",
"]",
"for",
"iconname",
"in",
"iconnames",
"]",
"self",
".",
"justify_tb",
"=",
"_widgets",
".",
"BitmapToggleButton",
"(",
"self",
",",
"bmplist",
")",
"self",
".",
"justify_tb",
".",
"SetToolTipString",
"(",
"_",
"(",
"u\"Justification\"",
")",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_BUTTON",
",",
"self",
".",
"OnJustification",
",",
"self",
".",
"justify_tb",
")",
"self",
".",
"AddControl",
"(",
"self",
".",
"justify_tb",
")"
] | 51.222222
| 18.888889
|
def to_bytes(val):
    """Decode escaped CR/LF sequences in a text message and encode to bytes.

    The ``NoResponse`` sentinel passes through untouched.
    """
    if val is NoResponse:
        return val
    unescaped = val.replace('\\r', '\r')
    unescaped = unescaped.replace('\\n', '\n')
    return unescaped.encode()
|
[
"def",
"to_bytes",
"(",
"val",
")",
":",
"if",
"val",
"is",
"NoResponse",
":",
"return",
"val",
"val",
"=",
"val",
".",
"replace",
"(",
"'\\\\r'",
",",
"'\\r'",
")",
".",
"replace",
"(",
"'\\\\n'",
",",
"'\\n'",
")",
"return",
"val",
".",
"encode",
"(",
")"
] | 27.428571
| 13
|
def _collect_by_key(self,specs):
"""
Returns a dictionary like object with the lists of values
collapsed by their respective key. Useful to find varying vs
constant keys and to find how fast keys vary.
"""
# Collect (key, value) tuples as list of lists, flatten with chain
allkeys = itertools.chain.from_iterable(
[[(k, run[k]) for k in run] for run in specs])
collection = defaultdict(list)
for (k,v) in allkeys: collection[k].append(v)
return collection
|
[
"def",
"_collect_by_key",
"(",
"self",
",",
"specs",
")",
":",
"# Collect (key, value) tuples as list of lists, flatten with chain",
"allkeys",
"=",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"[",
"[",
"(",
"k",
",",
"run",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"run",
"]",
"for",
"run",
"in",
"specs",
"]",
")",
"collection",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"allkeys",
":",
"collection",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"return",
"collection"
] | 44.666667
| 13.666667
|
def dependencies(self, task, params=None, **options):
    """Returns the compact representations of all of the dependencies of a task.

    Parameters
    ----------
    task : {Id} The task to get dependencies on.
    [params] : {Object} Parameters for the request
    """
    # Bugfix: the old `params={}` default was a shared mutable dict; use a
    # None sentinel so every call gets a fresh dictionary.
    if params is None:
        params = {}
    path = "/tasks/%s/dependencies" % (task)
    return self.client.get(path, params, **options)
|
[
"def",
"dependencies",
"(",
"self",
",",
"task",
",",
"params",
"=",
"{",
"}",
",",
"*",
"*",
"options",
")",
":",
"path",
"=",
"\"/tasks/%s/dependencies\"",
"%",
"(",
"task",
")",
"return",
"self",
".",
"client",
".",
"get",
"(",
"path",
",",
"params",
",",
"*",
"*",
"options",
")"
] | 39.2
| 14.5
|
def report(data):
    """Create a Rmd report for small RNAseq analysis.

    Writes report/summary.csv (sample_id plus the guessed metadata column)
    under the pipeline work directory, then adjusts the report template via
    _modify_report.

    data: list of [sample-info] entries as passed through the pipeline.
    Returns the path to the summary CSV.
    """
    work_dir = dd.get_work_dir(data[0][0])
    out_dir = op.join(work_dir, "report")
    safe_makedir(out_dir)
    summary_file = op.join(out_dir, "summary.csv")
    with file_transaction(summary_file) as out_tx:
        with open(out_tx, 'w') as out_handle:
            out_handle.write("sample_id,%s\n" % _guess_header(data[0][0]))
            for sample in data:
                info = sample[0]
                group = _guess_group(info)
                # Note: an unused `files` lookup of info["seqbuster"] was
                # removed; only the sample name and group are written.
                out_handle.write(",".join([dd.get_sample_name(info),
                                           group]) + "\n")
    _modify_report(work_dir, out_dir)
    return summary_file
|
[
"def",
"report",
"(",
"data",
")",
":",
"work_dir",
"=",
"dd",
".",
"get_work_dir",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"out_dir",
"=",
"op",
".",
"join",
"(",
"work_dir",
",",
"\"report\"",
")",
"safe_makedir",
"(",
"out_dir",
")",
"summary_file",
"=",
"op",
".",
"join",
"(",
"out_dir",
",",
"\"summary.csv\"",
")",
"with",
"file_transaction",
"(",
"summary_file",
")",
"as",
"out_tx",
":",
"with",
"open",
"(",
"out_tx",
",",
"'w'",
")",
"as",
"out_handle",
":",
"out_handle",
".",
"write",
"(",
"\"sample_id,%s\\n\"",
"%",
"_guess_header",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
")",
"for",
"sample",
"in",
"data",
":",
"info",
"=",
"sample",
"[",
"0",
"]",
"group",
"=",
"_guess_group",
"(",
"info",
")",
"files",
"=",
"info",
"[",
"\"seqbuster\"",
"]",
"if",
"\"seqbuster\"",
"in",
"info",
"else",
"\"None\"",
"out_handle",
".",
"write",
"(",
"\",\"",
".",
"join",
"(",
"[",
"dd",
".",
"get_sample_name",
"(",
"info",
")",
",",
"group",
"]",
")",
"+",
"\"\\n\"",
")",
"_modify_report",
"(",
"work_dir",
",",
"out_dir",
")",
"return",
"summary_file"
] | 45.058824
| 13
|
def get_short_url(self, obj):
    """
    Return the blog post's short URL like '/blog/<slug>/' via ``get_absolute_url``.

    When the URL can't be reversed, fall back to building it from the slug,
    removing the dependency on Mezzanine view URLs for API-only deployments.
    """
    try:
        return obj.get_absolute_url()
    except NoReverseMatch:
        return '/blog/' + obj.slug
|
[
"def",
"get_short_url",
"(",
"self",
",",
"obj",
")",
":",
"try",
":",
"url",
"=",
"obj",
".",
"get_absolute_url",
"(",
")",
"except",
"NoReverseMatch",
":",
"url",
"=",
"'/blog/'",
"+",
"obj",
".",
"slug",
"return",
"url"
] | 39.4
| 20
|
def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_mac_address(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF <config> payload for setting an access MAC address
    under a port-profile's MAC-based VLAN classification, then hands the
    document to the callback (default: self._callback).

    Required kwargs: name, access_vlan_id, access_mac_address.
    Optional kwarg: callback.
    """
    config = ET.Element("config")
    # root element carries the Brocade port-profile namespace
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    access_mac_vlan_classification = ET.SubElement(switchport, "access-mac-vlan-classification")
    access = ET.SubElement(access_mac_vlan_classification, "access")
    vlan = ET.SubElement(access, "vlan")
    access_vlan_id_key = ET.SubElement(vlan, "access-vlan-id")
    access_vlan_id_key.text = kwargs.pop('access_vlan_id')
    access_mac_address = ET.SubElement(vlan, "access-mac-address")
    access_mac_address.text = kwargs.pop('access_mac_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
[
"def",
"port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_mac_address",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"port_profile",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"port-profile\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-port-profile\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"vlan_profile",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"vlan-profile\"",
")",
"switchport",
"=",
"ET",
".",
"SubElement",
"(",
"vlan_profile",
",",
"\"switchport\"",
")",
"access_mac_vlan_classification",
"=",
"ET",
".",
"SubElement",
"(",
"switchport",
",",
"\"access-mac-vlan-classification\"",
")",
"access",
"=",
"ET",
".",
"SubElement",
"(",
"access_mac_vlan_classification",
",",
"\"access\"",
")",
"vlan",
"=",
"ET",
".",
"SubElement",
"(",
"access",
",",
"\"vlan\"",
")",
"access_vlan_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"vlan",
",",
"\"access-vlan-id\"",
")",
"access_vlan_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'access_vlan_id'",
")",
"access_mac_address",
"=",
"ET",
".",
"SubElement",
"(",
"vlan",
",",
"\"access-mac-address\"",
")",
"access_mac_address",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'access_mac_address'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 57.894737
| 25.421053
|
def get_port_channel_detail_output_lacp_partner_system_id(self, **kwargs):
    """Auto Generated Code

    Builds the get_port_channel_detail RPC document carrying the LACP
    partner-system-id value, then hands it to the callback (default:
    self._callback). Required kwarg: partner_system_id.
    """
    detail = ET.Element("get_port_channel_detail")
    config = detail
    output = ET.SubElement(detail, "output")
    lacp = ET.SubElement(output, "lacp")
    partner_node = ET.SubElement(lacp, "partner-system-id")
    partner_node.text = kwargs.pop('partner_system_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
[
"def",
"get_port_channel_detail_output_lacp_partner_system_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_port_channel_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_port_channel_detail\"",
")",
"config",
"=",
"get_port_channel_detail",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_port_channel_detail",
",",
"\"output\"",
")",
"lacp",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"lacp\"",
")",
"partner_system_id",
"=",
"ET",
".",
"SubElement",
"(",
"lacp",
",",
"\"partner-system-id\"",
")",
"partner_system_id",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'partner_system_id'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 45.538462
| 16.538462
|
def _simple_blockify(tuples, dtype):
    """Stack ``tuples`` into a single block with one dtype.

    When ``dtype`` is given and stacking produced a different dtype,
    coerce the values before building the block.
    """
    values, placement = _stack_arrays(tuples, dtype)
    needs_cast = dtype is not None and values.dtype != dtype
    if needs_cast:  # pragma: no cover
        values = values.astype(dtype)
    return [make_block(values, placement=placement)]
|
[
"def",
"_simple_blockify",
"(",
"tuples",
",",
"dtype",
")",
":",
"values",
",",
"placement",
"=",
"_stack_arrays",
"(",
"tuples",
",",
"dtype",
")",
"# CHECK DTYPE?",
"if",
"dtype",
"is",
"not",
"None",
"and",
"values",
".",
"dtype",
"!=",
"dtype",
":",
"# pragma: no cover",
"values",
"=",
"values",
".",
"astype",
"(",
"dtype",
")",
"block",
"=",
"make_block",
"(",
"values",
",",
"placement",
"=",
"placement",
")",
"return",
"[",
"block",
"]"
] | 33.416667
| 15.916667
|
def is_gzippable(self, path):
    """
    Return True when gzipping is enabled and *path*'s guessed MIME type
    is in the configured whitelist.
    """
    # gzipping must be switched on globally first
    if not getattr(settings, 'BAKERY_GZIP', False):
        return False
    # then the file's content type must be whitelisted
    allowed_types = getattr(settings, 'GZIP_CONTENT_TYPES',
                            DEFAULT_GZIP_CONTENT_TYPES)
    content_type = mimetypes.guess_type(path)[0]
    return content_type in allowed_types
|
[
"def",
"is_gzippable",
"(",
"self",
",",
"path",
")",
":",
"# First check if gzipping is allowed by the global setting",
"if",
"not",
"getattr",
"(",
"settings",
",",
"'BAKERY_GZIP'",
",",
"False",
")",
":",
"return",
"False",
"# Then check if the content type of this particular file is gzippable",
"whitelist",
"=",
"getattr",
"(",
"settings",
",",
"'GZIP_CONTENT_TYPES'",
",",
"DEFAULT_GZIP_CONTENT_TYPES",
")",
"return",
"mimetypes",
".",
"guess_type",
"(",
"path",
")",
"[",
"0",
"]",
"in",
"whitelist"
] | 37.133333
| 16.6
|
def _countOverlap(rep1, rep2):
"""
Return the overlap between two representations. rep1 and rep2 are lists of
non-zero indices.
"""
overlap = 0
for e in rep1:
if e in rep2:
overlap += 1
return overlap
|
[
"def",
"_countOverlap",
"(",
"rep1",
",",
"rep2",
")",
":",
"overlap",
"=",
"0",
"for",
"e",
"in",
"rep1",
":",
"if",
"e",
"in",
"rep2",
":",
"overlap",
"+=",
"1",
"return",
"overlap"
] | 23.3
| 17.7
|
def star(args):
    """
    %prog star folder reference

    Run star on a folder with reads.
    """
    p = OptionParser(star.__doc__)
    p.add_option("--single", default=False, action="store_true",
                 help="Single end mapping")
    p.set_fastq_names()
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    # Bugfix: args was unpacked twice; unpack once.
    folder, reference = args
    cpus = opts.cpus
    mm = MakeManager()
    # number of fastq files per sample: 1 for single-end, 2 for paired-end
    num = 1 if opts.single else 2
    gd = "GenomeDir"
    mkdir(gd)
    STAR = "STAR --runThreadN {0} --genomeDir {1}".format(cpus, gd)

    # Step 0: build genome index
    genomeidx = op.join(gd, "Genome")
    if need_update(reference, genomeidx):
        cmd = STAR + " --runMode genomeGenerate"
        cmd += " --genomeFastaFiles {0}".format(reference)
        mm.add(reference, genomeidx, cmd)

    # Step 1: align each sample in the folder
    for p, prefix in iter_project(folder, opts.names, num):
        pf = "{0}_star".format(prefix)
        bamfile = pf + "Aligned.sortedByCoord.out.bam"
        cmd = STAR + " --readFilesIn {0}".format(" ".join(p))
        if p[0].endswith(".gz"):
            cmd += " --readFilesCommand zcat"
        cmd += " --outSAMtype BAM SortedByCoordinate"
        cmd += " --outFileNamePrefix {0}".format(pf)
        cmd += " --twopassMode Basic"
        # Compatibility for cufflinks
        cmd += " --outSAMstrandField intronMotif"
        cmd += " --outFilterIntronMotifs RemoveNoncanonical"
        mm.add(p, bamfile, cmd)
    mm.write()
|
[
"def",
"star",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"star",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--single\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Single end mapping\"",
")",
"p",
".",
"set_fastq_names",
"(",
")",
"p",
".",
"set_cpus",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"folder",
",",
"reference",
"=",
"args",
"cpus",
"=",
"opts",
".",
"cpus",
"mm",
"=",
"MakeManager",
"(",
")",
"num",
"=",
"1",
"if",
"opts",
".",
"single",
"else",
"2",
"folder",
",",
"reference",
"=",
"args",
"gd",
"=",
"\"GenomeDir\"",
"mkdir",
"(",
"gd",
")",
"STAR",
"=",
"\"STAR --runThreadN {0} --genomeDir {1}\"",
".",
"format",
"(",
"cpus",
",",
"gd",
")",
"# Step 0: build genome index",
"genomeidx",
"=",
"op",
".",
"join",
"(",
"gd",
",",
"\"Genome\"",
")",
"if",
"need_update",
"(",
"reference",
",",
"genomeidx",
")",
":",
"cmd",
"=",
"STAR",
"+",
"\" --runMode genomeGenerate\"",
"cmd",
"+=",
"\" --genomeFastaFiles {0}\"",
".",
"format",
"(",
"reference",
")",
"mm",
".",
"add",
"(",
"reference",
",",
"genomeidx",
",",
"cmd",
")",
"# Step 1: align",
"for",
"p",
",",
"prefix",
"in",
"iter_project",
"(",
"folder",
",",
"opts",
".",
"names",
",",
"num",
")",
":",
"pf",
"=",
"\"{0}_star\"",
".",
"format",
"(",
"prefix",
")",
"bamfile",
"=",
"pf",
"+",
"\"Aligned.sortedByCoord.out.bam\"",
"cmd",
"=",
"STAR",
"+",
"\" --readFilesIn {0}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"p",
")",
")",
"if",
"p",
"[",
"0",
"]",
".",
"endswith",
"(",
"\".gz\"",
")",
":",
"cmd",
"+=",
"\" --readFilesCommand zcat\"",
"cmd",
"+=",
"\" --outSAMtype BAM SortedByCoordinate\"",
"cmd",
"+=",
"\" --outFileNamePrefix {0}\"",
".",
"format",
"(",
"pf",
")",
"cmd",
"+=",
"\" --twopassMode Basic\"",
"# Compatibility for cufflinks",
"cmd",
"+=",
"\" --outSAMstrandField intronMotif\"",
"cmd",
"+=",
"\" --outFilterIntronMotifs RemoveNoncanonical\"",
"mm",
".",
"add",
"(",
"p",
",",
"bamfile",
",",
"cmd",
")",
"mm",
".",
"write",
"(",
")"
] | 30.571429
| 16.040816
|
def _requirements_sanitize(req_list):
    # type: (List[str]) -> List[str]
    """
    Cleanup a list of requirement strings (e.g. from requirements.txt) to only
    contain entries valid for this platform and with the lowest required version
    only.

    Example
    -------
    >>> from sys import version_info
    >>> _requirements_sanitize([
    ...     'foo>=3.0',
    ...     "monotonic>=1.0,>0.1;python_version=='2.4'",
    ...     "bar>1.0;python_version=='{}.{}'".format(version_info[0], version_info[1])
    ... ])
    ['foo >= 3.0', 'bar > 1.0']
    """
    sanitized = []
    for spec in req_list:
        requirement = pkg_resources.Requirement.parse(spec)
        # keep only requirements whose environment marker matches this platform
        if _requirement_filter_by_marker(requirement):
            lowest = _requirement_find_lowest_possible(requirement)
            sanitized.append(" ".join(lowest))
    return sanitized
|
[
"def",
"_requirements_sanitize",
"(",
"req_list",
")",
":",
"# type: (List[str]) -> List[str]",
"filtered_req_list",
"=",
"(",
"_requirement_find_lowest_possible",
"(",
"req",
")",
"for",
"req",
"in",
"(",
"pkg_resources",
".",
"Requirement",
".",
"parse",
"(",
"s",
")",
"for",
"s",
"in",
"req_list",
")",
"if",
"_requirement_filter_by_marker",
"(",
"req",
")",
")",
"return",
"[",
"\" \"",
".",
"join",
"(",
"req",
")",
"for",
"req",
"in",
"filtered_req_list",
"]"
] | 33.291667
| 20.541667
|
def sync_repo_hook(self, repo_id):
    """Sync a GitHub repo's hook with the locally stored repo.

    repo_id: GitHub repository id, resolved through the authenticated API
    client. A webhook counts as ours when its config URL equals
    self.webhook_url.
    """
    # Get the hook that we may have set in the past
    gh_repo = self.api.repository_with_id(repo_id)
    hooks = (hook.id for hook in gh_repo.hooks()
             if hook.config.get('url', '') == self.webhook_url)
    hook_id = next(hooks, None)

    # If hook on GitHub exists, get or create corresponding db object and
    # enable the hook. Otherwise remove the old hook information.
    if hook_id:
        Repository.enable(user_id=self.user_id,
                          github_id=gh_repo.id,
                          name=gh_repo.full_name,
                          hook=hook_id)
    else:
        Repository.disable(user_id=self.user_id,
                           github_id=gh_repo.id,
                           name=gh_repo.full_name)
|
[
"def",
"sync_repo_hook",
"(",
"self",
",",
"repo_id",
")",
":",
"# Get the hook that we may have set in the past",
"gh_repo",
"=",
"self",
".",
"api",
".",
"repository_with_id",
"(",
"repo_id",
")",
"hooks",
"=",
"(",
"hook",
".",
"id",
"for",
"hook",
"in",
"gh_repo",
".",
"hooks",
"(",
")",
"if",
"hook",
".",
"config",
".",
"get",
"(",
"'url'",
",",
"''",
")",
"==",
"self",
".",
"webhook_url",
")",
"hook_id",
"=",
"next",
"(",
"hooks",
",",
"None",
")",
"# If hook on GitHub exists, get or create corresponding db object and",
"# enable the hook. Otherwise remove the old hook information.",
"if",
"hook_id",
":",
"Repository",
".",
"enable",
"(",
"user_id",
"=",
"self",
".",
"user_id",
",",
"github_id",
"=",
"gh_repo",
".",
"id",
",",
"name",
"=",
"gh_repo",
".",
"full_name",
",",
"hook",
"=",
"hook_id",
")",
"else",
":",
"Repository",
".",
"disable",
"(",
"user_id",
"=",
"self",
".",
"user_id",
",",
"github_id",
"=",
"gh_repo",
".",
"id",
",",
"name",
"=",
"gh_repo",
".",
"full_name",
")"
] | 47.368421
| 16.263158
|
def _create_h(x):
"""increase between samples"""
h = np.zeros_like(x)
h[:-1] = x[1:] - x[:-1]
# border
h[-1] = h[-2]
return h
|
[
"def",
"_create_h",
"(",
"x",
")",
":",
"h",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"h",
"[",
":",
"-",
"1",
"]",
"=",
"x",
"[",
"1",
":",
"]",
"-",
"x",
"[",
":",
"-",
"1",
"]",
"# border",
"h",
"[",
"-",
"1",
"]",
"=",
"h",
"[",
"-",
"2",
"]",
"return",
"h"
] | 23.857143
| 15.857143
|
def remove(self, child):
    """Detach *child* from this element and drop it from the document index."""
    if not isinstance(child, AbstractElement):
        raise ValueError("Expected AbstractElement, got " + str(type(child)))
    if child.parent == self:
        child.parent = None
    self.data.remove(child)
    # forget the child's id so document-wide lookups no longer resolve it
    doc = self.doc
    if doc and child.id and child.id in doc.index:
        del doc.index[child.id]
|
[
"def",
"remove",
"(",
"self",
",",
"child",
")",
":",
"if",
"not",
"isinstance",
"(",
"child",
",",
"AbstractElement",
")",
":",
"raise",
"ValueError",
"(",
"\"Expected AbstractElement, got \"",
"+",
"str",
"(",
"type",
"(",
"child",
")",
")",
")",
"if",
"child",
".",
"parent",
"==",
"self",
":",
"child",
".",
"parent",
"=",
"None",
"self",
".",
"data",
".",
"remove",
"(",
"child",
")",
"#delete from index",
"if",
"child",
".",
"id",
"and",
"self",
".",
"doc",
"and",
"child",
".",
"id",
"in",
"self",
".",
"doc",
".",
"index",
":",
"del",
"self",
".",
"doc",
".",
"index",
"[",
"child",
".",
"id",
"]"
] | 41.8
| 13.1
|
def btc_tx_sign_input(tx, idx, prevout_script, prevout_amount, private_key_info, hashcode=SIGHASH_ALL, hashcodes=None, segwit=None, scriptsig_type=None, redeem_script=None, witness_script=None, **blockchain_opts):
    """
    Sign a particular input in the given transaction.
    @private_key_info can either be a private key, or it can be a dict with 'redeem_script' and 'private_keys' defined

    Raises ValueError when a segwit scriptsig type is requested but segwit
    support is disabled.
    Returns the tx with the signed input
    """
    if segwit is None:
        # fall back to the node's feature flags when the caller doesn't say
        segwit = get_features('segwit')

    if scriptsig_type is None:
        scriptsig_type = btc_privkey_scriptsig_classify(private_key_info)

    if scriptsig_type in ['p2wpkh', 'p2wsh', 'p2sh-p2wpkh', 'p2sh-p2wsh'] and not segwit:
        # Bugfix: report the offending scriptsig type (the message previously
        # interpolated prevout_script by mistake).
        raise ValueError("Segwit is not enabled, but {} is a segwit scriptsig type".format(scriptsig_type))

    return btc_tx_sign(tx, idx, prevout_script, prevout_amount, private_key_info, scriptsig_type, hashcode=hashcode, hashcodes=hashcodes, redeem_script=redeem_script, witness_script=witness_script)
|
[
"def",
"btc_tx_sign_input",
"(",
"tx",
",",
"idx",
",",
"prevout_script",
",",
"prevout_amount",
",",
"private_key_info",
",",
"hashcode",
"=",
"SIGHASH_ALL",
",",
"hashcodes",
"=",
"None",
",",
"segwit",
"=",
"None",
",",
"scriptsig_type",
"=",
"None",
",",
"redeem_script",
"=",
"None",
",",
"witness_script",
"=",
"None",
",",
"*",
"*",
"blockchain_opts",
")",
":",
"if",
"segwit",
"is",
"None",
":",
"segwit",
"=",
"get_features",
"(",
"'segwit'",
")",
"if",
"scriptsig_type",
"is",
"None",
":",
"scriptsig_type",
"=",
"btc_privkey_scriptsig_classify",
"(",
"private_key_info",
")",
"if",
"scriptsig_type",
"in",
"[",
"'p2wpkh'",
",",
"'p2wsh'",
",",
"'p2sh-p2wpkh'",
",",
"'p2sh-p2wsh'",
"]",
"and",
"not",
"segwit",
":",
"raise",
"ValueError",
"(",
"\"Segwit is not enabled, but {} is a segwit scriptsig type\"",
".",
"format",
"(",
"prevout_script",
")",
")",
"return",
"btc_tx_sign",
"(",
"tx",
",",
"idx",
",",
"prevout_script",
",",
"prevout_amount",
",",
"private_key_info",
",",
"scriptsig_type",
",",
"hashcode",
"=",
"hashcode",
",",
"hashcodes",
"=",
"hashcodes",
",",
"redeem_script",
"=",
"redeem_script",
",",
"witness_script",
"=",
"witness_script",
")"
] | 59
| 44.176471
|
def _get(self):
"""get and parse data stored in self.path."""
data, stat = self.zk.get(self.path)
if not len(data):
return {}, stat.version
if self.OLD_SEPARATOR in data:
return self._get_old()
return json.loads(data), stat.version
|
[
"def",
"_get",
"(",
"self",
")",
":",
"data",
",",
"stat",
"=",
"self",
".",
"zk",
".",
"get",
"(",
"self",
".",
"path",
")",
"if",
"not",
"len",
"(",
"data",
")",
":",
"return",
"{",
"}",
",",
"stat",
".",
"version",
"if",
"self",
".",
"OLD_SEPARATOR",
"in",
"data",
":",
"return",
"self",
".",
"_get_old",
"(",
")",
"return",
"json",
".",
"loads",
"(",
"data",
")",
",",
"stat",
".",
"version"
] | 32
| 11.222222
|
async def send_chat_action(self, chat_id: typing.Union[base.Integer, base.String],
                           action: base.String) -> base.Boolean:
    """
    Use this method when you need to tell the user that something is happening on the bot's side.
    The status is set for 5 seconds or less
    (when a message arrives from your bot, Telegram clients clear its typing status).
    We only recommend using this method when a response from the bot will take
    a noticeable amount of time to arrive.
    Source: https://core.telegram.org/bots/api#sendchataction
    :param chat_id: Unique identifier for the target chat or username of the target channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param action: Type of action to broadcast
    :type action: :obj:`base.String`
    :return: Returns True on success
    :rtype: :obj:`base.Boolean`
    """
    # NOTE: generate_payload(**locals()) snapshots this coroutine's
    # arguments by name — do not add, rename, or reorder locals above
    # this line or the request payload will change.
    payload = generate_payload(**locals())
    result = await self.request(api.Methods.SEND_CHAT_ACTION, payload)
    return result
|
[
"async",
"def",
"send_chat_action",
"(",
"self",
",",
"chat_id",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"Integer",
",",
"base",
".",
"String",
"]",
",",
"action",
":",
"base",
".",
"String",
")",
"->",
"base",
".",
"Boolean",
":",
"payload",
"=",
"generate_payload",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"result",
"=",
"await",
"self",
".",
"request",
"(",
"api",
".",
"Methods",
".",
"SEND_CHAT_ACTION",
",",
"payload",
")",
"return",
"result"
] | 46.608696
| 25.130435
|
def backend_to_retrieve(self, namespace, stream):
    """Return ``(backend, backend_conf)`` used for reading `stream`.

    The backend is resolved through the stream's matching prefix
    within `namespace`.

    Raises:
        NamespaceMissing: if `namespace` is not configured.
    """
    if namespace not in self.namespaces:
        raise NamespaceMissing('`{}` namespace is not configured'
                               .format(namespace))
    prefix = self.get_matching_prefix(namespace, stream)
    backend = self.prefix_read_backends[namespace][prefix]
    conf = self.prefix_confs[namespace][prefix][backend]
    return backend, conf
|
[
"def",
"backend_to_retrieve",
"(",
"self",
",",
"namespace",
",",
"stream",
")",
":",
"if",
"namespace",
"not",
"in",
"self",
".",
"namespaces",
":",
"raise",
"NamespaceMissing",
"(",
"'`{}` namespace is not configured'",
".",
"format",
"(",
"namespace",
")",
")",
"stream_prefix",
"=",
"self",
".",
"get_matching_prefix",
"(",
"namespace",
",",
"stream",
")",
"read_backend",
"=",
"self",
".",
"prefix_read_backends",
"[",
"namespace",
"]",
"[",
"stream_prefix",
"]",
"return",
"(",
"read_backend",
",",
"self",
".",
"prefix_confs",
"[",
"namespace",
"]",
"[",
"stream_prefix",
"]",
"[",
"read_backend",
"]",
")"
] | 44.909091
| 13.636364
|
def consume(self, key, default=None, current=None, print_on_success=False):
    """
    Pop `key` from the configuration; a consumed key is removed.

    Returns `default` when the key is absent.  A truthy `current`
    value short-circuits and is returned instead, but the key is
    still popped (and therefore still consumed).
    """
    value = self._configuration.pop(key, default)
    if current:
        return current
    if value:
        if print_on_success:
            LOG.debug("Found %s: `%s`", key, ", ".join(value))
        if key in self._deprecated:
            # Warn once per consumption about deprecated config items.
            LOG.warning(
                "Configuration file uses deprecated item `%s`: "
                "please migrate to its replacement `%s`",
                key,
                self._deprecated[key],
            )
    return value
|
[
"def",
"consume",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
",",
"current",
"=",
"None",
",",
"print_on_success",
"=",
"False",
")",
":",
"value",
"=",
"self",
".",
"_configuration",
".",
"pop",
"(",
"key",
",",
"default",
")",
"if",
"current",
":",
"return",
"current",
"if",
"value",
"and",
"print_on_success",
":",
"LOG",
".",
"debug",
"(",
"\"Found %s: `%s`\"",
",",
"key",
",",
"\", \"",
".",
"join",
"(",
"value",
")",
")",
"if",
"value",
"and",
"key",
"in",
"self",
".",
"_deprecated",
":",
"LOG",
".",
"warning",
"(",
"\"Configuration file uses deprecated item `%s`: \"",
"\"please migrate to its replacement `%s`\"",
",",
"key",
",",
"self",
".",
"_deprecated",
"[",
"key",
"]",
",",
")",
"return",
"value"
] | 37.391304
| 18.521739
|
def read_points(features):
    """Yield point tuples from an iterable of features.

    Accepts bare 2-tuples/lists, objects exposing
    ``__geo_interface__``, GeoJSON-like Feature mappings, and bare
    geometry mappings.  Raises InvalidFeatureError for anything else.
    """
    for feat in features:
        if isinstance(feat, (tuple, list)) and len(feat) == 2:
            # Already a bare point pair; pass through unchanged.
            yield feat
        elif hasattr(feat, '__geo_interface__'):
            try:
                # Feature object: geometry lives under 'geometry'.
                for point in _geom_points(feat.__geo_interface__['geometry']):
                    yield point
            except KeyError:
                # Bare geometry object: use the interface directly.
                for point in _geom_points(feat.__geo_interface__):
                    yield point
        elif 'type' in feat and feat['type'] == 'Feature':
            # GeoJSON-like Feature mapping.
            for point in _geom_points(feat['geometry']):
                yield point
        elif 'coordinates' in feat:
            # Bare GeoJSON-like geometry mapping.
            for point in _geom_points(feat):
                yield point
        else:
            raise InvalidFeatureError(
                "Unknown object: Not a GeoJSON Point feature or "
                "an object with __geo_interface__:\n{0}".format(feat))
|
[
"def",
"read_points",
"(",
"features",
")",
":",
"for",
"feature",
"in",
"features",
":",
"if",
"isinstance",
"(",
"feature",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"feature",
")",
"==",
"2",
":",
"yield",
"feature",
"elif",
"hasattr",
"(",
"feature",
",",
"'__geo_interface__'",
")",
":",
"# An object implementing the geo_interface",
"try",
":",
"# Could be a Feature...",
"geom",
"=",
"feature",
".",
"__geo_interface__",
"[",
"'geometry'",
"]",
"for",
"pt",
"in",
"_geom_points",
"(",
"geom",
")",
":",
"yield",
"pt",
"except",
"KeyError",
":",
"# ... or a geometry directly",
"for",
"pt",
"in",
"_geom_points",
"(",
"feature",
".",
"__geo_interface__",
")",
":",
"yield",
"pt",
"elif",
"'type'",
"in",
"feature",
"and",
"feature",
"[",
"'type'",
"]",
"==",
"'Feature'",
":",
"# A GeoJSON-like mapping",
"geom",
"=",
"feature",
"[",
"'geometry'",
"]",
"for",
"pt",
"in",
"_geom_points",
"(",
"geom",
")",
":",
"yield",
"pt",
"elif",
"'coordinates'",
"in",
"feature",
":",
"geom",
"=",
"feature",
"for",
"pt",
"in",
"_geom_points",
"(",
"geom",
")",
":",
"yield",
"pt",
"else",
":",
"raise",
"InvalidFeatureError",
"(",
"\"Unknown object: Not a GeoJSON Point feature or \"",
"\"an object with __geo_interface__:\\n{0}\"",
".",
"format",
"(",
"feature",
")",
")"
] | 34.810811
| 16.027027
|
def convert_coordinates(coords, origin, wgs84, wrapped):
    """Recursively reproject `coords` from the `origin` CRS to `wgs84`.

    With `wrapped` set, longitudes below -170 are shifted east by 360
    degrees.  Non-sequence inputs, empty sequences, and sequences whose
    first element is neither a sequence nor a float return None.
    """
    if isinstance(coords, (list, tuple)):
        try:
            head = coords[0]
            if isinstance(head, (list, tuple)):
                # Nested structure: convert each member recursively.
                return [convert_coordinates(list(item), origin, wgs84, wrapped)
                        for item in coords]
            if isinstance(head, float):
                point = list(transform(origin, wgs84, *coords))
                if wrapped and point[0] < -170:
                    point[0] = point[0] + 360
                return point
        except IndexError:
            # Empty sequence (or malformed transform result): no point.
            pass
    return None
|
[
"def",
"convert_coordinates",
"(",
"coords",
",",
"origin",
",",
"wgs84",
",",
"wrapped",
")",
":",
"if",
"isinstance",
"(",
"coords",
",",
"list",
")",
"or",
"isinstance",
"(",
"coords",
",",
"tuple",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"coords",
"[",
"0",
"]",
",",
"list",
")",
"or",
"isinstance",
"(",
"coords",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"return",
"[",
"convert_coordinates",
"(",
"list",
"(",
"c",
")",
",",
"origin",
",",
"wgs84",
",",
"wrapped",
")",
"for",
"c",
"in",
"coords",
"]",
"elif",
"isinstance",
"(",
"coords",
"[",
"0",
"]",
",",
"float",
")",
":",
"c",
"=",
"list",
"(",
"transform",
"(",
"origin",
",",
"wgs84",
",",
"*",
"coords",
")",
")",
"if",
"wrapped",
"and",
"c",
"[",
"0",
"]",
"<",
"-",
"170",
":",
"c",
"[",
"0",
"]",
"=",
"c",
"[",
"0",
"]",
"+",
"360",
"return",
"c",
"except",
"IndexError",
":",
"pass",
"return",
"None"
] | 38.625
| 21.4375
|
def send_message_for_lane_change(sender, **kwargs):
    """
    Sends a message to possible owners of the current workflows
    next lane.
    Args:
        **kwargs: ``current`` and ``possible_owners`` are required.
        sender (User): User object
    """
    current = kwargs['current']
    owners = kwargs['possible_owners']
    # Prefer a workflow-supplied invite message; fall back to the default.
    # Note: pop() consumes the custom message so it is only used once.
    if 'lane_change_invite' in current.task_data:
        msg_context = current.task_data.pop('lane_change_invite')
    else:
        msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG
    wfi = WFCache(current).get_instance()
    # Deletion of used passive task invitation which belongs to previous lane.
    TaskInvitation.objects.filter(instance=wfi, role=current.role, wf_name=wfi.wf.name).delete()
    today = datetime.today()
    # Create one invitation per possible owner of the next lane.
    for recipient in owners:
        # NOTE(review): progress=30 and the 15-day deadline look like
        # fixed policy values — confirm before changing.
        inv = TaskInvitation(
            instance=wfi,
            role=recipient,
            wf_name=wfi.wf.name,
            progress=30,
            start_date=today,
            finish_date=today + timedelta(15)
        )
        inv.title = current.task_data.get('INVITATION_TITLE') or wfi.wf.title
        inv.save()
        # try to send notification, if it fails go on
        try:
            recipient.send_notification(title=msg_context['title'],
                                        message="%s %s" % (wfi.wf.title, msg_context['body']),
                                        typ=1,  # info
                                        url='',
                                        sender=sender
                                        )
        except:  # todo: specify which exception
            pass
|
[
"def",
"send_message_for_lane_change",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"current",
"=",
"kwargs",
"[",
"'current'",
"]",
"owners",
"=",
"kwargs",
"[",
"'possible_owners'",
"]",
"if",
"'lane_change_invite'",
"in",
"current",
".",
"task_data",
":",
"msg_context",
"=",
"current",
".",
"task_data",
".",
"pop",
"(",
"'lane_change_invite'",
")",
"else",
":",
"msg_context",
"=",
"DEFAULT_LANE_CHANGE_INVITE_MSG",
"wfi",
"=",
"WFCache",
"(",
"current",
")",
".",
"get_instance",
"(",
")",
"# Deletion of used passive task invitation which belongs to previous lane.",
"TaskInvitation",
".",
"objects",
".",
"filter",
"(",
"instance",
"=",
"wfi",
",",
"role",
"=",
"current",
".",
"role",
",",
"wf_name",
"=",
"wfi",
".",
"wf",
".",
"name",
")",
".",
"delete",
"(",
")",
"today",
"=",
"datetime",
".",
"today",
"(",
")",
"for",
"recipient",
"in",
"owners",
":",
"inv",
"=",
"TaskInvitation",
"(",
"instance",
"=",
"wfi",
",",
"role",
"=",
"recipient",
",",
"wf_name",
"=",
"wfi",
".",
"wf",
".",
"name",
",",
"progress",
"=",
"30",
",",
"start_date",
"=",
"today",
",",
"finish_date",
"=",
"today",
"+",
"timedelta",
"(",
"15",
")",
")",
"inv",
".",
"title",
"=",
"current",
".",
"task_data",
".",
"get",
"(",
"'INVITATION_TITLE'",
")",
"or",
"wfi",
".",
"wf",
".",
"title",
"inv",
".",
"save",
"(",
")",
"# try to send notification, if it fails go on",
"try",
":",
"recipient",
".",
"send_notification",
"(",
"title",
"=",
"msg_context",
"[",
"'title'",
"]",
",",
"message",
"=",
"\"%s %s\"",
"%",
"(",
"wfi",
".",
"wf",
".",
"title",
",",
"msg_context",
"[",
"'body'",
"]",
")",
",",
"typ",
"=",
"1",
",",
"# info",
"url",
"=",
"''",
",",
"sender",
"=",
"sender",
")",
"except",
":",
"# todo: specify which exception",
"pass"
] | 34.822222
| 20.6
|
def show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id(self, **kwargs):
    """Build and dispatch the show-vcs XML request for a node rbridge-id.

    Keyword Args:
        node_rbridge_id (str): text for the node-rbridge-id element
            (required; raises KeyError when missing).
        callback (callable): invoked with the assembled XML root;
            defaults to ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element tree.
    """
    # The auto-generated original created an unused ET.Element("config")
    # that was immediately overwritten; the tree rooted at "show_vcs" is
    # what actually gets dispatched, so the dead allocation is removed.
    config = ET.Element("show_vcs")
    output = ET.SubElement(config, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_rbridge_id = ET.SubElement(vcs_node_info, "node-rbridge-id")
    node_rbridge_id.text = kwargs.pop('node_rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
[
"def",
"show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"show_vcs",
"=",
"ET",
".",
"Element",
"(",
"\"show_vcs\"",
")",
"config",
"=",
"show_vcs",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"show_vcs",
",",
"\"output\"",
")",
"vcs_nodes",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"vcs-nodes\"",
")",
"vcs_node_info",
"=",
"ET",
".",
"SubElement",
"(",
"vcs_nodes",
",",
"\"vcs-node-info\"",
")",
"node_rbridge_id",
"=",
"ET",
".",
"SubElement",
"(",
"vcs_node_info",
",",
"\"node-rbridge-id\"",
")",
"node_rbridge_id",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'node_rbridge_id'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 43.571429
| 15.928571
|
def _update_hosting_device_exclusivity(self, context, hosting_device,
                                       tenant_id):
    """Make <hosting device> bound or unbound to <tenant_id>.
    If <tenant_id> is None the device is unbound, otherwise it gets bound
    to that <tenant_id>
    """
    # Update the device and all of its slot allocations atomically in
    # one (sub)transaction so the binding never applies to only part of
    # the device's state.
    with context.session.begin(subtransactions=True):
        hosting_device['tenant_bound'] = tenant_id
        context.session.add(hosting_device)
        # Mirror the new binding onto every slot allocation that
        # references this hosting device.
        for item in (context.session.query(hd_models.SlotAllocation).
                     filter_by(hosting_device_id=hosting_device['id'])):
            item['tenant_bound'] = tenant_id
            context.session.add(item)
|
[
"def",
"_update_hosting_device_exclusivity",
"(",
"self",
",",
"context",
",",
"hosting_device",
",",
"tenant_id",
")",
":",
"with",
"context",
".",
"session",
".",
"begin",
"(",
"subtransactions",
"=",
"True",
")",
":",
"hosting_device",
"[",
"'tenant_bound'",
"]",
"=",
"tenant_id",
"context",
".",
"session",
".",
"add",
"(",
"hosting_device",
")",
"for",
"item",
"in",
"(",
"context",
".",
"session",
".",
"query",
"(",
"hd_models",
".",
"SlotAllocation",
")",
".",
"filter_by",
"(",
"hosting_device_id",
"=",
"hosting_device",
"[",
"'id'",
"]",
")",
")",
":",
"item",
"[",
"'tenant_bound'",
"]",
"=",
"tenant_id",
"context",
".",
"session",
".",
"add",
"(",
"item",
")"
] | 49.928571
| 17.785714
|
def _check_consider_merging_isinstance(self, node):
"""Check isinstance calls which can be merged together."""
if node.op != "or":
return
first_args = self._duplicated_isinstance_types(node)
for duplicated_name, class_names in first_args.items():
names = sorted(name for name in class_names)
self.add_message(
"consider-merging-isinstance",
node=node,
args=(duplicated_name, ", ".join(names)),
)
|
[
"def",
"_check_consider_merging_isinstance",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"op",
"!=",
"\"or\"",
":",
"return",
"first_args",
"=",
"self",
".",
"_duplicated_isinstance_types",
"(",
"node",
")",
"for",
"duplicated_name",
",",
"class_names",
"in",
"first_args",
".",
"items",
"(",
")",
":",
"names",
"=",
"sorted",
"(",
"name",
"for",
"name",
"in",
"class_names",
")",
"self",
".",
"add_message",
"(",
"\"consider-merging-isinstance\"",
",",
"node",
"=",
"node",
",",
"args",
"=",
"(",
"duplicated_name",
",",
"\", \"",
".",
"join",
"(",
"names",
")",
")",
",",
")"
] | 39.384615
| 16.923077
|
def get_consensus_module(module_name):
    """Returns a consensus module by name.
    Args:
        module_name (str): The name of the module to load; 'genesis'
            and 'devmode' are short aliases, anything else is treated
            as a full module path.
    Returns:
        module: The consensus module.
    Raises:
        UnknownConsensusModuleError: Raised if the given module_name does
            not correspond to a consensus implementation.
    """
    aliases = {
        'genesis': (
            'sawtooth_validator.journal.consensus.genesis.'
            'genesis_consensus'
        ),
        'devmode': (
            'sawtooth_validator.journal.consensus.dev_mode.'
            'dev_mode_consensus'
        ),
    }
    module_package = aliases.get(module_name, module_name)
    try:
        return importlib.import_module(module_package)
    except ImportError:
        raise UnknownConsensusModuleError(
            'Consensus module "{}" does not exist.'.format(module_name))
|
[
"def",
"get_consensus_module",
"(",
"module_name",
")",
":",
"module_package",
"=",
"module_name",
"if",
"module_name",
"==",
"'genesis'",
":",
"module_package",
"=",
"(",
"'sawtooth_validator.journal.consensus.genesis.'",
"'genesis_consensus'",
")",
"elif",
"module_name",
"==",
"'devmode'",
":",
"module_package",
"=",
"(",
"'sawtooth_validator.journal.consensus.dev_mode.'",
"'dev_mode_consensus'",
")",
"try",
":",
"return",
"importlib",
".",
"import_module",
"(",
"module_package",
")",
"except",
"ImportError",
":",
"raise",
"UnknownConsensusModuleError",
"(",
"'Consensus module \"{}\" does not exist.'",
".",
"format",
"(",
"module_name",
")",
")"
] | 33.1
| 18.666667
|
def selected_display_item(self) -> typing.Optional[DisplayItem.DisplayItem]:
    """Return the selected display item, if any.

    Preference order: the display item focused in the data panel,
    then the display item of the currently selected display panel.
    Returns None when neither is available.
    """
    focused = self.focused_display_item
    if focused:
        return focused
    panel = self.selected_display_panel
    return panel.display_item if panel else None
|
[
"def",
"selected_display_item",
"(",
"self",
")",
"->",
"typing",
".",
"Optional",
"[",
"DisplayItem",
".",
"DisplayItem",
"]",
":",
"# first check for the [focused] data browser",
"display_item",
"=",
"self",
".",
"focused_display_item",
"if",
"not",
"display_item",
":",
"selected_display_panel",
"=",
"self",
".",
"selected_display_panel",
"display_item",
"=",
"selected_display_panel",
".",
"display_item",
"if",
"selected_display_panel",
"else",
"None",
"return",
"display_item"
] | 50.636364
| 24.727273
|
def runSearchCallSets(self, request):
    """Dispatch a SearchCallSetsRequest through the generic search runner."""
    return self.runSearchRequest(
        request,
        protocol.SearchCallSetsRequest,
        protocol.SearchCallSetsResponse,
        self.callSetsGenerator,
    )
|
[
"def",
"runSearchCallSets",
"(",
"self",
",",
"request",
")",
":",
"return",
"self",
".",
"runSearchRequest",
"(",
"request",
",",
"protocol",
".",
"SearchCallSetsRequest",
",",
"protocol",
".",
"SearchCallSetsResponse",
",",
"self",
".",
"callSetsGenerator",
")"
] | 34.5
| 4.5
|
def cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for real general matrix.

    Thin wrapper over cuBLAS ``cublasSgemv_v2`` (single precision);
    per the cuBLAS docs it computes ``y = alpha*op(A)*x + beta*y``.
    `A`, `x` and `y` are device pointers passed as integers; `lda`,
    `incx` and `incy` are the leading dimension / vector strides.
    """
    # The v2 API takes the scalar factors by reference, so wrap them as
    # C floats; pointer arguments are coerced with int() for ctypes.
    status = _libcublas.cublasSgemv_v2(handle,
                                       _CUBLAS_OP[trans], m, n,
                                       ctypes.byref(ctypes.c_float(alpha)), int(A), lda,
                                       int(x), incx,
                                       ctypes.byref(ctypes.c_float(beta)), int(y), incy)
    # Raise on any non-success cuBLAS status code.
    cublasCheckStatus(status)
|
[
"def",
"cublasSgemv",
"(",
"handle",
",",
"trans",
",",
"m",
",",
"n",
",",
"alpha",
",",
"A",
",",
"lda",
",",
"x",
",",
"incx",
",",
"beta",
",",
"y",
",",
"incy",
")",
":",
"status",
"=",
"_libcublas",
".",
"cublasSgemv_v2",
"(",
"handle",
",",
"_CUBLAS_OP",
"[",
"trans",
"]",
",",
"m",
",",
"n",
",",
"ctypes",
".",
"byref",
"(",
"ctypes",
".",
"c_float",
"(",
"alpha",
")",
")",
",",
"int",
"(",
"A",
")",
",",
"lda",
",",
"int",
"(",
"x",
")",
",",
"incx",
",",
"ctypes",
".",
"byref",
"(",
"ctypes",
".",
"c_float",
"(",
"beta",
")",
")",
",",
"int",
"(",
"y",
")",
",",
"incy",
")",
"cublasCheckStatus",
"(",
"status",
")"
] | 42.25
| 22.916667
|
def _cleanup(self):
    """Delete session files older than ``self._duration`` seconds.

    Age is taken from the newer of a file's ctime and mtime so a
    recently touched file is never considered stale.
    """
    for name in os.listdir(self._storage_dir):
        full_path = path.join(self._storage_dir, name)
        info = os.stat(full_path)
        newest = max(info.st_ctime, info.st_mtime)
        if newest + self._duration < time.time():
            LOGGER.debug('Removing stale file: %s', full_path)
            os.unlink(full_path)
|
[
"def",
"_cleanup",
"(",
"self",
")",
":",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"_storage_dir",
")",
":",
"file_path",
"=",
"path",
".",
"join",
"(",
"self",
".",
"_storage_dir",
",",
"filename",
")",
"file_stat",
"=",
"os",
".",
"stat",
"(",
"file_path",
")",
"evaluate",
"=",
"max",
"(",
"file_stat",
".",
"st_ctime",
",",
"file_stat",
".",
"st_mtime",
")",
"if",
"evaluate",
"+",
"self",
".",
"_duration",
"<",
"time",
".",
"time",
"(",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Removing stale file: %s'",
",",
"file_path",
")",
"os",
".",
"unlink",
"(",
"file_path",
")"
] | 52.333333
| 14.444444
|
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2015-06-15 - Written - Bovy (IAS)
"""
return -3.*R*z*(R**2.+z**2.+self._b2)**-2.5
|
[
"def",
"_Rzderiv",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"return",
"-",
"3.",
"*",
"R",
"*",
"z",
"*",
"(",
"R",
"**",
"2.",
"+",
"z",
"**",
"2.",
"+",
"self",
".",
"_b2",
")",
"**",
"-",
"2.5"
] | 26.529412
| 15.470588
|
def skill_create(self, data, **kwargs):
    """Create a chat skill via the REST API.

    https://developer.zendesk.com/rest_api/docs/chat/skills#create-skill
    """
    return self.call("/api/v2/skills", method="POST", data=data, **kwargs)
|
[
"def",
"skill_create",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/skills\"",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"POST\"",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] | 55.5
| 18.5
|
def _ast_optree_node_to_code(self, node, **kwargs):
"""Convert an abstract syntax operator tree to python source code."""
opnode = node.opnode
if opnode is None:
return self._ast_to_code(node.operands[0])
else:
operator = opnode.operator
if operator is OP_ALTERNATE:
return self._ast_op_alternate_to_code(node, **kwargs)
elif operator is OP_WS_CONCAT:
kwargs["ignore_whitespace"] = False
return self._ast_op_concat_to_code(node, **kwargs)
elif operator is OP_CONCAT:
kwargs["ignore_whitespace"] = True
return self._ast_op_concat_to_code(node, **kwargs)
elif operator is OP_EXCLUDE:
return self._ast_op_exclude_to_code(node, **kwargs)
elif operator is OP_MULTIPLY:
return self._ast_op_multiply_to_code(node, **kwargs)
elif operator is OP_REPEAT:
return self._ast_op_repeat_to_code(node, **kwargs)
else:
raise Exception("Unhandled optree node: {0}".format(node))
|
[
"def",
"_ast_optree_node_to_code",
"(",
"self",
",",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"opnode",
"=",
"node",
".",
"opnode",
"if",
"opnode",
"is",
"None",
":",
"return",
"self",
".",
"_ast_to_code",
"(",
"node",
".",
"operands",
"[",
"0",
"]",
")",
"else",
":",
"operator",
"=",
"opnode",
".",
"operator",
"if",
"operator",
"is",
"OP_ALTERNATE",
":",
"return",
"self",
".",
"_ast_op_alternate_to_code",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
"elif",
"operator",
"is",
"OP_WS_CONCAT",
":",
"kwargs",
"[",
"\"ignore_whitespace\"",
"]",
"=",
"False",
"return",
"self",
".",
"_ast_op_concat_to_code",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
"elif",
"operator",
"is",
"OP_CONCAT",
":",
"kwargs",
"[",
"\"ignore_whitespace\"",
"]",
"=",
"True",
"return",
"self",
".",
"_ast_op_concat_to_code",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
"elif",
"operator",
"is",
"OP_EXCLUDE",
":",
"return",
"self",
".",
"_ast_op_exclude_to_code",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
"elif",
"operator",
"is",
"OP_MULTIPLY",
":",
"return",
"self",
".",
"_ast_op_multiply_to_code",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
"elif",
"operator",
"is",
"OP_REPEAT",
":",
"return",
"self",
".",
"_ast_op_repeat_to_code",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unhandled optree node: {0}\"",
".",
"format",
"(",
"node",
")",
")"
] | 42.608696
| 13.086957
|
def transform_login(config):
    """
    Normalize raw smcrc key/value pairs into session.login() settings.

    Called from load_from_file, and usable for credentials collected
    from other sources as well.  Keys consumed here are popped from
    `config`; anything left over is forwarded under 'kwargs'.

    :param dict config: valid key/value pairs from an smcrc source
    :return: dict of settings that can be sent into session.login
    """
    if config.pop('smc_ssl', None):
        scheme = 'https'
        cert_file = config.pop('ssl_cert_file', None)
        # Only verify when verification is requested AND a cert path
        # was supplied; otherwise fall back to no verification.
        if config.pop('verify_ssl', None) and cert_file:
            verify = cert_file
        else:
            verify = False
    else:
        scheme = 'http'
        # SSL-related keys are meaningless over plain http; discard.
        config.pop('verify_ssl', None)
        config.pop('ssl_cert_file', None)
        verify = False

    timeout = config.pop('timeout', None)
    if timeout:
        try:
            timeout = int(timeout)
        except ValueError:
            timeout = None

    api_version = config.pop('api_version', None)
    if api_version:
        try:
            float(api_version)  # validate only; keep the string form
        except ValueError:
            api_version = None

    transformed = {
        'url': '{}://{}:{}'.format(
            scheme,
            config.pop('smc_address', None),
            config.pop('smc_port', None)),
        'api_key': config.pop('smc_apikey', None),
        'api_version': api_version,
        'verify': verify,
        'timeout': timeout,
        'domain': config.pop('domain', None),
    }
    if config:
        transformed['kwargs'] = config  # Any remaining args
    return transformed
|
[
"def",
"transform_login",
"(",
"config",
")",
":",
"verify",
"=",
"True",
"if",
"config",
".",
"pop",
"(",
"'smc_ssl'",
",",
"None",
")",
":",
"scheme",
"=",
"'https'",
"verify",
"=",
"config",
".",
"pop",
"(",
"'ssl_cert_file'",
",",
"None",
")",
"if",
"config",
".",
"pop",
"(",
"'verify_ssl'",
",",
"None",
")",
":",
"# Get cert path to verify",
"if",
"not",
"verify",
":",
"# Setting omitted or already False",
"verify",
"=",
"False",
"else",
":",
"verify",
"=",
"False",
"else",
":",
"scheme",
"=",
"'http'",
"config",
".",
"pop",
"(",
"'verify_ssl'",
",",
"None",
")",
"config",
".",
"pop",
"(",
"'ssl_cert_file'",
",",
"None",
")",
"verify",
"=",
"False",
"transformed",
"=",
"{",
"}",
"url",
"=",
"'{}://{}:{}'",
".",
"format",
"(",
"scheme",
",",
"config",
".",
"pop",
"(",
"'smc_address'",
",",
"None",
")",
",",
"config",
".",
"pop",
"(",
"'smc_port'",
",",
"None",
")",
")",
"timeout",
"=",
"config",
".",
"pop",
"(",
"'timeout'",
",",
"None",
")",
"if",
"timeout",
":",
"try",
":",
"timeout",
"=",
"int",
"(",
"timeout",
")",
"except",
"ValueError",
":",
"timeout",
"=",
"None",
"api_version",
"=",
"config",
".",
"pop",
"(",
"'api_version'",
",",
"None",
")",
"if",
"api_version",
":",
"try",
":",
"float",
"(",
"api_version",
")",
"except",
"ValueError",
":",
"api_version",
"=",
"None",
"transformed",
".",
"update",
"(",
"url",
"=",
"url",
",",
"api_key",
"=",
"config",
".",
"pop",
"(",
"'smc_apikey'",
",",
"None",
")",
",",
"api_version",
"=",
"api_version",
",",
"verify",
"=",
"verify",
",",
"timeout",
"=",
"timeout",
",",
"domain",
"=",
"config",
".",
"pop",
"(",
"'domain'",
",",
"None",
")",
")",
"if",
"config",
":",
"transformed",
".",
"update",
"(",
"kwargs",
"=",
"config",
")",
"# Any remaining args",
"return",
"transformed"
] | 26.932203
| 17.881356
|
def get_tenant(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Retrieves specified tenant.
    Example:
        >>> from google.cloud import talent_v4beta1
        >>>
        >>> client = talent_v4beta1.TenantServiceClient()
        >>>
        >>> name = client.tenant_path('[PROJECT]', '[TENANT]')
        >>>
        >>> response = client.get_tenant(name)
    Args:
        name (str): Required.
            The resource name of the tenant to be retrieved.
            The format is "projects/{project\_id}/tenants/{tenant\_id}", for
            example, "projects/api-test-project/tenants/foo".
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.
    Returns:
        A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    # The wrapped callable is cached in _inner_api_calls so wrapping
    # only happens on the first invocation.
    if "get_tenant" not in self._inner_api_calls:
        self._inner_api_calls[
            "get_tenant"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_tenant,
            default_retry=self._method_configs["GetTenant"].retry,
            default_timeout=self._method_configs["GetTenant"].timeout,
            client_info=self._client_info,
        )
    # Build the protobuf request and invoke the cached wrapped call.
    request = tenant_service_pb2.GetTenantRequest(name=name)
    return self._inner_api_calls["get_tenant"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
|
[
"def",
"get_tenant",
"(",
"self",
",",
"name",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"get_tenant\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"get_tenant\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"get_tenant",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"GetTenant\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"GetTenant\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"tenant_service_pb2",
".",
"GetTenantRequest",
"(",
"name",
"=",
"name",
")",
"return",
"self",
".",
"_inner_api_calls",
"[",
"\"get_tenant\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
] | 40.2
| 24.066667
|
def genhash(self, package, code):
    """Return a hex checksum identifying this object's state plus *code*.

    The hash covers the compiler version string, this object's
    ``__reduce__`` arguments, the package flag, and the code itself.
    """
    parts = (VERSION_STR,) + self.__reduce__()[1] + (package, code)
    joined = hash_sep.join(str(part) for part in parts)
    return hex(checksum(joined.encode(default_encoding)))
|
[
"def",
"genhash",
"(",
"self",
",",
"package",
",",
"code",
")",
":",
"return",
"hex",
"(",
"checksum",
"(",
"hash_sep",
".",
"join",
"(",
"str",
"(",
"item",
")",
"for",
"item",
"in",
"(",
"VERSION_STR",
",",
")",
"+",
"self",
".",
"__reduce__",
"(",
")",
"[",
"1",
"]",
"+",
"(",
"package",
",",
"code",
")",
")",
".",
"encode",
"(",
"default_encoding",
")",
",",
")",
")"
] | 33.222222
| 9.444444
|
def _depaginate_all(self, url):
"""GETs the url provided and traverses the 'next' url that's
returned while storing the data in a list. Returns a single list of all
items.
"""
items = []
for x in self._depagination_generator(url):
items += x
return items
|
[
"def",
"_depaginate_all",
"(",
"self",
",",
"url",
")",
":",
"items",
"=",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"_depagination_generator",
"(",
"url",
")",
":",
"items",
"+=",
"x",
"return",
"items"
] | 34.888889
| 16.111111
|
def artist_create(self, name, other_names_comma=None, group_name=None,
                  url_string=None, body=None):
    """Create an artist (Requires login) (UNTESTED).

    Parameters:
        name (str): Artist name.
        other_names_comma (str): Comma-delimited alternative names for
            this artist.
        group_name (str): The name of the group this artist belongs to.
        url_string (str): Whitespace or newline delimited URLs
            associated with this artist.
        body (str): DText that will be used to create a wiki entry at
            the same time.
    """
    fields = {
        'name': name,
        'other_names_comma': other_names_comma,
        'group_name': group_name,
        'url_string': url_string,
        'body': body,
    }
    # The API expects Rails-style nested parameter names.
    params = {'artist[{0}]'.format(key): val for key, val in fields.items()}
    return self.get('artists.json', params, method='POST', auth=True)
|
[
"def",
"artist_create",
"(",
"self",
",",
"name",
",",
"other_names_comma",
"=",
"None",
",",
"group_name",
"=",
"None",
",",
"url_string",
"=",
"None",
",",
"body",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'artist[name]'",
":",
"name",
",",
"'artist[other_names_comma]'",
":",
"other_names_comma",
",",
"'artist[group_name]'",
":",
"group_name",
",",
"'artist[url_string]'",
":",
"url_string",
",",
"'artist[body]'",
":",
"body",
",",
"}",
"return",
"self",
".",
"get",
"(",
"'artists.json'",
",",
"params",
",",
"method",
"=",
"'POST'",
",",
"auth",
"=",
"True",
")"
] | 45.954545
| 19.363636
|
def clean_new(self, value):
    """Validate `value` through the schema and build a new object.

    The raw mapping is cleaned by ``schema_class`` and the cleaned
    result is splatted into ``object_class`` as keyword arguments.
    """
    schema = self.schema_class(value)
    cleaned = schema.full_clean()
    return self.object_class(**cleaned)
|
[
"def",
"clean_new",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"self",
".",
"schema_class",
"(",
"value",
")",
".",
"full_clean",
"(",
")",
"return",
"self",
".",
"object_class",
"(",
"*",
"*",
"value",
")"
] | 46.5
| 6.75
|
def _getMatchingRowsWithRetries(self, tableInfo, fieldsToMatch,
                                selectFieldNames, maxRows=None):
    """ Like _getMatchingRowsNoRetries(), but with retries on transient
    MySQL failures.

    Acquires a pooled connection for the duration of the query and
    delegates the actual work to the no-retries variant.
    """
    with ConnectionFactory.get() as dbConn:
        return self._getMatchingRowsNoRetries(
            tableInfo, dbConn, fieldsToMatch, selectFieldNames, maxRows)
|
[
"def",
"_getMatchingRowsWithRetries",
"(",
"self",
",",
"tableInfo",
",",
"fieldsToMatch",
",",
"selectFieldNames",
",",
"maxRows",
"=",
"None",
")",
":",
"with",
"ConnectionFactory",
".",
"get",
"(",
")",
"as",
"conn",
":",
"return",
"self",
".",
"_getMatchingRowsNoRetries",
"(",
"tableInfo",
",",
"conn",
",",
"fieldsToMatch",
",",
"selectFieldNames",
",",
"maxRows",
")"
] | 51.375
| 17.875
|
def get_plugin_folders():
    """Get linkchecker plugin folders. Default is ~/.linkchecker/plugins/."""
    found = []
    default_dir = normpath("~/.linkchecker/plugins")
    # Create the default folder on first use, unless running in portable mode.
    if not os.path.exists(default_dir) and not Portable:
        try:
            make_userdir(default_dir)
        except Exception as errmsg:
            # Creation failure is non-fatal: just warn and continue.
            msg = _("could not create plugin directory %(dirname)r: %(errmsg)r")
            args = dict(dirname=default_dir, errmsg=errmsg)
            log.warn(LOG_CHECK, msg % args)
    if os.path.exists(default_dir):
        found.append(default_dir)
    return found
|
[
"def",
"get_plugin_folders",
"(",
")",
":",
"folders",
"=",
"[",
"]",
"defaultfolder",
"=",
"normpath",
"(",
"\"~/.linkchecker/plugins\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"defaultfolder",
")",
"and",
"not",
"Portable",
":",
"try",
":",
"make_userdir",
"(",
"defaultfolder",
")",
"except",
"Exception",
"as",
"errmsg",
":",
"msg",
"=",
"_",
"(",
"\"could not create plugin directory %(dirname)r: %(errmsg)r\"",
")",
"args",
"=",
"dict",
"(",
"dirname",
"=",
"defaultfolder",
",",
"errmsg",
"=",
"errmsg",
")",
"log",
".",
"warn",
"(",
"LOG_CHECK",
",",
"msg",
"%",
"args",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"defaultfolder",
")",
":",
"folders",
".",
"append",
"(",
"defaultfolder",
")",
"return",
"folders"
] | 42.285714
| 14.071429
|
def parseArgs():  # pragma: no cover
    """Parses the command line options and arguments.
    :returns: A :py:class:`argparse.Namespace` object created by the
              :py:mod:`argparse` module. It contains the values of the
              different options.
    ===================== ====== =========================================
    Options               Type   Description
    ===================== ====== =========================================
    ``--file``            string The MBS file.
    ``--population-file`` string A file containing population information.
    ``--format``          string The output file format.
    ``--title``           string The title of the MDS plot.
    ``--xlabel``          string The label of the X axis.
    ``--ylabel``          string The label of the Y axis.
    ``--out``             string The prefix of the output files.
    ===================== ====== =========================================
    .. note::
        No option check is done here (except for the one automatically done by
        argparse). Those need to be done elsewhere (see :py:func:`checkArgs`).
    """
    # The INPUT files
    group = parser.add_argument_group("Input File")
    group.add_argument("--file", type=str, metavar="FILE", required=True,
                       help="The MBS file.")
    # Fix: register on the "Input File" group (was parser.add_argument) and
    # use metavar FILE (was a copy-pasted FORMAT). Parsing behavior is
    # unchanged; only the --help output is corrected.
    group.add_argument("--population-file", type=str, metavar="FILE",
                       required=True,
                       help="A file containing population information. "
                            "There must be three columns: famID, indID "
                            "and population information.")
    # The graphical options
    group = parser.add_argument_group("Graphical Options")
    addCustomOptions(group)
    # The OUTPUT files
    group = parser.add_argument_group("Output File")
    group.add_argument("--out", type=str, metavar="FILE",
                       default="mds",
                       help="The prefix of the output files. [default: "
                            "%(default)s]")
    args = parser.parse_args()
    return args
|
[
"def",
"parseArgs",
"(",
")",
":",
"# pragma: no cover",
"# The INPUT files",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"Input File\"",
")",
"group",
".",
"add_argument",
"(",
"\"--file\"",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"\"FILE\"",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"The MBS file.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--population-file\"",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"\"FORMAT\"",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"A file containing population information. \"",
"\"There must be three columns: famID, indID \"",
"\"and population information.\"",
")",
"# The graphical options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"Graphical Options\"",
")",
"addCustomOptions",
"(",
"group",
")",
"# The OUTPUT files",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"Output File\"",
")",
"group",
".",
"add_argument",
"(",
"\"--out\"",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"\"FILE\"",
",",
"default",
"=",
"\"mds\"",
",",
"help",
"=",
"\"The prefix of the output files. [default: \"",
"\"%(default)s]\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
] | 43.166667
| 22.833333
|
def density(self, R, Rs, rho0):
    """
    Three-dimensional NFW profile.

    :param R: radius of interest
    :type R: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :return: rho(R) density
    """
    x = R / Rs
    return rho0 / (x * (1 + x) ** 2)
|
[
"def",
"density",
"(",
"self",
",",
"R",
",",
"Rs",
",",
"rho0",
")",
":",
"return",
"rho0",
"/",
"(",
"R",
"/",
"Rs",
"*",
"(",
"1",
"+",
"R",
"/",
"Rs",
")",
"**",
"2",
")"
] | 28.923077
| 10.769231
|
def _follow_leafref(
        self, xpath: "Expr", init: "TerminalNode") -> Optional["DataNode"]:
    """Return the data node referred to by a leafref path.

    Args:
        xpath: XPath expression compiled from a leafref path.
        init: initial context node; its namespace is used for unprefixed
            names in the path.

    Returns:
        The data node the leafref points at, or ``None`` when the path
        cannot be resolved from this node.
    """
    if isinstance(xpath, LocationPath):
        # Resolve the left part first, then continue the walk from there.
        lft = self._follow_leafref(xpath.left, init)
        if lft is None:
            return None
        return lft._follow_leafref(xpath.right, init)
    elif isinstance(xpath, Step):
        if xpath.axis == Axis.parent:
            return self.data_parent()
        elif xpath.axis == Axis.child:
            if isinstance(self, InternalNode) and xpath.qname:
                # An unprefixed name inherits the namespace of the
                # initial context node.
                qname = (xpath.qname if xpath.qname[1]
                         else (xpath.qname[0], init.ns))
                return self.get_data_child(*qname)
    elif isinstance(xpath, Root):
        return self.schema_root()
    # Unsupported expression kinds (and unmatched steps) resolve to nothing.
    return None
|
[
"def",
"_follow_leafref",
"(",
"self",
",",
"xpath",
":",
"\"Expr\"",
",",
"init",
":",
"\"TerminalNode\"",
")",
"->",
"Optional",
"[",
"\"DataNode\"",
"]",
":",
"if",
"isinstance",
"(",
"xpath",
",",
"LocationPath",
")",
":",
"lft",
"=",
"self",
".",
"_follow_leafref",
"(",
"xpath",
".",
"left",
",",
"init",
")",
"if",
"lft",
"is",
"None",
":",
"return",
"None",
"return",
"lft",
".",
"_follow_leafref",
"(",
"xpath",
".",
"right",
",",
"init",
")",
"elif",
"isinstance",
"(",
"xpath",
",",
"Step",
")",
":",
"if",
"xpath",
".",
"axis",
"==",
"Axis",
".",
"parent",
":",
"return",
"self",
".",
"data_parent",
"(",
")",
"elif",
"xpath",
".",
"axis",
"==",
"Axis",
".",
"child",
":",
"if",
"isinstance",
"(",
"self",
",",
"InternalNode",
")",
"and",
"xpath",
".",
"qname",
":",
"qname",
"=",
"(",
"xpath",
".",
"qname",
"if",
"xpath",
".",
"qname",
"[",
"1",
"]",
"else",
"(",
"xpath",
".",
"qname",
"[",
"0",
"]",
",",
"init",
".",
"ns",
")",
")",
"return",
"self",
".",
"get_data_child",
"(",
"*",
"qname",
")",
"elif",
"isinstance",
"(",
"xpath",
",",
"Root",
")",
":",
"return",
"self",
".",
"schema_root",
"(",
")",
"return",
"None"
] | 41.25
| 13.625
|
def get_models(app_labels):
    """
    Get a list of models for the given app labels, with some exceptions.

    :param app_labels: iterable of app labels; an entry may also be
        "app_label.ModelName" to select a single model. Empty/falsy means
        "all installed apps".
    :return: list of model classes, excluding EXCLUDED_MODELS.

    TODO: If a required model is referenced, it should also be included.
    Or at least discovered with a get_or_create() call.
    """
    # These models are not to be output, e.g. because they can be generated automatically
    # TODO: This should be "appname.modelname" string
    EXCLUDED_MODELS = (ContentType, )

    models = []
    # If no app labels are given, return all
    if not app_labels:
        for app in apps.get_app_configs():
            # ``app`` is already the AppConfig — no need for the redundant
            # apps.get_app_config(app.label) round-trip of the old code.
            models += [m for m in app.get_models()
                       if m not in EXCLUDED_MODELS]
        return models
    # Get all relevant apps
    for app_label in app_labels:
        # If a specific model is mentioned, get only that model
        if "." in app_label:
            app_label, model_name = app_label.split(".", 1)
            models.append(apps.get_model(app_label, model_name))
        # Get all models for a given app
        else:
            models += [m for m in apps.get_app_config(app_label).get_models()
                       if m not in EXCLUDED_MODELS]
    return models
|
[
"def",
"get_models",
"(",
"app_labels",
")",
":",
"# These models are not to be output, e.g. because they can be generated automatically",
"# TODO: This should be \"appname.modelname\" string",
"EXCLUDED_MODELS",
"=",
"(",
"ContentType",
",",
")",
"models",
"=",
"[",
"]",
"# If no app labels are given, return all",
"if",
"not",
"app_labels",
":",
"for",
"app",
"in",
"apps",
".",
"get_app_configs",
"(",
")",
":",
"models",
"+=",
"[",
"m",
"for",
"m",
"in",
"apps",
".",
"get_app_config",
"(",
"app",
".",
"label",
")",
".",
"get_models",
"(",
")",
"if",
"m",
"not",
"in",
"EXCLUDED_MODELS",
"]",
"return",
"models",
"# Get all relevant apps",
"for",
"app_label",
"in",
"app_labels",
":",
"# If a specific model is mentioned, get only that model",
"if",
"\".\"",
"in",
"app_label",
":",
"app_label",
",",
"model_name",
"=",
"app_label",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
"models",
".",
"append",
"(",
"apps",
".",
"get_model",
"(",
"app_label",
",",
"model_name",
")",
")",
"# Get all models for a given app",
"else",
":",
"models",
"+=",
"[",
"m",
"for",
"m",
"in",
"apps",
".",
"get_app_config",
"(",
"app_label",
")",
".",
"get_models",
"(",
")",
"if",
"m",
"not",
"in",
"EXCLUDED_MODELS",
"]",
"return",
"models"
] | 36.3125
| 20.9375
|
def get_uuid_list(dbconn):
    """
    Get the set of UUIDs stored across all tables in dbconn.

    :param dbconn: master database connection
    :return: set of uuids found in the database

    NOTE: the table name is interpolated into the SQL statement. Names come
    from get_table_list(dbconn), so they are trusted here, but this pattern
    would be unsafe for externally supplied identifiers.
    """
    cur = dbconn.cursor()
    uuids = set()
    for table in get_table_list(dbconn):
        cur.execute("SELECT (UUID) FROM '{table}'".format(table=table))
        # The first (only) column of each fetched row is the UUID; updating
        # with an empty iterable is a no-op, so no emptiness guard is needed.
        uuids.update(row[0] for row in cur.fetchall())
    return uuids
|
[
"def",
"get_uuid_list",
"(",
"dbconn",
")",
":",
"cur",
"=",
"dbconn",
".",
"cursor",
"(",
")",
"tables",
"=",
"get_table_list",
"(",
"dbconn",
")",
"uuids",
"=",
"set",
"(",
")",
"for",
"table",
"in",
"tables",
":",
"cur",
".",
"execute",
"(",
"\"SELECT (UUID) FROM '{table}'\"",
".",
"format",
"(",
"table",
"=",
"table",
")",
")",
"uuid",
"=",
"set",
"(",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"cur",
".",
"fetchall",
"(",
")",
"]",
")",
"if",
"uuid",
":",
"uuids",
".",
"update",
"(",
"uuid",
")",
"return",
"uuids"
] | 30.4
| 12.266667
|
def node_to_complex_fault_geometry(node):
    """
    Reads a complex fault geometry node and returns the ordered list of
    edges [top_edge, intermediate_edges..., bottom_edge], each converted
    via linestring_node_to_line(..., with_depth=True).

    :param node: node whose tag contains "complexFaultGeometry"
    :raises ValueError: if the top or bottom fault edge is missing
        (previously this surfaced as an obscure NameError)
    """
    assert "complexFaultGeometry" in node.tag
    top_edge = None
    bottom_edge = None
    intermediate_edges = []
    for subnode in node.nodes:
        if "faultTopEdge" in subnode.tag:
            top_edge = linestring_node_to_line(subnode.nodes[0],
                                               with_depth=True)
        elif "intermediateEdge" in subnode.tag:
            intermediate_edges.append(
                linestring_node_to_line(subnode.nodes[0], with_depth=True))
        elif "faultBottomEdge" in subnode.tag:
            bottom_edge = linestring_node_to_line(subnode.nodes[0],
                                                  with_depth=True)
        # any other subnode is redundant and ignored
    if top_edge is None or bottom_edge is None:
        raise ValueError("complexFaultGeometry requires both a faultTopEdge "
                         "and a faultBottomEdge")
    return [top_edge] + intermediate_edges + [bottom_edge]
|
[
"def",
"node_to_complex_fault_geometry",
"(",
"node",
")",
":",
"assert",
"\"complexFaultGeometry\"",
"in",
"node",
".",
"tag",
"intermediate_edges",
"=",
"[",
"]",
"for",
"subnode",
"in",
"node",
".",
"nodes",
":",
"if",
"\"faultTopEdge\"",
"in",
"subnode",
".",
"tag",
":",
"top_edge",
"=",
"linestring_node_to_line",
"(",
"subnode",
".",
"nodes",
"[",
"0",
"]",
",",
"with_depth",
"=",
"True",
")",
"elif",
"\"intermediateEdge\"",
"in",
"subnode",
".",
"tag",
":",
"int_edge",
"=",
"linestring_node_to_line",
"(",
"subnode",
".",
"nodes",
"[",
"0",
"]",
",",
"with_depth",
"=",
"True",
")",
"intermediate_edges",
".",
"append",
"(",
"int_edge",
")",
"elif",
"\"faultBottomEdge\"",
"in",
"subnode",
".",
"tag",
":",
"bottom_edge",
"=",
"linestring_node_to_line",
"(",
"subnode",
".",
"nodes",
"[",
"0",
"]",
",",
"with_depth",
"=",
"True",
")",
"else",
":",
"# Redundent",
"pass",
"return",
"[",
"top_edge",
"]",
"+",
"intermediate_edges",
"+",
"[",
"bottom_edge",
"]"
] | 42.333333
| 14.142857
|
def get_arg_parse_arguments(self):
    """
    Build the keyword-argument dict for argparse from this element.

    All configuration-file and CLI requirements are declared once on the
    element; this method translates them into options suitable for
    ``argparse.ArgumentParser.add_argument`` so no duplicate declarations
    are needed.
    """
    options = {}
    if self._required:
        # A pre-set value serves as the default; otherwise argparse must
        # enforce the option on the command line.
        if self.value is None:
            options["required"] = True
        else:
            options["default"] = self.value
    options["dest"] = self._name
    if not self.e_type_exclude and self.e_type in (int, float):
        # Let argparse coerce int/float values directly.
        options["type"] = self.e_type
        if self.value is not None:
            options["default"] = self.value
    if self._desc:
        options["help"] = self._desc
    return options
|
[
"def",
"get_arg_parse_arguments",
"(",
"self",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"if",
"self",
".",
"_required",
":",
"if",
"self",
".",
"value",
"is",
"not",
"None",
":",
"ret",
"[",
"\"default\"",
"]",
"=",
"self",
".",
"value",
"else",
":",
"ret",
"[",
"\"required\"",
"]",
"=",
"True",
"ret",
"[",
"\"dest\"",
"]",
"=",
"self",
".",
"_name",
"if",
"not",
"self",
".",
"e_type_exclude",
":",
"if",
"self",
".",
"e_type",
"==",
"int",
"or",
"self",
".",
"e_type",
"==",
"float",
":",
"# Just override argparse.add_argument 'type' parameter for int or float.",
"ret",
"[",
"\"type\"",
"]",
"=",
"self",
".",
"e_type",
"if",
"self",
".",
"value",
"is",
"not",
"None",
":",
"ret",
"[",
"\"default\"",
"]",
"=",
"self",
".",
"value",
"if",
"self",
".",
"_desc",
":",
"ret",
"[",
"\"help\"",
"]",
"=",
"self",
".",
"_desc",
"return",
"ret"
] | 38.2
| 14.2
|
def ToURN(self):
    """Converts a reference into an URN."""
    joined = "/".join(self.path_components)
    ptype = self.path_type
    if ptype in [PathInfo.PathType.OS, PathInfo.PathType.TSK]:
        # Filesystem paths live under fs/<os|tsk>/...
        return rdfvalue.RDFURN(self.client_id).Add("fs").Add(
            ptype.name.lower()).Add(joined)
    if ptype == PathInfo.PathType.REGISTRY:
        return rdfvalue.RDFURN(self.client_id).Add("registry").Add(joined)
    if ptype == PathInfo.PathType.TEMP:
        return rdfvalue.RDFURN(self.client_id).Add("temp").Add(joined)
    raise ValueError("Unsupported path type: %s" % ptype)
|
[
"def",
"ToURN",
"(",
"self",
")",
":",
"if",
"self",
".",
"path_type",
"in",
"[",
"PathInfo",
".",
"PathType",
".",
"OS",
",",
"PathInfo",
".",
"PathType",
".",
"TSK",
"]",
":",
"return",
"rdfvalue",
".",
"RDFURN",
"(",
"self",
".",
"client_id",
")",
".",
"Add",
"(",
"\"fs\"",
")",
".",
"Add",
"(",
"self",
".",
"path_type",
".",
"name",
".",
"lower",
"(",
")",
")",
".",
"Add",
"(",
"\"/\"",
".",
"join",
"(",
"self",
".",
"path_components",
")",
")",
"elif",
"self",
".",
"path_type",
"==",
"PathInfo",
".",
"PathType",
".",
"REGISTRY",
":",
"return",
"rdfvalue",
".",
"RDFURN",
"(",
"self",
".",
"client_id",
")",
".",
"Add",
"(",
"\"registry\"",
")",
".",
"Add",
"(",
"\"/\"",
".",
"join",
"(",
"self",
".",
"path_components",
")",
")",
"elif",
"self",
".",
"path_type",
"==",
"PathInfo",
".",
"PathType",
".",
"TEMP",
":",
"return",
"rdfvalue",
".",
"RDFURN",
"(",
"self",
".",
"client_id",
")",
".",
"Add",
"(",
"\"temp\"",
")",
".",
"Add",
"(",
"\"/\"",
".",
"join",
"(",
"self",
".",
"path_components",
")",
")",
"raise",
"ValueError",
"(",
"\"Unsupported path type: %s\"",
"%",
"self",
".",
"path_type",
")"
] | 45.785714
| 22.714286
|
def delete_database(self, database_name):
    """
    Deletes an existing database in CosmosDB.

    :param database_name: name of the database to delete; must not be None.
    :raises AirflowBadRequest: if database_name is None.
    """
    if database_name is None:
        raise AirflowBadRequest("Database name cannot be None.")
    link = get_database_link(database_name)
    self.get_conn().DeleteDatabase(link)
|
[
"def",
"delete_database",
"(",
"self",
",",
"database_name",
")",
":",
"if",
"database_name",
"is",
"None",
":",
"raise",
"AirflowBadRequest",
"(",
"\"Database name cannot be None.\"",
")",
"self",
".",
"get_conn",
"(",
")",
".",
"DeleteDatabase",
"(",
"get_database_link",
"(",
"database_name",
")",
")"
] | 35.625
| 14.625
|
def _initApplicationList(self):
"""Query Asterisk Manager Interface to initialize internal list of
available applications.
CLI Command - core show applications
"""
if self.checkVersion('1.4'):
cmd = "core show applications"
else:
cmd = "show applications"
cmdresp = self.executeCommand(cmd)
self._applications = set()
for line in cmdresp.splitlines()[1:-1]:
mobj = re.match('\s*(\S+):', line)
if mobj:
self._applications.add(mobj.group(1).lower())
|
[
"def",
"_initApplicationList",
"(",
"self",
")",
":",
"if",
"self",
".",
"checkVersion",
"(",
"'1.4'",
")",
":",
"cmd",
"=",
"\"core show applications\"",
"else",
":",
"cmd",
"=",
"\"show applications\"",
"cmdresp",
"=",
"self",
".",
"executeCommand",
"(",
"cmd",
")",
"self",
".",
"_applications",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"cmdresp",
".",
"splitlines",
"(",
")",
"[",
"1",
":",
"-",
"1",
"]",
":",
"mobj",
"=",
"re",
".",
"match",
"(",
"'\\s*(\\S+):'",
",",
"line",
")",
"if",
"mobj",
":",
"self",
".",
"_applications",
".",
"add",
"(",
"mobj",
".",
"group",
"(",
"1",
")",
".",
"lower",
"(",
")",
")"
] | 34.470588
| 10.823529
|
def tracker(obj):
    """Returns the :class:`Instance` of the specified object if it is one that
    we track by default.

    Args:
        obj (object): any python object passed as an argument to a method.

    Returns:
        Instance: if the object is trackable, the Instance instance of
          that object; for simple containers a summary string or tuple;
          else None.
    """
    import types as typ
    global oids, uuids
    import six
    # NOTE(review): `isclass` is imported here but never used in this function.
    from inspect import isclass
    # Primitive types whose values are never tracked individually.
    untracked = (six.string_types, six.integer_types, float,
                 complex, six.text_type)
    # Container types that get summarized rather than tracked as objects.
    semitrack = (list, dict, set, tuple)
    if six.PY3: # pragma: no cover
        semitrack = semitrack + (range, filter, map)
    # NOTE(review): `filter`/`map` objects have no len() and are consumed by
    # the all(...) test below; this branch assumes sized, re-iterable
    # containers — confirm callers never pass one-shot iterators.
    if (isinstance(obj, semitrack) and
        all([isinstance(t, untracked) for t in obj])):
        # Container of primitives only: return a short descriptive summary
        # string instead of tracking each element.
        if len(obj) > 0:
            semiform = "{0} len={1:d} min={2} max={3}"
            return semiform.format(type(obj), len(obj), min(obj), max(obj))
        else:
            semiform = "{0} len={1:d}"
            return semiform.format(type(obj), len(obj))
    elif isinstance(obj, semitrack):
        #We have to run the tracker on each of the elements in the list, set,
        #dict or tuple; this is necessary so that we can keep track of
        #subsequent calls made with unpacked parts of the tuple.
        result = []
        #If we get a list of 10K tuples (like plot points in matplotlib), then
        #this pollutes the database. So, we restrict the maximum size of complex
        #lists to be 5; we track the first 5 objects and then store a summary of
        #the remaining information.
        # NOTE(review): set and dict are in `semitrack` but do not support
        # slicing, so obj[0:...] raises TypeError for them — confirm only
        # list/tuple ever reach this branch.
        for o in obj[0:min((len(obj), 5))]:
            track = tracker(o)
            if isinstance(track, Instance):
                # Store the uuid, not the Instance, so the summary is hashable.
                result.append(track.uuid)
            else:
                result.append(track)
        if len(obj) > 5:
            result.append("... ({0:d} items)".format(len(obj)))
        return tuple(result)
    elif isinstance(obj, slice):
        return "slice({}, {}, {})".format(obj.start, obj.stop, obj.step)
    elif type(obj) is type:
        return obj.__name__
    elif type(obj) is typ.LambdaType:
        if hasattr(obj, "__fqdn__"):
            #We need to get the actual fqdn of the object *before* it was
            #decorated.
            return obj.__fqdn__
        else:
            # Fall back to describing the lambda by its argument names.
            if six.PY2:
                _code = obj.func_code
            else: # pragma: no cover
                _code = obj.__code__
            return "lambda ({})".format(', '.join(_code.co_varnames))
    elif type(obj) in [typ.FunctionType, typ.MethodType]: # pragma: no cover
        return obj.__name__
    elif not isinstance(obj, untracked):
        #For many of the numpy/scipy methods, the result is a tuple of numpy
        #arrays. In that case, we should maintain the tuple structure for
        #descriptive purposes, but still return a tracker.
        # Cache one Instance per object id; reuse it on subsequent calls.
        oid = id(obj)
        if oid in oids:
            result = oids[oid]
        else:
            result = Instance(oid, obj)
            oids[oid] = result
            uuids[result.uuid] = result
        return result
    else:
        return None
|
[
"def",
"tracker",
"(",
"obj",
")",
":",
"import",
"types",
"as",
"typ",
"global",
"oids",
",",
"uuids",
"import",
"six",
"from",
"inspect",
"import",
"isclass",
"untracked",
"=",
"(",
"six",
".",
"string_types",
",",
"six",
".",
"integer_types",
",",
"float",
",",
"complex",
",",
"six",
".",
"text_type",
")",
"semitrack",
"=",
"(",
"list",
",",
"dict",
",",
"set",
",",
"tuple",
")",
"if",
"six",
".",
"PY3",
":",
"# pragma: no cover",
"semitrack",
"=",
"semitrack",
"+",
"(",
"range",
",",
"filter",
",",
"map",
")",
"if",
"(",
"isinstance",
"(",
"obj",
",",
"semitrack",
")",
"and",
"all",
"(",
"[",
"isinstance",
"(",
"t",
",",
"untracked",
")",
"for",
"t",
"in",
"obj",
"]",
")",
")",
":",
"if",
"len",
"(",
"obj",
")",
">",
"0",
":",
"semiform",
"=",
"\"{0} len={1:d} min={2} max={3}\"",
"return",
"semiform",
".",
"format",
"(",
"type",
"(",
"obj",
")",
",",
"len",
"(",
"obj",
")",
",",
"min",
"(",
"obj",
")",
",",
"max",
"(",
"obj",
")",
")",
"else",
":",
"semiform",
"=",
"\"{0} len={1:d}\"",
"return",
"semiform",
".",
"format",
"(",
"type",
"(",
"obj",
")",
",",
"len",
"(",
"obj",
")",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"semitrack",
")",
":",
"#We have to run the tracker on each of the elements in the list, set,",
"#dict or tuple; this is necessary so that we can keep track of",
"#subsequent calls made with unpacked parts of the tuple.",
"result",
"=",
"[",
"]",
"#If we get a list of 10K tuples (like plot points in matplotlib), then",
"#this pollutes the database. So, we restrict the maximum size of complex",
"#lists to be 5; we track the first 5 objects and then store a summary of",
"#the remaining information.",
"for",
"o",
"in",
"obj",
"[",
"0",
":",
"min",
"(",
"(",
"len",
"(",
"obj",
")",
",",
"5",
")",
")",
"]",
":",
"track",
"=",
"tracker",
"(",
"o",
")",
"if",
"isinstance",
"(",
"track",
",",
"Instance",
")",
":",
"result",
".",
"append",
"(",
"track",
".",
"uuid",
")",
"else",
":",
"result",
".",
"append",
"(",
"track",
")",
"if",
"len",
"(",
"obj",
")",
">",
"5",
":",
"result",
".",
"append",
"(",
"\"... ({0:d} items)\"",
".",
"format",
"(",
"len",
"(",
"obj",
")",
")",
")",
"return",
"tuple",
"(",
"result",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"slice",
")",
":",
"return",
"\"slice({}, {}, {})\"",
".",
"format",
"(",
"obj",
".",
"start",
",",
"obj",
".",
"stop",
",",
"obj",
".",
"step",
")",
"elif",
"type",
"(",
"obj",
")",
"is",
"type",
":",
"return",
"obj",
".",
"__name__",
"elif",
"type",
"(",
"obj",
")",
"is",
"typ",
".",
"LambdaType",
":",
"if",
"hasattr",
"(",
"obj",
",",
"\"__fqdn__\"",
")",
":",
"#We need to get the actual fqdn of the object *before* it was",
"#decorated.",
"return",
"obj",
".",
"__fqdn__",
"else",
":",
"if",
"six",
".",
"PY2",
":",
"_code",
"=",
"obj",
".",
"func_code",
"else",
":",
"# pragma: no cover",
"_code",
"=",
"obj",
".",
"__code__",
"return",
"\"lambda ({})\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"_code",
".",
"co_varnames",
")",
")",
"elif",
"type",
"(",
"obj",
")",
"in",
"[",
"typ",
".",
"FunctionType",
",",
"typ",
".",
"MethodType",
"]",
":",
"# pragma: no cover",
"return",
"obj",
".",
"__name__",
"elif",
"not",
"isinstance",
"(",
"obj",
",",
"untracked",
")",
":",
"#For many of the numpy/scipy methods, the result is a tuple of numpy",
"#arrays. In that case, we should maintain the tuple structure for",
"#descriptive purposes, but still return a tracker.",
"oid",
"=",
"id",
"(",
"obj",
")",
"if",
"oid",
"in",
"oids",
":",
"result",
"=",
"oids",
"[",
"oid",
"]",
"else",
":",
"result",
"=",
"Instance",
"(",
"oid",
",",
"obj",
")",
"oids",
"[",
"oid",
"]",
"=",
"result",
"uuids",
"[",
"result",
".",
"uuid",
"]",
"=",
"result",
"return",
"result",
"else",
":",
"return",
"None"
] | 37.085366
| 18.341463
|
def _prepare_orders(self, orders):
    """
    Fill in missing order details with their defaults.

    Each order needs to have all its details filled with a default value,
    or None, in case those are not already filled.

    :param orders: list of order dicts; mutated in place.
    :return: the same list of orders.
    """
    for detail in PAYU_ORDER_DETAILS:
        # Only backfill a detail when no order specifies it at all.
        # (Generator instead of a throwaway list inside any(); default
        # hoisted out of the inner loop.)
        if not any(detail in order for order in orders):
            default = PAYU_ORDER_DETAILS_DEFAULTS.get(detail, None)
            for order in orders:
                order[detail] = default
    return orders
|
[
"def",
"_prepare_orders",
"(",
"self",
",",
"orders",
")",
":",
"for",
"detail",
"in",
"PAYU_ORDER_DETAILS",
":",
"if",
"not",
"any",
"(",
"[",
"detail",
"in",
"order",
"for",
"order",
"in",
"orders",
"]",
")",
":",
"for",
"order",
"in",
"orders",
":",
"order",
"[",
"detail",
"]",
"=",
"PAYU_ORDER_DETAILS_DEFAULTS",
".",
"get",
"(",
"detail",
",",
"None",
")",
"return",
"orders"
] | 35.583333
| 18.583333
|
def make_lat_lons(cvects):
    """Convert directional cosines to latitude and longitude.

    Parameters
    ----------
    cvects : directional cosine (i.e., x, y, z component) values, indexable
        as cvects[0]=x, cvects[1]=y, cvects[2]=z.

    Returns
    -------
    np.ndarray with the latitudes followed by the longitudes, in degrees,
    horizontally stacked (NOTE(review): the original docstring claimed a
    (2, nsrc) shape, but hstack of two (nsrc,) arrays yields (2*nsrc,) —
    behavior preserved here; confirm intent with callers).
    """
    lat_deg = np.degrees(np.arcsin(cvects[2]))
    lon_deg = np.degrees(np.arctan2(cvects[0], cvects[1]))
    return np.hstack([lat_deg, lon_deg])
|
[
"def",
"make_lat_lons",
"(",
"cvects",
")",
":",
"lats",
"=",
"np",
".",
"degrees",
"(",
"np",
".",
"arcsin",
"(",
"cvects",
"[",
"2",
"]",
")",
")",
"lons",
"=",
"np",
".",
"degrees",
"(",
"np",
".",
"arctan2",
"(",
"cvects",
"[",
"0",
"]",
",",
"cvects",
"[",
"1",
"]",
")",
")",
"return",
"np",
".",
"hstack",
"(",
"[",
"lats",
",",
"lons",
"]",
")"
] | 34.416667
| 20.25
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.