code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def moving_hudson_fst(ac1, ac2, size, start=0, stop=None, step=None):
    """Estimate average Fst in moving windows over a single chromosome/contig,
    following the method of Hudson (1992) elaborated by Bhatia et al. (2013).

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    fst : ndarray, float, shape (n_windows,)
        Average Fst in each window.
    """
    # Per-variant numerator and denominator; NaN marks non-informative sites
    # so they can be skipped by the windowed nansum below.
    numerator, denominator = hudson_fst(ac1, ac2, fill=np.nan)
    # Sum each component over the requested windows, ignoring NaNs.
    window_kwargs = dict(statistic=np.nansum, size=size,
                         start=start, stop=stop, step=step)
    windowed_num = moving_statistic(numerator, **window_kwargs)
    windowed_den = moving_statistic(denominator, **window_kwargs)
    # Ratio-of-sums estimator of average Fst per window.
    return windowed_num / windowed_den
constant[Estimate average Fst in moving windows over a single chromosome/contig,
following the method of Hudson (1992) elaborated by Bhatia et al. (2013).
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
step : int, optional
The number of variants between start positions of windows. If not
given, defaults to the window size, i.e., non-overlapping windows.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
]
<ast.Tuple object at 0x7da2041d8490> assign[=] call[name[hudson_fst], parameter[name[ac1], name[ac2]]]
variable[num_sum] assign[=] call[name[moving_statistic], parameter[name[num]]]
variable[den_sum] assign[=] call[name[moving_statistic], parameter[name[den]]]
variable[fst] assign[=] binary_operation[name[num_sum] / name[den_sum]]
return[name[fst]] | keyword[def] identifier[moving_hudson_fst] ( identifier[ac1] , identifier[ac2] , identifier[size] , identifier[start] = literal[int] , identifier[stop] = keyword[None] , identifier[step] = keyword[None] ):
literal[string]
identifier[num] , identifier[den] = identifier[hudson_fst] ( identifier[ac1] , identifier[ac2] , identifier[fill] = identifier[np] . identifier[nan] )
identifier[num_sum] = identifier[moving_statistic] ( identifier[num] , identifier[statistic] = identifier[np] . identifier[nansum] , identifier[size] = identifier[size] ,
identifier[start] = identifier[start] , identifier[stop] = identifier[stop] , identifier[step] = identifier[step] )
identifier[den_sum] = identifier[moving_statistic] ( identifier[den] , identifier[statistic] = identifier[np] . identifier[nansum] , identifier[size] = identifier[size] ,
identifier[start] = identifier[start] , identifier[stop] = identifier[stop] , identifier[step] = identifier[step] )
identifier[fst] = identifier[num_sum] / identifier[den_sum]
keyword[return] identifier[fst] | def moving_hudson_fst(ac1, ac2, size, start=0, stop=None, step=None):
"""Estimate average Fst in moving windows over a single chromosome/contig,
following the method of Hudson (1992) elaborated by Bhatia et al. (2013).
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
step : int, optional
The number of variants between start positions of windows. If not
given, defaults to the window size, i.e., non-overlapping windows.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
"""
# calculate per-variant values
(num, den) = hudson_fst(ac1, ac2, fill=np.nan)
# compute the numerator and denominator in moving windows
num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step)
den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step)
# calculate fst in each window
fst = num_sum / den_sum
return fst |
def by_housing_units(self,
                     lower=-1,
                     upper=2 ** 31,
                     zipcode_type=ZipcodeType.Standard,
                     sort_by=SimpleZipcode.housing_units.name,
                     ascending=False,
                     returns=DEFAULT_LIMIT):
    """
    Search zipcode records whose number of housing units falls within
    the inclusive range [lower, upper].

    Delegates to :meth:`query` with the housing-units bounds; the
    remaining arguments are passed through unchanged.
    """
    return self.query(
        housing_units_lower=lower,
        housing_units_upper=upper,
        zipcode_type=zipcode_type,
        sort_by=sort_by,
        ascending=ascending,
        returns=returns,
    )
constant[
Search zipcode information by house of units.
]
return[call[name[self].query, parameter[]]] | keyword[def] identifier[by_housing_units] ( identifier[self] ,
identifier[lower] =- literal[int] ,
identifier[upper] = literal[int] ** literal[int] ,
identifier[zipcode_type] = identifier[ZipcodeType] . identifier[Standard] ,
identifier[sort_by] = identifier[SimpleZipcode] . identifier[housing_units] . identifier[name] ,
identifier[ascending] = keyword[False] ,
identifier[returns] = identifier[DEFAULT_LIMIT] ):
literal[string]
keyword[return] identifier[self] . identifier[query] (
identifier[housing_units_lower] = identifier[lower] ,
identifier[housing_units_upper] = identifier[upper] ,
identifier[sort_by] = identifier[sort_by] , identifier[zipcode_type] = identifier[zipcode_type] ,
identifier[ascending] = identifier[ascending] , identifier[returns] = identifier[returns] ,
) | def by_housing_units(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.housing_units.name, ascending=False, returns=DEFAULT_LIMIT):
"""
Search zipcode information by house of units.
"""
return self.query(housing_units_lower=lower, housing_units_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns) |
def __header(self, line):
    """Build the header (contain the number of CPU).

    Example header line::

        CPU0       CPU1       CPU2       CPU3
        0:         21          0          0          0   IO-APIC   2-edge  timer

    The CPU count is the number of whitespace-separated tokens in the
    header line; it is stored on ``self.cpu_number`` and also returned.
    """
    tokens = line.split()
    self.cpu_number = len(tokens)
    return self.cpu_number
constant[Build the header (contain the number of CPU).
CPU0 CPU1 CPU2 CPU3
0: 21 0 0 0 IO-APIC 2-edge timer
]
name[self].cpu_number assign[=] call[name[len], parameter[call[name[line].split, parameter[]]]]
return[name[self].cpu_number] | keyword[def] identifier[__header] ( identifier[self] , identifier[line] ):
literal[string]
identifier[self] . identifier[cpu_number] = identifier[len] ( identifier[line] . identifier[split] ())
keyword[return] identifier[self] . identifier[cpu_number] | def __header(self, line):
"""Build the header (contain the number of CPU).
CPU0 CPU1 CPU2 CPU3
0: 21 0 0 0 IO-APIC 2-edge timer
"""
self.cpu_number = len(line.split())
return self.cpu_number |
def addSRNLayers(self, inc, hidc, outc):
    """
    Compatibility wrapper: delegate to SRN.addThreeLayers() with the
    given input, hidden, and output layer sizes.
    """
    self.addThreeLayers(inc, hidc, outc)
constant[
Wraps SRN.addThreeLayers() for compatibility.
]
call[name[self].addThreeLayers, parameter[name[inc], name[hidc], name[outc]]] | keyword[def] identifier[addSRNLayers] ( identifier[self] , identifier[inc] , identifier[hidc] , identifier[outc] ):
literal[string]
identifier[self] . identifier[addThreeLayers] ( identifier[inc] , identifier[hidc] , identifier[outc] ) | def addSRNLayers(self, inc, hidc, outc):
"""
Wraps SRN.addThreeLayers() for compatibility.
"""
self.addThreeLayers(inc, hidc, outc) |
def appname(path=None):
    """
    Return a useful application name based on the program argument.

    When `path` is omitted, the running program (``sys.argv[0]``) is used.
    A special case maps 'mod_wsgi' to a more appropriate name so
    web applications show up as our own.
    """
    if path is None:
        path = sys.argv[0]
    # Drop the extension first, then take the final path component.
    root, _ext = os.path.splitext(path)
    name = os.path.basename(root)
    if name == 'mod_wsgi':
        name = 'nvn_web'  # pragma: no cover
    return name
constant[
Return a useful application name based on the program argument.
A special case maps 'mod_wsgi' to a more appropriate name so
web applications show up as our own.
]
if compare[name[path] is constant[None]] begin[:]
variable[path] assign[=] call[name[sys].argv][constant[0]]
variable[name] assign[=] call[name[os].path.basename, parameter[call[call[name[os].path.splitext, parameter[name[path]]]][constant[0]]]]
if compare[name[name] equal[==] constant[mod_wsgi]] begin[:]
variable[name] assign[=] constant[nvn_web]
return[name[name]] | keyword[def] identifier[appname] ( identifier[path] = keyword[None] ):
literal[string]
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[path] = identifier[sys] . identifier[argv] [ literal[int] ]
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[os] . identifier[path] . identifier[splitext] ( identifier[path] )[ literal[int] ])
keyword[if] identifier[name] == literal[string] :
identifier[name] = literal[string]
keyword[return] identifier[name] | def appname(path=None):
"""
Return a useful application name based on the program argument.
A special case maps 'mod_wsgi' to a more appropriate name so
web applications show up as our own.
"""
if path is None:
path = sys.argv[0] # depends on [control=['if'], data=['path']]
name = os.path.basename(os.path.splitext(path)[0])
if name == 'mod_wsgi':
name = 'nvn_web' # pragma: no cover # depends on [control=['if'], data=['name']]
return name |
def make_c_args(arg_pairs):
    """
    Build a C argument list from (number, type, name) triples.

    The triples are ordered by their number; each argument renders as
    ``"<type> <name>"``, or just ``"<type>"`` when the name is empty.
    Returns the arguments joined with ", ".
    """
    logging.debug(arg_pairs)
    rendered = []
    for _position, arg_type, arg_name in sorted(arg_pairs):
        if arg_name:
            rendered.append('{} {}'.format(arg_type, arg_name))
        else:
            rendered.append(arg_type)
    return ', '.join(rendered)
constant[
Build a C argument list from return type and arguments pairs.
]
call[name[logging].debug, parameter[name[arg_pairs]]]
variable[c_args] assign[=] <ast.ListComp object at 0x7da1b1eef0d0>
return[call[constant[, ].join, parameter[name[c_args]]]] | keyword[def] identifier[make_c_args] ( identifier[arg_pairs] ):
literal[string]
identifier[logging] . identifier[debug] ( identifier[arg_pairs] )
identifier[c_args] =[
literal[string] . identifier[format] ( identifier[arg_type] , identifier[arg_name] ) keyword[if] identifier[arg_name] keyword[else] identifier[arg_type]
keyword[for] identifier[dummy_number] , identifier[arg_type] , identifier[arg_name] keyword[in] identifier[sorted] ( identifier[arg_pairs] )
]
keyword[return] literal[string] . identifier[join] ( identifier[c_args] ) | def make_c_args(arg_pairs):
"""
Build a C argument list from return type and arguments pairs.
"""
logging.debug(arg_pairs)
c_args = ['{} {}'.format(arg_type, arg_name) if arg_name else arg_type for (dummy_number, arg_type, arg_name) in sorted(arg_pairs)]
return ', '.join(c_args) |
def parse_summary(content, reference_id=None):
    """\
    Extract the summary section from the `content` of a cable.

    Returns the cleaned summary text, or ``None`` when no summary can
    be located.

    `content`
        The content of the cable.
    `reference_id`
        The reference identifier of the cable.
    """
    summary = None
    end_match = _END_SUMMARY_PATTERN.search(content)
    if end_match:
        end_idx = end_match.start()
        # Prefer the primary start marker; fall back to the alternative one.
        start_match = (_START_SUMMARY_PATTERN.search(content, 0, end_idx)
                       or _ALTERNATIVE_START_SUMMARY_PATTERN.search(content, 0, end_idx))
        if start_match:
            summary = content[start_match.end():end_idx]
        elif reference_id not in _CABLES_WITH_MALFORMED_SUMMARY:
            # Known-malformed cables are expected to lack a start marker.
            logger.debug('Found "end of summary" but no start in "%s", content: "%s"' % (reference_id, content[:end_idx]))
    else:
        # No explicit end marker: try the single-pattern extraction.
        whole_match = _PARSE_SUMMARY_PATTERN.search(content)
        if whole_match:
            summary = content[whole_match.start(1):whole_match.end(1)]
    if summary:
        # Strip classification markers, then collapse whitespace runs.
        summary = _CLEAN_SUMMARY_CLS_PATTERN.sub(u'', summary)
        summary = _CLEAN_SUMMARY_PATTERN.sub(u' ', summary)
        summary = _CLEAN_SUMMARY_WS_PATTERN.sub(u' ', summary)
        summary = summary.strip()
    return summary
constant[ Extracts the summary from the `content` of the cable.
If no summary can be found, ``None`` is returned.
`content`
The content of the cable.
`reference_id`
The reference identifier of the cable.
]
variable[summary] assign[=] constant[None]
variable[m] assign[=] call[name[_END_SUMMARY_PATTERN].search, parameter[name[content]]]
if name[m] begin[:]
variable[end_of_summary] assign[=] call[name[m].start, parameter[]]
variable[m] assign[=] <ast.BoolOp object at 0x7da2054a4910>
if name[m] begin[:]
variable[summary] assign[=] call[name[content]][<ast.Slice object at 0x7da2054a7700>]
if name[summary] begin[:]
variable[summary] assign[=] call[name[_CLEAN_SUMMARY_CLS_PATTERN].sub, parameter[constant[], name[summary]]]
variable[summary] assign[=] call[name[_CLEAN_SUMMARY_PATTERN].sub, parameter[constant[ ], name[summary]]]
variable[summary] assign[=] call[name[_CLEAN_SUMMARY_WS_PATTERN].sub, parameter[constant[ ], name[summary]]]
variable[summary] assign[=] call[name[summary].strip, parameter[]]
return[name[summary]] | keyword[def] identifier[parse_summary] ( identifier[content] , identifier[reference_id] = keyword[None] ):
literal[string]
identifier[summary] = keyword[None]
identifier[m] = identifier[_END_SUMMARY_PATTERN] . identifier[search] ( identifier[content] )
keyword[if] identifier[m] :
identifier[end_of_summary] = identifier[m] . identifier[start] ()
identifier[m] = identifier[_START_SUMMARY_PATTERN] . identifier[search] ( identifier[content] , literal[int] , identifier[end_of_summary] ) keyword[or] identifier[_ALTERNATIVE_START_SUMMARY_PATTERN] . identifier[search] ( identifier[content] , literal[int] , identifier[end_of_summary] )
keyword[if] identifier[m] :
identifier[summary] = identifier[content] [ identifier[m] . identifier[end] (): identifier[end_of_summary] ]
keyword[elif] identifier[reference_id] keyword[not] keyword[in] identifier[_CABLES_WITH_MALFORMED_SUMMARY] :
identifier[logger] . identifier[debug] ( literal[string] %( identifier[reference_id] , identifier[content] [: identifier[end_of_summary] ]))
keyword[else] :
identifier[m] = identifier[_PARSE_SUMMARY_PATTERN] . identifier[search] ( identifier[content] )
keyword[if] identifier[m] :
identifier[summary] = identifier[content] [ identifier[m] . identifier[start] ( literal[int] ): identifier[m] . identifier[end] ( literal[int] )]
keyword[if] identifier[summary] :
identifier[summary] = identifier[_CLEAN_SUMMARY_CLS_PATTERN] . identifier[sub] ( literal[string] , identifier[summary] )
identifier[summary] = identifier[_CLEAN_SUMMARY_PATTERN] . identifier[sub] ( literal[string] , identifier[summary] )
identifier[summary] = identifier[_CLEAN_SUMMARY_WS_PATTERN] . identifier[sub] ( literal[string] , identifier[summary] )
identifier[summary] = identifier[summary] . identifier[strip] ()
keyword[return] identifier[summary] | def parse_summary(content, reference_id=None):
""" Extracts the summary from the `content` of the cable.
If no summary can be found, ``None`` is returned.
`content`
The content of the cable.
`reference_id`
The reference identifier of the cable.
"""
summary = None
m = _END_SUMMARY_PATTERN.search(content)
if m:
end_of_summary = m.start()
m = _START_SUMMARY_PATTERN.search(content, 0, end_of_summary) or _ALTERNATIVE_START_SUMMARY_PATTERN.search(content, 0, end_of_summary)
if m:
summary = content[m.end():end_of_summary] # depends on [control=['if'], data=[]]
elif reference_id not in _CABLES_WITH_MALFORMED_SUMMARY:
logger.debug('Found "end of summary" but no start in "%s", content: "%s"' % (reference_id, content[:end_of_summary])) # depends on [control=['if'], data=['reference_id']] # depends on [control=['if'], data=[]]
else:
m = _PARSE_SUMMARY_PATTERN.search(content)
if m:
summary = content[m.start(1):m.end(1)] # depends on [control=['if'], data=[]]
if summary:
summary = _CLEAN_SUMMARY_CLS_PATTERN.sub(u'', summary)
summary = _CLEAN_SUMMARY_PATTERN.sub(u' ', summary)
summary = _CLEAN_SUMMARY_WS_PATTERN.sub(u' ', summary)
summary = summary.strip() # depends on [control=['if'], data=[]]
return summary |
async def execute(self, sql, *args):
    """Create a new Cursor object, call its execute method, and return it.

    See Cursor.execute for more details. This is a convenience method
    that is not part of the DB API. Since a new Cursor is allocated
    by each call, this should not be used if more than one SQL
    statement needs to be executed.

    :param sql: str, formated sql statement
    :param args: tuple, arguments for construction of sql statement
    """
    raw_cursor = await self._execute(self._conn.execute, sql, *args)
    # Wrap the driver cursor; `self` serves as the owning connection.
    return Cursor(raw_cursor, self, echo=self._echo)
literal[string]
identifier[_cursor] = keyword[await] identifier[self] . identifier[_execute] ( identifier[self] . identifier[_conn] . identifier[execute] , identifier[sql] ,* identifier[args] )
identifier[connection] = identifier[self]
identifier[cursor] = identifier[Cursor] ( identifier[_cursor] , identifier[connection] , identifier[echo] = identifier[self] . identifier[_echo] )
keyword[return] identifier[cursor] | async def execute(self, sql, *args):
"""Create a new Cursor object, call its execute method, and return it.
See Cursor.execute for more details.This is a convenience method
that is not part of the DB API. Since a new Cursor is allocated
by each call, this should not be used if more than one SQL
statement needs to be executed.
:param sql: str, formated sql statement
:param args: tuple, arguments for construction of sql statement
"""
_cursor = await self._execute(self._conn.execute, sql, *args)
connection = self
cursor = Cursor(_cursor, connection, echo=self._echo)
return cursor |
def synthesize(vers, opts):
    """Synthesize a capability-enabled version response.

    This is a very limited emulation for relatively recent feature sets.

    :param vers: dict carrying at least a "version" string; mutated in
        place to add a "capabilities" map and, on failure, an "error".
    :param opts: dict with "optional" and "required" capability name lists.
    :returns: the (mutated) ``vers`` dict.
    """
    parsed_version = parse_version(vers["version"])
    vers["capabilities"] = {}
    for name in opts["optional"]:
        vers["capabilities"][name] = check(parsed_version, name)
    # Previously tracked an unused `failed` flag here; removed as dead code.
    for name in opts["required"]:
        have = check(parsed_version, name)
        vers["capabilities"][name] = have
        if not have:
            # NOTE: if several required capabilities are missing, only the
            # last one is reported (matches the original behavior).
            vers["error"] = (
                "client required capability `"
                + name
                + "` is not supported by this server"
            )
    return vers
constant[ Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
]
variable[parsed_version] assign[=] call[name[parse_version], parameter[call[name[vers]][constant[version]]]]
call[name[vers]][constant[capabilities]] assign[=] dictionary[[], []]
for taget[name[name]] in starred[call[name[opts]][constant[optional]]] begin[:]
call[call[name[vers]][constant[capabilities]]][name[name]] assign[=] call[name[check], parameter[name[parsed_version], name[name]]]
variable[failed] assign[=] constant[False]
for taget[name[name]] in starred[call[name[opts]][constant[required]]] begin[:]
variable[have] assign[=] call[name[check], parameter[name[parsed_version], name[name]]]
call[call[name[vers]][constant[capabilities]]][name[name]] assign[=] name[have]
if <ast.UnaryOp object at 0x7da18c4cec50> begin[:]
call[name[vers]][constant[error]] assign[=] binary_operation[binary_operation[constant[client required capability `] + name[name]] + constant[` is not supported by this server]]
return[name[vers]] | keyword[def] identifier[synthesize] ( identifier[vers] , identifier[opts] ):
literal[string]
identifier[parsed_version] = identifier[parse_version] ( identifier[vers] [ literal[string] ])
identifier[vers] [ literal[string] ]={}
keyword[for] identifier[name] keyword[in] identifier[opts] [ literal[string] ]:
identifier[vers] [ literal[string] ][ identifier[name] ]= identifier[check] ( identifier[parsed_version] , identifier[name] )
identifier[failed] = keyword[False]
keyword[for] identifier[name] keyword[in] identifier[opts] [ literal[string] ]:
identifier[have] = identifier[check] ( identifier[parsed_version] , identifier[name] )
identifier[vers] [ literal[string] ][ identifier[name] ]= identifier[have]
keyword[if] keyword[not] identifier[have] :
identifier[vers] [ literal[string] ]=(
literal[string]
+ identifier[name]
+ literal[string]
)
keyword[return] identifier[vers] | def synthesize(vers, opts):
""" Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
"""
parsed_version = parse_version(vers['version'])
vers['capabilities'] = {}
for name in opts['optional']:
vers['capabilities'][name] = check(parsed_version, name) # depends on [control=['for'], data=['name']]
failed = False # noqa: F841 T25377293 Grandfathered in
for name in opts['required']:
have = check(parsed_version, name)
vers['capabilities'][name] = have
if not have:
vers['error'] = 'client required capability `' + name + '` is not supported by this server' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
return vers |
def as_proj4(self):
    """
    Return the PROJ.4 string which corresponds to the CRS, fetched from
    the epsg.io download endpoint.

    For example::
        >>> print(get(21781).as_proj4())
        +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \
        +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel \
        +towgs84=674.4,15.1,405.3,0,0,0,0 +units=m +no_defs
    """
    url = '{prefix}{code}.proj4?download'.format(prefix=EPSG_IO_URL,
                                                code=self.id)
    response = requests.get(url)
    return response.text.strip()
constant[
Return the PROJ.4 string which corresponds to the CRS.
For example::
>>> print(get(21781).as_proj4())
+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.4,15.1,405.3,0,0,0,0 +units=m +no_defs
]
variable[url] assign[=] call[constant[{prefix}{code}.proj4?download].format, parameter[]]
return[call[call[name[requests].get, parameter[name[url]]].text.strip, parameter[]]] | keyword[def] identifier[as_proj4] ( identifier[self] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[prefix] = identifier[EPSG_IO_URL] ,
identifier[code] = identifier[self] . identifier[id] )
keyword[return] identifier[requests] . identifier[get] ( identifier[url] ). identifier[text] . identifier[strip] () | def as_proj4(self):
"""
Return the PROJ.4 string which corresponds to the CRS.
For example::
>>> print(get(21781).as_proj4())
+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.4,15.1,405.3,0,0,0,0 +units=m +no_defs
"""
url = '{prefix}{code}.proj4?download'.format(prefix=EPSG_IO_URL, code=self.id)
return requests.get(url).text.strip() |
def add_answer_at_time(self, record, now):
    """Add an answer, provided it does not expire by the given time.

    A ``now`` of 0 disables the expiry check.  When the record carries
    an RRSIG, the signature record is queued alongside it.
    """
    if record is None:
        return
    if now != 0 and record.is_expired(now):
        return
    self.answers.append((record, now))
    if record.rrsig is not None:
        self.answers.append((record.rrsig, now))
constant[Adds an answer if if does not expire by a certain time]
if compare[name[record] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da18fe926e0> begin[:]
call[name[self].answers.append, parameter[tuple[[<ast.Name object at 0x7da2041dbd00>, <ast.Name object at 0x7da2041da680>]]]]
if compare[name[record].rrsig is_not constant[None]] begin[:]
call[name[self].answers.append, parameter[tuple[[<ast.Attribute object at 0x7da2041d8eb0>, <ast.Name object at 0x7da2041daf80>]]]] | keyword[def] identifier[add_answer_at_time] ( identifier[self] , identifier[record] , identifier[now] ):
literal[string]
keyword[if] identifier[record] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[now] == literal[int] keyword[or] keyword[not] identifier[record] . identifier[is_expired] ( identifier[now] ):
identifier[self] . identifier[answers] . identifier[append] (( identifier[record] , identifier[now] ))
keyword[if] identifier[record] . identifier[rrsig] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[answers] . identifier[append] (( identifier[record] . identifier[rrsig] , identifier[now] )) | def add_answer_at_time(self, record, now):
"""Adds an answer if if does not expire by a certain time"""
if record is not None:
if now == 0 or not record.is_expired(now):
self.answers.append((record, now))
if record.rrsig is not None:
self.answers.append((record.rrsig, now)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['record']] |
def touch(self, name, data):
    """
    Create a 'file' analog called 'name' and store 'data' in the current
    node's d_data dictionary under key 'name'.

    The 'name' can contain a path specifier, in which case the tree is
    first changed to that directory, the data stored there, and the
    original working directory restored afterwards.

    :returns: True (kept for interface compatibility).
    """
    # Removed commented-out debug print statements that cluttered the body.
    b_OK = True
    str_here = self.cwd()
    l_path = name.split('/')
    if len(l_path) > 1:
        # Descend to the directory component before storing the leaf.
        self.cd('/'.join(l_path[0:-1]))
        name = l_path[-1]
    self.snode_current.d_data[name] = data
    # Restore the original working directory.
    self.cd(str_here)
    return b_OK
constant[
Create a 'file' analog called 'name' and put 'data' to the d_data dictionary
under key 'name'.
The 'name' can contain a path specifier.
]
variable[b_OK] assign[=] constant[True]
variable[str_here] assign[=] call[name[self].cwd, parameter[]]
variable[l_path] assign[=] call[name[name].split, parameter[constant[/]]]
if compare[call[name[len], parameter[name[l_path]]] greater[>] constant[1]] begin[:]
call[name[self].cd, parameter[call[constant[/].join, parameter[call[name[l_path]][<ast.Slice object at 0x7da18bcc94b0>]]]]]
variable[name] assign[=] call[name[l_path]][<ast.UnaryOp object at 0x7da18bcca560>]
call[name[self].snode_current.d_data][name[name]] assign[=] name[data]
call[name[self].cd, parameter[name[str_here]]]
return[name[b_OK]] | keyword[def] identifier[touch] ( identifier[self] , identifier[name] , identifier[data] ):
literal[string]
identifier[b_OK] = keyword[True]
identifier[str_here] = identifier[self] . identifier[cwd] ()
identifier[l_path] = identifier[name] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[l_path] )> literal[int] :
identifier[self] . identifier[cd] ( literal[string] . identifier[join] ( identifier[l_path] [ literal[int] :- literal[int] ]))
identifier[name] = identifier[l_path] [- literal[int] ]
identifier[self] . identifier[snode_current] . identifier[d_data] [ identifier[name] ]= identifier[data]
identifier[self] . identifier[cd] ( identifier[str_here] )
keyword[return] identifier[b_OK] | def touch(self, name, data):
"""
Create a 'file' analog called 'name' and put 'data' to the d_data dictionary
under key 'name'.
The 'name' can contain a path specifier.
"""
b_OK = True
str_here = self.cwd()
# print("here!")
# print(self.snode_current)
# print(self.snode_current.d_nodes)
l_path = name.split('/')
if len(l_path) > 1:
self.cd('/'.join(l_path[0:-1])) # depends on [control=['if'], data=[]]
name = l_path[-1]
self.snode_current.d_data[name] = data
# print(self.snode_current)
self.cd(str_here)
return b_OK |
def enable_mp_crash_reporting():
    """
    Monkey-patch multiprocessing so every Process is a CrashReportingProcess.

    Both ``multiprocessing.Process`` and ``multiprocessing.process.Process``
    are replaced, so any subsequent imports of multiprocessing.Process
    reference CrashReportingProcess instead.  This must be called before
    any imports of multiprocessing for the monkey-patching to work.
    """
    global mp_crash_reporting_enabled
    multiprocessing.process.Process = CrashReportingProcess
    multiprocessing.Process = CrashReportingProcess
    mp_crash_reporting_enabled = True
constant[
Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work.
]
<ast.Global object at 0x7da1b242c370>
name[multiprocessing].Process assign[=] name[CrashReportingProcess]
variable[mp_crash_reporting_enabled] assign[=] constant[True] | keyword[def] identifier[enable_mp_crash_reporting] ():
literal[string]
keyword[global] identifier[mp_crash_reporting_enabled]
identifier[multiprocessing] . identifier[Process] = identifier[multiprocessing] . identifier[process] . identifier[Process] = identifier[CrashReportingProcess]
identifier[mp_crash_reporting_enabled] = keyword[True] | def enable_mp_crash_reporting():
"""
Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work.
"""
global mp_crash_reporting_enabled
multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess
mp_crash_reporting_enabled = True |
def GetWindowText(handle: int) -> str:
    """
    GetWindowText from Win32.

    handle: int, the handle of a native window.
    Return str.
    """
    # Fixed-size wide-character buffer; MAX_PATH caps the returned title.
    buffer = (ctypes.c_wchar * MAX_PATH)()
    ctypes.windll.user32.GetWindowTextW(ctypes.c_void_p(handle), buffer, MAX_PATH)
    return buffer.value
constant[
GetWindowText from Win32.
handle: int, the handle of a native window.
Return str.
]
variable[arrayType] assign[=] binary_operation[name[ctypes].c_wchar * name[MAX_PATH]]
variable[values] assign[=] call[name[arrayType], parameter[]]
call[name[ctypes].windll.user32.GetWindowTextW, parameter[call[name[ctypes].c_void_p, parameter[name[handle]]], name[values], name[MAX_PATH]]]
return[name[values].value] | keyword[def] identifier[GetWindowText] ( identifier[handle] : identifier[int] )-> identifier[str] :
literal[string]
identifier[arrayType] = identifier[ctypes] . identifier[c_wchar] * identifier[MAX_PATH]
identifier[values] = identifier[arrayType] ()
identifier[ctypes] . identifier[windll] . identifier[user32] . identifier[GetWindowTextW] ( identifier[ctypes] . identifier[c_void_p] ( identifier[handle] ), identifier[values] , identifier[MAX_PATH] )
keyword[return] identifier[values] . identifier[value] | def GetWindowText(handle: int) -> str:
"""
GetWindowText from Win32.
handle: int, the handle of a native window.
Return str.
"""
arrayType = ctypes.c_wchar * MAX_PATH
values = arrayType()
ctypes.windll.user32.GetWindowTextW(ctypes.c_void_p(handle), values, MAX_PATH)
return values.value |
def getQCAnalyses(self, qctype=None, review_state=None):
    """return the QC analyses performed in the worksheet in which, at
    least, one sample of this AR is present.
    Depending on qctype value, returns the analyses of:
    - 'b': all Blank Reference Samples used in related worksheet/s
    - 'c': all Control Reference Samples used in related worksheet/s
    - 'd': duplicates only for samples contained in this AR
    If qctype==None, returns all type of qc analyses mentioned above
    """
    qcanalyses = []
    suids = []
    ans = self.getAnalyses()
    # portal_workflow tool, used by the validators to read review states.
    wf = getToolByName(self, 'portal_workflow')
    # Collect the distinct service UIDs of this AR's analyses; reference
    # analyses are matched against these in valid_ref below.
    for an in ans:
        an = an.getObject()
        if an.getServiceUID() not in suids:
            suids.append(an.getServiceUID())

    def valid_dup(wan):
        # True for a DuplicateAnalysis belonging to this request and
        # (optionally) in one of the accepted review states.
        if wan.portal_type == 'ReferenceAnalysis':
            return False
        an_state = wf.getInfoFor(wan, 'review_state')
        return \
            wan.portal_type == 'DuplicateAnalysis' \
            and wan.getRequestID() == self.id \
            and (review_state is None or an_state in review_state)

    def valid_ref(wan):
        # True for a ReferenceAnalysis whose service matches this AR,
        # that has not already been collected (dedup via qcanalyses),
        # of the requested reference type, in an accepted review state.
        if wan.portal_type != 'ReferenceAnalysis':
            return False
        an_state = wf.getInfoFor(wan, 'review_state')
        an_reftype = wan.getReferenceType()
        return wan.getServiceUID() in suids \
            and wan not in qcanalyses \
            and (qctype is None or an_reftype == qctype) \
            and (review_state is None or an_state in review_state)

    # Walk every worksheet containing one of this AR's analyses and pick
    # out the QC analyses (duplicates first, then references).
    for an in ans:
        an = an.getObject()
        ws = an.getWorksheet()
        if not ws:
            continue
        was = ws.getAnalyses()
        for wa in was:
            if valid_dup(wa):
                qcanalyses.append(wa)
            elif valid_ref(wa):
                qcanalyses.append(wa)
    return qcanalyses
constant[return the QC analyses performed in the worksheet in which, at
least, one sample of this AR is present.
Depending on qctype value, returns the analyses of:
- 'b': all Blank Reference Samples used in related worksheet/s
- 'c': all Control Reference Samples used in related worksheet/s
- 'd': duplicates only for samples contained in this AR
If qctype==None, returns all type of qc analyses mentioned above
]
variable[qcanalyses] assign[=] list[[]]
variable[suids] assign[=] list[[]]
variable[ans] assign[=] call[name[self].getAnalyses, parameter[]]
variable[wf] assign[=] call[name[getToolByName], parameter[name[self], constant[portal_workflow]]]
for taget[name[an]] in starred[name[ans]] begin[:]
variable[an] assign[=] call[name[an].getObject, parameter[]]
if compare[call[name[an].getServiceUID, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[suids]] begin[:]
call[name[suids].append, parameter[call[name[an].getServiceUID, parameter[]]]]
def function[valid_dup, parameter[wan]]:
if compare[name[wan].portal_type equal[==] constant[ReferenceAnalysis]] begin[:]
return[constant[False]]
variable[an_state] assign[=] call[name[wf].getInfoFor, parameter[name[wan], constant[review_state]]]
return[<ast.BoolOp object at 0x7da18c4cd7e0>]
def function[valid_ref, parameter[wan]]:
if compare[name[wan].portal_type not_equal[!=] constant[ReferenceAnalysis]] begin[:]
return[constant[False]]
variable[an_state] assign[=] call[name[wf].getInfoFor, parameter[name[wan], constant[review_state]]]
variable[an_reftype] assign[=] call[name[wan].getReferenceType, parameter[]]
return[<ast.BoolOp object at 0x7da18c4cd180>]
for taget[name[an]] in starred[name[ans]] begin[:]
variable[an] assign[=] call[name[an].getObject, parameter[]]
variable[ws] assign[=] call[name[an].getWorksheet, parameter[]]
if <ast.UnaryOp object at 0x7da18f58db10> begin[:]
continue
variable[was] assign[=] call[name[ws].getAnalyses, parameter[]]
for taget[name[wa]] in starred[name[was]] begin[:]
if call[name[valid_dup], parameter[name[wa]]] begin[:]
call[name[qcanalyses].append, parameter[name[wa]]]
return[name[qcanalyses]] | keyword[def] identifier[getQCAnalyses] ( identifier[self] , identifier[qctype] = keyword[None] , identifier[review_state] = keyword[None] ):
literal[string]
identifier[qcanalyses] =[]
identifier[suids] =[]
identifier[ans] = identifier[self] . identifier[getAnalyses] ()
identifier[wf] = identifier[getToolByName] ( identifier[self] , literal[string] )
keyword[for] identifier[an] keyword[in] identifier[ans] :
identifier[an] = identifier[an] . identifier[getObject] ()
keyword[if] identifier[an] . identifier[getServiceUID] () keyword[not] keyword[in] identifier[suids] :
identifier[suids] . identifier[append] ( identifier[an] . identifier[getServiceUID] ())
keyword[def] identifier[valid_dup] ( identifier[wan] ):
keyword[if] identifier[wan] . identifier[portal_type] == literal[string] :
keyword[return] keyword[False]
identifier[an_state] = identifier[wf] . identifier[getInfoFor] ( identifier[wan] , literal[string] )
keyword[return] identifier[wan] . identifier[portal_type] == literal[string] keyword[and] identifier[wan] . identifier[getRequestID] ()== identifier[self] . identifier[id] keyword[and] ( identifier[review_state] keyword[is] keyword[None] keyword[or] identifier[an_state] keyword[in] identifier[review_state] )
keyword[def] identifier[valid_ref] ( identifier[wan] ):
keyword[if] identifier[wan] . identifier[portal_type] != literal[string] :
keyword[return] keyword[False]
identifier[an_state] = identifier[wf] . identifier[getInfoFor] ( identifier[wan] , literal[string] )
identifier[an_reftype] = identifier[wan] . identifier[getReferenceType] ()
keyword[return] identifier[wan] . identifier[getServiceUID] () keyword[in] identifier[suids] keyword[and] identifier[wan] keyword[not] keyword[in] identifier[qcanalyses] keyword[and] ( identifier[qctype] keyword[is] keyword[None] keyword[or] identifier[an_reftype] == identifier[qctype] ) keyword[and] ( identifier[review_state] keyword[is] keyword[None] keyword[or] identifier[an_state] keyword[in] identifier[review_state] )
keyword[for] identifier[an] keyword[in] identifier[ans] :
identifier[an] = identifier[an] . identifier[getObject] ()
identifier[ws] = identifier[an] . identifier[getWorksheet] ()
keyword[if] keyword[not] identifier[ws] :
keyword[continue]
identifier[was] = identifier[ws] . identifier[getAnalyses] ()
keyword[for] identifier[wa] keyword[in] identifier[was] :
keyword[if] identifier[valid_dup] ( identifier[wa] ):
identifier[qcanalyses] . identifier[append] ( identifier[wa] )
keyword[elif] identifier[valid_ref] ( identifier[wa] ):
identifier[qcanalyses] . identifier[append] ( identifier[wa] )
keyword[return] identifier[qcanalyses] | def getQCAnalyses(self, qctype=None, review_state=None):
"""return the QC analyses performed in the worksheet in which, at
least, one sample of this AR is present.
Depending on qctype value, returns the analyses of:
- 'b': all Blank Reference Samples used in related worksheet/s
- 'c': all Control Reference Samples used in related worksheet/s
- 'd': duplicates only for samples contained in this AR
If qctype==None, returns all type of qc analyses mentioned above
"""
qcanalyses = []
suids = []
ans = self.getAnalyses()
wf = getToolByName(self, 'portal_workflow')
for an in ans:
an = an.getObject()
if an.getServiceUID() not in suids:
suids.append(an.getServiceUID()) # depends on [control=['if'], data=['suids']] # depends on [control=['for'], data=['an']]
def valid_dup(wan):
if wan.portal_type == 'ReferenceAnalysis':
return False # depends on [control=['if'], data=[]]
an_state = wf.getInfoFor(wan, 'review_state')
return wan.portal_type == 'DuplicateAnalysis' and wan.getRequestID() == self.id and (review_state is None or an_state in review_state)
def valid_ref(wan):
if wan.portal_type != 'ReferenceAnalysis':
return False # depends on [control=['if'], data=[]]
an_state = wf.getInfoFor(wan, 'review_state')
an_reftype = wan.getReferenceType()
return wan.getServiceUID() in suids and wan not in qcanalyses and (qctype is None or an_reftype == qctype) and (review_state is None or an_state in review_state)
for an in ans:
an = an.getObject()
ws = an.getWorksheet()
if not ws:
continue # depends on [control=['if'], data=[]]
was = ws.getAnalyses()
for wa in was:
if valid_dup(wa):
qcanalyses.append(wa) # depends on [control=['if'], data=[]]
elif valid_ref(wa):
qcanalyses.append(wa) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['wa']] # depends on [control=['for'], data=['an']]
return qcanalyses |
def forecast_until(self, timestamp, tsformat=None):
    """Sets the forecasting goal (timestamp wise).

    This enables the automatic determination of valuesToForecast.

    :param timestamp: timestamp containing the end date of the forecast.
    :param string tsformat: Format of the timestamp. This is used to convert
        the timestamp from UNIX epochs, if necessary. For valid examples
        take a look into the :py:func:`time.strptime` documentation.
    """
    end = timestamp
    if tsformat is not None:
        # A format string was given: parse the formatted timestamp into
        # UNIX epoch seconds before storing it.
        end = TimeSeries.convert_timestamp_to_epoch(end, tsformat)
    self._forecastUntil = end
constant[Sets the forecasting goal (timestamp wise).
This function enables the automatic determination of valuesToForecast.
:param timestamp: timestamp containing the end date of the forecast.
:param string tsformat: Format of the timestamp. This is used to convert the
timestamp from UNIX epochs, if necessary. For valid examples
take a look into the :py:func:`time.strptime` documentation.
]
if compare[name[tsformat] is_not constant[None]] begin[:]
variable[timestamp] assign[=] call[name[TimeSeries].convert_timestamp_to_epoch, parameter[name[timestamp], name[tsformat]]]
name[self]._forecastUntil assign[=] name[timestamp] | keyword[def] identifier[forecast_until] ( identifier[self] , identifier[timestamp] , identifier[tsformat] = keyword[None] ):
literal[string]
keyword[if] identifier[tsformat] keyword[is] keyword[not] keyword[None] :
identifier[timestamp] = identifier[TimeSeries] . identifier[convert_timestamp_to_epoch] ( identifier[timestamp] , identifier[tsformat] )
identifier[self] . identifier[_forecastUntil] = identifier[timestamp] | def forecast_until(self, timestamp, tsformat=None):
"""Sets the forecasting goal (timestamp wise).
This function enables the automatic determination of valuesToForecast.
:param timestamp: timestamp containing the end date of the forecast.
:param string tsformat: Format of the timestamp. This is used to convert the
timestamp from UNIX epochs, if necessary. For valid examples
take a look into the :py:func:`time.strptime` documentation.
"""
if tsformat is not None:
timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat) # depends on [control=['if'], data=['tsformat']]
self._forecastUntil = timestamp |
def _create_table_setup(self):
    """
    Return a list of SQL statements that creates a table reflecting the
    structure of a DataFrame. The first entry will be a CREATE TABLE
    statement while the rest will be CREATE INDEX statements.
    """
    column_names_and_types = self._get_column_names_and_types(
        self._sql_type_name
    )
    # Whitespace in column names cannot be escaped reliably in SQLite;
    # warn the caller instead of failing.
    pat = re.compile(r'\s+')
    column_names = [col_name for col_name, _, _ in column_names_and_types]
    if any(map(pat.search, column_names)):
        warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
    escape = _get_valid_sqlite_name
    create_tbl_stmts = [escape(cname) + ' ' + ctype
                        for cname, ctype, _ in column_names_and_types]
    if self.keys is not None and len(self.keys):
        # A single key may be passed as a scalar; normalize to a list.
        if not is_list_like(self.keys):
            keys = [self.keys]
        else:
            keys = self.keys
        cnames_br = ", ".join(escape(c) for c in keys)
        create_tbl_stmts.append(
            "CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
                tbl=self.name, cnames_br=cnames_br))
    create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
                    ',\n '.join(create_tbl_stmts) + "\n)"]
    ix_cols = [cname for cname, _, is_index in column_names_and_types
               if is_index]
    if len(ix_cols):
        cnames = "_".join(ix_cols)
        cnames_br = ",".join(escape(c) for c in ix_cols)
        # BUGFIX: " ON " had no leading space, producing
        # '... "ix_tbl_col"ON "tbl" ...'; SQLite only parsed it because
        # the quoted identifier terminates the token.
        create_stmts.append(
            "CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
            " ON " + escape(self.name) + " (" + cnames_br + ")")
    return create_stmts
constant[
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
]
variable[column_names_and_types] assign[=] call[name[self]._get_column_names_and_types, parameter[name[self]._sql_type_name]]
variable[pat] assign[=] call[name[re].compile, parameter[constant[\s+]]]
variable[column_names] assign[=] <ast.ListComp object at 0x7da18f00fd90>
if call[name[any], parameter[call[name[map], parameter[name[pat].search, name[column_names]]]]] begin[:]
call[name[warnings].warn, parameter[name[_SAFE_NAMES_WARNING]]]
variable[escape] assign[=] name[_get_valid_sqlite_name]
variable[create_tbl_stmts] assign[=] <ast.ListComp object at 0x7da18f00d960>
if <ast.BoolOp object at 0x7da18c4cfee0> begin[:]
if <ast.UnaryOp object at 0x7da20c990370> begin[:]
variable[keys] assign[=] list[[<ast.Attribute object at 0x7da20c993cd0>]]
variable[cnames_br] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da18f00c3a0>]]
call[name[create_tbl_stmts].append, parameter[call[constant[CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})].format, parameter[]]]]
variable[create_stmts] assign[=] list[[<ast.BinOp object at 0x7da18f00f8b0>]]
variable[ix_cols] assign[=] <ast.ListComp object at 0x7da18f00ce80>
if call[name[len], parameter[name[ix_cols]]] begin[:]
variable[cnames] assign[=] call[constant[_].join, parameter[name[ix_cols]]]
variable[cnames_br] assign[=] call[constant[,].join, parameter[<ast.GeneratorExp object at 0x7da18f00faf0>]]
call[name[create_stmts].append, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[CREATE INDEX ] + call[name[escape], parameter[binary_operation[binary_operation[binary_operation[constant[ix_] + name[self].name] + constant[_]] + name[cnames]]]]] + constant[ON ]] + call[name[escape], parameter[name[self].name]]] + constant[ (]] + name[cnames_br]] + constant[)]]]]
return[name[create_stmts]] | keyword[def] identifier[_create_table_setup] ( identifier[self] ):
literal[string]
identifier[column_names_and_types] = identifier[self] . identifier[_get_column_names_and_types] (
identifier[self] . identifier[_sql_type_name]
)
identifier[pat] = identifier[re] . identifier[compile] ( literal[string] )
identifier[column_names] =[ identifier[col_name] keyword[for] identifier[col_name] , identifier[_] , identifier[_] keyword[in] identifier[column_names_and_types] ]
keyword[if] identifier[any] ( identifier[map] ( identifier[pat] . identifier[search] , identifier[column_names] )):
identifier[warnings] . identifier[warn] ( identifier[_SAFE_NAMES_WARNING] , identifier[stacklevel] = literal[int] )
identifier[escape] = identifier[_get_valid_sqlite_name]
identifier[create_tbl_stmts] =[ identifier[escape] ( identifier[cname] )+ literal[string] + identifier[ctype]
keyword[for] identifier[cname] , identifier[ctype] , identifier[_] keyword[in] identifier[column_names_and_types] ]
keyword[if] identifier[self] . identifier[keys] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[self] . identifier[keys] ):
keyword[if] keyword[not] identifier[is_list_like] ( identifier[self] . identifier[keys] ):
identifier[keys] =[ identifier[self] . identifier[keys] ]
keyword[else] :
identifier[keys] = identifier[self] . identifier[keys]
identifier[cnames_br] = literal[string] . identifier[join] ( identifier[escape] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[keys] )
identifier[create_tbl_stmts] . identifier[append] (
literal[string] . identifier[format] (
identifier[tbl] = identifier[self] . identifier[name] , identifier[cnames_br] = identifier[cnames_br] ))
identifier[create_stmts] =[ literal[string] + identifier[escape] ( identifier[self] . identifier[name] )+ literal[string] +
literal[string] . identifier[join] ( identifier[create_tbl_stmts] )+ literal[string] ]
identifier[ix_cols] =[ identifier[cname] keyword[for] identifier[cname] , identifier[_] , identifier[is_index] keyword[in] identifier[column_names_and_types]
keyword[if] identifier[is_index] ]
keyword[if] identifier[len] ( identifier[ix_cols] ):
identifier[cnames] = literal[string] . identifier[join] ( identifier[ix_cols] )
identifier[cnames_br] = literal[string] . identifier[join] ( identifier[escape] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[ix_cols] )
identifier[create_stmts] . identifier[append] (
literal[string] + identifier[escape] ( literal[string] + identifier[self] . identifier[name] + literal[string] + identifier[cnames] )+
literal[string] + identifier[escape] ( identifier[self] . identifier[name] )+ literal[string] + identifier[cnames_br] + literal[string] )
keyword[return] identifier[create_stmts] | def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
pat = re.compile('\\s+')
column_names = [col_name for (col_name, _, _) in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6) # depends on [control=['if'], data=[]]
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype for (cname, ctype, _) in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys] # depends on [control=['if'], data=[]]
else:
keys = self.keys
cnames_br = ', '.join((escape(c) for c in keys))
create_tbl_stmts.append('CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})'.format(tbl=self.name, cnames_br=cnames_br)) # depends on [control=['if'], data=[]]
create_stmts = ['CREATE TABLE ' + escape(self.name) + ' (\n' + ',\n '.join(create_tbl_stmts) + '\n)']
ix_cols = [cname for (cname, _, is_index) in column_names_and_types if is_index]
if len(ix_cols):
cnames = '_'.join(ix_cols)
cnames_br = ','.join((escape(c) for c in ix_cols))
create_stmts.append('CREATE INDEX ' + escape('ix_' + self.name + '_' + cnames) + 'ON ' + escape(self.name) + ' (' + cnames_br + ')') # depends on [control=['if'], data=[]]
return create_stmts |
def set_proxy(url, basic_auth=None):
    """
    Access Bot API through a proxy.

    :param url: proxy URL
    :param basic_auth: 2-tuple ``('username', 'password')``
    """
    global _pools, _onetime_pool_spec
    if not url:
        # No proxy URL given: restore direct connections.
        _pools['default'] = urllib3.PoolManager(**_default_pool_params)
        _onetime_pool_spec = (urllib3.PoolManager, _onetime_pool_params)
        return
    # Proxy requested; attach Proxy-Authorization headers when
    # credentials were supplied.
    if basic_auth:
        auth_headers = urllib3.make_headers(
            proxy_basic_auth=':'.join(basic_auth))
        proxy_kwargs = dict(proxy_headers=auth_headers)
    else:
        proxy_kwargs = {}
    _pools['default'] = urllib3.ProxyManager(
        url, **proxy_kwargs, **_default_pool_params)
    _onetime_pool_spec = (
        urllib3.ProxyManager,
        dict(proxy_url=url, **proxy_kwargs, **_onetime_pool_params))
constant[
Access Bot API through a proxy.
:param url: proxy URL
:param basic_auth: 2-tuple ``('username', 'password')``
]
<ast.Global object at 0x7da1b1b3f040>
if <ast.UnaryOp object at 0x7da1b1b3f430> begin[:]
call[name[_pools]][constant[default]] assign[=] call[name[urllib3].PoolManager, parameter[]]
variable[_onetime_pool_spec] assign[=] tuple[[<ast.Attribute object at 0x7da1b1b3cdf0>, <ast.Name object at 0x7da1b1b3dd20>]] | keyword[def] identifier[set_proxy] ( identifier[url] , identifier[basic_auth] = keyword[None] ):
literal[string]
keyword[global] identifier[_pools] , identifier[_onetime_pool_spec]
keyword[if] keyword[not] identifier[url] :
identifier[_pools] [ literal[string] ]= identifier[urllib3] . identifier[PoolManager] (** identifier[_default_pool_params] )
identifier[_onetime_pool_spec] =( identifier[urllib3] . identifier[PoolManager] , identifier[_onetime_pool_params] )
keyword[elif] identifier[basic_auth] :
identifier[h] = identifier[urllib3] . identifier[make_headers] ( identifier[proxy_basic_auth] = literal[string] . identifier[join] ( identifier[basic_auth] ))
identifier[_pools] [ literal[string] ]= identifier[urllib3] . identifier[ProxyManager] ( identifier[url] , identifier[proxy_headers] = identifier[h] ,** identifier[_default_pool_params] )
identifier[_onetime_pool_spec] =( identifier[urllib3] . identifier[ProxyManager] , identifier[dict] ( identifier[proxy_url] = identifier[url] , identifier[proxy_headers] = identifier[h] ,** identifier[_onetime_pool_params] ))
keyword[else] :
identifier[_pools] [ literal[string] ]= identifier[urllib3] . identifier[ProxyManager] ( identifier[url] ,** identifier[_default_pool_params] )
identifier[_onetime_pool_spec] =( identifier[urllib3] . identifier[ProxyManager] , identifier[dict] ( identifier[proxy_url] = identifier[url] ,** identifier[_onetime_pool_params] )) | def set_proxy(url, basic_auth=None):
"""
Access Bot API through a proxy.
:param url: proxy URL
:param basic_auth: 2-tuple ``('username', 'password')``
"""
global _pools, _onetime_pool_spec
if not url:
_pools['default'] = urllib3.PoolManager(**_default_pool_params)
_onetime_pool_spec = (urllib3.PoolManager, _onetime_pool_params) # depends on [control=['if'], data=[]]
elif basic_auth:
h = urllib3.make_headers(proxy_basic_auth=':'.join(basic_auth))
_pools['default'] = urllib3.ProxyManager(url, proxy_headers=h, **_default_pool_params)
_onetime_pool_spec = (urllib3.ProxyManager, dict(proxy_url=url, proxy_headers=h, **_onetime_pool_params)) # depends on [control=['if'], data=[]]
else:
_pools['default'] = urllib3.ProxyManager(url, **_default_pool_params)
_onetime_pool_spec = (urllib3.ProxyManager, dict(proxy_url=url, **_onetime_pool_params)) |
def resolve_broadcast_params(inputs, function_proto, batch_size):
    '''Resolve shape parameter and returns shape.

    A single negative dimension in ``broadcast_param.shape`` acts as a
    placeholder that is replaced with ``batch_size``.

    :param inputs: unused; kept for interface parity with the other
        ``resolve_*_params`` helpers.
    :param function_proto: function proto exposing
        ``broadcast_param.shape.dim``.
    :param batch_size: value substituted for the negative dimension.
    :return: tuple of ints, the resolved broadcast shape.
    :raises ValueError: if more than one dimension is negative.
    '''
    f = function_proto  # alias
    # A. Detect multiple negative dimensions (not allowed).
    negative_count = sum(1 for d in f.broadcast_param.shape.dim if d < 0)
    if negative_count > 1:
        # BUGFIX: message said 'Reshape:' (copy-paste from the reshape
        # resolver); this is the broadcast resolver.
        raise ValueError('Broadcast: shape has multiple negative number.')
    # B. Fill negative dimensions with batch size.
    shape = tuple(
        [d if d >= 0 else batch_size for d in f.broadcast_param.shape.dim])
    return shape
constant[Resolve shape parameter and returns shape.
]
variable[f] assign[=] name[function_proto]
variable[negative_count] assign[=] constant[0]
for taget[name[d]] in starred[name[f].broadcast_param.shape.dim] begin[:]
if compare[name[d] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18f00d7e0>
if compare[name[negative_count] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da18f00f130>
variable[shape] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da18f00e920>]]
return[name[shape]] | keyword[def] identifier[resolve_broadcast_params] ( identifier[inputs] , identifier[function_proto] , identifier[batch_size] ):
literal[string]
identifier[f] = identifier[function_proto]
identifier[negative_count] = literal[int]
keyword[for] identifier[d] keyword[in] identifier[f] . identifier[broadcast_param] . identifier[shape] . identifier[dim] :
keyword[if] identifier[d] < literal[int] :
identifier[negative_count] += literal[int]
keyword[if] identifier[negative_count] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[shape] = identifier[tuple] (
[ identifier[d] keyword[if] identifier[d] >= literal[int] keyword[else] identifier[batch_size] keyword[for] identifier[d] keyword[in] identifier[f] . identifier[broadcast_param] . identifier[shape] . identifier[dim] ])
keyword[return] identifier[shape] | def resolve_broadcast_params(inputs, function_proto, batch_size):
"""Resolve shape parameter and returns shape.
"""
f = function_proto # alias
# A. Detect multiple negative dimensions (not allowed).
negative_count = 0
for d in f.broadcast_param.shape.dim:
if d < 0:
negative_count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
if negative_count > 1:
raise ValueError('Reshape: shape has multiple negative number.') # depends on [control=['if'], data=[]]
# B. Fill negative dimensions with batch size.
shape = tuple([d if d >= 0 else batch_size for d in f.broadcast_param.shape.dim])
return shape |
def encode(self, envelope, session, **kwargs):
    """ :meth:`.WMessengerOnionCoderLayerProto.encode` method implementation.

    :param envelope: original envelope
    :param session: original session
    :param kwargs: additional arguments
    :return: WMessengerTextEnvelope
    """
    # Hex-encode the payload and keep the original envelope as metadata.
    hex_payload = str(WHex(envelope.message()))
    return WMessengerTextEnvelope(hex_payload, meta=envelope)
constant[ :meth:`.WMessengerOnionCoderLayerProto.encode` method implementation.
:param envelope: original envelope
:param session: original session
:param kwargs: additional arguments
:return: WMessengerTextEnvelope
]
return[call[name[WMessengerTextEnvelope], parameter[call[name[str], parameter[call[name[WHex], parameter[call[name[envelope].message, parameter[]]]]]]]]] | keyword[def] identifier[encode] ( identifier[self] , identifier[envelope] , identifier[session] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[WMessengerTextEnvelope] ( identifier[str] ( identifier[WHex] ( identifier[envelope] . identifier[message] ())), identifier[meta] = identifier[envelope] ) | def encode(self, envelope, session, **kwargs):
""" :meth:`.WMessengerOnionCoderLayerProto.encode` method implementation.
:param envelope: original envelope
:param session: original session
:param kwargs: additional arguments
:return: WMessengerTextEnvelope
"""
return WMessengerTextEnvelope(str(WHex(envelope.message())), meta=envelope) |
def check_i18n():
    """Generator that checks token stream for localization errors.

    Expects tokens to be ``send``ed one by one.
    Raises LocalizationError if some error is found.

    Protocol: the driver sends ``tokenize``-style 5-tuples
    ``(token_type, text, start, end, line)`` into the generator; the
    generator raises LocalizationError (carrying the offending position
    and an Hxxx code) when it detects a misuse of the translation
    helpers ``_``, ``_LI``, ``_LW``, ``_LE`` and ``_LC``.
    """
    while True:
        try:
            # Wait for the next token from the driver.
            token_type, text, _, _, line = yield
        except GeneratorExit:
            # Driver closed the generator: end of token stream.
            return

        if text == "def" and token_type == tokenize.NAME:
            # explicitly ignore function definitions, as oslo defines these
            return
        if (token_type == tokenize.NAME and
                text in ["_", "_LI", "_LW", "_LE", "_LC"]):
            # Found a translation-helper name; skip any newline tokens
            # between the name and whatever follows it.
            while True:
                token_type, text, start, _, _ = yield
                if token_type != tokenize.NL:
                    break
            if token_type != tokenize.OP or text != "(":
                continue  # not a localization call

            # Accumulate the argument: adjacent STRING tokens are
            # implicitly concatenated, NL tokens are ignored; the first
            # other token ends the argument and is inspected below.
            format_string = ''
            while True:
                token_type, text, start, _, _ = yield
                if token_type == tokenize.STRING:
                    # The tokenizer guarantees ``text`` is a string
                    # literal's source, so eval yields its value.
                    format_string += eval(text)
                elif token_type == tokenize.NL:
                    pass
                else:
                    break

            if not format_string:
                raise LocalizationError(
                    start, "H701: Empty localization string")
            if token_type != tokenize.OP:
                raise LocalizationError(
                    start, "H701: Invalid localization call")
            if text != ")":
                # The argument did not end at the closing paren: the
                # call wraps an expression, not a plain string literal.
                if text == "%":
                    raise LocalizationError(
                        start,
                        "H702: Formatting operation should be outside"
                        " of localization method call")
                elif text == "+":
                    raise LocalizationError(
                        start,
                        "H702: Use bare string concatenation instead of +")
                else:
                    raise LocalizationError(
                        start, "H702: Argument to _, _LI, _LW, _LC, or _LE "
                        "must be just a string")

            # FORMAT_RE is presumably a module-level regex capturing
            # (key, spec) pairs of %-style placeholders -- defined
            # elsewhere in this file.
            format_specs = FORMAT_RE.findall(format_string)
            positional_specs = [(key, spec) for key, spec in format_specs
                                if not key and spec]
            # not spec means %%, key means %(smth)s
            if len(positional_specs) > 1:
                raise LocalizationError(
                    start, "H703: Multiple positional placeholders")
constant[Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
]
while constant[True] begin[:]
<ast.Try object at 0x7da18eb54700>
if <ast.BoolOp object at 0x7da18eb550c0> begin[:]
return[None]
if <ast.BoolOp object at 0x7da18eb547c0> begin[:]
while constant[True] begin[:]
<ast.Tuple object at 0x7da18eb57340> assign[=] <ast.Yield object at 0x7da18eb56fb0>
if compare[name[token_type] not_equal[!=] name[tokenize].NL] begin[:]
break
if <ast.BoolOp object at 0x7da18eb543a0> begin[:]
continue
variable[format_string] assign[=] constant[]
while constant[True] begin[:]
<ast.Tuple object at 0x7da18eb54940> assign[=] <ast.Yield object at 0x7da18eb57a00>
if compare[name[token_type] equal[==] name[tokenize].STRING] begin[:]
<ast.AugAssign object at 0x7da18eb565f0>
if <ast.UnaryOp object at 0x7da18eb54a00> begin[:]
<ast.Raise object at 0x7da1b0499de0>
if compare[name[token_type] not_equal[!=] name[tokenize].OP] begin[:]
<ast.Raise object at 0x7da1b049a200>
if compare[name[text] not_equal[!=] constant[)]] begin[:]
if compare[name[text] equal[==] constant[%]] begin[:]
<ast.Raise object at 0x7da1b049a410>
variable[format_specs] assign[=] call[name[FORMAT_RE].findall, parameter[name[format_string]]]
variable[positional_specs] assign[=] <ast.ListComp object at 0x7da1b04b7010>
if compare[call[name[len], parameter[name[positional_specs]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b04999f0> | keyword[def] identifier[check_i18n] ():
literal[string]
keyword[while] keyword[True] :
keyword[try] :
identifier[token_type] , identifier[text] , identifier[_] , identifier[_] , identifier[line] = keyword[yield]
keyword[except] identifier[GeneratorExit] :
keyword[return]
keyword[if] identifier[text] == literal[string] keyword[and] identifier[token_type] == identifier[tokenize] . identifier[NAME] :
keyword[return]
keyword[if] ( identifier[token_type] == identifier[tokenize] . identifier[NAME] keyword[and]
identifier[text] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]):
keyword[while] keyword[True] :
identifier[token_type] , identifier[text] , identifier[start] , identifier[_] , identifier[_] = keyword[yield]
keyword[if] identifier[token_type] != identifier[tokenize] . identifier[NL] :
keyword[break]
keyword[if] identifier[token_type] != identifier[tokenize] . identifier[OP] keyword[or] identifier[text] != literal[string] :
keyword[continue]
identifier[format_string] = literal[string]
keyword[while] keyword[True] :
identifier[token_type] , identifier[text] , identifier[start] , identifier[_] , identifier[_] = keyword[yield]
keyword[if] identifier[token_type] == identifier[tokenize] . identifier[STRING] :
identifier[format_string] += identifier[eval] ( identifier[text] )
keyword[elif] identifier[token_type] == identifier[tokenize] . identifier[NL] :
keyword[pass]
keyword[else] :
keyword[break]
keyword[if] keyword[not] identifier[format_string] :
keyword[raise] identifier[LocalizationError] (
identifier[start] , literal[string] )
keyword[if] identifier[token_type] != identifier[tokenize] . identifier[OP] :
keyword[raise] identifier[LocalizationError] (
identifier[start] , literal[string] )
keyword[if] identifier[text] != literal[string] :
keyword[if] identifier[text] == literal[string] :
keyword[raise] identifier[LocalizationError] (
identifier[start] ,
literal[string]
literal[string] )
keyword[elif] identifier[text] == literal[string] :
keyword[raise] identifier[LocalizationError] (
identifier[start] ,
literal[string] )
keyword[else] :
keyword[raise] identifier[LocalizationError] (
identifier[start] , literal[string]
literal[string] )
identifier[format_specs] = identifier[FORMAT_RE] . identifier[findall] ( identifier[format_string] )
identifier[positional_specs] =[( identifier[key] , identifier[spec] ) keyword[for] identifier[key] , identifier[spec] keyword[in] identifier[format_specs]
keyword[if] keyword[not] identifier[key] keyword[and] identifier[spec] ]
keyword[if] identifier[len] ( identifier[positional_specs] )> literal[int] :
keyword[raise] identifier[LocalizationError] (
identifier[start] , literal[string] ) | def check_i18n():
"""Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
"""
while True:
try:
(token_type, text, _, _, line) = (yield) # depends on [control=['try'], data=[]]
except GeneratorExit:
return # depends on [control=['except'], data=[]]
if text == 'def' and token_type == tokenize.NAME:
# explicitly ignore function definitions, as oslo defines these
return # depends on [control=['if'], data=[]]
if token_type == tokenize.NAME and text in ['_', '_LI', '_LW', '_LE', '_LC']:
while True:
(token_type, text, start, _, _) = (yield)
if token_type != tokenize.NL:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if token_type != tokenize.OP or text != '(':
continue # not a localization call # depends on [control=['if'], data=[]]
format_string = ''
while True:
(token_type, text, start, _, _) = (yield)
if token_type == tokenize.STRING:
format_string += eval(text) # depends on [control=['if'], data=[]]
elif token_type == tokenize.NL:
pass # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
if not format_string:
raise LocalizationError(start, 'H701: Empty localization string') # depends on [control=['if'], data=[]]
if token_type != tokenize.OP:
raise LocalizationError(start, 'H701: Invalid localization call') # depends on [control=['if'], data=[]]
if text != ')':
if text == '%':
raise LocalizationError(start, 'H702: Formatting operation should be outside of localization method call') # depends on [control=['if'], data=[]]
elif text == '+':
raise LocalizationError(start, 'H702: Use bare string concatenation instead of +') # depends on [control=['if'], data=[]]
else:
raise LocalizationError(start, 'H702: Argument to _, _LI, _LW, _LC, or _LE must be just a string') # depends on [control=['if'], data=['text']]
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for (key, spec) in format_specs if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
raise LocalizationError(start, 'H703: Multiple positional placeholders') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def cowsay(text='', align='centre'):
    """Print an ASCII-art cow with a speech bubble containing ``text``.

    :type text: string
    :param text: The text to print out (shown quoted, via ``repr``).
    :type align: string
    :param align: Where to place the cow under the bubble. One of
        'left', 'centre'/'center' or 'right' (case-insensitive).
    :raises ValueError: if ``align`` is not a recognised alignment.
    """
    mode = align.lower()
    message = str(text)
    # Top and bottom of the speech bubble are the same run of spaces,
    # two characters wider than the text.
    bar = ' ' * (len(text) + 2)
    if mode in ('center', 'centre'):
        # Centre the cow under the bubble: half the bar width, plus one.
        indent = ' ' * (int(len(bar) / 2) + 1)
    elif mode == 'left':
        indent = ' '
    elif mode == 'right':
        indent = ' ' * (len(text) + 2)
    else:
        raise ValueError("Invalid alignment provided.")
    # Bubble first, then the cow rows, each prefixed with the indent.
    print(bar)
    # repr() is part of the original output contract: the text appears quoted.
    print('( ' + repr(message) + ' )')
    print(bar)
    for row in (r'o ^__^ ',
                r' o (oO)\_______',
                r' (__)\ )\/\ ',
                r' U ||----w | ',
                r' || || '):
        print(indent + row)
constant[
Simulate an ASCII cow saying text.
:type text: string
:param text: The text to print out.
:type align: string
:param algin: Where to align the cow. Can be 'left', 'centre' or 'right'
]
variable[align] assign[=] call[name[align].lower, parameter[]]
variable[cowtext] assign[=] call[name[str], parameter[name[text]]]
variable[topbar] assign[=] binary_operation[constant[ ] * binary_operation[call[name[len], parameter[name[text]]] + constant[2]]]
variable[bottombar] assign[=] binary_operation[constant[ ] * binary_operation[call[name[len], parameter[name[text]]] + constant[2]]]
if compare[name[align] in list[[<ast.Constant object at 0x7da1b0b3b670>, <ast.Constant object at 0x7da1b0b38640>]]] begin[:]
variable[spacing] assign[=] binary_operation[constant[ ] * binary_operation[call[name[int], parameter[binary_operation[call[name[len], parameter[name[topbar]]] / constant[2]]]] + constant[1]]]
call[name[print], parameter[name[topbar]]]
call[name[print], parameter[binary_operation[binary_operation[constant[( ] + call[name[repr], parameter[call[name[str], parameter[name[cowtext]]]]]] + constant[ )]]]]
call[name[print], parameter[name[bottombar]]]
call[name[print], parameter[binary_operation[name[spacing] + constant[o ^__^ ]]]]
call[name[print], parameter[binary_operation[name[spacing] + constant[ o (oO)\_______]]]]
call[name[print], parameter[binary_operation[name[spacing] + constant[ (__)\ )\/\ ]]]]
call[name[print], parameter[binary_operation[name[spacing] + constant[ U ||----w | ]]]]
call[name[print], parameter[binary_operation[name[spacing] + constant[ || || ]]]] | keyword[def] identifier[cowsay] ( identifier[text] = literal[string] , identifier[align] = literal[string] ):
literal[string]
identifier[align] = identifier[align] . identifier[lower] ()
identifier[cowtext] = identifier[str] ( identifier[text] )
identifier[topbar] = literal[string] *( identifier[len] ( identifier[text] )+ literal[int] )
identifier[bottombar] = literal[string] *( identifier[len] ( identifier[text] )+ literal[int] )
keyword[if] identifier[align] keyword[in] [ literal[string] , literal[string] ]:
identifier[spacing] = literal[string] *( identifier[int] ( identifier[len] ( identifier[topbar] )/ literal[int] )+ literal[int] )
keyword[elif] identifier[align] == literal[string] :
identifier[spacing] = literal[string]
keyword[elif] identifier[align] == literal[string] :
identifier[spacing] = literal[string] *( identifier[len] ( identifier[text] )+ literal[int] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[print] ( identifier[topbar] )
identifier[print] ( literal[string] + identifier[repr] ( identifier[str] ( identifier[cowtext] ))+ literal[string] )
identifier[print] ( identifier[bottombar] )
identifier[print] ( identifier[spacing] + literal[string] )
identifier[print] ( identifier[spacing] + literal[string] )
identifier[print] ( identifier[spacing] + literal[string] )
identifier[print] ( identifier[spacing] + literal[string] )
identifier[print] ( identifier[spacing] + literal[string] ) | def cowsay(text='', align='centre'):
"""
Simulate an ASCII cow saying text.
:type text: string
:param text: The text to print out.
:type align: string
:param algin: Where to align the cow. Can be 'left', 'centre' or 'right'
"""
# Make align lowercase
align = align.lower()
# Set the cowtext
cowtext = str(text)
# Set top part of speech bubble to the length of the text plus 2
topbar = ' ' * (len(text) + 2)
# Set bottom part of speech bubble to the length of the text plus 2
bottombar = ' ' * (len(text) + 2)
# If align is centre
if align in ['center', 'centre']:
# Set the spacing before the cow to the length of half of the length of topbar plus 1
spacing = ' ' * (int(len(topbar) / 2) + 1) # depends on [control=['if'], data=[]]
# If align is left
elif align == 'left':
# Set spacing to a single space
spacing = ' ' # depends on [control=['if'], data=[]]
# If align is right
elif align == 'right':
# Set the spacing to the length of the text plus 2
spacing = ' ' * (len(text) + 2) # depends on [control=['if'], data=[]]
else:
# Raise a runtime warning
raise ValueError('Invalid alignment provided.')
# Print the top bar
print(topbar)
# Print the text
print('( ' + repr(str(cowtext)) + ' )')
# Print the bottom bar
print(bottombar)
# Print the cow with the spacing
print(spacing + 'o ^__^ ')
print(spacing + ' o (oO)\\_______')
print(spacing + ' (__)\\ )\\/\\ ')
print(spacing + ' U ||----w | ')
print(spacing + ' || || ') |
def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret):
    '''
    Helper function used by ``check_perms`` for checking and setting Grant and
    Deny permissions.

    First pass collects the required changes into ``changes`` by comparing
    ``new_perms`` against the non-inherited ACEs in ``cur_perms``; second pass
    applies them with ``set_permissions`` (or only records them when
    ``__opts__['test']`` is True). ``ret`` is mutated in place: entries are
    appended to ``ret['comment']`` and ``ret['changes']['perms']``, and
    ``ret['result']`` is set to False on failure.

    Args:
        obj_name (str):
            The name or full path to the object
        obj_type (Optional[str]):
            The type of object for which to check permissions. Default is 'file'
        new_perms (dict):
            A dictionary containing the user/group and the basic permissions to
            check/grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
        cur_perms (dict):
            A dictionary containing the user/group permissions as they currently
            exists on the target object.
        access_mode (str):
            The access mode to set. Either ``grant`` or ``deny``
        ret (dict):
            A dictionary to append changes to and return. If not passed, will
            create a new dictionary to return.

    Returns:
        dict: A dictionary of return data as expected by the state system
    '''
    access_mode = access_mode.lower()
    changes = {}
    # ---- Pass 1: work out which users need which changes -------------------
    for user in new_perms:
        applies_to_text = ''
        # Check that user exists; unknown principals are reported and skipped
        try:
            user_name = get_name(principal=user)
        except CommandExecutionError:
            ret['comment'].append(
                '{0} Perms: User "{1}" missing from Target System'
                ''.format(access_mode.capitalize(), user))
            continue
        # Get the proper applies_to text
        # NOTE(review): this lookup always uses the 'file' property map even
        # when obj_type is a registry type — confirm that is intended.
        if 'applies_to' in new_perms[user]:
            applies_to = new_perms[user]['applies_to']
            at_flag = flags().ace_prop['file'][applies_to]
            applies_to_text = flags().ace_prop['file'][at_flag]
        else:
            applies_to = None
        # User has no non-inherited ACE yet: everything requested is a change
        if user_name not in cur_perms['Not Inherited']:
            if user not in changes:
                changes[user] = {}
            changes[user][access_mode] = new_perms[user]['perms']
            if applies_to:
                changes[user]['applies_to'] = applies_to
        else:
            # Check Perms for basic perms (a single permission string)
            if isinstance(new_perms[user]['perms'], six.string_types):
                if not has_permission(obj_name=obj_name,
                                      principal=user_name,
                                      permission=new_perms[user]['perms'],
                                      access_mode=access_mode,
                                      obj_type=obj_type,
                                      exact=False):
                    if user not in changes:
                        changes[user] = {}
                    changes[user][access_mode] = new_perms[user]['perms']
            # Check Perms for advanced perms (a list; missing ones accumulate)
            else:
                for perm in new_perms[user]['perms']:
                    if not has_permission(obj_name=obj_name,
                                          principal=user_name,
                                          permission=perm,
                                          access_mode=access_mode,
                                          obj_type=obj_type,
                                          exact=False):
                        if user not in changes:
                            changes[user] = {access_mode: []}
                        changes[user][access_mode].append(perm)
            # Check if applies_to was passed
            if applies_to:
                # Is there a deny/grant permission set
                if access_mode in cur_perms['Not Inherited'][user_name]:
                    # If the applies to settings are different, use the new one
                    if not cur_perms['Not Inherited'][user_name][access_mode]['applies to'] == applies_to_text:
                        if user not in changes:
                            changes[user] = {}
                        changes[user]['applies_to'] = applies_to
    # ---- Pass 2: record (test mode) or apply the collected changes ---------
    if changes:
        if 'perms' not in ret['changes']:
            ret['changes']['perms'] = {}
        for user in changes:
            user_name = get_name(principal=user)
            if __opts__['test'] is True:
                # Test mode: only report what would change
                if user not in ret['changes']['perms']:
                    ret['changes']['perms'][user] = {}
                ret['changes']['perms'][user][access_mode] = changes[user][access_mode]
            else:
                # Get applies_to
                applies_to = None
                if 'applies_to' not in changes[user]:
                    # Get current "applies to" settings from the file:
                    # map the human-readable text back to its flag name.
                    if user_name in cur_perms['Not Inherited'] and \
                            access_mode in cur_perms['Not Inherited'][user_name]:
                        for flag in flags().ace_prop[obj_type]:
                            if flags().ace_prop[obj_type][flag] == cur_perms['Not Inherited'][user_name][access_mode]['applies to']:
                                at_flag = flag
                                # NOTE(review): the qualified
                                # salt.utils.win_dacl.flags() call here is
                                # presumably the same callable as the bare
                                # flags() used elsewhere — verify.
                                for flag1 in flags().ace_prop[obj_type]:
                                    if salt.utils.win_dacl.flags().ace_prop[obj_type][flag1] == at_flag:
                                        applies_to = flag1
                    if not applies_to:
                        # Fall back to sensible per-object-type defaults
                        if obj_type.lower() in ['registry', 'registry32']:
                            applies_to = 'this_key_subkeys'
                        else:
                            applies_to = 'this_folder_subfolders_files'
                else:
                    applies_to = changes[user]['applies_to']
                perms = []
                if access_mode not in changes[user]:
                    # Get current perms
                    # Check for basic perms: perms becomes a single flag string
                    for perm in cur_perms['Not Inherited'][user_name][access_mode]['permissions']:
                        for flag in flags().ace_perms[obj_type]['basic']:
                            if flags().ace_perms[obj_type]['basic'][flag] == perm:
                                perm_flag = flag
                                for flag1 in flags().ace_perms[obj_type]['basic']:
                                    if flags().ace_perms[obj_type]['basic'][flag1] == perm_flag:
                                        perms = flag1
                    # Make a list of advanced perms (only if no basic match)
                    if not perms:
                        for perm in cur_perms['Not Inherited'][user_name][access_mode]['permissions']:
                            for flag in flags().ace_perms[obj_type]['advanced']:
                                if flags().ace_perms[obj_type]['advanced'][flag] == perm:
                                    perm_flag = flag
                                    for flag1 in flags().ace_perms[obj_type]['advanced']:
                                        if flags().ace_perms[obj_type]['advanced'][flag1] == perm_flag:
                                            perms.append(flag1)
                else:
                    perms = changes[user][access_mode]
                try:
                    set_permissions(
                        obj_name=obj_name,
                        principal=user_name,
                        permissions=perms,
                        access_mode=access_mode,
                        applies_to=applies_to,
                        obj_type=obj_type)
                    if user not in ret['changes']['perms']:
                        ret['changes']['perms'][user] = {}
                    ret['changes']['perms'][user][access_mode] = changes[user][access_mode]
                except CommandExecutionError as exc:
                    # Applying one user's ACE failed; record it and keep going
                    ret['result'] = False
                    ret['comment'].append(
                        'Failed to change {0} permissions for "{1}" to {2}\n'
                        'Error: {3}'.format(access_mode, user, changes[user], exc.strerror))
    return ret
return ret | def function[_check_perms, parameter[obj_name, obj_type, new_perms, cur_perms, access_mode, ret]]:
constant[
Helper function used by ``check_perms`` for checking and setting Grant and
Deny permissions.
Args:
obj_name (str):
The name or full path to the object
obj_type (Optional[str]):
The type of object for which to check permissions. Default is 'file'
new_perms (dict):
A dictionary containing the user/group and the basic permissions to
check/grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
cur_perms (dict):
A dictionary containing the user/group permissions as they currently
exists on the target object.
access_mode (str):
The access mode to set. Either ``grant`` or ``deny``
ret (dict):
A dictionary to append changes to and return. If not passed, will
create a new dictionary to return.
Returns:
dict: A dictionary of return data as expected by the state system
]
variable[access_mode] assign[=] call[name[access_mode].lower, parameter[]]
variable[changes] assign[=] dictionary[[], []]
for taget[name[user]] in starred[name[new_perms]] begin[:]
variable[applies_to_text] assign[=] constant[]
<ast.Try object at 0x7da18ede6f80>
if compare[constant[applies_to] in call[name[new_perms]][name[user]]] begin[:]
variable[applies_to] assign[=] call[call[name[new_perms]][name[user]]][constant[applies_to]]
variable[at_flag] assign[=] call[call[call[name[flags], parameter[]].ace_prop][constant[file]]][name[applies_to]]
variable[applies_to_text] assign[=] call[call[call[name[flags], parameter[]].ace_prop][constant[file]]][name[at_flag]]
if compare[name[user_name] <ast.NotIn object at 0x7da2590d7190> call[name[cur_perms]][constant[Not Inherited]]] begin[:]
if compare[name[user] <ast.NotIn object at 0x7da2590d7190> name[changes]] begin[:]
call[name[changes]][name[user]] assign[=] dictionary[[], []]
call[call[name[changes]][name[user]]][name[access_mode]] assign[=] call[call[name[new_perms]][name[user]]][constant[perms]]
if name[applies_to] begin[:]
call[call[name[changes]][name[user]]][constant[applies_to]] assign[=] name[applies_to]
if name[changes] begin[:]
if compare[constant[perms] <ast.NotIn object at 0x7da2590d7190> call[name[ret]][constant[changes]]] begin[:]
call[call[name[ret]][constant[changes]]][constant[perms]] assign[=] dictionary[[], []]
for taget[name[user]] in starred[name[changes]] begin[:]
variable[user_name] assign[=] call[name[get_name], parameter[]]
if compare[call[name[__opts__]][constant[test]] is constant[True]] begin[:]
if compare[name[user] <ast.NotIn object at 0x7da2590d7190> call[call[name[ret]][constant[changes]]][constant[perms]]] begin[:]
call[call[call[name[ret]][constant[changes]]][constant[perms]]][name[user]] assign[=] dictionary[[], []]
call[call[call[call[name[ret]][constant[changes]]][constant[perms]]][name[user]]][name[access_mode]] assign[=] call[call[name[changes]][name[user]]][name[access_mode]]
return[name[ret]] | keyword[def] identifier[_check_perms] ( identifier[obj_name] , identifier[obj_type] , identifier[new_perms] , identifier[cur_perms] , identifier[access_mode] , identifier[ret] ):
literal[string]
identifier[access_mode] = identifier[access_mode] . identifier[lower] ()
identifier[changes] ={}
keyword[for] identifier[user] keyword[in] identifier[new_perms] :
identifier[applies_to_text] = literal[string]
keyword[try] :
identifier[user_name] = identifier[get_name] ( identifier[principal] = identifier[user] )
keyword[except] identifier[CommandExecutionError] :
identifier[ret] [ literal[string] ]. identifier[append] (
literal[string]
literal[string] . identifier[format] ( identifier[access_mode] . identifier[capitalize] (), identifier[user] ))
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[new_perms] [ identifier[user] ]:
identifier[applies_to] = identifier[new_perms] [ identifier[user] ][ literal[string] ]
identifier[at_flag] = identifier[flags] (). identifier[ace_prop] [ literal[string] ][ identifier[applies_to] ]
identifier[applies_to_text] = identifier[flags] (). identifier[ace_prop] [ literal[string] ][ identifier[at_flag] ]
keyword[else] :
identifier[applies_to] = keyword[None]
keyword[if] identifier[user_name] keyword[not] keyword[in] identifier[cur_perms] [ literal[string] ]:
keyword[if] identifier[user] keyword[not] keyword[in] identifier[changes] :
identifier[changes] [ identifier[user] ]={}
identifier[changes] [ identifier[user] ][ identifier[access_mode] ]= identifier[new_perms] [ identifier[user] ][ literal[string] ]
keyword[if] identifier[applies_to] :
identifier[changes] [ identifier[user] ][ literal[string] ]= identifier[applies_to]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[new_perms] [ identifier[user] ][ literal[string] ], identifier[six] . identifier[string_types] ):
keyword[if] keyword[not] identifier[has_permission] ( identifier[obj_name] = identifier[obj_name] ,
identifier[principal] = identifier[user_name] ,
identifier[permission] = identifier[new_perms] [ identifier[user] ][ literal[string] ],
identifier[access_mode] = identifier[access_mode] ,
identifier[obj_type] = identifier[obj_type] ,
identifier[exact] = keyword[False] ):
keyword[if] identifier[user] keyword[not] keyword[in] identifier[changes] :
identifier[changes] [ identifier[user] ]={}
identifier[changes] [ identifier[user] ][ identifier[access_mode] ]= identifier[new_perms] [ identifier[user] ][ literal[string] ]
keyword[else] :
keyword[for] identifier[perm] keyword[in] identifier[new_perms] [ identifier[user] ][ literal[string] ]:
keyword[if] keyword[not] identifier[has_permission] ( identifier[obj_name] = identifier[obj_name] ,
identifier[principal] = identifier[user_name] ,
identifier[permission] = identifier[perm] ,
identifier[access_mode] = identifier[access_mode] ,
identifier[obj_type] = identifier[obj_type] ,
identifier[exact] = keyword[False] ):
keyword[if] identifier[user] keyword[not] keyword[in] identifier[changes] :
identifier[changes] [ identifier[user] ]={ identifier[access_mode] :[]}
identifier[changes] [ identifier[user] ][ identifier[access_mode] ]. identifier[append] ( identifier[perm] )
keyword[if] identifier[applies_to] :
keyword[if] identifier[access_mode] keyword[in] identifier[cur_perms] [ literal[string] ][ identifier[user_name] ]:
keyword[if] keyword[not] identifier[cur_perms] [ literal[string] ][ identifier[user_name] ][ identifier[access_mode] ][ literal[string] ]== identifier[applies_to_text] :
keyword[if] identifier[user] keyword[not] keyword[in] identifier[changes] :
identifier[changes] [ identifier[user] ]={}
identifier[changes] [ identifier[user] ][ literal[string] ]= identifier[applies_to]
keyword[if] identifier[changes] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ][ literal[string] ]={}
keyword[for] identifier[user] keyword[in] identifier[changes] :
identifier[user_name] = identifier[get_name] ( identifier[principal] = identifier[user] )
keyword[if] identifier[__opts__] [ literal[string] ] keyword[is] keyword[True] :
keyword[if] identifier[user] keyword[not] keyword[in] identifier[ret] [ literal[string] ][ literal[string] ]:
identifier[ret] [ literal[string] ][ literal[string] ][ identifier[user] ]={}
identifier[ret] [ literal[string] ][ literal[string] ][ identifier[user] ][ identifier[access_mode] ]= identifier[changes] [ identifier[user] ][ identifier[access_mode] ]
keyword[else] :
identifier[applies_to] = keyword[None]
keyword[if] literal[string] keyword[not] keyword[in] identifier[changes] [ identifier[user] ]:
keyword[if] identifier[user_name] keyword[in] identifier[cur_perms] [ literal[string] ] keyword[and] identifier[access_mode] keyword[in] identifier[cur_perms] [ literal[string] ][ identifier[user_name] ]:
keyword[for] identifier[flag] keyword[in] identifier[flags] (). identifier[ace_prop] [ identifier[obj_type] ]:
keyword[if] identifier[flags] (). identifier[ace_prop] [ identifier[obj_type] ][ identifier[flag] ]== identifier[cur_perms] [ literal[string] ][ identifier[user_name] ][ identifier[access_mode] ][ literal[string] ]:
identifier[at_flag] = identifier[flag]
keyword[for] identifier[flag1] keyword[in] identifier[flags] (). identifier[ace_prop] [ identifier[obj_type] ]:
keyword[if] identifier[salt] . identifier[utils] . identifier[win_dacl] . identifier[flags] (). identifier[ace_prop] [ identifier[obj_type] ][ identifier[flag1] ]== identifier[at_flag] :
identifier[applies_to] = identifier[flag1]
keyword[if] keyword[not] identifier[applies_to] :
keyword[if] identifier[obj_type] . identifier[lower] () keyword[in] [ literal[string] , literal[string] ]:
identifier[applies_to] = literal[string]
keyword[else] :
identifier[applies_to] = literal[string]
keyword[else] :
identifier[applies_to] = identifier[changes] [ identifier[user] ][ literal[string] ]
identifier[perms] =[]
keyword[if] identifier[access_mode] keyword[not] keyword[in] identifier[changes] [ identifier[user] ]:
keyword[for] identifier[perm] keyword[in] identifier[cur_perms] [ literal[string] ][ identifier[user_name] ][ identifier[access_mode] ][ literal[string] ]:
keyword[for] identifier[flag] keyword[in] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ]:
keyword[if] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ][ identifier[flag] ]== identifier[perm] :
identifier[perm_flag] = identifier[flag]
keyword[for] identifier[flag1] keyword[in] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ]:
keyword[if] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ][ identifier[flag1] ]== identifier[perm_flag] :
identifier[perms] = identifier[flag1]
keyword[if] keyword[not] identifier[perms] :
keyword[for] identifier[perm] keyword[in] identifier[cur_perms] [ literal[string] ][ identifier[user_name] ][ identifier[access_mode] ][ literal[string] ]:
keyword[for] identifier[flag] keyword[in] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ]:
keyword[if] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ][ identifier[flag] ]== identifier[perm] :
identifier[perm_flag] = identifier[flag]
keyword[for] identifier[flag1] keyword[in] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ]:
keyword[if] identifier[flags] (). identifier[ace_perms] [ identifier[obj_type] ][ literal[string] ][ identifier[flag1] ]== identifier[perm_flag] :
identifier[perms] . identifier[append] ( identifier[flag1] )
keyword[else] :
identifier[perms] = identifier[changes] [ identifier[user] ][ identifier[access_mode] ]
keyword[try] :
identifier[set_permissions] (
identifier[obj_name] = identifier[obj_name] ,
identifier[principal] = identifier[user_name] ,
identifier[permissions] = identifier[perms] ,
identifier[access_mode] = identifier[access_mode] ,
identifier[applies_to] = identifier[applies_to] ,
identifier[obj_type] = identifier[obj_type] )
keyword[if] identifier[user] keyword[not] keyword[in] identifier[ret] [ literal[string] ][ literal[string] ]:
identifier[ret] [ literal[string] ][ literal[string] ][ identifier[user] ]={}
identifier[ret] [ literal[string] ][ literal[string] ][ identifier[user] ][ identifier[access_mode] ]= identifier[changes] [ identifier[user] ][ identifier[access_mode] ]
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]. identifier[append] (
literal[string]
literal[string] . identifier[format] ( identifier[access_mode] , identifier[user] , identifier[changes] [ identifier[user] ], identifier[exc] . identifier[strerror] ))
keyword[return] identifier[ret] | def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret):
"""
Helper function used by ``check_perms`` for checking and setting Grant and
Deny permissions.
Args:
obj_name (str):
The name or full path to the object
obj_type (Optional[str]):
The type of object for which to check permissions. Default is 'file'
new_perms (dict):
A dictionary containing the user/group and the basic permissions to
check/grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
cur_perms (dict):
A dictionary containing the user/group permissions as they currently
exists on the target object.
access_mode (str):
The access mode to set. Either ``grant`` or ``deny``
ret (dict):
A dictionary to append changes to and return. If not passed, will
create a new dictionary to return.
Returns:
dict: A dictionary of return data as expected by the state system
"""
access_mode = access_mode.lower()
changes = {}
for user in new_perms:
applies_to_text = ''
# Check that user exists:
try:
user_name = get_name(principal=user) # depends on [control=['try'], data=[]]
except CommandExecutionError:
ret['comment'].append('{0} Perms: User "{1}" missing from Target System'.format(access_mode.capitalize(), user))
continue # depends on [control=['except'], data=[]]
# Get the proper applies_to text
if 'applies_to' in new_perms[user]:
applies_to = new_perms[user]['applies_to']
at_flag = flags().ace_prop['file'][applies_to]
applies_to_text = flags().ace_prop['file'][at_flag] # depends on [control=['if'], data=[]]
else:
applies_to = None
if user_name not in cur_perms['Not Inherited']:
if user not in changes:
changes[user] = {} # depends on [control=['if'], data=['user', 'changes']]
changes[user][access_mode] = new_perms[user]['perms']
if applies_to:
changes[user]['applies_to'] = applies_to # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Check Perms for basic perms
if isinstance(new_perms[user]['perms'], six.string_types):
if not has_permission(obj_name=obj_name, principal=user_name, permission=new_perms[user]['perms'], access_mode=access_mode, obj_type=obj_type, exact=False):
if user not in changes:
changes[user] = {} # depends on [control=['if'], data=['user', 'changes']]
changes[user][access_mode] = new_perms[user]['perms'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Check Perms for advanced perms
for perm in new_perms[user]['perms']:
if not has_permission(obj_name=obj_name, principal=user_name, permission=perm, access_mode=access_mode, obj_type=obj_type, exact=False):
if user not in changes:
changes[user] = {access_mode: []} # depends on [control=['if'], data=['user', 'changes']]
changes[user][access_mode].append(perm) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['perm']]
# Check if applies_to was passed
if applies_to:
# Is there a deny/grant permission set
if access_mode in cur_perms['Not Inherited'][user_name]:
# If the applies to settings are different, use the new one
if not cur_perms['Not Inherited'][user_name][access_mode]['applies to'] == applies_to_text:
if user not in changes:
changes[user] = {} # depends on [control=['if'], data=['user', 'changes']]
changes[user]['applies_to'] = applies_to # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['access_mode']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['user']]
if changes:
if 'perms' not in ret['changes']:
ret['changes']['perms'] = {} # depends on [control=['if'], data=[]]
for user in changes:
user_name = get_name(principal=user)
if __opts__['test'] is True:
if user not in ret['changes']['perms']:
ret['changes']['perms'][user] = {} # depends on [control=['if'], data=['user']]
ret['changes']['perms'][user][access_mode] = changes[user][access_mode] # depends on [control=['if'], data=[]]
else:
# Get applies_to
applies_to = None
if 'applies_to' not in changes[user]:
# Get current "applies to" settings from the file
if user_name in cur_perms['Not Inherited'] and access_mode in cur_perms['Not Inherited'][user_name]:
for flag in flags().ace_prop[obj_type]:
if flags().ace_prop[obj_type][flag] == cur_perms['Not Inherited'][user_name][access_mode]['applies to']:
at_flag = flag
for flag1 in flags().ace_prop[obj_type]:
if salt.utils.win_dacl.flags().ace_prop[obj_type][flag1] == at_flag:
applies_to = flag1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['flag1']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['flag']] # depends on [control=['if'], data=[]]
if not applies_to:
if obj_type.lower() in ['registry', 'registry32']:
applies_to = 'this_key_subkeys' # depends on [control=['if'], data=[]]
else:
applies_to = 'this_folder_subfolders_files' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
applies_to = changes[user]['applies_to']
perms = []
if access_mode not in changes[user]:
# Get current perms
# Check for basic perms
for perm in cur_perms['Not Inherited'][user_name][access_mode]['permissions']:
for flag in flags().ace_perms[obj_type]['basic']:
if flags().ace_perms[obj_type]['basic'][flag] == perm:
perm_flag = flag
for flag1 in flags().ace_perms[obj_type]['basic']:
if flags().ace_perms[obj_type]['basic'][flag1] == perm_flag:
perms = flag1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['flag1']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['flag']] # depends on [control=['for'], data=['perm']]
# Make a list of advanced perms
if not perms:
for perm in cur_perms['Not Inherited'][user_name][access_mode]['permissions']:
for flag in flags().ace_perms[obj_type]['advanced']:
if flags().ace_perms[obj_type]['advanced'][flag] == perm:
perm_flag = flag
for flag1 in flags().ace_perms[obj_type]['advanced']:
if flags().ace_perms[obj_type]['advanced'][flag1] == perm_flag:
perms.append(flag1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['flag1']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['flag']] # depends on [control=['for'], data=['perm']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['access_mode']]
else:
perms = changes[user][access_mode]
try:
set_permissions(obj_name=obj_name, principal=user_name, permissions=perms, access_mode=access_mode, applies_to=applies_to, obj_type=obj_type)
if user not in ret['changes']['perms']:
ret['changes']['perms'][user] = {} # depends on [control=['if'], data=['user']]
ret['changes']['perms'][user][access_mode] = changes[user][access_mode] # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'].append('Failed to change {0} permissions for "{1}" to {2}\nError: {3}'.format(access_mode, user, changes[user], exc.strerror)) # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['user']] # depends on [control=['if'], data=[]]
return ret |
def add_member(self, login, team):
    """Deprecated: add user ``login`` to team ``team`` of this organization.

    .. warning::

        This method is no longer valid. To add a member to a team, you
        must now retrieve the team directly, and use the ``invite``
        method.

    Any user that is to be added to an organization, must be added
    to a team as per the GitHub api.

    .. note::

        This method is of complexity O(n): it scans every team in the
        organization and delegates to ``add_member`` on the first team
        whose name matches ``team``. For constant time, retrieve the
        team yourself and call its ``add_member`` directly.

    :param str login: (required), login name of the user to be added
    :param str team: (required), team name
    :returns: bool
    """
    warnings.warn(
        'This is no longer supported by the GitHub API, see '
        'https://developer.github.com/changes/2014-09-23-one-more-week'
        '-before-the-add-team-member-api-breaking-change/',
        DeprecationWarning)
    # Lazily scan the teams; stop at the first name match.
    target = next((t for t in self.iter_teams() if t.name == team), None)
    if target is not None:
        return target.add_member(login)
    return False
return False | def function[add_member, parameter[self, login, team]]:
constant[Add ``login`` to ``team`` and thereby to this organization.
.. warning::
This method is no longer valid. To add a member to a team, you
must now retrieve the team directly, and use the ``invite``
method.
Any user that is to be added to an organization, must be added
to a team as per the GitHub api.
.. note::
This method is of complexity O(n). This iterates over all teams in
your organization and only adds the user when the team name
matches the team parameter above. If you want constant time, you
should retrieve the team and call ``add_member`` on that team
directly.
:param str login: (required), login name of the user to be added
:param str team: (required), team name
:returns: bool
]
call[name[warnings].warn, parameter[constant[This is no longer supported by the GitHub API, see https://developer.github.com/changes/2014-09-23-one-more-week-before-the-add-team-member-api-breaking-change/], name[DeprecationWarning]]]
for taget[name[t]] in starred[call[name[self].iter_teams, parameter[]]] begin[:]
if compare[name[team] equal[==] name[t].name] begin[:]
return[call[name[t].add_member, parameter[name[login]]]]
return[constant[False]] | keyword[def] identifier[add_member] ( identifier[self] , identifier[login] , identifier[team] ):
literal[string]
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string] ,
identifier[DeprecationWarning] )
keyword[for] identifier[t] keyword[in] identifier[self] . identifier[iter_teams] ():
keyword[if] identifier[team] == identifier[t] . identifier[name] :
keyword[return] identifier[t] . identifier[add_member] ( identifier[login] )
keyword[return] keyword[False] | def add_member(self, login, team):
"""Add ``login`` to ``team`` and thereby to this organization.
.. warning::
This method is no longer valid. To add a member to a team, you
must now retrieve the team directly, and use the ``invite``
method.
Any user that is to be added to an organization, must be added
to a team as per the GitHub api.
.. note::
This method is of complexity O(n). This iterates over all teams in
your organization and only adds the user when the team name
matches the team parameter above. If you want constant time, you
should retrieve the team and call ``add_member`` on that team
directly.
:param str login: (required), login name of the user to be added
:param str team: (required), team name
:returns: bool
"""
warnings.warn('This is no longer supported by the GitHub API, see https://developer.github.com/changes/2014-09-23-one-more-week-before-the-add-team-member-api-breaking-change/', DeprecationWarning)
for t in self.iter_teams():
if team == t.name:
return t.add_member(login) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']]
return False |
def add_tag(context, id, name):
    """add_tag(context, id, name)
    Attach a tag to a job.
    >>> dcictl job-add-tag [OPTIONS]
    :param string id: ID of the job to attach the tag on [required]
    :param string name: name of the tag to be attached [required]
    """
    # Thin CLI wrapper: delegate to the API client, then render the
    # result in the output format selected on the CLI context.
    result = job.add_tag(context, id=id, name=name)
    utils.format_output(result, context.format)
constant[add_tag(context, id, name)
Attach a tag to a job.
>>> dcictl job-add-tag [OPTIONS]
:param string id: ID of the job to attach the tag on [required]
:param string tag_name: name of the tag to be attached [required]
]
variable[result] assign[=] call[name[job].add_tag, parameter[name[context]]]
call[name[utils].format_output, parameter[name[result], name[context].format]] | keyword[def] identifier[add_tag] ( identifier[context] , identifier[id] , identifier[name] ):
literal[string]
identifier[result] = identifier[job] . identifier[add_tag] ( identifier[context] , identifier[id] = identifier[id] , identifier[name] = identifier[name] )
identifier[utils] . identifier[format_output] ( identifier[result] , identifier[context] . identifier[format] ) | def add_tag(context, id, name):
"""add_tag(context, id, name)
Attach a tag to a job.
>>> dcictl job-add-tag [OPTIONS]
:param string id: ID of the job to attach the tag on [required]
:param string tag_name: name of the tag to be attached [required]
"""
result = job.add_tag(context, id=id, name=name)
utils.format_output(result, context.format) |
def clear(self, exclude=None):
    """
    Clear build output dir.
    Removes every file and directory under ``self.config.output_dir``
    whose basename is not listed in *exclude*. Excluded directories are
    kept but still descended into, so their non-excluded contents are
    removed as well.
    :param exclude: basenames to keep at any depth
    :type exclude: list|None
    """
    exclude = exclude or []
    for root, dirs, files in os.walk(self.config.output_dir):
        for f in files:
            if f not in exclude:
                os.unlink(os.path.join(root, f))
        # Iterate over a copy so we can prune ``dirs`` in place: once a
        # subtree has been rmtree'd, os.walk must not try to descend
        # into it (previously that descent only "worked" because walk's
        # default onerror=None silently swallowed the scandir failure).
        for d in list(dirs):
            if d not in exclude:
                shutil.rmtree(os.path.join(root, d))
                dirs.remove(d)
constant[
Clear build output dir
:type exclude: list|None
]
variable[exclude] assign[=] <ast.BoolOp object at 0x7da1afea41c0>
for taget[tuple[[<ast.Name object at 0x7da1afea7c70>, <ast.Name object at 0x7da1afea7d60>, <ast.Name object at 0x7da1afea7f10>]]] in starred[call[name[os].walk, parameter[name[self].config.output_dir]]] begin[:]
for taget[name[f]] in starred[name[files]] begin[:]
if compare[name[f] <ast.NotIn object at 0x7da2590d7190> name[exclude]] begin[:]
call[name[os].unlink, parameter[call[name[os].path.join, parameter[name[root], name[f]]]]]
for taget[name[d]] in starred[name[dirs]] begin[:]
if compare[name[d] <ast.NotIn object at 0x7da2590d7190> name[exclude]] begin[:]
call[name[shutil].rmtree, parameter[call[name[os].path.join, parameter[name[root], name[d]]]]] | keyword[def] identifier[clear] ( identifier[self] , identifier[exclude] = keyword[None] ):
literal[string]
identifier[exclude] = identifier[exclude] keyword[or] []
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[self] . identifier[config] . identifier[output_dir] ):
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[if] identifier[f] keyword[not] keyword[in] identifier[exclude] :
identifier[os] . identifier[unlink] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] ))
keyword[for] identifier[d] keyword[in] identifier[dirs] :
keyword[if] identifier[d] keyword[not] keyword[in] identifier[exclude] :
identifier[shutil] . identifier[rmtree] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[d] )) | def clear(self, exclude=None):
"""
Clear build output dir
:type exclude: list|None
"""
exclude = exclude or []
for (root, dirs, files) in os.walk(self.config.output_dir):
for f in files:
if f not in exclude:
os.unlink(os.path.join(root, f)) # depends on [control=['if'], data=['f']] # depends on [control=['for'], data=['f']]
for d in dirs:
if d not in exclude:
shutil.rmtree(os.path.join(root, d)) # depends on [control=['if'], data=['d']] # depends on [control=['for'], data=['d']] # depends on [control=['for'], data=[]] |
def _get_fault_type_dummy_variables(self, rup):
    """
    Derive the fault-type dummy variables (Unspecified, Strike-slip,
    Normal, Reverse) from the rupture's rake angle.
    Rakes within 30 degrees of horizontal are strike-slip, angles from
    30 to 150 are reverse, and angles from -30 to -150 are normal.
    The 'Unspecified' dummy is never set because rake is always given.
    :returns: tuple ``(U, SS, NS, RS)`` of 0/1 indicator values.
    """
    rake = rup.rake
    if np.abs(rake) <= 30.0 or 180.0 - np.abs(rake) <= 30.0:
        # strike-slip
        return 0, 1, 0, 0
    if 30.0 < rake < 150.0:
        # reverse
        return 0, 0, 0, 1
    # normal
    return 0, 0, 1, 0
constant[
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rakes angles within 30 of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
Note that the 'Unspecified' case is not considered,
because rake is always given.
]
<ast.Tuple object at 0x7da18bcca470> assign[=] tuple[[<ast.Constant object at 0x7da18bccb250>, <ast.Constant object at 0x7da18bccac80>, <ast.Constant object at 0x7da18bccbf70>, <ast.Constant object at 0x7da18bcc9b10>]]
if <ast.BoolOp object at 0x7da18bcc9f00> begin[:]
variable[SS] assign[=] constant[1]
return[tuple[[<ast.Name object at 0x7da18bcc8310>, <ast.Name object at 0x7da18bcc90f0>, <ast.Name object at 0x7da18bccb910>, <ast.Name object at 0x7da18bcc8d30>]]] | keyword[def] identifier[_get_fault_type_dummy_variables] ( identifier[self] , identifier[rup] ):
literal[string]
identifier[U] , identifier[SS] , identifier[NS] , identifier[RS] = literal[int] , literal[int] , literal[int] , literal[int]
keyword[if] identifier[np] . identifier[abs] ( identifier[rup] . identifier[rake] )<= literal[int] keyword[or] ( literal[int] - identifier[np] . identifier[abs] ( identifier[rup] . identifier[rake] ))<= literal[int] :
identifier[SS] = literal[int]
keyword[elif] identifier[rup] . identifier[rake] > literal[int] keyword[and] identifier[rup] . identifier[rake] < literal[int] :
identifier[RS] = literal[int]
keyword[else] :
identifier[NS] = literal[int]
keyword[return] identifier[U] , identifier[SS] , identifier[NS] , identifier[RS] | def _get_fault_type_dummy_variables(self, rup):
"""
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rakes angles within 30 of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
Note that the 'Unspecified' case is not considered,
because rake is always given.
"""
(U, SS, NS, RS) = (0, 0, 0, 0)
if np.abs(rup.rake) <= 30.0 or 180.0 - np.abs(rup.rake) <= 30.0:
# strike-slip
SS = 1 # depends on [control=['if'], data=[]]
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
RS = 1 # depends on [control=['if'], data=[]]
else:
# normal
NS = 1
return (U, SS, NS, RS) |
def fuzzyfinder(input, collection, accessor=lambda x: x, sort_results=True):
    """Rank the items of *collection* that fuzzily match *input*.
    An item matches when the characters of *input* appear in order (not
    necessarily contiguously) in ``accessor(item)``, case-insensitively.
    Candidates are ranked by shortest contiguous match first, then by the
    position of that match in the full string; when *sort_results* is
    true, remaining ties are broken alpha-numerically.
    Args:
        input (str): partial string typically entered by a user;
            non-string values are converted with ``str``.
        collection (iterable): candidate items to filter.
        accessor (function): maps an item to the string used for
            fuzzy matching.
        sort_results (bool): controls only the final alpha-numeric
            tie-breaker; ranking by match length and position is
            always applied.
    Returns:
        generator: the matching items, best candidates first.
    """
    if not isinstance(input, str):
        input = str(input)
    # Wrap the fuzzy pattern in a lookahead so overlapping candidate
    # matches are all visible, letting us pick the shortest one.
    fuzzy = '.*?'.join(map(re.escape, input))
    pattern = re.compile('(?=({0}))'.format(fuzzy), re.IGNORECASE)
    ranked = []
    for item in collection:
        text = accessor(item)
        found = list(pattern.finditer(text))
        if not found:
            continue
        shortest = min(found, key=lambda m: len(m.group(1)))
        ranked.append((len(shortest.group(1)), shortest.start(), text, item))
    if sort_results:
        ordered = sorted(ranked)
    else:
        ordered = sorted(ranked, key=lambda entry: entry[:2])
    return (entry[-1] for entry in ordered)
constant[
Args:
input (str): A partial string which is typically entered by a user.
collection (iterable): A collection of strings which will be filtered
based on the `input`.
accessor (function): If the `collection` is not an iterable of strings,
then use the accessor to fetch the string that
will be used for fuzzy matching.
sort_results(bool): The suggestions are sorted by considering the
smallest contiguous match, followed by where the
match is found in the full string. If two suggestions
have the same rank, they are then sorted
alpha-numerically. This parameter controls the
*last tie-breaker-alpha-numeric sorting*. The sorting
based on match length and position will be intact.
Returns:
suggestions (generator): A generator object that produces a list of
suggestions narrowed down from `collection` using the `input`.
]
variable[suggestions] assign[=] list[[]]
variable[input] assign[=] <ast.IfExp object at 0x7da1b0004880>
variable[pat] assign[=] call[constant[.*?].join, parameter[call[name[map], parameter[name[re].escape, name[input]]]]]
variable[pat] assign[=] call[constant[(?=({0}))].format, parameter[name[pat]]]
variable[regex] assign[=] call[name[re].compile, parameter[name[pat], name[re].IGNORECASE]]
for taget[name[item]] in starred[name[collection]] begin[:]
variable[r] assign[=] call[name[list], parameter[call[name[regex].finditer, parameter[call[name[accessor], parameter[name[item]]]]]]]
if name[r] begin[:]
variable[best] assign[=] call[name[min], parameter[name[r]]]
call[name[suggestions].append, parameter[tuple[[<ast.Call object at 0x7da1affc08b0>, <ast.Call object at 0x7da1affc1870>, <ast.Call object at 0x7da1affc1fc0>, <ast.Name object at 0x7da1affc27a0>]]]]
if name[sort_results] begin[:]
return[<ast.GeneratorExp object at 0x7da1affc3d90>] | keyword[def] identifier[fuzzyfinder] ( identifier[input] , identifier[collection] , identifier[accessor] = keyword[lambda] identifier[x] : identifier[x] , identifier[sort_results] = keyword[True] ):
literal[string]
identifier[suggestions] =[]
identifier[input] = identifier[str] ( identifier[input] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[input] , identifier[str] ) keyword[else] identifier[input]
identifier[pat] = literal[string] . identifier[join] ( identifier[map] ( identifier[re] . identifier[escape] , identifier[input] ))
identifier[pat] = literal[string] . identifier[format] ( identifier[pat] )
identifier[regex] = identifier[re] . identifier[compile] ( identifier[pat] , identifier[re] . identifier[IGNORECASE] )
keyword[for] identifier[item] keyword[in] identifier[collection] :
identifier[r] = identifier[list] ( identifier[regex] . identifier[finditer] ( identifier[accessor] ( identifier[item] )))
keyword[if] identifier[r] :
identifier[best] = identifier[min] ( identifier[r] , identifier[key] = keyword[lambda] identifier[x] : identifier[len] ( identifier[x] . identifier[group] ( literal[int] )))
identifier[suggestions] . identifier[append] (( identifier[len] ( identifier[best] . identifier[group] ( literal[int] )), identifier[best] . identifier[start] (), identifier[accessor] ( identifier[item] ), identifier[item] ))
keyword[if] identifier[sort_results] :
keyword[return] ( identifier[z] [- literal[int] ] keyword[for] identifier[z] keyword[in] identifier[sorted] ( identifier[suggestions] ))
keyword[else] :
keyword[return] ( identifier[z] [- literal[int] ] keyword[for] identifier[z] keyword[in] identifier[sorted] ( identifier[suggestions] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [: literal[int] ])) | def fuzzyfinder(input, collection, accessor=lambda x: x, sort_results=True):
"""
Args:
input (str): A partial string which is typically entered by a user.
collection (iterable): A collection of strings which will be filtered
based on the `input`.
accessor (function): If the `collection` is not an iterable of strings,
then use the accessor to fetch the string that
will be used for fuzzy matching.
sort_results(bool): The suggestions are sorted by considering the
smallest contiguous match, followed by where the
match is found in the full string. If two suggestions
have the same rank, they are then sorted
alpha-numerically. This parameter controls the
*last tie-breaker-alpha-numeric sorting*. The sorting
based on match length and position will be intact.
Returns:
suggestions (generator): A generator object that produces a list of
suggestions narrowed down from `collection` using the `input`.
"""
suggestions = []
input = str(input) if not isinstance(input, str) else input
pat = '.*?'.join(map(re.escape, input))
pat = '(?=({0}))'.format(pat) # lookahead regex to manage overlapping matches
regex = re.compile(pat, re.IGNORECASE)
for item in collection:
r = list(regex.finditer(accessor(item)))
if r:
best = min(r, key=lambda x: len(x.group(1))) # find shortest match
suggestions.append((len(best.group(1)), best.start(), accessor(item), item)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
if sort_results:
return (z[-1] for z in sorted(suggestions)) # depends on [control=['if'], data=[]]
else:
return (z[-1] for z in sorted(suggestions, key=lambda x: x[:2])) |
def __run(db_name,argv):
    """
    Runs the Database DS as a standalone database. Run it with::
        ./DataBaseds pydb-test -ORBendPoint giop:tcp::11000
    :param db_name: Tango device name of the DataBase device to export
    :param argv: command-line arguments forwarded to ``tango.Util``
    """
    # Run without an external Tango database: this server process *is*
    # the database.
    tango.Util.set_use_db(False)
    util = tango.Util(argv)
    # NOTE(review): these patches presumably adapt Util / the DataBase
    # class for standalone (no-db) operation -- confirm in their
    # definitions.
    __monkey_patch_util(util)
    __monkey_patch_database_class()
    dbi = DbInter()
    util.set_interceptors(dbi)
    def post_init_cb():
        # Runs after server initialization: since there is no external
        # database to register with, export both the admin (DServer)
        # device and the DataBase device into our own device table so
        # clients can look them up.
        logging.debug("post_init_cb()")
        util = tango.Util.instance()  # rebinds ``util`` locally; same singleton
        dserver = util.get_dserver_device()
        dserver_name = dserver.get_name()
        dserver_ior = util.get_dserver_ior(dserver)
        dbase = util.get_device_by_name(db_name)
        dbase_name = dbase.get_name()
        dbase_ior = util.get_device_ior(dbase)
        host = util.get_host_name()
        pid = util.get_pid_str()
        version = util.get_version_str()
        DbExportDevice(dbase, [dserver_name, dserver_ior, host, pid, version])
        DbExportDevice(dbase, [dbase_name, dbase_ior, host, pid, version])
    # Gevent green mode so the database device can serve concurrent requests.
    run((DataBase,), args=argv, util=util, post_init_callback=post_init_cb,
        green_mode=GreenMode.Gevent, verbose=True)
constant[
Runs the Database DS as a standalone database. Run it with::
./DataBaseds pydb-test -ORBendPoint giop:tcp::11000
]
call[name[tango].Util.set_use_db, parameter[constant[False]]]
variable[util] assign[=] call[name[tango].Util, parameter[name[argv]]]
call[name[__monkey_patch_util], parameter[name[util]]]
call[name[__monkey_patch_database_class], parameter[]]
variable[dbi] assign[=] call[name[DbInter], parameter[]]
call[name[util].set_interceptors, parameter[name[dbi]]]
def function[post_init_cb, parameter[]]:
call[name[logging].debug, parameter[constant[post_init_cb()]]]
variable[util] assign[=] call[name[tango].Util.instance, parameter[]]
variable[dserver] assign[=] call[name[util].get_dserver_device, parameter[]]
variable[dserver_name] assign[=] call[name[dserver].get_name, parameter[]]
variable[dserver_ior] assign[=] call[name[util].get_dserver_ior, parameter[name[dserver]]]
variable[dbase] assign[=] call[name[util].get_device_by_name, parameter[name[db_name]]]
variable[dbase_name] assign[=] call[name[dbase].get_name, parameter[]]
variable[dbase_ior] assign[=] call[name[util].get_device_ior, parameter[name[dbase]]]
variable[host] assign[=] call[name[util].get_host_name, parameter[]]
variable[pid] assign[=] call[name[util].get_pid_str, parameter[]]
variable[version] assign[=] call[name[util].get_version_str, parameter[]]
call[name[DbExportDevice], parameter[name[dbase], list[[<ast.Name object at 0x7da2041da020>, <ast.Name object at 0x7da2041dbd90>, <ast.Name object at 0x7da2041db040>, <ast.Name object at 0x7da2041dbc40>, <ast.Name object at 0x7da2041da560>]]]]
call[name[DbExportDevice], parameter[name[dbase], list[[<ast.Name object at 0x7da2041da5f0>, <ast.Name object at 0x7da2041da650>, <ast.Name object at 0x7da2041dba90>, <ast.Name object at 0x7da2041d85e0>, <ast.Name object at 0x7da2041d98d0>]]]]
call[name[run], parameter[tuple[[<ast.Name object at 0x7da2041db6d0>]]]] | keyword[def] identifier[__run] ( identifier[db_name] , identifier[argv] ):
literal[string]
identifier[tango] . identifier[Util] . identifier[set_use_db] ( keyword[False] )
identifier[util] = identifier[tango] . identifier[Util] ( identifier[argv] )
identifier[__monkey_patch_util] ( identifier[util] )
identifier[__monkey_patch_database_class] ()
identifier[dbi] = identifier[DbInter] ()
identifier[util] . identifier[set_interceptors] ( identifier[dbi] )
keyword[def] identifier[post_init_cb] ():
identifier[logging] . identifier[debug] ( literal[string] )
identifier[util] = identifier[tango] . identifier[Util] . identifier[instance] ()
identifier[dserver] = identifier[util] . identifier[get_dserver_device] ()
identifier[dserver_name] = identifier[dserver] . identifier[get_name] ()
identifier[dserver_ior] = identifier[util] . identifier[get_dserver_ior] ( identifier[dserver] )
identifier[dbase] = identifier[util] . identifier[get_device_by_name] ( identifier[db_name] )
identifier[dbase_name] = identifier[dbase] . identifier[get_name] ()
identifier[dbase_ior] = identifier[util] . identifier[get_device_ior] ( identifier[dbase] )
identifier[host] = identifier[util] . identifier[get_host_name] ()
identifier[pid] = identifier[util] . identifier[get_pid_str] ()
identifier[version] = identifier[util] . identifier[get_version_str] ()
identifier[DbExportDevice] ( identifier[dbase] ,[ identifier[dserver_name] , identifier[dserver_ior] , identifier[host] , identifier[pid] , identifier[version] ])
identifier[DbExportDevice] ( identifier[dbase] ,[ identifier[dbase_name] , identifier[dbase_ior] , identifier[host] , identifier[pid] , identifier[version] ])
identifier[run] (( identifier[DataBase] ,), identifier[args] = identifier[argv] , identifier[util] = identifier[util] , identifier[post_init_callback] = identifier[post_init_cb] ,
identifier[green_mode] = identifier[GreenMode] . identifier[Gevent] , identifier[verbose] = keyword[True] ) | def __run(db_name, argv):
"""
Runs the Database DS as a standalone database. Run it with::
./DataBaseds pydb-test -ORBendPoint giop:tcp::11000
"""
tango.Util.set_use_db(False)
util = tango.Util(argv)
__monkey_patch_util(util)
__monkey_patch_database_class()
dbi = DbInter()
util.set_interceptors(dbi)
def post_init_cb():
logging.debug('post_init_cb()')
util = tango.Util.instance()
dserver = util.get_dserver_device()
dserver_name = dserver.get_name()
dserver_ior = util.get_dserver_ior(dserver)
dbase = util.get_device_by_name(db_name)
dbase_name = dbase.get_name()
dbase_ior = util.get_device_ior(dbase)
host = util.get_host_name()
pid = util.get_pid_str()
version = util.get_version_str()
DbExportDevice(dbase, [dserver_name, dserver_ior, host, pid, version])
DbExportDevice(dbase, [dbase_name, dbase_ior, host, pid, version])
run((DataBase,), args=argv, util=util, post_init_callback=post_init_cb, green_mode=GreenMode.Gevent, verbose=True) |
def GET_utxos_insight(self, path_info, address):
    """
    Handle GET /insight-api/addr/:address/utxo
    NOTE: this is not compatible with the Bitcore Insight API method of the same name
    """
    # Endpoint is only available in test mode.
    if not BLOCKSTACK_TEST:
        return self._send_headers(status_code=404, content_type='text/plain')
    if not check_address(address):
        return self._reply_json({'error': 'Invalid address'}, status_code=400)
    # Build a bitcoind RPC proxy from the configured connection options.
    opts = get_bitcoin_opts()
    client = create_bitcoind_service_proxy(
        opts['bitcoind_user'], opts['bitcoind_passwd'],
        server=opts['bitcoind_server'], port=opts['bitcoind_port'])
    # Canonicalize the address before querying unspent outputs.
    canonical_address = virtualchain.address_reencode(address)
    return self._reply_json(get_unspents(canonical_address, client))
constant[
Handle GET /insight-api/addr/:address/utxo
NOTE: this is not compatible with the Bitcore Insight API method of the same name
]
if <ast.UnaryOp object at 0x7da18f09fbe0> begin[:]
return[call[name[self]._send_headers, parameter[]]]
if <ast.UnaryOp object at 0x7da18f09d330> begin[:]
return[call[name[self]._reply_json, parameter[dictionary[[<ast.Constant object at 0x7da18f09dbd0>], [<ast.Constant object at 0x7da18f09e860>]]]]]
variable[bitcoind_opts] assign[=] call[name[get_bitcoin_opts], parameter[]]
variable[bitcoind_host] assign[=] call[name[bitcoind_opts]][constant[bitcoind_server]]
variable[bitcoind_port] assign[=] call[name[bitcoind_opts]][constant[bitcoind_port]]
variable[bitcoind_user] assign[=] call[name[bitcoind_opts]][constant[bitcoind_user]]
variable[bitcoind_passwd] assign[=] call[name[bitcoind_opts]][constant[bitcoind_passwd]]
variable[bitcoind] assign[=] call[name[create_bitcoind_service_proxy], parameter[name[bitcoind_user], name[bitcoind_passwd]]]
variable[address] assign[=] call[name[virtualchain].address_reencode, parameter[name[address]]]
variable[utxos] assign[=] call[name[get_unspents], parameter[name[address], name[bitcoind]]]
return[call[name[self]._reply_json, parameter[name[utxos]]]] | keyword[def] identifier[GET_utxos_insight] ( identifier[self] , identifier[path_info] , identifier[address] ):
literal[string]
keyword[if] keyword[not] identifier[BLOCKSTACK_TEST] :
keyword[return] identifier[self] . identifier[_send_headers] ( identifier[status_code] = literal[int] , identifier[content_type] = literal[string] )
keyword[if] keyword[not] identifier[check_address] ( identifier[address] ):
keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] }, identifier[status_code] = literal[int] )
identifier[bitcoind_opts] = identifier[get_bitcoin_opts] ()
identifier[bitcoind_host] = identifier[bitcoind_opts] [ literal[string] ]
identifier[bitcoind_port] = identifier[bitcoind_opts] [ literal[string] ]
identifier[bitcoind_user] = identifier[bitcoind_opts] [ literal[string] ]
identifier[bitcoind_passwd] = identifier[bitcoind_opts] [ literal[string] ]
identifier[bitcoind] = identifier[create_bitcoind_service_proxy] ( identifier[bitcoind_user] , identifier[bitcoind_passwd] , identifier[server] = identifier[bitcoind_host] , identifier[port] = identifier[bitcoind_port] )
identifier[address] = identifier[virtualchain] . identifier[address_reencode] ( identifier[address] )
identifier[utxos] = identifier[get_unspents] ( identifier[address] , identifier[bitcoind] )
keyword[return] identifier[self] . identifier[_reply_json] ( identifier[utxos] ) | def GET_utxos_insight(self, path_info, address):
"""
Handle GET /insight-api/addr/:address/utxo
NOTE: this is not compatible with the Bitcore Insight API method of the same name
"""
if not BLOCKSTACK_TEST:
return self._send_headers(status_code=404, content_type='text/plain') # depends on [control=['if'], data=[]]
if not check_address(address):
return self._reply_json({'error': 'Invalid address'}, status_code=400) # depends on [control=['if'], data=[]]
bitcoind_opts = get_bitcoin_opts()
bitcoind_host = bitcoind_opts['bitcoind_server']
bitcoind_port = bitcoind_opts['bitcoind_port']
bitcoind_user = bitcoind_opts['bitcoind_user']
bitcoind_passwd = bitcoind_opts['bitcoind_passwd']
bitcoind = create_bitcoind_service_proxy(bitcoind_user, bitcoind_passwd, server=bitcoind_host, port=bitcoind_port)
address = virtualchain.address_reencode(address)
utxos = get_unspents(address, bitcoind)
return self._reply_json(utxos) |
def addAlleleOfGene(self, allele_id, gene_id, rel_id=None):
    """
    Link an allele to its gene.
    If no relationship is supplied, GENO:is_allele_of is assumed.
    The allele should be a variant_locus, not a sequence alteration.
    :param allele_id: identifier of the allele (subject)
    :param gene_id: identifier of the gene (object)
    :param rel_id: relationship identifier; defaults to GENO:is_allele_of
    :return: None
    """
    relationship = self.globaltt["is_allele_of"] if rel_id is None else rel_id
    self.graph.addTriple(allele_id, relationship, gene_id)
constant[
We make the assumption here that if the relationship is not provided,
it is a
GENO:is_allele_of.
Here, the allele should be a variant_locus, not a sequence alteration.
:param allele_id:
:param gene_id:
:param rel_id:
:return:
]
if compare[name[rel_id] is constant[None]] begin[:]
variable[rel_id] assign[=] call[name[self].globaltt][constant[is_allele_of]]
call[name[self].graph.addTriple, parameter[name[allele_id], name[rel_id], name[gene_id]]]
return[None] | keyword[def] identifier[addAlleleOfGene] ( identifier[self] , identifier[allele_id] , identifier[gene_id] , identifier[rel_id] = keyword[None] ):
literal[string]
keyword[if] identifier[rel_id] keyword[is] keyword[None] :
identifier[rel_id] = identifier[self] . identifier[globaltt] [ literal[string] ]
identifier[self] . identifier[graph] . identifier[addTriple] ( identifier[allele_id] , identifier[rel_id] , identifier[gene_id] )
keyword[return] | def addAlleleOfGene(self, allele_id, gene_id, rel_id=None):
"""
We make the assumption here that if the relationship is not provided,
it is a
GENO:is_allele_of.
Here, the allele should be a variant_locus, not a sequence alteration.
:param allele_id:
:param gene_id:
:param rel_id:
:return:
"""
if rel_id is None:
rel_id = self.globaltt['is_allele_of'] # depends on [control=['if'], data=['rel_id']]
self.graph.addTriple(allele_id, rel_id, gene_id)
return |
def combine_argtype(observations):
    """Combines a list of Tuple types into one.
    Basically these are combined element wise into a Union with some
    additional unification effort (e.g. can apply PEP 484 style numeric tower).
    :param observations: non-empty list of ``Tuple[...]`` types, all of
        the same arity (enforced by asserts below).
    :return: a single ``Tuple`` whose i-th element is the Union of the
        i-th elements of all observations; if there is only one
        observation it is returned unchanged.
    """
    assert len(observations) > 0
    assert is_Tuple(observations[0])
    if len(observations) > 1:
        # Collect the element types of every observed Tuple; each must
        # have the same arity as the first.
        prms = [get_Tuple_params(observations[0])]
        ln = len(prms[0])
        for obs in observations[1:]:
            assert is_Tuple(obs)
            prms.append(get_Tuple_params(obs))
            assert len(prms[-1]) == ln
        # NOTE(review): ``simplify`` is presumably a module-level flag
        # enabling extra Union unification -- confirm where it is defined.
        if simplify:
            # Transpose so each tuple position gets its own list of
            # observed types, unified independently.
            prms = map(list, zip(*prms))
            if not isinstance(prms, list):
                # special care for Python 3: ``map`` returns an iterator,
                # but simplify_for_Union mutates the lists in place.
                prms = list(prms)
            for type_list in prms:
                simplify_for_Union(type_list)
            prms = map(tuple, prms)
        else:
            prms = map(tuple, zip(*prms))
        # Union over each position, then rebuild a Tuple of those Unions.
        prms = map(Union.__getitem__, prms)
        return Tuple[tuple(prms)]
    else:
        return observations[0]
constant[Combines a list of Tuple types into one.
Basically these are combined element wise into a Union with some
additional unification effort (e.g. can apply PEP 484 style numeric tower).
]
assert[compare[call[name[len], parameter[name[observations]]] greater[>] constant[0]]]
assert[call[name[is_Tuple], parameter[call[name[observations]][constant[0]]]]]
if compare[call[name[len], parameter[name[observations]]] greater[>] constant[1]] begin[:]
variable[prms] assign[=] list[[<ast.Call object at 0x7da1b0dbc2b0>]]
variable[ln] assign[=] call[name[len], parameter[call[name[prms]][constant[0]]]]
for taget[name[obs]] in starred[call[name[observations]][<ast.Slice object at 0x7da1b0dbed40>]] begin[:]
assert[call[name[is_Tuple], parameter[name[obs]]]]
call[name[prms].append, parameter[call[name[get_Tuple_params], parameter[name[obs]]]]]
assert[compare[call[name[len], parameter[call[name[prms]][<ast.UnaryOp object at 0x7da1b0dbcf40>]]] equal[==] name[ln]]]
if name[simplify] begin[:]
variable[prms] assign[=] call[name[map], parameter[name[list], call[name[zip], parameter[<ast.Starred object at 0x7da1b0dbc520>]]]]
if <ast.UnaryOp object at 0x7da1b0dbc280> begin[:]
variable[prms] assign[=] call[name[list], parameter[name[prms]]]
for taget[name[type_list]] in starred[name[prms]] begin[:]
call[name[simplify_for_Union], parameter[name[type_list]]]
variable[prms] assign[=] call[name[map], parameter[name[tuple], name[prms]]]
variable[prms] assign[=] call[name[map], parameter[name[Union].__getitem__, name[prms]]]
return[call[name[Tuple]][call[name[tuple], parameter[name[prms]]]]] | keyword[def] identifier[combine_argtype] ( identifier[observations] ):
literal[string]
keyword[assert] identifier[len] ( identifier[observations] )> literal[int]
keyword[assert] identifier[is_Tuple] ( identifier[observations] [ literal[int] ])
keyword[if] identifier[len] ( identifier[observations] )> literal[int] :
identifier[prms] =[ identifier[get_Tuple_params] ( identifier[observations] [ literal[int] ])]
identifier[ln] = identifier[len] ( identifier[prms] [ literal[int] ])
keyword[for] identifier[obs] keyword[in] identifier[observations] [ literal[int] :]:
keyword[assert] identifier[is_Tuple] ( identifier[obs] )
identifier[prms] . identifier[append] ( identifier[get_Tuple_params] ( identifier[obs] ))
keyword[assert] identifier[len] ( identifier[prms] [- literal[int] ])== identifier[ln]
keyword[if] identifier[simplify] :
identifier[prms] = identifier[map] ( identifier[list] , identifier[zip] (* identifier[prms] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[prms] , identifier[list] ):
identifier[prms] = identifier[list] ( identifier[prms] )
keyword[for] identifier[type_list] keyword[in] identifier[prms] :
identifier[simplify_for_Union] ( identifier[type_list] )
identifier[prms] = identifier[map] ( identifier[tuple] , identifier[prms] )
keyword[else] :
identifier[prms] = identifier[map] ( identifier[tuple] , identifier[zip] (* identifier[prms] ))
identifier[prms] = identifier[map] ( identifier[Union] . identifier[__getitem__] , identifier[prms] )
keyword[return] identifier[Tuple] [ identifier[tuple] ( identifier[prms] )]
keyword[else] :
keyword[return] identifier[observations] [ literal[int] ] | def combine_argtype(observations):
"""Combines a list of Tuple types into one.
Basically these are combined element wise into a Union with some
additional unification effort (e.g. can apply PEP 484 style numeric tower).
"""
assert len(observations) > 0
assert is_Tuple(observations[0])
if len(observations) > 1:
prms = [get_Tuple_params(observations[0])]
ln = len(prms[0])
for obs in observations[1:]:
assert is_Tuple(obs)
prms.append(get_Tuple_params(obs))
assert len(prms[-1]) == ln # depends on [control=['for'], data=['obs']]
if simplify:
prms = map(list, zip(*prms))
if not isinstance(prms, list):
# special care for Python 3
prms = list(prms) # depends on [control=['if'], data=[]]
for type_list in prms:
simplify_for_Union(type_list) # depends on [control=['for'], data=['type_list']]
prms = map(tuple, prms) # depends on [control=['if'], data=[]]
else:
prms = map(tuple, zip(*prms))
prms = map(Union.__getitem__, prms)
return Tuple[tuple(prms)] # depends on [control=['if'], data=[]]
else:
return observations[0] |
def _get_mine(fun):
    '''
    Return the mine function from all the targeted minions.
    Results are memoized in ``_CACHE`` so repeated lookups for the same
    mine function do not re-query the minions.
    '''
    cached = _CACHE.get(fun)
    if cached:
        return cached
    runner_opts = _get_net_runner_opts()
    _CACHE[fun] = __salt__['mine.get'](runner_opts.get('target'),
                                       fun,
                                       tgt_type=runner_opts.get('expr_form'))
    return _CACHE[fun]
constant[
Return the mine function from all the targeted minions.
Just a small helper to avoid redundant pieces of code.
]
if <ast.BoolOp object at 0x7da1b21784f0> begin[:]
return[call[name[_CACHE]][name[fun]]]
variable[net_runner_opts] assign[=] call[name[_get_net_runner_opts], parameter[]]
call[name[_CACHE]][name[fun]] assign[=] call[call[name[__salt__]][constant[mine.get]], parameter[call[name[net_runner_opts].get, parameter[constant[target]]], name[fun]]]
return[call[name[_CACHE]][name[fun]]] | keyword[def] identifier[_get_mine] ( identifier[fun] ):
literal[string]
keyword[if] identifier[fun] keyword[in] identifier[_CACHE] keyword[and] identifier[_CACHE] [ identifier[fun] ]:
keyword[return] identifier[_CACHE] [ identifier[fun] ]
identifier[net_runner_opts] = identifier[_get_net_runner_opts] ()
identifier[_CACHE] [ identifier[fun] ]= identifier[__salt__] [ literal[string] ]( identifier[net_runner_opts] . identifier[get] ( literal[string] ),
identifier[fun] ,
identifier[tgt_type] = identifier[net_runner_opts] . identifier[get] ( literal[string] ))
keyword[return] identifier[_CACHE] [ identifier[fun] ] | def _get_mine(fun):
"""
Return the mine function from all the targeted minions.
Just a small helper to avoid redundant pieces of code.
"""
if fun in _CACHE and _CACHE[fun]:
return _CACHE[fun] # depends on [control=['if'], data=[]]
net_runner_opts = _get_net_runner_opts()
_CACHE[fun] = __salt__['mine.get'](net_runner_opts.get('target'), fun, tgt_type=net_runner_opts.get('expr_form'))
return _CACHE[fun] |
def docker_fabric(*args, **kwargs):
    """
    Return a Docker client built by whichever implementation is configured.

    The implementation is chosen from, in order of precedence: the
    ``client_implementation`` keyword argument, the Fabric env setting
    ``docker_fabric_implementation``, and finally the API default.

    :param args: Positional arguments to Docker client.
    :param kwargs: Keyword arguments to Docker client.
    :return: Docker client.
    :rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient
    """
    implementation = (kwargs.get('client_implementation') or
                      env.get('docker_fabric_implementation') or
                      CLIENT_API)
    if implementation == CLIENT_API:
        return docker_api(*args, **kwargs)
    if implementation == CLIENT_CLI:
        return docker_cli(*args, **kwargs)
    raise ValueError("Invalid client implementation.", implementation)
constant[
:param args: Positional arguments to Docker client.
:param kwargs: Keyword arguments to Docker client.
:return: Docker client.
:rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient
]
variable[ci] assign[=] <ast.BoolOp object at 0x7da20e9612a0>
if compare[name[ci] equal[==] name[CLIENT_API]] begin[:]
return[call[name[docker_api], parameter[<ast.Starred object at 0x7da204623f40>]]]
<ast.Raise object at 0x7da204620190> | keyword[def] identifier[docker_fabric] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[ci] = identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] identifier[env] . identifier[get] ( literal[string] ) keyword[or] identifier[CLIENT_API]
keyword[if] identifier[ci] == identifier[CLIENT_API] :
keyword[return] identifier[docker_api] (* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[ci] == identifier[CLIENT_CLI] :
keyword[return] identifier[docker_cli] (* identifier[args] ,** identifier[kwargs] )
keyword[raise] identifier[ValueError] ( literal[string] , identifier[ci] ) | def docker_fabric(*args, **kwargs):
"""
:param args: Positional arguments to Docker client.
:param kwargs: Keyword arguments to Docker client.
:return: Docker client.
:rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient
"""
ci = kwargs.get('client_implementation') or env.get('docker_fabric_implementation') or CLIENT_API
if ci == CLIENT_API:
return docker_api(*args, **kwargs) # depends on [control=['if'], data=[]]
elif ci == CLIENT_CLI:
return docker_cli(*args, **kwargs) # depends on [control=['if'], data=[]]
raise ValueError('Invalid client implementation.', ci) |
def unify_string_literals(js_string):
    """Resolve backslash escapes the way JavaScript would.

    In JavaScript the literal '\\d' collapses to 'd' (the backslash is
    simply dropped for unrecognized escapes), whereas Python keeps both
    characters.  Walk the string and hand every backslash sequence to
    ``do_escape`` so the result matches the JavaScript interpretation.
    """
    pieces = []
    pos = 0
    end = len(js_string)
    while pos < end:
        if js_string[pos] == '\\':
            # do_escape consumes the escape and returns the new position
            piece, pos = do_escape(js_string, pos)
            pieces.append(piece)
        else:
            pieces.append(js_string[pos])
            pos += 1
    return ''.join(pieces)
constant[this function parses the string just like javascript
for example literal '\d' in JavaScript would be interpreted
as 'd' - backslash would be ignored and in Pyhon this
would be interpreted as '\d' This function fixes this problem.]
variable[n] assign[=] constant[0]
variable[res] assign[=] constant[]
variable[limit] assign[=] call[name[len], parameter[name[js_string]]]
while compare[name[n] less[<] name[limit]] begin[:]
variable[char] assign[=] call[name[js_string]][name[n]]
if compare[name[char] equal[==] constant[\]] begin[:]
<ast.Tuple object at 0x7da18dc9abf0> assign[=] call[name[do_escape], parameter[name[js_string], name[n]]]
<ast.AugAssign object at 0x7da18dc98670>
return[name[res]] | keyword[def] identifier[unify_string_literals] ( identifier[js_string] ):
literal[string]
identifier[n] = literal[int]
identifier[res] = literal[string]
identifier[limit] = identifier[len] ( identifier[js_string] )
keyword[while] identifier[n] < identifier[limit] :
identifier[char] = identifier[js_string] [ identifier[n] ]
keyword[if] identifier[char] == literal[string] :
identifier[new] , identifier[n] = identifier[do_escape] ( identifier[js_string] , identifier[n] )
identifier[res] += identifier[new]
keyword[else] :
identifier[res] += identifier[char]
identifier[n] += literal[int]
keyword[return] identifier[res] | def unify_string_literals(js_string):
"""this function parses the string just like javascript
for example literal '\\d' in JavaScript would be interpreted
as 'd' - backslash would be ignored and in Pyhon this
would be interpreted as '\\d' This function fixes this problem."""
n = 0
res = ''
limit = len(js_string)
while n < limit:
char = js_string[n]
if char == '\\':
(new, n) = do_escape(js_string, n)
res += new # depends on [control=['if'], data=[]]
else:
res += char
n += 1 # depends on [control=['while'], data=['n']]
return res |
def one(iterable, too_short=None, too_long=None):
    """Return the first item from *iterable*, which is expected to contain only
    that item. Raise an exception if *iterable* is empty or has more than one
    item.
    :func:`one` is useful for ensuring that an iterable contains only one item.
    For example, it can be used to retrieve the result of a database query
    that is expected to return a single row.
    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
    different exception with the *too_short* keyword:
        >>> it = []
        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: too few items in iterable (expected 1)
        >>> too_short = IndexError('too few items')
        >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        IndexError: too few items
    Similarly, if *iterable* contains more than one item, ``ValueError`` will
    be raised. You may specify a different exception with the *too_long*
    keyword:
        >>> it = ['too', 'many']
        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: too many items in iterable (expected 1)
        >>> too_long = RuntimeError
        >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        RuntimeError
    Note that :func:`one` attempts to advance *iterable* twice to ensure there
    is only one item. See :func:`spy` or :func:`peekable` to check iterable
    contents less destructively.
    """
    it = iter(iterable)
    # Empty iterable: raise the caller-supplied exception if given,
    # otherwise the default ValueError.
    try:
        value = next(it)
    except StopIteration:
        raise too_short or ValueError('too few items in iterable (expected 1)')
    # A second successful next() means the iterable had more than one item.
    try:
        next(it)
    except StopIteration:
        pass
    else:
        raise too_long or ValueError('too many items in iterable (expected 1)')
    return value
constant[Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (expected 1)'
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (expected 1)'
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
]
variable[it] assign[=] call[name[iter], parameter[name[iterable]]]
<ast.Try object at 0x7da1b1da3a60>
<ast.Try object at 0x7da1b1da2350>
return[name[value]] | keyword[def] identifier[one] ( identifier[iterable] , identifier[too_short] = keyword[None] , identifier[too_long] = keyword[None] ):
literal[string]
identifier[it] = identifier[iter] ( identifier[iterable] )
keyword[try] :
identifier[value] = identifier[next] ( identifier[it] )
keyword[except] identifier[StopIteration] :
keyword[raise] identifier[too_short] keyword[or] identifier[ValueError] ( literal[string] )
keyword[try] :
identifier[next] ( identifier[it] )
keyword[except] identifier[StopIteration] :
keyword[pass]
keyword[else] :
keyword[raise] identifier[too_long] keyword[or] identifier[ValueError] ( literal[string] )
keyword[return] identifier[value] | def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (expected 1)'
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (expected 1)'
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
it = iter(iterable)
try:
value = next(it) # depends on [control=['try'], data=[]]
except StopIteration:
raise too_short or ValueError('too few items in iterable (expected 1)') # depends on [control=['except'], data=[]]
try:
next(it) # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]]
else:
raise too_long or ValueError('too many items in iterable (expected 1)')
return value |
def convert_yielded(yielded):
    """Convert a yielded object into a `.Future`.
    The default implementation accepts lists, dictionaries, and Futures.
    If the `~functools.singledispatch` library is available, this function
    may be extended to support additional types. For example::
        @convert_yielded.register(asyncio.Future)
        def _(asyncio_future):
            return tornado.platform.asyncio.to_tornado_future(asyncio_future)
    .. versionadded:: 4.1
    """
    # Lists and dicts containing YieldPoints were handled earlier.
    if yielded is None:
        return moment
    if isinstance(yielded, (list, dict)):
        return multi(yielded)
    if is_future(yielded):
        return yielded
    if isawaitable(yielded):
        return _wrap_awaitable(yielded)
    raise BadYieldError("yielded unknown object %r" % (yielded,))
constant[Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
]
if compare[name[yielded] is constant[None]] begin[:]
return[name[moment]] | keyword[def] identifier[convert_yielded] ( identifier[yielded] ):
literal[string]
keyword[if] identifier[yielded] keyword[is] keyword[None] :
keyword[return] identifier[moment]
keyword[elif] identifier[isinstance] ( identifier[yielded] ,( identifier[list] , identifier[dict] )):
keyword[return] identifier[multi] ( identifier[yielded] )
keyword[elif] identifier[is_future] ( identifier[yielded] ):
keyword[return] identifier[yielded]
keyword[elif] identifier[isawaitable] ( identifier[yielded] ):
keyword[return] identifier[_wrap_awaitable] ( identifier[yielded] )
keyword[else] :
keyword[raise] identifier[BadYieldError] ( literal[string] %( identifier[yielded] ,)) | def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled earlier.
if yielded is None:
return moment # depends on [control=['if'], data=[]]
elif isinstance(yielded, (list, dict)):
return multi(yielded) # depends on [control=['if'], data=[]]
elif is_future(yielded):
return yielded # depends on [control=['if'], data=[]]
elif isawaitable(yielded):
return _wrap_awaitable(yielded) # depends on [control=['if'], data=[]]
else:
raise BadYieldError('yielded unknown object %r' % (yielded,)) |
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'
    This is based on Correa et al. (2015a,b,c)
    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.
    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed
    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.
    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # NOTE(review): the file is opened in binary ('wb') but written with
        # str data below, which matches Python 2 file semantics — confirm
        # before running under Python 3 (where this would raise TypeError).
        fout = open(filename, 'wb')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                ('Mi', float), ('z', float), ('dMdt', float),
                ('Mz', float), ('c', float), ('sig', float),
                ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                ('Mi', float), ('z', float),
                ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                ('Mi', float), ('z', float), ('c', float),
                ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mamss
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            # NOTE: when ztemp is empty this row of 'dataset' keeps its
            # zero-initialized values.
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        # 'fout' only exists when a filename was supplied, hence the guard.
        fout.close() if filename else None
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
constant[ Run commah code on halo of mass 'Mi' at redshift 'zi' with
accretion and profile history at higher redshifts 'z'
This is based on Correa et al. (2015a,b,c)
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
zi : float / numpy array, optional
Redshift at which halo has mass 'Mi'. If float then all
halo masses 'Mi' are assumed to be at this redshift.
If array but Mi is float, then this halo mass is used across
all starting redshifts. If both Mi and zi are arrays then they
have to be the same size for one - to - one correspondence between
halo mass and the redshift at which it has that mass. Default is 0.
Mi : float / numpy array, optional
Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
are solved for this halo mass. If array but zi is float, then this
redshift is applied to all halo masses. If both Mi and zi are
arrays then they have to be the same size for one - to - one
correspondence between halo mass and the redshift at which it
has that mass. Default is 1e12 Msol.
z : float / numpy array, optional
Redshift to solve commah code at. Must have zi<z else these steps
are skipped. Default is False, meaning commah is solved at z=zi
com : bool, optional
If true then solve for concentration-mass,
default is True.
mah : bool, optional
If true then solve for accretion rate and halo mass history,
default is True.
filename : bool / str, optional
If str is passed this is used as a filename for output of commah
verbose : bool, optional
If true then give comments, default is None.
retcosmo : bool, optional
Return cosmological parameters used as a dict if retcosmo = True,
default is None.
Returns
-------
dataset : structured dataset
dataset contains structured columns of size
(size(Mi) > size(z)) by size(z)
If mah = True and com = False then columns are
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
at starting redshift 'zi'
If mah = False and com = True then columns are
('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
at the redshift 'z', 'sig' is the mass variance 'sigma',
'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
If mah = True and com = True then columns are:
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
('c',float),('sig',float),('nu',float),('zf',float)
file : structured dataset with name 'filename' if passed
Raises
------
Output -1
If com = False and mah = False as user has to select something.
Output -1
If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
corresponding masses and redshifts of output.
Examples
--------
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> import examples
>>> examples.runcommands() # A series of ways to query structured dataset
>>> examples.plotcommands() # Examples to plot data
]
if <ast.BoolOp object at 0x7da1b10806a0> begin[:]
call[name[print], parameter[constant[User has to choose com=True and / or mah=True ]]]
return[<ast.UnaryOp object at 0x7da1b1081b70>]
variable[results] assign[=] call[name[_checkinput], parameter[name[zi], name[Mi]]]
if compare[name[results] equal[==] <ast.UnaryOp object at 0x7da1b1081cf0>] begin[:]
return[<ast.UnaryOp object at 0x7da1b1082e60>]
variable[cosmo] assign[=] call[name[getcosmo], parameter[name[cosmology]]]
if name[filename] begin[:]
call[name[print], parameter[binary_operation[constant[Output to file %r] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
variable[fout] assign[=] call[name[open], parameter[name[filename], constant[wb]]]
<ast.Try object at 0x7da1b10820e0>
if name[retcosmo] begin[:]
return[tuple[[<ast.Name object at 0x7da1b26ae9e0>, <ast.Name object at 0x7da1b26aee90>]]] | keyword[def] identifier[run] ( identifier[cosmology] , identifier[zi] = literal[int] , identifier[Mi] = literal[int] , identifier[z] = keyword[False] , identifier[com] = keyword[True] , identifier[mah] = keyword[True] ,
identifier[filename] = keyword[None] , identifier[verbose] = keyword[None] , identifier[retcosmo] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[com] keyword[and] keyword[not] identifier[mah] :
identifier[print] ( literal[string] )
keyword[return] (- literal[int] )
identifier[results] = identifier[_checkinput] ( identifier[zi] , identifier[Mi] , identifier[z] = identifier[z] , identifier[verbose] = identifier[verbose] )
keyword[if] ( identifier[results] ==- literal[int] ):
keyword[return] (- literal[int] )
keyword[else] :
identifier[zi] , identifier[Mi] , identifier[z] , identifier[lenz] , identifier[lenm] , identifier[lenzout] = identifier[results]
identifier[cosmo] = identifier[getcosmo] ( identifier[cosmology] )
keyword[if] identifier[filename] :
identifier[print] ( literal[string] %( identifier[filename] ))
identifier[fout] = identifier[open] ( identifier[filename] , literal[string] )
keyword[try] :
keyword[if] identifier[mah] keyword[and] identifier[com] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string]
literal[string] )
keyword[if] identifier[filename] :
identifier[fout] . identifier[write] ( identifier[_getcosmoheader] ( identifier[cosmo] )+ literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[dataset] = identifier[np] . identifier[zeros] (( identifier[lenm] , identifier[lenzout] ), identifier[dtype] =[( literal[string] , identifier[float] ),
( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),
( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),
( literal[string] , identifier[float] ),( literal[string] , identifier[float] )])
keyword[elif] identifier[mah] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
keyword[if] identifier[filename] :
identifier[fout] . identifier[write] ( identifier[_getcosmoheader] ( identifier[cosmo] )+ literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string] + literal[string] )
identifier[dataset] = identifier[np] . identifier[zeros] (( identifier[lenm] , identifier[lenzout] ), identifier[dtype] =[( literal[string] , identifier[float] ),
( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),
( literal[string] , identifier[float] ),( literal[string] , identifier[float] )])
keyword[else] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
keyword[if] identifier[filename] :
identifier[fout] . identifier[write] ( identifier[_getcosmoheader] ( identifier[cosmo] )+ literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[fout] . identifier[write] ( literal[string]
literal[string]
literal[string] + literal[string] )
identifier[dataset] = identifier[np] . identifier[zeros] (( identifier[lenm] , identifier[lenzout] ), identifier[dtype] =[( literal[string] , identifier[float] ),
( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),
( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),( literal[string] , identifier[float] )])
keyword[for] identifier[i_ind] ,( identifier[zval] , identifier[Mval] ) keyword[in] identifier[enumerate] ( identifier[_izip] ( identifier[zi] , identifier[Mi] )):
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[Mval] , identifier[zval] ))
keyword[if] identifier[z] keyword[is] keyword[False] :
identifier[ztemp] = identifier[np] . identifier[array] ( identifier[zval] , identifier[ndmin] = literal[int] , identifier[dtype] = identifier[float] )
keyword[else] :
identifier[ztemp] = identifier[np] . identifier[array] ( identifier[z] [ identifier[z] >= identifier[zval] ], identifier[dtype] = identifier[float] )
keyword[if] identifier[ztemp] . identifier[size] :
identifier[dMdt] , identifier[Mz] = identifier[MAH] ( identifier[ztemp] , identifier[zval] , identifier[Mval] ,** identifier[cosmo] )
keyword[if] identifier[mah] keyword[and] identifier[com] :
identifier[c] , identifier[sig] , identifier[nu] , identifier[zf] = identifier[COM] ( identifier[ztemp] , identifier[Mz] ,** identifier[cosmo] )
keyword[for] identifier[j_ind] , identifier[j_val] keyword[in] identifier[enumerate] ( identifier[ztemp] ):
identifier[dataset] [ identifier[i_ind] , identifier[j_ind] ]=( identifier[zval] , identifier[Mval] , identifier[ztemp] [ identifier[j_ind] ], identifier[dMdt] [ identifier[j_ind] ], identifier[Mz] [ identifier[j_ind] ],
identifier[c] [ identifier[j_ind] ], identifier[sig] [ identifier[j_ind] ], identifier[nu] [ identifier[j_ind] ], identifier[zf] [ identifier[j_ind] ])
keyword[if] identifier[filename] :
identifier[fout] . identifier[write] (
literal[string] . identifier[format] (
identifier[zval] , identifier[Mval] , identifier[ztemp] [ identifier[j_ind] ], identifier[dMdt] [ identifier[j_ind] ],
identifier[Mz] [ identifier[j_ind] ], identifier[c] [ identifier[j_ind] ], identifier[sig] [ identifier[j_ind] ], identifier[nu] [ identifier[j_ind] ],
identifier[zf] [ identifier[j_ind] ]))
keyword[elif] identifier[mah] :
keyword[for] identifier[j_ind] , identifier[j_val] keyword[in] identifier[enumerate] ( identifier[ztemp] ):
identifier[dataset] [ identifier[i_ind] , identifier[j_ind] ]=( identifier[zval] , identifier[Mval] , identifier[ztemp] [ identifier[j_ind] ], identifier[dMdt] [ identifier[j_ind] ], identifier[Mz] [ identifier[j_ind] ])
keyword[if] identifier[filename] :
identifier[fout] . identifier[write] ( literal[string] . identifier[format] (
identifier[zval] , identifier[Mval] , identifier[ztemp] [ identifier[j_ind] ], identifier[dMdt] [ identifier[j_ind] ],
identifier[Mz] [ identifier[j_ind] ]))
keyword[else] :
identifier[c] , identifier[sig] , identifier[nu] , identifier[zf] = identifier[COM] ( identifier[ztemp] , identifier[Mz] ,** identifier[cosmo] )
keyword[for] identifier[j_ind] , identifier[j_val] keyword[in] identifier[enumerate] ( identifier[ztemp] ):
identifier[dataset] [ identifier[i_ind] , identifier[j_ind] ]=( identifier[zval] , identifier[Mval] , identifier[ztemp] [ identifier[j_ind] ], identifier[c] [ identifier[j_ind] ], identifier[sig] [ identifier[j_ind] ],
identifier[nu] [ identifier[j_ind] ], identifier[zf] [ identifier[j_ind] ])
keyword[if] identifier[filename] :
identifier[fout] . identifier[write] ( literal[string] . identifier[format] (
identifier[zval] , identifier[Mval] , identifier[ztemp] [ identifier[j_ind] ], identifier[c] [ identifier[j_ind] ], identifier[sig] [ identifier[j_ind] ],
identifier[nu] [ identifier[j_ind] ], identifier[zf] [ identifier[j_ind] ]))
keyword[finally] :
identifier[fout] . identifier[close] () keyword[if] identifier[filename] keyword[else] keyword[None]
keyword[if] identifier[retcosmo] :
keyword[return] ( identifier[dataset] , identifier[cosmo] )
keyword[else] :
keyword[return] ( identifier[dataset] ) | def run(cosmology, zi=0, Mi=1000000000000.0, z=False, com=True, mah=True, filename=None, verbose=None, retcosmo=None):
""" Run commah code on halo of mass 'Mi' at redshift 'zi' with
accretion and profile history at higher redshifts 'z'
This is based on Correa et al. (2015a,b,c)
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
zi : float / numpy array, optional
Redshift at which halo has mass 'Mi'. If float then all
halo masses 'Mi' are assumed to be at this redshift.
If array but Mi is float, then this halo mass is used across
all starting redshifts. If both Mi and zi are arrays then they
have to be the same size for one - to - one correspondence between
halo mass and the redshift at which it has that mass. Default is 0.
Mi : float / numpy array, optional
Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
are solved for this halo mass. If array but zi is float, then this
redshift is applied to all halo masses. If both Mi and zi are
arrays then they have to be the same size for one - to - one
correspondence between halo mass and the redshift at which it
has that mass. Default is 1e12 Msol.
z : float / numpy array, optional
Redshift to solve commah code at. Must have zi<z else these steps
are skipped. Default is False, meaning commah is solved at z=zi
com : bool, optional
If true then solve for concentration-mass,
default is True.
mah : bool, optional
If true then solve for accretion rate and halo mass history,
default is True.
filename : bool / str, optional
If str is passed this is used as a filename for output of commah
verbose : bool, optional
If true then give comments, default is None.
retcosmo : bool, optional
Return cosmological parameters used as a dict if retcosmo = True,
default is None.
Returns
-------
dataset : structured dataset
dataset contains structured columns of size
(size(Mi) > size(z)) by size(z)
If mah = True and com = False then columns are
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
at starting redshift 'zi'
If mah = False and com = True then columns are
('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
at the redshift 'z', 'sig' is the mass variance 'sigma',
'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
If mah = True and com = True then columns are:
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
('c',float),('sig',float),('nu',float),('zf',float)
file : structured dataset with name 'filename' if passed
Raises
------
Output -1
If com = False and mah = False as user has to select something.
Output -1
If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
corresponding masses and redshifts of output.
Examples
--------
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> import examples
>>> examples.runcommands() # A series of ways to query structured dataset
>>> examples.plotcommands() # Examples to plot data
"""
# Check user choices...
if not com and (not mah):
print('User has to choose com=True and / or mah=True ')
return -1 # depends on [control=['if'], data=[]]
# Convert arrays / lists to np.array
# and inflate redshift / mass axis
# to match each other for later loop
results = _checkinput(zi, Mi, z=z, verbose=verbose)
# Return if results is -1
if results == -1:
return -1 # depends on [control=['if'], data=[]]
else:
# If not, unpack the returned iterable
(zi, Mi, z, lenz, lenm, lenzout) = results
# At this point we will have lenm objects to iterate over
# Get the cosmological parameters for the given cosmology
cosmo = getcosmo(cosmology)
# Create output file if desired
if filename:
print('Output to file %r' % filename)
fout = open(filename, 'wb') # depends on [control=['if'], data=[]]
# Create the structured dataset
try:
if mah and com:
if verbose:
print('Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, zf') # depends on [control=['if'], data=[]]
if filename:
fout.write(_getcosmoheader(cosmo) + '\n')
fout.write('# Initial z - Initial Halo - Output z - Accretion - Final Halo - concentration - Mass - Peak - Formation z ' + '\n')
fout.write('# - mass - - rate - mass - - Variance - Height - ' + '\n')
fout.write('# - (M200) - - (dM/dt) - (M200) - - (sigma) - (nu) - ' + '\n')
fout.write('# - [Msol] - - [Msol/yr] - [Msol] - - - - ' + '\n') # depends on [control=['if'], data=[]]
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float), ('Mi', float), ('z', float), ('dMdt', float), ('Mz', float), ('c', float), ('sig', float), ('nu', float), ('zf', float)]) # depends on [control=['if'], data=[]]
elif mah:
if verbose:
print('Output requested is zi, Mi, z, dMdt, Mz') # depends on [control=['if'], data=[]]
if filename:
fout.write(_getcosmoheader(cosmo) + '\n')
fout.write('# Initial z - Initial Halo - Output z - Accretion - Final Halo ' + '\n')
fout.write('# - mass - - rate - mass ' + '\n')
fout.write('# - (M200) - - (dm/dt) - (M200) ' + '\n')
fout.write('# - [Msol] - - [Msol/yr] - [Msol] ' + '\n') # depends on [control=['if'], data=[]]
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float), ('Mi', float), ('z', float), ('dMdt', float), ('Mz', float)]) # depends on [control=['if'], data=[]]
else:
if verbose:
print('Output requested is zi, Mi, z, c, sig, nu, zf') # depends on [control=['if'], data=[]]
if filename:
fout.write(_getcosmoheader(cosmo) + '\n')
fout.write('# Initial z - Initial Halo - Output z - concentration - Mass - Peak - Formation z ' + '\n')
fout.write('# - mass - - - Variance - Height - ' + '\n')
fout.write('# - (M200) - - - (sigma) - (nu) - ' + '\n')
fout.write('# - [Msol] - - - - - ' + '\n') # depends on [control=['if'], data=[]]
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float), ('Mi', float), ('z', float), ('c', float), ('sig', float), ('nu', float), ('zf', float)])
# Now loop over the combination of initial redshift and halo mamss
for (i_ind, (zval, Mval)) in enumerate(_izip(zi, Mi)):
if verbose:
print('Output Halo of Mass Mi=%s at zi=%s' % (Mval, zval)) # depends on [control=['if'], data=[]]
# For a given halo mass Mi at redshift zi need to know
# output redshifts 'z'
# Check that all requested redshifts are greater than
# input redshift, except if z is False, in which case
# only solve z at zi, i.e. remove a loop
if z is False:
ztemp = np.array(zval, ndmin=1, dtype=float) # depends on [control=['if'], data=[]]
else:
ztemp = np.array(z[z >= zval], dtype=float)
# Loop over the output redshifts
if ztemp.size:
# Return accretion rates and halo mass progenitors at
# redshifts 'z' for object of mass Mi at zi
(dMdt, Mz) = MAH(ztemp, zval, Mval, **cosmo)
if mah and com:
# More expensive to return concentrations
(c, sig, nu, zf) = COM(ztemp, Mz, **cosmo)
# Save all arrays
for (j_ind, j_val) in enumerate(ztemp):
dataset[i_ind, j_ind] = (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
if filename:
fout.write('{}, {}, {}, {}, {}, {}, {}, {}, {} \n'.format(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif mah:
# Save only MAH arrays
for (j_ind, j_val) in enumerate(ztemp):
dataset[i_ind, j_ind] = (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
if filename:
fout.write('{}, {}, {}, {}, {} \n'.format(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
# Output only COM arrays
(c, sig, nu, zf) = COM(ztemp, Mz, **cosmo)
# For any halo mass Mi at redshift zi
# solve for c, sig, nu and zf
for (j_ind, j_val) in enumerate(ztemp):
dataset[i_ind, j_ind] = (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
if filename:
fout.write('{}, {}, {}, {}, {}, {}, {} \n'.format(zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
finally:
# Make sure to close the file if it was opened
fout.close() if filename else None
if retcosmo:
return (dataset, cosmo) # depends on [control=['if'], data=[]]
else:
return dataset |
def update_item(TableName=None, Key=None, AttributeUpdates=None, Expected=None, ConditionalOperator=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, UpdateExpression=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):
"""
Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).
You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.
See also: AWS API Documentation
Examples
This example updates an item in the Music table. It adds a new attribute (Year) and modifies the AlbumTitle attribute. All of the attributes in the item, as they appear after the update, are returned in the response.
Expected Output:
:example: response = client.update_item(
TableName='string',
Key={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
AttributeUpdates={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Action': 'ADD'|'PUT'|'DELETE'
}
},
Expected={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Exists': True|False,
'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',
'AttributeValueList': [
{
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
]
}
},
ConditionalOperator='AND'|'OR',
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
UpdateExpression='string',
ConditionExpression='string',
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
}
)
:type TableName: string
:param TableName: [REQUIRED]
The name of the table containing the item to update.
:type Key: dict
:param Key: [REQUIRED]
The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.
For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type AttributeUpdates: dict
:param AttributeUpdates: This is a legacy parameter. Use UpdateExpression instead. For more information, see AttributeUpdates in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.
Note
You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.
Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.
Value (dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data TYpes in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Action (string) --Specifies how to perform the update. Valid values are PUT (default), DELETE , and ADD . The behavior depends on whether the specified primary key already exists in the table.
If an item with the specified *Key* is found in the table:
PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.
DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c] , then the final attribute value would be [b] . Specifying an empty set is an error.
ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2] , and the ADD action specified [3] , then the final attribute value would be [1,2,3] . An error occurs if an Add action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets.
This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types.
If no item with the specified *Key* is found:
PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute.
DELETE - Nothing happens; there is no attribute to delete.
ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified.
:type Expected: dict
:param Expected: This is a legacy parameter. Use ConditionExpresssion instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:
Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.
Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.
Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.
Value (dict) --Represents the data for the expected attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:
If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException .
If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.
DynamoDB returns a ValidationException if:
Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)
Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)
ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.
The following comparison operators are available:
EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
The following are descriptions of each comparison operator.
EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.
Note
This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.
NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.
Note
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.
CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).
IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.
BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}
AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.
For type Number, value comparisons are numeric.
String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type ConditionalOperator: string
:param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .
:type ReturnValues: string
:param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem , the valid values are:
NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)
ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation.
UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation.
ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation.
UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed.
Values returned are strongly consistent
:type ReturnConsumedCapacity: string
:param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:
INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
NONE - No ConsumedCapacity details are included in the response.
:type ReturnItemCollectionMetrics: string
:param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.
:type UpdateExpression: string
:param UpdateExpression: An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.
The following action values are available for UpdateExpression .
SET - Adds one or more attributes and values to an item. If any of these attribute already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val SET supports the following functions:
if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.
list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.
These function names are case-sensitive.
REMOVE - Removes one or more attributes from an item.
ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2] , and the ADD action specified [3] , then the final attribute value is [1,2,3] . An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.
Warning
The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.
DELETE - Deletes an element from a set. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c] , then the final attribute value is [b] . Specifying an empty set is an error.
Warning
The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.
You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide .
:type ConditionExpression: string
:param ConditionExpression: A condition that must be satisfied in order for a conditional update to succeed.
An expression can contain any of the following:
Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.
Comparison operators: = | | | | = | = | BETWEEN | IN
Logical operators: AND | OR | NOT
For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
:type ExpressionAttributeNames: dict
:param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :
To access an attribute whose name conflicts with a DynamoDB reserved word.
To create a placeholder for repeating occurrences of an attribute name in an expression.
To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
Percentile
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :
{'#P':'Percentile'}
You could then use this substitution in an expression, as in this example:
#P = :val
Note
Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.
For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .
(string) --
(string) --
:type ExpressionAttributeValues: dict
:param ExpressionAttributeValues: One or more values that can be substituted in an expression.
Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
Available | Backordered | Discontinued
You would first need to specify ExpressionAttributeValues as follows:
{ ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }
You could then use these values in an expression, such as this:
ProductStatus IN (:avail, :back, :disc)
For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:rtype: dict
:return: {
'Attributes': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'Table': {
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
}
},
'ItemCollectionMetrics': {
'ItemCollectionKey': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'SizeEstimateRangeGB': [
123.0,
]
}
}
:returns:
(string) --
"""
pass | def function[update_item, parameter[TableName, Key, AttributeUpdates, Expected, ConditionalOperator, ReturnValues, ReturnConsumedCapacity, ReturnItemCollectionMetrics, UpdateExpression, ConditionExpression, ExpressionAttributeNames, ExpressionAttributeValues]]:
constant[
Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).
You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.
See also: AWS API Documentation
Examples
This example updates an item in the Music table. It adds a new attribute (Year) and modifies the AlbumTitle attribute. All of the attributes in the item, as they appear after the update, are returned in the response.
Expected Output:
:example: response = client.update_item(
TableName='string',
Key={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
AttributeUpdates={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Action': 'ADD'|'PUT'|'DELETE'
}
},
Expected={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Exists': True|False,
'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',
'AttributeValueList': [
{
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
]
}
},
ConditionalOperator='AND'|'OR',
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
UpdateExpression='string',
ConditionExpression='string',
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
}
)
:type TableName: string
:param TableName: [REQUIRED]
The name of the table containing the item to update.
:type Key: dict
:param Key: [REQUIRED]
The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.
For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type AttributeUpdates: dict
:param AttributeUpdates: This is a legacy parameter. Use UpdateExpression instead. For more information, see AttributeUpdates in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.
Note
You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.
Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.
Value (dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data TYpes in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Action (string) --Specifies how to perform the update. Valid values are PUT (default), DELETE , and ADD . The behavior depends on whether the specified primary key already exists in the table.
If an item with the specified *Key* is found in the table:
PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.
DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c] , then the final attribute value would be [b] . Specifying an empty set is an error.
ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2] , and the ADD action specified [3] , then the final attribute value would be [1,2,3] . An error occurs if an Add action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets.
This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types.
If no item with the specified *Key* is found:
PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute.
DELETE - Nothing happens; there is no attribute to delete.
ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified.
:type Expected: dict
:param Expected: This is a legacy parameter. Use ConditionExpresssion instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:
Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.
Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.
Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.
Value (dict) --Represents the data for the expected attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:
If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException .
If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.
DynamoDB returns a ValidationException if:
Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)
Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)
ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.
The following comparison operators are available:
EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
The following are descriptions of each comparison operator.
EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.
Note
This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.
NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.
Note
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.
CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).
IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.
BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}
AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.
For type Number, value comparisons are numeric.
String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type ConditionalOperator: string
:param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .
:type ReturnValues: string
:param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem , the valid values are:
NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)
ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation.
UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation.
ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation.
UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed.
Values returned are strongly consistent
:type ReturnConsumedCapacity: string
:param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:
INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
NONE - No ConsumedCapacity details are included in the response.
:type ReturnItemCollectionMetrics: string
:param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.
:type UpdateExpression: string
:param UpdateExpression: An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.
The following action values are available for UpdateExpression .
SET - Adds one or more attributes and values to an item. If any of these attribute already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val SET supports the following functions:
if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.
list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.
These function names are case-sensitive.
REMOVE - Removes one or more attributes from an item.
ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2] , and the ADD action specified [3] , then the final attribute value is [1,2,3] . An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.
Warning
The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.
DELETE - Deletes an element from a set. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c] , then the final attribute value is [b] . Specifying an empty set is an error.
Warning
The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.
You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide .
:type ConditionExpression: string
:param ConditionExpression: A condition that must be satisfied in order for a conditional update to succeed.
An expression can contain any of the following:
Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.
Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
Logical operators: AND | OR | NOT
For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
:type ExpressionAttributeNames: dict
:param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :
To access an attribute whose name conflicts with a DynamoDB reserved word.
To create a placeholder for repeating occurrences of an attribute name in an expression.
To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
Percentile
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :
{'#P':'Percentile'}
You could then use this substitution in an expression, as in this example:
#P = :val
Note
Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.
For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .
(string) --
(string) --
:type ExpressionAttributeValues: dict
:param ExpressionAttributeValues: One or more values that can be substituted in an expression.
Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
Available | Backordered | Discontinued
You would first need to specify ExpressionAttributeValues as follows:
{ ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }
You could then use these values in an expression, such as this:
ProductStatus IN (:avail, :back, :disc)
For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:rtype: dict
:return: {
'Attributes': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'Table': {
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
}
},
'ItemCollectionMetrics': {
'ItemCollectionKey': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'SizeEstimateRangeGB': [
123.0,
]
}
}
:returns:
(string) --
]
pass | keyword[def] identifier[update_item] ( identifier[TableName] = keyword[None] , identifier[Key] = keyword[None] , identifier[AttributeUpdates] = keyword[None] , identifier[Expected] = keyword[None] , identifier[ConditionalOperator] = keyword[None] , identifier[ReturnValues] = keyword[None] , identifier[ReturnConsumedCapacity] = keyword[None] , identifier[ReturnItemCollectionMetrics] = keyword[None] , identifier[UpdateExpression] = keyword[None] , identifier[ConditionExpression] = keyword[None] , identifier[ExpressionAttributeNames] = keyword[None] , identifier[ExpressionAttributeValues] = keyword[None] ):
literal[string]
keyword[pass] | def update_item(TableName=None, Key=None, AttributeUpdates=None, Expected=None, ConditionalOperator=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, UpdateExpression=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):
"""
Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).
You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.
See also: AWS API Documentation
Examples
This example updates an item in the Music table. It adds a new attribute (Year) and modifies the AlbumTitle attribute. All of the attributes in the item, as they appear after the update, are returned in the response.
Expected Output:
:example: response = client.update_item(
TableName='string',
Key={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
AttributeUpdates={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Action': 'ADD'|'PUT'|'DELETE'
}
},
Expected={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Exists': True|False,
'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',
'AttributeValueList': [
{
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
]
}
},
ConditionalOperator='AND'|'OR',
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
UpdateExpression='string',
ConditionExpression='string',
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
}
)
:type TableName: string
:param TableName: [REQUIRED]
The name of the table containing the item to update.
:type Key: dict
:param Key: [REQUIRED]
The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.
For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type AttributeUpdates: dict
:param AttributeUpdates: This is a legacy parameter. Use UpdateExpression instead. For more information, see AttributeUpdates in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.
Note
You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.
Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.
Value (dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Action (string) --Specifies how to perform the update. Valid values are PUT (default), DELETE , and ADD . The behavior depends on whether the specified primary key already exists in the table.
If an item with the specified *Key* is found in the table:
PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.
DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c] , then the final attribute value would be [b] . Specifying an empty set is an error.
ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2] , and the ADD action specified [3] , then the final attribute value would be [1,2,3] . An error occurs if an Add action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets.
This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types.
If no item with the specified *Key* is found:
PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute.
DELETE - Nothing happens; there is no attribute to delete.
ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified.
:type Expected: dict
:param Expected: This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:
Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.
Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.
Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.
Value (dict) --Represents the data for the expected attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:
If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException .
If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.
DynamoDB returns a ValidationException if:
Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)
Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)
ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.
The following comparison operators are available:
EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
The following are descriptions of each comparison operator.
EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.
Note
This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.
NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.
Note
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.
CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).
IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.
BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}
AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.
For type Number, value comparisons are numeric.
String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type ConditionalOperator: string
:param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .
:type ReturnValues: string
:param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem , the valid values are:
NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)
ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation.
UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation.
ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation.
UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed.
Values returned are strongly consistent
:type ReturnConsumedCapacity: string
:param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:
INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
NONE - No ConsumedCapacity details are included in the response.
:type ReturnItemCollectionMetrics: string
:param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.
:type UpdateExpression: string
:param UpdateExpression: An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.
The following action values are available for UpdateExpression .
SET - Adds one or more attributes and values to an item. If any of these attribute already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val SET supports the following functions:
if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.
list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.
These function names are case-sensitive.
REMOVE - Removes one or more attributes from an item.
ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2] , and the ADD action specified [3] , then the final attribute value is [1,2,3] . An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.
Warning
The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.
DELETE - Deletes an element from a set. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c] , then the final attribute value is [b] . Specifying an empty set is an error.
Warning
The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.
You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide .
:type ConditionExpression: string
:param ConditionExpression: A condition that must be satisfied in order for a conditional update to succeed.
An expression can contain any of the following:
Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.
Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
Logical operators: AND | OR | NOT
For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
:type ExpressionAttributeNames: dict
:param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :
To access an attribute whose name conflicts with a DynamoDB reserved word.
To create a placeholder for repeating occurrences of an attribute name in an expression.
To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
Percentile
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :
{'#P':'Percentile'}
You could then use this substitution in an expression, as in this example:
#P = :val
Note
Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.
For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .
(string) --
(string) --
:type ExpressionAttributeValues: dict
:param ExpressionAttributeValues: One or more values that can be substituted in an expression.
Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
Available | Backordered | Discontinued
You would first need to specify ExpressionAttributeValues as follows:
{ ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }
You could then use these values in an expression, such as this:
ProductStatus IN (:avail, :back, :disc)
For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:rtype: dict
:return: {
'Attributes': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'Table': {
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
}
},
'ItemCollectionMetrics': {
'ItemCollectionKey': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'SizeEstimateRangeGB': [
123.0,
]
}
}
:returns:
(string) --
"""
pass |
def threshold_monitor_hidden_threshold_monitor_Memory_poll(self, **kwargs):
    """Build the threshold-monitor Memory ``poll`` payload and dispatch it.

    Assembles the XML tree
    config/threshold-monitor-hidden/threshold-monitor/Memory/poll, fills the
    <poll> leaf from ``kwargs['poll']`` (required), and hands the tree to the
    callback taken from ``kwargs['callback']``, defaulting to
    ``self._callback``.

    :return: whatever the callback returns for the assembled tree.
    """
    root = ET.Element("config")
    hidden = ET.SubElement(
        root, "threshold-monitor-hidden",
        xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
    monitor = ET.SubElement(hidden, "threshold-monitor")
    memory = ET.SubElement(monitor, "Memory")
    leaf = ET.SubElement(memory, "poll")
    leaf.text = kwargs.pop('poll')
    dispatch = kwargs.pop('callback', self._callback)
    return dispatch(root)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[threshold_monitor_hidden] assign[=] call[name[ET].SubElement, parameter[name[config], constant[threshold-monitor-hidden]]]
variable[threshold_monitor] assign[=] call[name[ET].SubElement, parameter[name[threshold_monitor_hidden], constant[threshold-monitor]]]
variable[Memory] assign[=] call[name[ET].SubElement, parameter[name[threshold_monitor], constant[Memory]]]
variable[poll] assign[=] call[name[ET].SubElement, parameter[name[Memory], constant[poll]]]
name[poll].text assign[=] call[name[kwargs].pop, parameter[constant[poll]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[threshold_monitor_hidden_threshold_monitor_Memory_poll] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[threshold_monitor_hidden] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[threshold_monitor] = identifier[ET] . identifier[SubElement] ( identifier[threshold_monitor_hidden] , literal[string] )
identifier[Memory] = identifier[ET] . identifier[SubElement] ( identifier[threshold_monitor] , literal[string] )
identifier[poll] = identifier[ET] . identifier[SubElement] ( identifier[Memory] , literal[string] )
identifier[poll] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def threshold_monitor_hidden_threshold_monitor_Memory_poll(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
threshold_monitor_hidden = ET.SubElement(config, 'threshold-monitor-hidden', xmlns='urn:brocade.com:mgmt:brocade-threshold-monitor')
threshold_monitor = ET.SubElement(threshold_monitor_hidden, 'threshold-monitor')
Memory = ET.SubElement(threshold_monitor, 'Memory')
poll = ET.SubElement(Memory, 'poll')
poll.text = kwargs.pop('poll')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _process_state_in_progress(self, job_record):
    """Handle a job record that is currently in STATE_IN_PROGRESS."""
    uow = self.uow_dao.get_one(job_record.related_unit_of_work)
    try:
        next_state = self._compute_next_job_state(job_record)
        # An active unit_of_work means a large Job spans more than one
        # Scheduler tick: let it complete and touch no Scheduler records.
        if not uow.is_active and uow.is_finished:
            # The previous UOW finished - publish a fresh one covering the
            # records inserted since, and move the job to its next state.
            follow_up_uow, _ = self.insert_and_publish_uow(
                job_record, 0, int(uow.end_id) + 1)
            self.update_job(job_record, follow_up_uow, next_state)
    except ValueError:
        # Raised for future timeperiods - nothing to process yet.
        pass
constant[ method that takes care of processing job records in STATE_IN_PROGRESS state]
def function[_process_state, parameter[target_state, uow]]:
if name[uow].is_active begin[:]
pass
variable[uow] assign[=] call[name[self].uow_dao.get_one, parameter[name[job_record].related_unit_of_work]]
<ast.Try object at 0x7da1b247d7b0> | keyword[def] identifier[_process_state_in_progress] ( identifier[self] , identifier[job_record] ):
literal[string]
keyword[def] identifier[_process_state] ( identifier[target_state] , identifier[uow] ):
keyword[if] identifier[uow] . identifier[is_active] :
keyword[pass]
keyword[elif] identifier[uow] . identifier[is_finished] :
identifier[new_uow] , identifier[is_duplicate] = identifier[self] . identifier[insert_and_publish_uow] ( identifier[job_record] , literal[int] , identifier[int] ( identifier[uow] . identifier[end_id] )+ literal[int] )
identifier[self] . identifier[update_job] ( identifier[job_record] , identifier[new_uow] , identifier[target_state] )
identifier[uow] = identifier[self] . identifier[uow_dao] . identifier[get_one] ( identifier[job_record] . identifier[related_unit_of_work] )
keyword[try] :
identifier[target_state] = identifier[self] . identifier[_compute_next_job_state] ( identifier[job_record] )
identifier[_process_state] ( identifier[target_state] , identifier[uow] )
keyword[except] identifier[ValueError] :
keyword[pass] | def _process_state_in_progress(self, job_record):
""" method that takes care of processing job records in STATE_IN_PROGRESS state"""
def _process_state(target_state, uow):
if uow.is_active:
# Large Job processing takes more than 1 tick of the Scheduler
# Let the Job processing complete - do no updates to Scheduler records
pass # depends on [control=['if'], data=[]]
elif uow.is_finished:
# create new UOW to cover new inserts
(new_uow, is_duplicate) = self.insert_and_publish_uow(job_record, 0, int(uow.end_id) + 1)
self.update_job(job_record, new_uow, target_state) # depends on [control=['if'], data=[]]
uow = self.uow_dao.get_one(job_record.related_unit_of_work)
try:
target_state = self._compute_next_job_state(job_record)
_process_state(target_state, uow) # depends on [control=['try'], data=[]]
except ValueError:
# do no processing for the future timeperiods
pass # depends on [control=['except'], data=[]] |
def newTextReaderFilename(URI):
    """Build an xmlTextReader that reads from the resource located at @URI.

    Raises treeError when the underlying native reader cannot be created.
    """
    reader = libxml2mod.xmlNewTextReaderFilename(URI)
    if reader is None:
        raise treeError('xmlNewTextReaderFilename() failed')
    return xmlTextReader(_obj=reader)
constant[Create an xmlTextReader structure fed with the resource at
@URI ]
variable[ret] assign[=] call[name[libxml2mod].xmlNewTextReaderFilename, parameter[name[URI]]]
if compare[name[ret] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1fa7b50>
return[call[name[xmlTextReader], parameter[]]] | keyword[def] identifier[newTextReaderFilename] ( identifier[URI] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlNewTextReaderFilename] ( identifier[URI] )
keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] )
keyword[return] identifier[xmlTextReader] ( identifier[_obj] = identifier[ret] ) | def newTextReaderFilename(URI):
"""Create an xmlTextReader structure fed with the resource at
@URI """
ret = libxml2mod.xmlNewTextReaderFilename(URI)
if ret is None:
raise treeError('xmlNewTextReaderFilename() failed') # depends on [control=['if'], data=[]]
return xmlTextReader(_obj=ret) |
def update_config(self, config):
    """Push a new configuration for this group to the server.

    @param config: Dictionary with configuration to update.
    @return: Dictionary with updated configuration.
    """
    response = self._get_resource_root().put(
        self._path() + '/config',
        data=config_to_json(config))
    return json_to_config(response)
constant[
Update the group's configuration.
@param config: Dictionary with configuration to update.
@return: Dictionary with updated configuration.
]
variable[path] assign[=] binary_operation[call[name[self]._path, parameter[]] + constant[/config]]
variable[resp] assign[=] call[call[name[self]._get_resource_root, parameter[]].put, parameter[name[path]]]
return[call[name[json_to_config], parameter[name[resp]]]] | keyword[def] identifier[update_config] ( identifier[self] , identifier[config] ):
literal[string]
identifier[path] = identifier[self] . identifier[_path] ()+ literal[string]
identifier[resp] = identifier[self] . identifier[_get_resource_root] (). identifier[put] ( identifier[path] , identifier[data] = identifier[config_to_json] ( identifier[config] ))
keyword[return] identifier[json_to_config] ( identifier[resp] ) | def update_config(self, config):
"""
Update the group's configuration.
@param config: Dictionary with configuration to update.
@return: Dictionary with updated configuration.
"""
path = self._path() + '/config'
resp = self._get_resource_root().put(path, data=config_to_json(config))
return json_to_config(resp) |
def resize_image_to_fit(image, dest_w, dest_h):
    """
    Resize the image so it fits entirely inside the destination rectangle,
    preserving its aspect ratio. The result may be smaller than the target
    in one dimension (no padding or cropping is performed).
    :param image: PIL.Image
    :param dest_w: Target width
    :param dest_h: Target height
    :return: Scaled image
    """
    target_w = float(dest_w)
    target_h = float(dest_h)
    source_w = float(image.size[0])
    source_h = float(image.size[1])
    # Compare aspect ratios to decide which axis limits the scale factor.
    if source_w / source_h < target_w / target_h:
        # Source is relatively taller than the target: height is the
        # limiting dimension, width will come out <= target_w.
        factor = target_h / source_h
        new_w = source_w * factor
        new_h = target_h
    else:
        # Source is relatively wider than the target: width is the
        # limiting dimension, height will come out <= target_h.
        factor = target_w / source_w
        new_w = target_w
        new_h = source_h * factor
    # NOTE(review): PIL.Image.ANTIALIAS is deprecated since Pillow 9.1 and
    # removed in Pillow 10; the equivalent modern constant is
    # PIL.Image.Resampling.LANCZOS — confirm the project's Pillow version.
    return image.resize((int(new_w), int(new_h)), PIL.Image.ANTIALIAS)
constant[
Resize the image to fit inside dest rectangle. Resultant image may be smaller than target
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:return: Scaled image
]
variable[dest_w] assign[=] call[name[float], parameter[name[dest_w]]]
variable[dest_h] assign[=] call[name[float], parameter[name[dest_h]]]
variable[dest_ratio] assign[=] binary_operation[name[dest_w] / name[dest_h]]
variable[src_w] assign[=] call[name[float], parameter[call[name[image].size][constant[0]]]]
variable[src_h] assign[=] call[name[float], parameter[call[name[image].size][constant[1]]]]
variable[src_ratio] assign[=] binary_operation[name[src_w] / name[src_h]]
if compare[name[src_ratio] less[<] name[dest_ratio]] begin[:]
variable[scale] assign[=] binary_operation[name[dest_h] / name[src_h]]
variable[scaled_h] assign[=] name[dest_h]
variable[scaled_w] assign[=] binary_operation[name[src_w] * name[scale]]
variable[scaled_image] assign[=] call[name[image].resize, parameter[tuple[[<ast.Call object at 0x7da204622380>, <ast.Call object at 0x7da204620280>]], name[PIL].Image.ANTIALIAS]]
return[name[scaled_image]] | keyword[def] identifier[resize_image_to_fit] ( identifier[image] , identifier[dest_w] , identifier[dest_h] ):
literal[string]
identifier[dest_w] = identifier[float] ( identifier[dest_w] )
identifier[dest_h] = identifier[float] ( identifier[dest_h] )
identifier[dest_ratio] = identifier[dest_w] / identifier[dest_h]
identifier[src_w] = identifier[float] ( identifier[image] . identifier[size] [ literal[int] ])
identifier[src_h] = identifier[float] ( identifier[image] . identifier[size] [ literal[int] ])
identifier[src_ratio] = identifier[src_w] / identifier[src_h]
keyword[if] identifier[src_ratio] < identifier[dest_ratio] :
identifier[scale] = identifier[dest_h] / identifier[src_h]
identifier[scaled_h] = identifier[dest_h]
identifier[scaled_w] = identifier[src_w] * identifier[scale]
keyword[else] :
identifier[scale] = identifier[dest_w] / identifier[src_w]
identifier[scaled_w] = identifier[dest_w]
identifier[scaled_h] = identifier[src_h] * identifier[scale]
identifier[scaled_image] = identifier[image] . identifier[resize] (( identifier[int] ( identifier[scaled_w] ), identifier[int] ( identifier[scaled_h] )), identifier[PIL] . identifier[Image] . identifier[ANTIALIAS] )
keyword[return] identifier[scaled_image] | def resize_image_to_fit(image, dest_w, dest_h):
"""
Resize the image to fit inside dest rectangle. Resultant image may be smaller than target
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:return: Scaled image
"""
dest_w = float(dest_w)
dest_h = float(dest_h)
dest_ratio = dest_w / dest_h
# Calculate the apect ratio of the image
src_w = float(image.size[0])
src_h = float(image.size[1])
src_ratio = src_w / src_h
if src_ratio < dest_ratio:
# Image is tall and thin - we need to scale to the right height and then pad
scale = dest_h / src_h
scaled_h = dest_h
scaled_w = src_w * scale # depends on [control=['if'], data=[]]
else:
# Image is short and wide - we need to scale to the right height and then crop
scale = dest_w / src_w
scaled_w = dest_w
scaled_h = src_h * scale
scaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)
return scaled_image |
def make_coord_dict(coord):
    """helper function to make a dict from a coordinate for logging"""
    # Normalize each component with int_if_exact so whole-number floats
    # log as plain ints.
    return {
        'z': int_if_exact(coord.zoom),
        'x': int_if_exact(coord.column),
        'y': int_if_exact(coord.row),
    }
constant[helper function to make a dict from a coordinate for logging]
return[call[name[dict], parameter[]]] | keyword[def] identifier[make_coord_dict] ( identifier[coord] ):
literal[string]
keyword[return] identifier[dict] (
identifier[z] = identifier[int_if_exact] ( identifier[coord] . identifier[zoom] ),
identifier[x] = identifier[int_if_exact] ( identifier[coord] . identifier[column] ),
identifier[y] = identifier[int_if_exact] ( identifier[coord] . identifier[row] ),
) | def make_coord_dict(coord):
"""helper function to make a dict from a coordinate for logging"""
return dict(z=int_if_exact(coord.zoom), x=int_if_exact(coord.column), y=int_if_exact(coord.row)) |
def register(self, pattern, view=None):
    '''Allow decorator-style construction of URL pattern lists.'''
    if view is not None:
        # Direct call: record the (pattern, view) pair immediately and
        # hand the view back so it can be used as a decorator return.
        self.patterns.append(self._make_url((pattern, view)))
        return view
    # No view yet: behave as a decorator factory bound to this pattern.
    return partial(self.register, pattern)
constant[Allow decorator-style construction of URL pattern lists.]
if compare[name[view] is constant[None]] begin[:]
return[call[name[partial], parameter[name[self].register, name[pattern]]]]
call[name[self].patterns.append, parameter[call[name[self]._make_url, parameter[tuple[[<ast.Name object at 0x7da2054a5690>, <ast.Name object at 0x7da2054a5d80>]]]]]]
return[name[view]] | keyword[def] identifier[register] ( identifier[self] , identifier[pattern] , identifier[view] = keyword[None] ):
literal[string]
keyword[if] identifier[view] keyword[is] keyword[None] :
keyword[return] identifier[partial] ( identifier[self] . identifier[register] , identifier[pattern] )
identifier[self] . identifier[patterns] . identifier[append] ( identifier[self] . identifier[_make_url] (( identifier[pattern] , identifier[view] )))
keyword[return] identifier[view] | def register(self, pattern, view=None):
"""Allow decorator-style construction of URL pattern lists."""
if view is None:
return partial(self.register, pattern) # depends on [control=['if'], data=[]]
self.patterns.append(self._make_url((pattern, view)))
return view |
def get_rich_menu_list(self, timeout=None):
    """Call get rich menu list API.
    https://developers.line.me/en/docs/messaging-api/reference/#get-rich-menu-list
    :param timeout: (optional) How long to wait for the server
        to send data before giving up, as a float,
        or a (connect timeout, read timeout) float tuple.
        Default is self.http_client.timeout
    :type timeout: float | tuple(float, float)
    :rtype: list(T <= :py:class:`linebot.models.reponse.RichMenuResponse`)
    :return: list[RichMenuResponse] instance
    """
    response = self._get('/v2/bot/richmenu/list', timeout=timeout)
    # Convert every raw rich-menu dict in the payload into a response model.
    return [
        RichMenuResponse.new_from_json_dict(item)
        for item in response.json['richmenus']
    ]
constant[Call get rich menu list API.
https://developers.line.me/en/docs/messaging-api/reference/#get-rich-menu-list
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: list(T <= :py:class:`linebot.models.reponse.RichMenuResponse`)
:return: list[RichMenuResponse] instance
]
variable[response] assign[=] call[name[self]._get, parameter[constant[/v2/bot/richmenu/list]]]
variable[result] assign[=] list[[]]
for taget[name[richmenu]] in starred[call[name[response].json][constant[richmenus]]] begin[:]
call[name[result].append, parameter[call[name[RichMenuResponse].new_from_json_dict, parameter[name[richmenu]]]]]
return[name[result]] | keyword[def] identifier[get_rich_menu_list] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[response] = identifier[self] . identifier[_get] (
literal[string] ,
identifier[timeout] = identifier[timeout]
)
identifier[result] =[]
keyword[for] identifier[richmenu] keyword[in] identifier[response] . identifier[json] [ literal[string] ]:
identifier[result] . identifier[append] ( identifier[RichMenuResponse] . identifier[new_from_json_dict] ( identifier[richmenu] ))
keyword[return] identifier[result] | def get_rich_menu_list(self, timeout=None):
"""Call get rich menu list API.
https://developers.line.me/en/docs/messaging-api/reference/#get-rich-menu-list
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: list(T <= :py:class:`linebot.models.reponse.RichMenuResponse`)
:return: list[RichMenuResponse] instance
"""
response = self._get('/v2/bot/richmenu/list', timeout=timeout)
result = []
for richmenu in response.json['richmenus']:
result.append(RichMenuResponse.new_from_json_dict(richmenu)) # depends on [control=['for'], data=['richmenu']]
return result |
def generate_account(self, services, resource_types, permission, expiry, start=None,
                     ip=None, protocol=None):
    '''
    Generates a shared access signature (SAS) for the account.
    Use the returned signature with the sas_token parameter of the service
    or to create a new account object.
    :param Services services:
        The services accessible with the account SAS; values may be
        combined to grant access to more than one service.
    :param ResourceTypes resource_types:
        The resource types accessible with the account SAS; values may be
        combined to grant access to more than one resource type.
    :param AccountPermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Values may be combined to grant more than one permission.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Azure will always convert values to UTC. If a date is passed in
        without timezone info, it is assumed to be UTC.
    :type expiry: datetime or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, start time for this call is assumed to be the time when
        the storage service receives the request. Azure will always convert
        values to UTC. If a date is passed in without timezone info, it is
        assumed to be UTC.
    :type start: datetime or str
    :param str ip:
        An IP address or range of IP addresses from which to accept
        requests (e.g. sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70).
        Requests originating outside the given address(es) are not
        authenticated.
    :param str protocol:
        The protocol permitted for a request made with the SAS. The
        default value is https,http. See
        :class:`~azure.storage.common.models.Protocol` for possible values.
    '''
    # Build the token piece by piece: common fields first, then the
    # account-level scope, and finally the HMAC signature over the whole.
    helper = _SharedAccessHelper()
    helper.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
    helper.add_account(services, resource_types)
    helper.add_account_signature(self.account_name, self.account_key)
    return helper.get_token()
constant[
Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
]
variable[sas] assign[=] call[name[_SharedAccessHelper], parameter[]]
call[name[sas].add_base, parameter[name[permission], name[expiry], name[start], name[ip], name[protocol], name[self].x_ms_version]]
call[name[sas].add_account, parameter[name[services], name[resource_types]]]
call[name[sas].add_account_signature, parameter[name[self].account_name, name[self].account_key]]
return[call[name[sas].get_token, parameter[]]] | keyword[def] identifier[generate_account] ( identifier[self] , identifier[services] , identifier[resource_types] , identifier[permission] , identifier[expiry] , identifier[start] = keyword[None] ,
identifier[ip] = keyword[None] , identifier[protocol] = keyword[None] ):
literal[string]
identifier[sas] = identifier[_SharedAccessHelper] ()
identifier[sas] . identifier[add_base] ( identifier[permission] , identifier[expiry] , identifier[start] , identifier[ip] , identifier[protocol] , identifier[self] . identifier[x_ms_version] )
identifier[sas] . identifier[add_account] ( identifier[services] , identifier[resource_types] )
identifier[sas] . identifier[add_account_signature] ( identifier[self] . identifier[account_name] , identifier[self] . identifier[account_key] )
keyword[return] identifier[sas] . identifier[get_token] () | def generate_account(self, services, resource_types, permission, expiry, start=None, ip=None, protocol=None):
"""
Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
"""
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_account(services, resource_types)
sas.add_account_signature(self.account_name, self.account_key)
return sas.get_token() |
def _add_ubridge_connection(self, nio, port_number):
    """
    Creates a connection in uBridge.
    Creates a uniquely named bridge, attaches the UDP NIO on one side and
    the cloud port's backing interface (ethernet/tap/udp) on the other,
    then starts capture (if requested) and the bridge itself.
    :param nio: NIO instance (must be a NIOUDP)
    :param port_number: port number
    :raises NodeError: if the port does not exist, the NIO is not UDP, or
        the mapped ethernet interface cannot be found on this system
    """
    # Look up the mapping entry for the requested port number.
    port_info = None
    for port in self._ports_mapping:
        if port["port_number"] == port_number:
            port_info = port
            break
    if not port_info:
        raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name,
                                                                                    port_number=port_number))
    # Bridge names are made unique per (node id, port number).
    bridge_name = "{}-{}".format(self._id, port_number)
    yield from self._ubridge_send("bridge create {name}".format(name=bridge_name))
    if not isinstance(nio, NIOUDP):
        raise NodeError("Source NIO is not UDP")
    # First leg of the bridge: the UDP tunnel coming from the peer node.
    yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
                                                                                            lport=nio.lport,
                                                                                            rhost=nio.rhost,
                                                                                            rport=nio.rport))
    yield from self._ubridge_apply_filters(bridge_name, nio.filters)
    # Second leg: attach whatever backs this cloud port.
    if port_info["type"] in ("ethernet", "tap"):
        if sys.platform.startswith("win"):
            # On Windows both ethernet and tap go through the same helper.
            yield from self._add_ubridge_ethernet_connection(bridge_name, port_info["interface"])
        else:
            if port_info["type"] == "ethernet":
                # Validate the interface exists before handing it to uBridge.
                network_interfaces = [interface["name"] for interface in self._interfaces()]
                if not port_info["interface"] in network_interfaces:
                    raise NodeError("Interface '{}' could not be found on this system".format(port_info["interface"]))
                # Platform-specific attachment (raw socket vs. BPF, etc.).
                if sys.platform.startswith("linux"):
                    yield from self._add_linux_ethernet(port_info, bridge_name)
                elif sys.platform.startswith("darwin"):
                    yield from self._add_osx_ethernet(port_info, bridge_name)
                else:
                    yield from self._add_windows_ethernet(port_info, bridge_name)
            elif port_info["type"] == "tap":
                yield from self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))
    elif port_info["type"] == "udp":
        # Port is itself a UDP tunnel endpoint described by the mapping.
        yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
                                                                                                 lport=port_info["lport"],
                                                                                                 rhost=port_info["rhost"],
                                                                                                 rport=port_info["rport"]))
    # Resume packet capture if it was already enabled on this NIO.
    if nio.capturing:
        yield from self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name,
                                                                                         pcap_file=nio.pcap_output_file))
    # Only start forwarding once both legs (and capture) are configured.
    yield from self._ubridge_send('bridge start {name}'.format(name=bridge_name))
constant[
Creates a connection in uBridge.
:param nio: NIO instance
:param port_number: port number
]
variable[port_info] assign[=] constant[None]
for taget[name[port]] in starred[name[self]._ports_mapping] begin[:]
if compare[call[name[port]][constant[port_number]] equal[==] name[port_number]] begin[:]
variable[port_info] assign[=] name[port]
break
if <ast.UnaryOp object at 0x7da18ede74c0> begin[:]
<ast.Raise object at 0x7da18ede4370>
variable[bridge_name] assign[=] call[constant[{}-{}].format, parameter[name[self]._id, name[port_number]]]
<ast.YieldFrom object at 0x7da18ede5b40>
if <ast.UnaryOp object at 0x7da18ede4550> begin[:]
<ast.Raise object at 0x7da18ede6a40>
<ast.YieldFrom object at 0x7da18ede4e50>
<ast.YieldFrom object at 0x7da18ede4520>
if compare[call[name[port_info]][constant[type]] in tuple[[<ast.Constant object at 0x7da18ede70d0>, <ast.Constant object at 0x7da18ede7e80>]]] begin[:]
if call[name[sys].platform.startswith, parameter[constant[win]]] begin[:]
<ast.YieldFrom object at 0x7da18ede7280>
if name[nio].capturing begin[:]
<ast.YieldFrom object at 0x7da18f721a80>
<ast.YieldFrom object at 0x7da18f722b90> | keyword[def] identifier[_add_ubridge_connection] ( identifier[self] , identifier[nio] , identifier[port_number] ):
literal[string]
identifier[port_info] = keyword[None]
keyword[for] identifier[port] keyword[in] identifier[self] . identifier[_ports_mapping] :
keyword[if] identifier[port] [ literal[string] ]== identifier[port_number] :
identifier[port_info] = identifier[port]
keyword[break]
keyword[if] keyword[not] identifier[port_info] :
keyword[raise] identifier[NodeError] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] ,
identifier[port_number] = identifier[port_number] ))
identifier[bridge_name] = literal[string] . identifier[format] ( identifier[self] . identifier[_id] , identifier[port_number] )
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[nio] , identifier[NIOUDP] ):
keyword[raise] identifier[NodeError] ( literal[string] )
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] ,
identifier[lport] = identifier[nio] . identifier[lport] ,
identifier[rhost] = identifier[nio] . identifier[rhost] ,
identifier[rport] = identifier[nio] . identifier[rport] ))
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_apply_filters] ( identifier[bridge_name] , identifier[nio] . identifier[filters] )
keyword[if] identifier[port_info] [ literal[string] ] keyword[in] ( literal[string] , literal[string] ):
keyword[if] identifier[sys] . identifier[platform] . identifier[startswith] ( literal[string] ):
keyword[yield] keyword[from] identifier[self] . identifier[_add_ubridge_ethernet_connection] ( identifier[bridge_name] , identifier[port_info] [ literal[string] ])
keyword[else] :
keyword[if] identifier[port_info] [ literal[string] ]== literal[string] :
identifier[network_interfaces] =[ identifier[interface] [ literal[string] ] keyword[for] identifier[interface] keyword[in] identifier[self] . identifier[_interfaces] ()]
keyword[if] keyword[not] identifier[port_info] [ literal[string] ] keyword[in] identifier[network_interfaces] :
keyword[raise] identifier[NodeError] ( literal[string] . identifier[format] ( identifier[port_info] [ literal[string] ]))
keyword[if] identifier[sys] . identifier[platform] . identifier[startswith] ( literal[string] ):
keyword[yield] keyword[from] identifier[self] . identifier[_add_linux_ethernet] ( identifier[port_info] , identifier[bridge_name] )
keyword[elif] identifier[sys] . identifier[platform] . identifier[startswith] ( literal[string] ):
keyword[yield] keyword[from] identifier[self] . identifier[_add_osx_ethernet] ( identifier[port_info] , identifier[bridge_name] )
keyword[else] :
keyword[yield] keyword[from] identifier[self] . identifier[_add_windows_ethernet] ( identifier[port_info] , identifier[bridge_name] )
keyword[elif] identifier[port_info] [ literal[string] ]== literal[string] :
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] , identifier[interface] = identifier[port_info] [ literal[string] ]))
keyword[elif] identifier[port_info] [ literal[string] ]== literal[string] :
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] ,
identifier[lport] = identifier[port_info] [ literal[string] ],
identifier[rhost] = identifier[port_info] [ literal[string] ],
identifier[rport] = identifier[port_info] [ literal[string] ]))
keyword[if] identifier[nio] . identifier[capturing] :
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] ,
identifier[pcap_file] = identifier[nio] . identifier[pcap_output_file] ))
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] )) | def _add_ubridge_connection(self, nio, port_number):
"""
Creates a connection in uBridge.
:param nio: NIO instance
:param port_number: port number
"""
port_info = None
for port in self._ports_mapping:
if port['port_number'] == port_number:
port_info = port
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['port']]
if not port_info:
raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name, port_number=port_number)) # depends on [control=['if'], data=[]]
bridge_name = '{}-{}'.format(self._id, port_number)
yield from self._ubridge_send('bridge create {name}'.format(name=bridge_name))
if not isinstance(nio, NIOUDP):
raise NodeError('Source NIO is not UDP') # depends on [control=['if'], data=[]]
yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=nio.lport, rhost=nio.rhost, rport=nio.rport))
yield from self._ubridge_apply_filters(bridge_name, nio.filters)
if port_info['type'] in ('ethernet', 'tap'):
if sys.platform.startswith('win'):
yield from self._add_ubridge_ethernet_connection(bridge_name, port_info['interface']) # depends on [control=['if'], data=[]]
elif port_info['type'] == 'ethernet':
network_interfaces = [interface['name'] for interface in self._interfaces()]
if not port_info['interface'] in network_interfaces:
raise NodeError("Interface '{}' could not be found on this system".format(port_info['interface'])) # depends on [control=['if'], data=[]]
if sys.platform.startswith('linux'):
yield from self._add_linux_ethernet(port_info, bridge_name) # depends on [control=['if'], data=[]]
elif sys.platform.startswith('darwin'):
yield from self._add_osx_ethernet(port_info, bridge_name) # depends on [control=['if'], data=[]]
else:
yield from self._add_windows_ethernet(port_info, bridge_name) # depends on [control=['if'], data=[]]
elif port_info['type'] == 'tap':
yield from self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(name=bridge_name, interface=port_info['interface'])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif port_info['type'] == 'udp':
yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=port_info['lport'], rhost=port_info['rhost'], rport=port_info['rport'])) # depends on [control=['if'], data=[]]
if nio.capturing:
yield from self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name, pcap_file=nio.pcap_output_file)) # depends on [control=['if'], data=[]]
yield from self._ubridge_send('bridge start {name}'.format(name=bridge_name)) |
def handle_keypress(self, k):
    """Last resort for keypresses.
    Handles keys no widget consumed: "esc" saves and exits, "delete"
    joins with the next line, "backspace" joins with the previous line,
    and "enter" splits the current line.
    :param k: urwid key name (e.g. "esc", "enter", "backspace")
    """
    if k == "esc":
        # Persist the buffer before tearing down the urwid main loop.
        self.save_file()
        raise urwid.ExitMainLoop()
    elif k == "delete":
        # delete at end of line
        self.walker.combine_focus_with_next()
    elif k == "backspace":
        # backspace at beginning of line
        self.walker.combine_focus_with_prev()
    elif k == "enter":
        # start new line
        self.walker.split_focus()
        # move the cursor to the new line and reset pref_col
        # NOTE(review): `size` is not defined anywhere in this scope, so
        # reaching this branch raises NameError. Confirm the intended
        # source of the screen size (e.g. a parameter, or the main loop's
        # screen size) and thread it through.
        self.view.keypress(size, "down")
        self.view.keypress(size, "home")
constant[Last resort for keypresses.]
if compare[name[k] equal[==] constant[esc]] begin[:]
call[name[self].save_file, parameter[]]
<ast.Raise object at 0x7da18f09c310> | keyword[def] identifier[handle_keypress] ( identifier[self] , identifier[k] ):
literal[string]
keyword[if] identifier[k] == literal[string] :
identifier[self] . identifier[save_file] ()
keyword[raise] identifier[urwid] . identifier[ExitMainLoop] ()
keyword[elif] identifier[k] == literal[string] :
identifier[self] . identifier[walker] . identifier[combine_focus_with_next] ()
keyword[elif] identifier[k] == literal[string] :
identifier[self] . identifier[walker] . identifier[combine_focus_with_prev] ()
keyword[elif] identifier[k] == literal[string] :
identifier[self] . identifier[walker] . identifier[split_focus] ()
identifier[self] . identifier[view] . identifier[keypress] ( identifier[size] , literal[string] )
identifier[self] . identifier[view] . identifier[keypress] ( identifier[size] , literal[string] ) | def handle_keypress(self, k):
"""Last resort for keypresses."""
if k == 'esc':
self.save_file()
raise urwid.ExitMainLoop() # depends on [control=['if'], data=[]]
elif k == 'delete':
# delete at end of line
self.walker.combine_focus_with_next() # depends on [control=['if'], data=[]]
elif k == 'backspace':
# backspace at beginning of line
self.walker.combine_focus_with_prev() # depends on [control=['if'], data=[]]
elif k == 'enter':
# start new line
self.walker.split_focus()
# move the cursor to the new line and reset pref_col
self.view.keypress(size, 'down')
self.view.keypress(size, 'home') # depends on [control=['if'], data=[]] |
def _check_list_props(self, inst: "InstanceNode") -> None:
"""Check uniqueness of keys and "unique" properties, if applicable."""
if self.keys:
self._check_keys(inst)
for u in self.unique:
self._check_unique(u, inst) | def function[_check_list_props, parameter[self, inst]]:
constant[Check uniqueness of keys and "unique" properties, if applicable.]
if name[self].keys begin[:]
call[name[self]._check_keys, parameter[name[inst]]]
for taget[name[u]] in starred[name[self].unique] begin[:]
call[name[self]._check_unique, parameter[name[u], name[inst]]] | keyword[def] identifier[_check_list_props] ( identifier[self] , identifier[inst] : literal[string] )-> keyword[None] :
literal[string]
keyword[if] identifier[self] . identifier[keys] :
identifier[self] . identifier[_check_keys] ( identifier[inst] )
keyword[for] identifier[u] keyword[in] identifier[self] . identifier[unique] :
identifier[self] . identifier[_check_unique] ( identifier[u] , identifier[inst] ) | def _check_list_props(self, inst: 'InstanceNode') -> None:
"""Check uniqueness of keys and "unique" properties, if applicable."""
if self.keys:
self._check_keys(inst) # depends on [control=['if'], data=[]]
for u in self.unique:
self._check_unique(u, inst) # depends on [control=['for'], data=['u']] |
def warning(*args):
    """Display warning message via stderr or GUI."""
    # In an interactive terminal session, print to stderr; otherwise
    # fall back to the GUI notification helper.
    if sys.stdin.isatty():
        print('WARNING:', *args, file=sys.stderr)
    else:
        notify_warning(*args) | def function[warning, parameter[]]:
constant[Display warning message via stderr or GUI.]
if call[name[sys].stdin.isatty, parameter[]] begin[:]
call[name[print], parameter[constant[WARNING:], <ast.Starred object at 0x7da1b1342260>]] | keyword[def] identifier[warning] (* identifier[args] ):
literal[string]
keyword[if] identifier[sys] . identifier[stdin] . identifier[isatty] ():
identifier[print] ( literal[string] ,* identifier[args] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[else] :
identifier[notify_warning] (* identifier[args] ) | def warning(*args):
"""Display warning message via stderr or GUI."""
if sys.stdin.isatty():
print('WARNING:', *args, file=sys.stderr) # depends on [control=['if'], data=[]]
else:
notify_warning(*args) |
def personality(self, category: str = 'mbti') -> Union[str, int]:
        """Generate a type of personality.
        :param category: Category.
        :return: Personality type.
        :rtype: str or int
        :Example:
            ISFJ.
        """
        # The sixteen Myers-Briggs type indicator codes.
        mbtis = ('ISFJ', 'ISTJ', 'INFJ', 'INTJ',
                 'ISTP', 'ISFP', 'INFP', 'INTP',
                 'ESTP', 'ESFP', 'ENFP', 'ENTP',
                 'ESTJ', 'ESFJ', 'ENFJ', 'ENTJ')
        # The 'rheti' category yields a number instead of a letter code.
        if category.lower() == 'rheti':
            return self.random.randint(1, 10)
        return self.random.choice(mbtis) | def function[personality, parameter[self, category]]:
constant[Generate a type of personality.
:param category: Category.
:return: Personality type.
:rtype: str or int
:Example:
ISFJ.
]
variable[mbtis] assign[=] tuple[[<ast.Constant object at 0x7da18dc9b9a0>, <ast.Constant object at 0x7da18dc98eb0>, <ast.Constant object at 0x7da18dc9bd90>, <ast.Constant object at 0x7da18dc98f40>, <ast.Constant object at 0x7da18dc98490>, <ast.Constant object at 0x7da18dc98b80>, <ast.Constant object at 0x7da18dc9ad40>, <ast.Constant object at 0x7da18dc9b370>, <ast.Constant object at 0x7da18dc9a230>, <ast.Constant object at 0x7da18dc99840>, <ast.Constant object at 0x7da18dc9aa40>, <ast.Constant object at 0x7da18dc98400>, <ast.Constant object at 0x7da18dc98070>, <ast.Constant object at 0x7da18dc9a7a0>, <ast.Constant object at 0x7da18dc9ba90>, <ast.Constant object at 0x7da18dc992d0>]]
if compare[call[name[category].lower, parameter[]] equal[==] constant[rheti]] begin[:]
return[call[name[self].random.randint, parameter[constant[1], constant[10]]]]
return[call[name[self].random.choice, parameter[name[mbtis]]]] | keyword[def] identifier[personality] ( identifier[self] , identifier[category] : identifier[str] = literal[string] )-> identifier[Union] [ identifier[str] , identifier[int] ]:
literal[string]
identifier[mbtis] =( literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] )
keyword[if] identifier[category] . identifier[lower] ()== literal[string] :
keyword[return] identifier[self] . identifier[random] . identifier[randint] ( literal[int] , literal[int] )
keyword[return] identifier[self] . identifier[random] . identifier[choice] ( identifier[mbtis] ) | def personality(self, category: str='mbti') -> Union[str, int]:
"""Generate a type of personality.
:param category: Category.
:return: Personality type.
:rtype: str or int
:Example:
ISFJ.
"""
mbtis = ('ISFJ', 'ISTJ', 'INFJ', 'INTJ', 'ISTP', 'ISFP', 'INFP', 'INTP', 'ESTP', 'ESFP', 'ENFP', 'ENTP', 'ESTJ', 'ESFJ', 'ENFJ', 'ENTJ')
if category.lower() == 'rheti':
return self.random.randint(1, 10) # depends on [control=['if'], data=[]]
return self.random.choice(mbtis) |
def _find_bad_transition(self, mma, w_string):
"""
Checks for bad DFA transitions using the examined string
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
str: The prefix of the examined string that matches
"""
conj_out = mma.consume_input(w_string)
targ_out = self._membership_query(w_string)
# TODO: handle different length outputs from conjecture and target
# hypothesis.
length = min(len(conj_out), len(targ_out))
diff = [i for i in range(length)
if conj_out[i] != targ_out[i]]
if len(diff) == 0:
diff_index = len(targ_out)
else:
diff_index = diff[0]
low = 0
high = len(w_string)
while True:
i = (low + high) / 2
length = len(self._membership_query(w_string[:i]))
if length == diff_index + 1:
return w_string[:i]
elif length < diff_index + 1:
low = i + 1
else:
high = i - 1 | def function[_find_bad_transition, parameter[self, mma, w_string]]:
constant[
Checks for bad DFA transitions using the examined string
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
str: The prefix of the examined string that matches
]
variable[conj_out] assign[=] call[name[mma].consume_input, parameter[name[w_string]]]
variable[targ_out] assign[=] call[name[self]._membership_query, parameter[name[w_string]]]
variable[length] assign[=] call[name[min], parameter[call[name[len], parameter[name[conj_out]]], call[name[len], parameter[name[targ_out]]]]]
variable[diff] assign[=] <ast.ListComp object at 0x7da204567bb0>
if compare[call[name[len], parameter[name[diff]]] equal[==] constant[0]] begin[:]
variable[diff_index] assign[=] call[name[len], parameter[name[targ_out]]]
variable[low] assign[=] constant[0]
variable[high] assign[=] call[name[len], parameter[name[w_string]]]
while constant[True] begin[:]
variable[i] assign[=] binary_operation[binary_operation[name[low] + name[high]] / constant[2]]
variable[length] assign[=] call[name[len], parameter[call[name[self]._membership_query, parameter[call[name[w_string]][<ast.Slice object at 0x7da18bc732b0>]]]]]
if compare[name[length] equal[==] binary_operation[name[diff_index] + constant[1]]] begin[:]
return[call[name[w_string]][<ast.Slice object at 0x7da18bc72cb0>]] | keyword[def] identifier[_find_bad_transition] ( identifier[self] , identifier[mma] , identifier[w_string] ):
literal[string]
identifier[conj_out] = identifier[mma] . identifier[consume_input] ( identifier[w_string] )
identifier[targ_out] = identifier[self] . identifier[_membership_query] ( identifier[w_string] )
identifier[length] = identifier[min] ( identifier[len] ( identifier[conj_out] ), identifier[len] ( identifier[targ_out] ))
identifier[diff] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[length] )
keyword[if] identifier[conj_out] [ identifier[i] ]!= identifier[targ_out] [ identifier[i] ]]
keyword[if] identifier[len] ( identifier[diff] )== literal[int] :
identifier[diff_index] = identifier[len] ( identifier[targ_out] )
keyword[else] :
identifier[diff_index] = identifier[diff] [ literal[int] ]
identifier[low] = literal[int]
identifier[high] = identifier[len] ( identifier[w_string] )
keyword[while] keyword[True] :
identifier[i] =( identifier[low] + identifier[high] )/ literal[int]
identifier[length] = identifier[len] ( identifier[self] . identifier[_membership_query] ( identifier[w_string] [: identifier[i] ]))
keyword[if] identifier[length] == identifier[diff_index] + literal[int] :
keyword[return] identifier[w_string] [: identifier[i] ]
keyword[elif] identifier[length] < identifier[diff_index] + literal[int] :
identifier[low] = identifier[i] + literal[int]
keyword[else] :
identifier[high] = identifier[i] - literal[int] | def _find_bad_transition(self, mma, w_string):
"""
Checks for bad DFA transitions using the examined string
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
str: The prefix of the examined string that matches
"""
conj_out = mma.consume_input(w_string)
targ_out = self._membership_query(w_string)
# TODO: handle different length outputs from conjecture and target
# hypothesis.
length = min(len(conj_out), len(targ_out))
diff = [i for i in range(length) if conj_out[i] != targ_out[i]]
if len(diff) == 0:
diff_index = len(targ_out) # depends on [control=['if'], data=[]]
else:
diff_index = diff[0]
low = 0
high = len(w_string)
while True:
i = (low + high) / 2
length = len(self._membership_query(w_string[:i]))
if length == diff_index + 1:
return w_string[:i] # depends on [control=['if'], data=[]]
elif length < diff_index + 1:
low = i + 1 # depends on [control=['if'], data=[]]
else:
high = i - 1 # depends on [control=['while'], data=[]] |
async def tuple(self, elem=None, elem_type=None, params=None, obj=None):
        """
        Loads/dumps tuple
        :return:
        """
        # Types that know how to (de)serialize themselves take precedence.
        if hasattr(elem_type, 'kv_serialize'):
            container = elem_type() if elem is None else elem
            return await container.kv_serialize(self, elem=elem, elem_type=elem_type, params=params, obj=obj)
        # TODO: if modeled return as 0=>, 1=>, ...
        # self.writing selects the direction: dump on write, load on read.
        if self.writing:
            return await self.dump_tuple(elem, elem_type, params, obj=obj)
        else:
            return await self.load_tuple(elem_type, params=params, elem=elem, obj=obj) | <ast.AsyncFunctionDef object at 0x7da1b253b1c0>
literal[string]
keyword[if] identifier[hasattr] ( identifier[elem_type] , literal[string] ):
identifier[container] = identifier[elem_type] () keyword[if] identifier[elem] keyword[is] keyword[None] keyword[else] identifier[elem]
keyword[return] keyword[await] identifier[container] . identifier[kv_serialize] ( identifier[self] , identifier[elem] = identifier[elem] , identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] , identifier[obj] = identifier[obj] )
keyword[if] identifier[self] . identifier[writing] :
keyword[return] keyword[await] identifier[self] . identifier[dump_tuple] ( identifier[elem] , identifier[elem_type] , identifier[params] , identifier[obj] = identifier[obj] )
keyword[else] :
keyword[return] keyword[await] identifier[self] . identifier[load_tuple] ( identifier[elem_type] , identifier[params] = identifier[params] , identifier[elem] = identifier[elem] , identifier[obj] = identifier[obj] ) | async def tuple(self, elem=None, elem_type=None, params=None, obj=None):
"""
Loads/dumps tuple
:return:
"""
if hasattr(elem_type, 'kv_serialize'):
container = elem_type() if elem is None else elem
return await container.kv_serialize(self, elem=elem, elem_type=elem_type, params=params, obj=obj) # depends on [control=['if'], data=[]]
# TODO: if modeled return as 0=>, 1=>, ...
if self.writing:
return await self.dump_tuple(elem, elem_type, params, obj=obj) # depends on [control=['if'], data=[]]
else:
return await self.load_tuple(elem_type, params=params, elem=elem, obj=obj) |
def rotate_direction(hexgrid_type, direction, ccw=True):
    """
    Takes a direction string associated with a type of hexgrid element, and rotates it one tick in the given direction.
    :param direction: string, eg 'NW', 'N', 'SE'
    :param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
    :return: the rotated direction string, eg 'SW', 'NW', 'S'
    """
    if hexgrid_type in [TILE, EDGE]:
        # Cyclic order with the first element repeated at the end so that
        # index(direction) + 1 never falls off the list (wrap-around).
        directions = ['NW', 'W', 'SW', 'SE', 'E', 'NE', 'NW'] if ccw \
            else ['NW', 'NE', 'E', 'SE', 'SW', 'W', 'NW']
        return directions[directions.index(direction) + 1]
    elif hexgrid_type in [NODE]:
        # Nodes use a different six-point compass than tiles/edges.
        directions = ['N', 'NW', 'SW', 'S', 'SE', 'NE', 'N'] if ccw \
            else ['N', 'NE', 'SE', 'S', 'SW', 'NW', 'N']
        return directions[directions.index(direction) + 1]
    else:
        raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type)) | def function[rotate_direction, parameter[hexgrid_type, direction, ccw]]:
constant[
Takes a direction string associated with a type of hexgrid element, and rotates it one tick in the given direction.
:param direction: string, eg 'NW', 'N', 'SE'
:param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
:return: the rotated direction string, eg 'SW', 'NW', 'S'
]
if compare[name[hexgrid_type] in list[[<ast.Name object at 0x7da20e956c20>, <ast.Name object at 0x7da20e956230>]]] begin[:]
variable[directions] assign[=] <ast.IfExp object at 0x7da20e955c30>
return[call[name[directions]][binary_operation[call[name[directions].index, parameter[name[direction]]] + constant[1]]]] | keyword[def] identifier[rotate_direction] ( identifier[hexgrid_type] , identifier[direction] , identifier[ccw] = keyword[True] ):
literal[string]
keyword[if] identifier[hexgrid_type] keyword[in] [ identifier[TILE] , identifier[EDGE] ]:
identifier[directions] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[ccw] keyword[else] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[return] identifier[directions] [ identifier[directions] . identifier[index] ( identifier[direction] )+ literal[int] ]
keyword[elif] identifier[hexgrid_type] keyword[in] [ identifier[NODE] ]:
identifier[directions] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[ccw] keyword[else] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[return] identifier[directions] [ identifier[directions] . identifier[index] ( identifier[direction] )+ literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[hexgrid_type] )) | def rotate_direction(hexgrid_type, direction, ccw=True):
"""
Takes a direction string associated with a type of hexgrid element, and rotates it one tick in the given direction.
:param direction: string, eg 'NW', 'N', 'SE'
:param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
:return: the rotated direction string, eg 'SW', 'NW', 'S'
"""
if hexgrid_type in [TILE, EDGE]:
directions = ['NW', 'W', 'SW', 'SE', 'E', 'NE', 'NW'] if ccw else ['NW', 'NE', 'E', 'SE', 'SW', 'W', 'NW']
return directions[directions.index(direction) + 1] # depends on [control=['if'], data=[]]
elif hexgrid_type in [NODE]:
directions = ['N', 'NW', 'SW', 'S', 'SE', 'NE', 'N'] if ccw else ['N', 'NE', 'SE', 'S', 'SW', 'NW', 'N']
return directions[directions.index(direction) + 1] # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type)) |
def gameloop(self):
        """
        A game loop that circles through the methods.
        """
        # Run until the user interrupts (Ctrl-C): events -> update -> draw.
        try:
            while True:
                self.handle_events()
                self.update()
                self.render()
        except KeyboardInterrupt:
            pass | def function[gameloop, parameter[self]]:
constant[
A game loop that circles through the methods.
]
<ast.Try object at 0x7da2054a4190> | keyword[def] identifier[gameloop] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[while] keyword[True] :
identifier[self] . identifier[handle_events] ()
identifier[self] . identifier[update] ()
identifier[self] . identifier[render] ()
keyword[except] identifier[KeyboardInterrupt] :
keyword[pass] | def gameloop(self):
"""
A game loop that circles through the methods.
"""
try:
while True:
self.handle_events()
self.update()
self.render() # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
pass # depends on [control=['except'], data=[]] |
def match_examples(self, parse_fn, examples):
        """ Given a parser instance and a dictionary mapping some label with
        some malformed syntax examples, it'll return the label for the
        example that bests matches the current error.
        """
        assert self.state is not None, "Not supported for this exception"
        candidate = None
        # A malformed example that fails in the same parser state as this
        # error is a candidate; an identical offending token is an exact
        # match and is returned immediately.
        for label, example in examples.items():
            assert not isinstance(example, STRING_TYPE)
            for malformed in example:
                try:
                    parse_fn(malformed)
                except UnexpectedInput as ut:
                    if ut.state == self.state:
                        try:
                            if ut.token == self.token:  # Try exact match first
                                return label
                        # Not every UnexpectedInput carries a .token.
                        except AttributeError:
                            pass
                        # Keep only the first state-level match as fallback.
                        if not candidate:
                            candidate = label
        return candidate
constant[ Given a parser instance and a dictionary mapping some label with
some malformed syntax examples, it'll return the label for the
example that bests matches the current error.
]
assert[compare[name[self].state is_not constant[None]]]
variable[candidate] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da2041da7d0>, <ast.Name object at 0x7da2041db730>]]] in starred[call[name[examples].items, parameter[]]] begin[:]
assert[<ast.UnaryOp object at 0x7da2041d8640>]
for taget[name[malformed]] in starred[name[example]] begin[:]
<ast.Try object at 0x7da2041d8850>
return[name[candidate]] | keyword[def] identifier[match_examples] ( identifier[self] , identifier[parse_fn] , identifier[examples] ):
literal[string]
keyword[assert] identifier[self] . identifier[state] keyword[is] keyword[not] keyword[None] , literal[string]
identifier[candidate] = keyword[None]
keyword[for] identifier[label] , identifier[example] keyword[in] identifier[examples] . identifier[items] ():
keyword[assert] keyword[not] identifier[isinstance] ( identifier[example] , identifier[STRING_TYPE] )
keyword[for] identifier[malformed] keyword[in] identifier[example] :
keyword[try] :
identifier[parse_fn] ( identifier[malformed] )
keyword[except] identifier[UnexpectedInput] keyword[as] identifier[ut] :
keyword[if] identifier[ut] . identifier[state] == identifier[self] . identifier[state] :
keyword[try] :
keyword[if] identifier[ut] . identifier[token] == identifier[self] . identifier[token] :
keyword[return] identifier[label]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] keyword[not] identifier[candidate] :
identifier[candidate] = identifier[label]
keyword[return] identifier[candidate] | def match_examples(self, parse_fn, examples):
""" Given a parser instance and a dictionary mapping some label with
some malformed syntax examples, it'll return the label for the
example that bests matches the current error.
"""
assert self.state is not None, 'Not supported for this exception'
candidate = None
for (label, example) in examples.items():
assert not isinstance(example, STRING_TYPE)
for malformed in example:
try:
parse_fn(malformed) # depends on [control=['try'], data=[]]
except UnexpectedInput as ut:
if ut.state == self.state:
try:
if ut.token == self.token: # Try exact match first
return label # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
if not candidate:
candidate = label # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ut']] # depends on [control=['for'], data=['malformed']] # depends on [control=['for'], data=[]]
return candidate |
def adjoint(self):
        """Adjoint of this operator.
        For example, if A and B are operators::
            [[A, 0],
            [0, B]]
        The adjoint is given by::
            [[A^*, 0],
            [0, B^*]]
        This is only well defined if each sub-operator has an adjoint
        Returns
        -------
        adjoint : `DiagonalOperator`
            The adjoint operator
        See Also
        --------
        ProductSpaceOperator.adjoint
        """
        # Take the adjoint block-wise; the composite operator's domain and
        # range swap accordingly (passed swapped below).
        adjoints = [op.adjoint for op in self.operators]
        return DiagonalOperator(*adjoints,
                                domain=self.range, range=self.domain) | def function[adjoint, parameter[self]]:
constant[Adjoint of this operator.
For example, if A and B are operators::
[[A, 0],
[0, B]]
The adjoint is given by::
[[A^*, 0],
[0, B^*]]
This is only well defined if each sub-operator has an adjoint
Returns
-------
adjoint : `DiagonalOperator`
The adjoint operator
See Also
--------
ProductSpaceOperator.adjoint
]
variable[adjoints] assign[=] <ast.ListComp object at 0x7da1b1e04ca0>
return[call[name[DiagonalOperator], parameter[<ast.Starred object at 0x7da1b1e05240>]]] | keyword[def] identifier[adjoint] ( identifier[self] ):
literal[string]
identifier[adjoints] =[ identifier[op] . identifier[adjoint] keyword[for] identifier[op] keyword[in] identifier[self] . identifier[operators] ]
keyword[return] identifier[DiagonalOperator] (* identifier[adjoints] ,
identifier[domain] = identifier[self] . identifier[range] , identifier[range] = identifier[self] . identifier[domain] ) | def adjoint(self):
"""Adjoint of this operator.
For example, if A and B are operators::
[[A, 0],
[0, B]]
The adjoint is given by::
[[A^*, 0],
[0, B^*]]
This is only well defined if each sub-operator has an adjoint
Returns
-------
adjoint : `DiagonalOperator`
The adjoint operator
See Also
--------
ProductSpaceOperator.adjoint
"""
adjoints = [op.adjoint for op in self.operators]
return DiagonalOperator(*adjoints, domain=self.range, range=self.domain) |
def on_equalarea_specimen_select(self, event):
        """
        Get mouse position on double click find the nearest interpretation
        to the mouse
        position then select that interpretation
        Parameters
        ----------
        event : the wx Mouseevent for that click
        Alters
        ------
        current_fit
        """
        # Nothing plotted yet -> nothing to select.
        if not self.specimen_EA_xdata or not self.specimen_EA_ydata:
            return
        pos = event.GetPosition()
        width, height = self.canvas2.get_width_height()
        # Flip y: wx reports window coords with y growing downward, while
        # the matplotlib display coords used below grow upward.
        pos[1] = height - pos[1]
        xpick_data, ypick_data = pos
        xdata_org = self.specimen_EA_xdata
        ydata_org = self.specimen_EA_ydata
        # Project the stored equal-area data coords into display (pixel)
        # coords so distances can be compared against the click position.
        data_corrected = self.specimen_eqarea.transData.transform(
            vstack([xdata_org, ydata_org]).T)
        xdata, ydata = data_corrected.T
        xdata = list(map(float, xdata))
        ydata = list(map(float, ydata))
        # Pick radius in pixels.
        e = 4e0
        index = None
        # Take the first point strictly within the pick radius (distance 0
        # is excluded by the "0 <" bound -- NOTE(review): confirm an exact
        # overlap should really be skipped).
        for i, (x, y) in enumerate(zip(xdata, ydata)):
            if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
                index = i
                break
        # Highlight the chosen interpretation and redraw.
        if index != None:
            self.fit_box.SetSelection(index)
            self.draw_figure(self.s, True)
            self.on_select_fit(event) | def function[on_equalarea_specimen_select, parameter[self, event]]:
constant[
Get mouse position on double click find the nearest interpretation
to the mouse
position then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
]
if <ast.BoolOp object at 0x7da18f00ead0> begin[:]
return[None]
variable[pos] assign[=] call[name[event].GetPosition, parameter[]]
<ast.Tuple object at 0x7da18f00d180> assign[=] call[name[self].canvas2.get_width_height, parameter[]]
call[name[pos]][constant[1]] assign[=] binary_operation[name[height] - call[name[pos]][constant[1]]]
<ast.Tuple object at 0x7da18f00f100> assign[=] name[pos]
variable[xdata_org] assign[=] name[self].specimen_EA_xdata
variable[ydata_org] assign[=] name[self].specimen_EA_ydata
variable[data_corrected] assign[=] call[name[self].specimen_eqarea.transData.transform, parameter[call[name[vstack], parameter[list[[<ast.Name object at 0x7da18f00e140>, <ast.Name object at 0x7da18f00d7e0>]]]].T]]
<ast.Tuple object at 0x7da18f00dab0> assign[=] name[data_corrected].T
variable[xdata] assign[=] call[name[list], parameter[call[name[map], parameter[name[float], name[xdata]]]]]
variable[ydata] assign[=] call[name[list], parameter[call[name[map], parameter[name[float], name[ydata]]]]]
variable[e] assign[=] constant[4.0]
variable[index] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da18f00dcf0>, <ast.Tuple object at 0x7da18f00f5b0>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[xdata], name[ydata]]]]]] begin[:]
if compare[constant[0] less[<] call[name[sqrt], parameter[binary_operation[binary_operation[binary_operation[name[x] - name[xpick_data]] ** constant[2.0]] + binary_operation[binary_operation[name[y] - name[ypick_data]] ** constant[2.0]]]]]] begin[:]
variable[index] assign[=] name[i]
break
if compare[name[index] not_equal[!=] constant[None]] begin[:]
call[name[self].fit_box.SetSelection, parameter[name[index]]]
call[name[self].draw_figure, parameter[name[self].s, constant[True]]]
call[name[self].on_select_fit, parameter[name[event]]] | keyword[def] identifier[on_equalarea_specimen_select] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[specimen_EA_xdata] keyword[or] keyword[not] identifier[self] . identifier[specimen_EA_ydata] :
keyword[return]
identifier[pos] = identifier[event] . identifier[GetPosition] ()
identifier[width] , identifier[height] = identifier[self] . identifier[canvas2] . identifier[get_width_height] ()
identifier[pos] [ literal[int] ]= identifier[height] - identifier[pos] [ literal[int] ]
identifier[xpick_data] , identifier[ypick_data] = identifier[pos]
identifier[xdata_org] = identifier[self] . identifier[specimen_EA_xdata]
identifier[ydata_org] = identifier[self] . identifier[specimen_EA_ydata]
identifier[data_corrected] = identifier[self] . identifier[specimen_eqarea] . identifier[transData] . identifier[transform] (
identifier[vstack] ([ identifier[xdata_org] , identifier[ydata_org] ]). identifier[T] )
identifier[xdata] , identifier[ydata] = identifier[data_corrected] . identifier[T]
identifier[xdata] = identifier[list] ( identifier[map] ( identifier[float] , identifier[xdata] ))
identifier[ydata] = identifier[list] ( identifier[map] ( identifier[float] , identifier[ydata] ))
identifier[e] = literal[int]
identifier[index] = keyword[None]
keyword[for] identifier[i] ,( identifier[x] , identifier[y] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[xdata] , identifier[ydata] )):
keyword[if] literal[int] < identifier[sqrt] (( identifier[x] - identifier[xpick_data] )** literal[int] +( identifier[y] - identifier[ypick_data] )** literal[int] )< identifier[e] :
identifier[index] = identifier[i]
keyword[break]
keyword[if] identifier[index] != keyword[None] :
identifier[self] . identifier[fit_box] . identifier[SetSelection] ( identifier[index] )
identifier[self] . identifier[draw_figure] ( identifier[self] . identifier[s] , keyword[True] )
identifier[self] . identifier[on_select_fit] ( identifier[event] ) | def on_equalarea_specimen_select(self, event):
"""
Get mouse position on double click find the nearest interpretation
to the mouse
position then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
"""
if not self.specimen_EA_xdata or not self.specimen_EA_ydata:
return # depends on [control=['if'], data=[]]
pos = event.GetPosition()
(width, height) = self.canvas2.get_width_height()
pos[1] = height - pos[1]
(xpick_data, ypick_data) = pos
xdata_org = self.specimen_EA_xdata
ydata_org = self.specimen_EA_ydata
data_corrected = self.specimen_eqarea.transData.transform(vstack([xdata_org, ydata_org]).T)
(xdata, ydata) = data_corrected.T
xdata = list(map(float, xdata))
ydata = list(map(float, ydata))
e = 4.0
index = None
for (i, (x, y)) in enumerate(zip(xdata, ydata)):
if 0 < sqrt((x - xpick_data) ** 2.0 + (y - ypick_data) ** 2.0) < e:
index = i
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if index != None:
self.fit_box.SetSelection(index)
self.draw_figure(self.s, True)
self.on_select_fit(event) # depends on [control=['if'], data=['index']] |
def get_sheet_list(xl_path: str) -> List:
    """Return a list with the name of the sheets in
    the Excel file in `xl_path`.
    """
    wb = read_xl(xl_path)
    # The workbook backends expose sheet names differently: an attribute
    # on one, a method on the other (presumably openpyxl vs. xlrd --
    # confirm against read_xl).
    if hasattr(wb, 'sheetnames'):
        return wb.sheetnames
    else:
        return wb.sheet_names() | def function[get_sheet_list, parameter[xl_path]]:
constant[Return a list with the name of the sheets in
the Excel file in `xl_path`.
]
variable[wb] assign[=] call[name[read_xl], parameter[name[xl_path]]]
if call[name[hasattr], parameter[name[wb], constant[sheetnames]]] begin[:]
return[name[wb].sheetnames] | keyword[def] identifier[get_sheet_list] ( identifier[xl_path] : identifier[str] )-> identifier[List] :
literal[string]
identifier[wb] = identifier[read_xl] ( identifier[xl_path] )
keyword[if] identifier[hasattr] ( identifier[wb] , literal[string] ):
keyword[return] identifier[wb] . identifier[sheetnames]
keyword[else] :
keyword[return] identifier[wb] . identifier[sheet_names] () | def get_sheet_list(xl_path: str) -> List:
"""Return a list with the name of the sheets in
the Excel file in `xl_path`.
"""
wb = read_xl(xl_path)
if hasattr(wb, 'sheetnames'):
return wb.sheetnames # depends on [control=['if'], data=[]]
else:
return wb.sheet_names() |
def remove_profile(self, profile=None):
        """Remove profile from credentials file.
        Args:
            profile (str): Credentials profile to remove.
        Returns:
            list: List of affected document IDs.
        """
        # The database is used as a context manager so changes are
        # persisted on exit (NOTE(review): TinyDB-style API -- confirm).
        with self.db:
            return self.db.remove(self.query.profile == profile) | def function[remove_profile, parameter[self, profile]]:
constant[Remove profile from credentials file.
Args:
profile (str): Credentials profile to remove.
Returns:
list: List of affected document IDs.
]
with name[self].db begin[:]
return[call[name[self].db.remove, parameter[compare[name[self].query.profile equal[==] name[profile]]]]] | keyword[def] identifier[remove_profile] ( identifier[self] , identifier[profile] = keyword[None] ):
literal[string]
keyword[with] identifier[self] . identifier[db] :
keyword[return] identifier[self] . identifier[db] . identifier[remove] ( identifier[self] . identifier[query] . identifier[profile] == identifier[profile] ) | def remove_profile(self, profile=None):
"""Remove profile from credentials file.
Args:
profile (str): Credentials profile to remove.
Returns:
list: List of affected document IDs.
"""
with self.db:
return self.db.remove(self.query.profile == profile) # depends on [control=['with'], data=[]] |
def get_contents(diff_part):
    """
    Returns a tuple of old content and new content.
    """
    # Old side: resolved from the blob sha and path recorded in the diff.
    old_sha = get_old_sha(diff_part)
    old_filename = get_old_filename(diff_part)
    old_contents = get_old_contents(old_sha, old_filename)
    # New side: read by filename only (presumably the working-tree copy
    # -- confirm against get_new_contents).
    new_filename = get_new_filename(diff_part)
    new_contents = get_new_contents(new_filename)
    return old_contents, new_contents | def function[get_contents, parameter[diff_part]]:
constant[
Returns a tuple of old content and new content.
]
variable[old_sha] assign[=] call[name[get_old_sha], parameter[name[diff_part]]]
variable[old_filename] assign[=] call[name[get_old_filename], parameter[name[diff_part]]]
variable[old_contents] assign[=] call[name[get_old_contents], parameter[name[old_sha], name[old_filename]]]
variable[new_filename] assign[=] call[name[get_new_filename], parameter[name[diff_part]]]
variable[new_contents] assign[=] call[name[get_new_contents], parameter[name[new_filename]]]
return[tuple[[<ast.Name object at 0x7da1b26c8700>, <ast.Name object at 0x7da1b28d6740>]]] | keyword[def] identifier[get_contents] ( identifier[diff_part] ):
literal[string]
identifier[old_sha] = identifier[get_old_sha] ( identifier[diff_part] )
identifier[old_filename] = identifier[get_old_filename] ( identifier[diff_part] )
identifier[old_contents] = identifier[get_old_contents] ( identifier[old_sha] , identifier[old_filename] )
identifier[new_filename] = identifier[get_new_filename] ( identifier[diff_part] )
identifier[new_contents] = identifier[get_new_contents] ( identifier[new_filename] )
keyword[return] identifier[old_contents] , identifier[new_contents] | def get_contents(diff_part):
"""
Returns a tuple of old content and new content.
"""
old_sha = get_old_sha(diff_part)
old_filename = get_old_filename(diff_part)
old_contents = get_old_contents(old_sha, old_filename)
new_filename = get_new_filename(diff_part)
new_contents = get_new_contents(new_filename)
return (old_contents, new_contents) |
def ping():
    '''
    Ping the device on the other end of the connection
    .. code-block: bash
        salt '*' onyx.cmd ping
    '''
    # Lazily create this worker's connection entry on first use.
    if _worker_name() not in DETAILS:
        init()
    try:
        # conn.isalive() reports whether the underlying terminal
        # connection is still up; a TerminalException means it is not.
        return DETAILS[_worker_name()].conn.isalive()
    except TerminalException as e:
        log.error(e)
        return False | def function[ping, parameter[]]:
constant[
Ping the device on the other end of the connection
.. code-block: bash
salt '*' onyx.cmd ping
]
if compare[call[name[_worker_name], parameter[]] <ast.NotIn object at 0x7da2590d7190> name[DETAILS]] begin[:]
call[name[init], parameter[]]
<ast.Try object at 0x7da1b21850c0> | keyword[def] identifier[ping] ():
literal[string]
keyword[if] identifier[_worker_name] () keyword[not] keyword[in] identifier[DETAILS] :
identifier[init] ()
keyword[try] :
keyword[return] identifier[DETAILS] [ identifier[_worker_name] ()]. identifier[conn] . identifier[isalive] ()
keyword[except] identifier[TerminalException] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( identifier[e] )
keyword[return] keyword[False] | def ping():
"""
Ping the device on the other end of the connection
.. code-block: bash
salt '*' onyx.cmd ping
"""
if _worker_name() not in DETAILS:
init() # depends on [control=['if'], data=[]]
try:
return DETAILS[_worker_name()].conn.isalive() # depends on [control=['try'], data=[]]
except TerminalException as e:
log.error(e)
return False # depends on [control=['except'], data=['e']] |
def run(self):
    """
    Executes a started pipeline by pulling results from it's output
    ``Pipers``. Processing nodes i.e. ``Pipers`` with the ``track``
    attribute set ``True`` will have their returned results stored within
    the ``Dagger.stats['pipers_tracked']`` dictionary. A running pipeline
    can be paused.

    Raises
    ------
    PlumberError
        If the pipeline is not in a clean "started" state (i.e. it is
        already running, pausing, or finished, or was never started).
    """
    # The pipeline may only transition to "running" from a started,
    # not-yet-running, not-pausing, not-finished state.
    if self._started.isSet() and \
       not self._running.isSet() and \
       not self._pausing.isSet() and \
       not self._finished.isSet():
        stride = 1  # FIXME
        tasks = self.get_outputs()
        wtasks = _Weave(tasks, repeats=stride)
        self._plunger = Thread(target=self._plunge,
                               args=(wtasks, self._pausing.isSet,
                                     self._finish))
        # BUGFIX: was "self._plunger.deamon = True" — the misspelled
        # attribute had no effect, so the plunger thread could keep the
        # interpreter alive at shutdown. The intent is a daemon thread.
        self._plunger.daemon = True
        self._plunger.start()
        self._running.set()
    else:
        raise PlumberError
constant[
Executes a started pipeline by pulling results from it's output
``Pipers``. Processing nodes i.e. ``Pipers`` with the ``track``
attribute set ``True`` will have their returned results stored within
the ``Dagger.stats['pipers_tracked']`` dictionary. A running pipeline
can be paused.
]
if <ast.BoolOp object at 0x7da1b2518c40> begin[:]
variable[stride] assign[=] constant[1]
variable[tasks] assign[=] call[name[self].get_outputs, parameter[]]
variable[wtasks] assign[=] call[name[_Weave], parameter[name[tasks]]]
name[self]._plunger assign[=] call[name[Thread], parameter[]]
name[self]._plunger.deamon assign[=] constant[True]
call[name[self]._plunger.start, parameter[]]
call[name[self]._running.set, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_started] . identifier[isSet] () keyword[and] keyword[not] identifier[self] . identifier[_running] . identifier[isSet] () keyword[and] keyword[not] identifier[self] . identifier[_pausing] . identifier[isSet] () keyword[and] keyword[not] identifier[self] . identifier[_finished] . identifier[isSet] ():
identifier[stride] = literal[int]
identifier[tasks] = identifier[self] . identifier[get_outputs] ()
identifier[wtasks] = identifier[_Weave] ( identifier[tasks] , identifier[repeats] = identifier[stride] )
identifier[self] . identifier[_plunger] = identifier[Thread] ( identifier[target] = identifier[self] . identifier[_plunge] , identifier[args] =( identifier[wtasks] , identifier[self] . identifier[_pausing] . identifier[isSet] , identifier[self] . identifier[_finish] ))
identifier[self] . identifier[_plunger] . identifier[deamon] = keyword[True]
identifier[self] . identifier[_plunger] . identifier[start] ()
identifier[self] . identifier[_running] . identifier[set] ()
keyword[else] :
keyword[raise] identifier[PlumberError] | def run(self):
"""
Executes a started pipeline by pulling results from it's output
``Pipers``. Processing nodes i.e. ``Pipers`` with the ``track``
attribute set ``True`` will have their returned results stored within
the ``Dagger.stats['pipers_tracked']`` dictionary. A running pipeline
can be paused.
"""
# remove non-block results for end tasks
if self._started.isSet() and (not self._running.isSet()) and (not self._pausing.isSet()) and (not self._finished.isSet()):
stride = 1 # FIXME
tasks = self.get_outputs()
wtasks = _Weave(tasks, repeats=stride)
self._plunger = Thread(target=self._plunge, args=(wtasks, self._pausing.isSet, self._finish))
self._plunger.deamon = True
self._plunger.start()
self._running.set() # depends on [control=['if'], data=[]]
else:
raise PlumberError |
def date_totals(entries, by):
    """Aggregate time entries into per-date hour summaries.

    ``by`` selects the grouping field: 'user' builds a "first last"
    display name, 'project' uses the project name, anything else uses the
    raw field value. Returns ``(name, pk, date_dict)`` where ``name`` and
    ``pk`` come from the final date group processed and ``date_dict``
    maps each date to its hours summary.
    """
    date_dict = {}
    for date, grouped in groupby(entries, lambda entry: entry['date']):
        # Normalise datetimes down to plain dates for the dict keys.
        if isinstance(date, datetime.datetime):
            date = date.date()
        bucket = list(grouped)
        first = bucket[0]
        if by == 'user':
            name = ' '.join((first['user__first_name'],
                             first['user__last_name']))
        elif by == 'project':
            name = first['project__name']
        else:
            name = first[by]
        pk = first[by]
        date_dict[date] = get_hours_summary(bucket)
    return name, pk, date_dict
constant[Yield a user's name and a dictionary of their hours]
variable[date_dict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1043760>, <ast.Name object at 0x7da1b1042f50>]]] in starred[call[name[groupby], parameter[name[entries], <ast.Lambda object at 0x7da1b1041ea0>]]] begin[:]
if call[name[isinstance], parameter[name[date], name[datetime].datetime]] begin[:]
variable[date] assign[=] call[name[date].date, parameter[]]
variable[d_entries] assign[=] call[name[list], parameter[name[date_entries]]]
if compare[name[by] equal[==] constant[user]] begin[:]
variable[name] assign[=] call[constant[ ].join, parameter[tuple[[<ast.Subscript object at 0x7da1b1040070>, <ast.Subscript object at 0x7da1b10418d0>]]]]
variable[pk] assign[=] call[call[name[d_entries]][constant[0]]][name[by]]
variable[hours] assign[=] call[name[get_hours_summary], parameter[name[d_entries]]]
call[name[date_dict]][name[date]] assign[=] name[hours]
return[tuple[[<ast.Name object at 0x7da1b106b820>, <ast.Name object at 0x7da1b106ad10>, <ast.Name object at 0x7da1b1069e40>]]] | keyword[def] identifier[date_totals] ( identifier[entries] , identifier[by] ):
literal[string]
identifier[date_dict] ={}
keyword[for] identifier[date] , identifier[date_entries] keyword[in] identifier[groupby] ( identifier[entries] , keyword[lambda] identifier[x] : identifier[x] [ literal[string] ]):
keyword[if] identifier[isinstance] ( identifier[date] , identifier[datetime] . identifier[datetime] ):
identifier[date] = identifier[date] . identifier[date] ()
identifier[d_entries] = identifier[list] ( identifier[date_entries] )
keyword[if] identifier[by] == literal[string] :
identifier[name] = literal[string] . identifier[join] (( identifier[d_entries] [ literal[int] ][ literal[string] ],
identifier[d_entries] [ literal[int] ][ literal[string] ]))
keyword[elif] identifier[by] == literal[string] :
identifier[name] = identifier[d_entries] [ literal[int] ][ literal[string] ]
keyword[else] :
identifier[name] = identifier[d_entries] [ literal[int] ][ identifier[by] ]
identifier[pk] = identifier[d_entries] [ literal[int] ][ identifier[by] ]
identifier[hours] = identifier[get_hours_summary] ( identifier[d_entries] )
identifier[date_dict] [ identifier[date] ]= identifier[hours]
keyword[return] identifier[name] , identifier[pk] , identifier[date_dict] | def date_totals(entries, by):
"""Yield a user's name and a dictionary of their hours"""
date_dict = {}
for (date, date_entries) in groupby(entries, lambda x: x['date']):
if isinstance(date, datetime.datetime):
date = date.date() # depends on [control=['if'], data=[]]
d_entries = list(date_entries)
if by == 'user':
name = ' '.join((d_entries[0]['user__first_name'], d_entries[0]['user__last_name'])) # depends on [control=['if'], data=[]]
elif by == 'project':
name = d_entries[0]['project__name'] # depends on [control=['if'], data=[]]
else:
name = d_entries[0][by]
pk = d_entries[0][by]
hours = get_hours_summary(d_entries)
date_dict[date] = hours # depends on [control=['for'], data=[]]
return (name, pk, date_dict) |
def set_tuning(self, tuning):
    """Attach *tuning* to this Track and, when present, to its instrument.

    ``tuning`` should be a StringTuning or derivative object. Returns
    ``self`` so calls can be chained.
    """
    instrument = self.instrument
    if instrument:
        instrument.tuning = tuning
    self.tuning = tuning
    return self
constant[Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
]
if name[self].instrument begin[:]
name[self].instrument.tuning assign[=] name[tuning]
name[self].tuning assign[=] name[tuning]
return[name[self]] | keyword[def] identifier[set_tuning] ( identifier[self] , identifier[tuning] ):
literal[string]
keyword[if] identifier[self] . identifier[instrument] :
identifier[self] . identifier[instrument] . identifier[tuning] = identifier[tuning]
identifier[self] . identifier[tuning] = identifier[tuning]
keyword[return] identifier[self] | def set_tuning(self, tuning):
"""Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
"""
if self.instrument:
self.instrument.tuning = tuning # depends on [control=['if'], data=[]]
self.tuning = tuning
return self |
def _read_version(material_description_bytes):
    # type: (io.BytesIO) -> None
    """Validate the version header of a serialized material description.

    :param material_description_bytes: serializezd material description
    :type material_description_bytes: io.BytesIO
    :raises InvalidMaterialDescriptionError: if malformed version
    :raises InvalidMaterialDescriptionVersionError: if unknown version is found
    """
    # A truncated stream surfaces as a struct.error from the unpack.
    try:
        version = unpack_value(">4s", material_description_bytes)[0]
    except struct.error:
        message = "Malformed material description version"
        _LOGGER.exception(message)
        raise InvalidMaterialDescriptionError(message)
    if version != _MATERIAL_DESCRIPTION_VERSION:
        raise InvalidMaterialDescriptionVersionError(
            "Invalid material description version: {}".format(repr(version))
        )
constant[Read the version from the serialized material description and raise an error if it is unknown.
:param material_description_bytes: serializezd material description
:type material_description_bytes: io.BytesIO
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found
]
<ast.Try object at 0x7da18bc701f0>
if compare[name[version] not_equal[!=] name[_MATERIAL_DESCRIPTION_VERSION]] begin[:]
<ast.Raise object at 0x7da1b26ad2a0> | keyword[def] identifier[_read_version] ( identifier[material_description_bytes] ):
literal[string]
keyword[try] :
( identifier[version] ,)= identifier[unpack_value] ( literal[string] , identifier[material_description_bytes] )
keyword[except] identifier[struct] . identifier[error] :
identifier[message] = literal[string]
identifier[_LOGGER] . identifier[exception] ( identifier[message] )
keyword[raise] identifier[InvalidMaterialDescriptionError] ( identifier[message] )
keyword[if] identifier[version] != identifier[_MATERIAL_DESCRIPTION_VERSION] :
keyword[raise] identifier[InvalidMaterialDescriptionVersionError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[version] ))) | def _read_version(material_description_bytes):
# type: (io.BytesIO) -> None
'Read the version from the serialized material description and raise an error if it is unknown.\n\n :param material_description_bytes: serializezd material description\n :type material_description_bytes: io.BytesIO\n :raises InvalidMaterialDescriptionError: if malformed version\n :raises InvalidMaterialDescriptionVersionError: if unknown version is found\n '
try:
(version,) = unpack_value('>4s', material_description_bytes) # depends on [control=['try'], data=[]]
except struct.error:
message = 'Malformed material description version'
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message) # depends on [control=['except'], data=[]]
if version != _MATERIAL_DESCRIPTION_VERSION:
raise InvalidMaterialDescriptionVersionError('Invalid material description version: {}'.format(repr(version))) # depends on [control=['if'], data=['version']] |
def _partition(str_arg, sep):
"""
_partition(str_arg, sep) -> (head, sep, tail)
Searches for the first occurrence of the separator sep in str_arg, and
returns the, part before it, the separator itself, and the part after it.
If the separator is not found, returns str_arg and two empty strings.
"""
try:
return str_arg.partition(sep)
except AttributeError:
try:
idx = str_arg.index(sep)
except ValueError:
return (str_arg, '', '')
return (str_arg[:idx], sep, str_arg[idx + len(sep):]) | def function[_partition, parameter[str_arg, sep]]:
constant[
_partition(str_arg, sep) -> (head, sep, tail)
Searches for the first occurrence of the separator sep in str_arg, and
returns the, part before it, the separator itself, and the part after it.
If the separator is not found, returns str_arg and two empty strings.
]
<ast.Try object at 0x7da20e9b0df0> | keyword[def] identifier[_partition] ( identifier[str_arg] , identifier[sep] ):
literal[string]
keyword[try] :
keyword[return] identifier[str_arg] . identifier[partition] ( identifier[sep] )
keyword[except] identifier[AttributeError] :
keyword[try] :
identifier[idx] = identifier[str_arg] . identifier[index] ( identifier[sep] )
keyword[except] identifier[ValueError] :
keyword[return] ( identifier[str_arg] , literal[string] , literal[string] )
keyword[return] ( identifier[str_arg] [: identifier[idx] ], identifier[sep] , identifier[str_arg] [ identifier[idx] + identifier[len] ( identifier[sep] ):]) | def _partition(str_arg, sep):
"""
_partition(str_arg, sep) -> (head, sep, tail)
Searches for the first occurrence of the separator sep in str_arg, and
returns the, part before it, the separator itself, and the part after it.
If the separator is not found, returns str_arg and two empty strings.
"""
try:
return str_arg.partition(sep) # depends on [control=['try'], data=[]]
except AttributeError:
try:
idx = str_arg.index(sep) # depends on [control=['try'], data=[]]
except ValueError:
return (str_arg, '', '') # depends on [control=['except'], data=[]]
return (str_arg[:idx], sep, str_arg[idx + len(sep):]) # depends on [control=['except'], data=[]] |
def find_minimum_spanning_forest(graph):
    """Calculates the minimum spanning forest of a disconnected graph.

    Returns a list of lists, each containing the edges that define one
    spanning tree. A graph with no nodes or no edges yields an empty
    list.
    """
    # Degenerate graphs have nothing to span.
    if graph.num_nodes() == 0 or graph.num_edges() == 0:
        return []
    # One Kruskal MST per connected component forms the forest.
    return [kruskal_mst(component)
            for component in get_connected_components_as_subgraphs(graph)]
constant[Calculates the minimum spanning forest of a disconnected graph.
Returns a list of lists, each containing the edges that define that tree.
Returns an empty list for an empty graph.
]
variable[msf] assign[=] list[[]]
if compare[call[name[graph].num_nodes, parameter[]] equal[==] constant[0]] begin[:]
return[name[msf]]
if compare[call[name[graph].num_edges, parameter[]] equal[==] constant[0]] begin[:]
return[name[msf]]
variable[connected_components] assign[=] call[name[get_connected_components_as_subgraphs], parameter[name[graph]]]
for taget[name[subgraph]] in starred[name[connected_components]] begin[:]
variable[edge_list] assign[=] call[name[kruskal_mst], parameter[name[subgraph]]]
call[name[msf].append, parameter[name[edge_list]]]
return[name[msf]] | keyword[def] identifier[find_minimum_spanning_forest] ( identifier[graph] ):
literal[string]
identifier[msf] =[]
keyword[if] identifier[graph] . identifier[num_nodes] ()== literal[int] :
keyword[return] identifier[msf]
keyword[if] identifier[graph] . identifier[num_edges] ()== literal[int] :
keyword[return] identifier[msf]
identifier[connected_components] = identifier[get_connected_components_as_subgraphs] ( identifier[graph] )
keyword[for] identifier[subgraph] keyword[in] identifier[connected_components] :
identifier[edge_list] = identifier[kruskal_mst] ( identifier[subgraph] )
identifier[msf] . identifier[append] ( identifier[edge_list] )
keyword[return] identifier[msf] | def find_minimum_spanning_forest(graph):
"""Calculates the minimum spanning forest of a disconnected graph.
Returns a list of lists, each containing the edges that define that tree.
Returns an empty list for an empty graph.
"""
msf = []
if graph.num_nodes() == 0:
return msf # depends on [control=['if'], data=[]]
if graph.num_edges() == 0:
return msf # depends on [control=['if'], data=[]]
connected_components = get_connected_components_as_subgraphs(graph)
for subgraph in connected_components:
edge_list = kruskal_mst(subgraph)
msf.append(edge_list) # depends on [control=['for'], data=['subgraph']]
return msf |
def vis_splitting(Verts, splitting, output='vtk', fname='output.vtu'):
    """Coarse grid visualization for C/F splittings.

    Parameters
    ----------
    Verts : {array}
        coordinate array (N x D)
    splitting : {array}
        coarse(1)/fine(0) flags
    fname : {string, file object}
        file to be written, e.g. 'output.vtu'
    output : {string}
        'vtk' or 'matplotlib'

    Returns
    -------
    - Displays in screen or writes data to .vtu file for use in paraview
      (xml 0.1 format)

    Notes
    -----
    D :
        dimension of coordinate space
    N :
        # of vertices in the mesh represented in Verts
    Ndof :
        # of dof (= ldof * N)

    - simply color different points with different colors. This works
      best with classical AMG.
    - writes a file (or opens a window) for each dof
    - for Ndof>1, they are assumed orderd [...dof1..., ...dof2..., etc]

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.vis.vis_coarse import vis_splitting
    >>> Verts = np.array([[0.0,0.0],
    ...                   [1.0,0.0],
    ...                   [0.0,1.0],
    ...                   [1.0,1.0]])
    >>> splitting = np.array([0,1,0,1,1,0,1,0])    # two variables
    >>> vis_splitting(Verts,splitting,output='vtk',fname='output.vtu')
    """
    check_input(Verts, splitting)

    N = Verts.shape[0]
    # Use floor division: int(len/N) goes through a float and can lose
    # precision for very large arrays; the result is identical otherwise.
    Ndof = len(splitting) // N
    E2V = np.arange(0, N, dtype=int)

    # Split fname into base name and extension once; per-dof files are
    # written as "<base>_<dof>.<ext>".
    # BUGFIX: the previous code joined the name parts with "" — dropping
    # interior dots ("my.run.vtu" -> "myrun_1.vtu") — and produced a
    # double dot ("out_1..vtu") when fname had no extension. It also
    # carried an unreachable "problem with fname" branch.
    base, dot, ext = fname.rpartition('.')
    if not dot:
        base, ext = fname, 'vtu'

    new_fname = fname
    for d in range(0, Ndof):
        # for each variable, write a file or open a figure
        if Ndof > 1:
            new_fname = '%s_%d.%s' % (base, d + 1, ext)
        cdata = splitting[(d * N):((d + 1) * N)]
        if output == 'vtk':
            write_basic_mesh(Verts=Verts, E2V=E2V, mesh_type='vertex',
                             cdata=cdata, fname=new_fname)
        elif output == 'matplotlib':
            from pylab import figure, show, plot, xlabel, ylabel, title, axis
            cdataF = np.where(cdata == 0)[0]
            cdataC = np.where(cdata == 1)[0]
            xC = Verts[cdataC, 0]
            yC = Verts[cdataC, 1]
            xF = Verts[cdataF, 0]
            yF = Verts[cdataF, 1]
            figure()
            plot(xC, yC, 'r.', xF, yF, 'b.', clip_on=True)
            title('C/F splitting (red=coarse, blue=fine)')
            xlabel('x')
            ylabel('y')
            axis('off')
            show()
        else:
            raise ValueError('problem with outputtype')
constant[Coarse grid visualization for C/F splittings.
Parameters
----------
Verts : {array}
coordinate array (N x D)
splitting : {array}
coarse(1)/fine(0) flags
fname : {string, file object}
file to be written, e.g. 'output.vtu'
output : {string}
'vtk' or 'matplotlib'
Returns
-------
- Displays in screen or writes data to .vtu file for use in paraview
(xml 0.1 format)
Notes
-----
D :
dimension of coordinate space
N :
# of vertices in the mesh represented in Verts
Ndof :
# of dof (= ldof * N)
- simply color different points with different colors. This works
best with classical AMG.
- writes a file (or opens a window) for each dof
- for Ndof>1, they are assumed orderd [...dof1..., ...dof2..., etc]
Examples
--------
>>> import numpy as np
>>> from pyamg.vis.vis_coarse import vis_splitting
>>> Verts = np.array([[0.0,0.0],
... [1.0,0.0],
... [0.0,1.0],
... [1.0,1.0]])
>>> splitting = np.array([0,1,0,1,1,0,1,0]) # two variables
>>> vis_splitting(Verts,splitting,output='vtk',fname='output.vtu')
>>> from pyamg.classical import RS
>>> from pyamg.vis.vis_coarse import vis_splitting
>>> from pyamg.gallery import load_example
>>> data = load_example('unit_square')
>>> A = data['A'].tocsr()
>>> V = data['vertices']
>>> E2V = data['elements']
>>> splitting = RS(A)
>>> vis_splitting(Verts=V,splitting=splitting,output='vtk',
fname='output.vtu')
]
call[name[check_input], parameter[name[Verts], name[splitting]]]
variable[N] assign[=] call[name[Verts].shape][constant[0]]
variable[Ndof] assign[=] call[name[int], parameter[binary_operation[call[name[len], parameter[name[splitting]]] / name[N]]]]
variable[E2V] assign[=] call[name[np].arange, parameter[constant[0], name[N]]]
variable[a] assign[=] call[name[fname].split, parameter[constant[.]]]
if compare[call[name[len], parameter[name[a]]] less[<] constant[2]] begin[:]
variable[fname1] assign[=] call[name[a]][constant[0]]
variable[fname2] assign[=] constant[.vtu]
variable[new_fname] assign[=] name[fname]
for taget[name[d]] in starred[call[name[range], parameter[constant[0], name[Ndof]]]] begin[:]
if compare[name[Ndof] greater[>] constant[1]] begin[:]
variable[new_fname] assign[=] binary_operation[binary_operation[name[fname1] + binary_operation[constant[_%d.] <ast.Mod object at 0x7da2590d6920> binary_operation[name[d] + constant[1]]]] + name[fname2]]
variable[cdata] assign[=] call[name[splitting]][<ast.Slice object at 0x7da207f01900>]
if compare[name[output] equal[==] constant[vtk]] begin[:]
call[name[write_basic_mesh], parameter[]] | keyword[def] identifier[vis_splitting] ( identifier[Verts] , identifier[splitting] , identifier[output] = literal[string] , identifier[fname] = literal[string] ):
literal[string]
identifier[check_input] ( identifier[Verts] , identifier[splitting] )
identifier[N] = identifier[Verts] . identifier[shape] [ literal[int] ]
identifier[Ndof] = identifier[int] ( identifier[len] ( identifier[splitting] )/ identifier[N] )
identifier[E2V] = identifier[np] . identifier[arange] ( literal[int] , identifier[N] , identifier[dtype] = identifier[int] )
identifier[a] = identifier[fname] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[a] )< literal[int] :
identifier[fname1] = identifier[a] [ literal[int] ]
identifier[fname2] = literal[string]
keyword[elif] identifier[len] ( identifier[a] )>= literal[int] :
identifier[fname1] = literal[string] . identifier[join] ( identifier[a] [:- literal[int] ])
identifier[fname2] = identifier[a] [- literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[new_fname] = identifier[fname]
keyword[for] identifier[d] keyword[in] identifier[range] ( literal[int] , identifier[Ndof] ):
keyword[if] identifier[Ndof] > literal[int] :
identifier[new_fname] = identifier[fname1] + literal[string] %( identifier[d] + literal[int] )+ identifier[fname2]
identifier[cdata] = identifier[splitting] [( identifier[d] * identifier[N] ):(( identifier[d] + literal[int] )* identifier[N] )]
keyword[if] identifier[output] == literal[string] :
identifier[write_basic_mesh] ( identifier[Verts] = identifier[Verts] , identifier[E2V] = identifier[E2V] , identifier[mesh_type] = literal[string] ,
identifier[cdata] = identifier[cdata] , identifier[fname] = identifier[new_fname] )
keyword[elif] identifier[output] == literal[string] :
keyword[from] identifier[pylab] keyword[import] identifier[figure] , identifier[show] , identifier[plot] , identifier[xlabel] , identifier[ylabel] , identifier[title] , identifier[axis]
identifier[cdataF] = identifier[np] . identifier[where] ( identifier[cdata] == literal[int] )[ literal[int] ]
identifier[cdataC] = identifier[np] . identifier[where] ( identifier[cdata] == literal[int] )[ literal[int] ]
identifier[xC] = identifier[Verts] [ identifier[cdataC] , literal[int] ]
identifier[yC] = identifier[Verts] [ identifier[cdataC] , literal[int] ]
identifier[xF] = identifier[Verts] [ identifier[cdataF] , literal[int] ]
identifier[yF] = identifier[Verts] [ identifier[cdataF] , literal[int] ]
identifier[figure] ()
identifier[plot] ( identifier[xC] , identifier[yC] , literal[string] , identifier[xF] , identifier[yF] , literal[string] , identifier[clip_on] = keyword[True] )
identifier[title] ( literal[string] )
identifier[xlabel] ( literal[string] )
identifier[ylabel] ( literal[string] )
identifier[axis] ( literal[string] )
identifier[show] ()
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def vis_splitting(Verts, splitting, output='vtk', fname='output.vtu'):
"""Coarse grid visualization for C/F splittings.
Parameters
----------
Verts : {array}
coordinate array (N x D)
splitting : {array}
coarse(1)/fine(0) flags
fname : {string, file object}
file to be written, e.g. 'output.vtu'
output : {string}
'vtk' or 'matplotlib'
Returns
-------
- Displays in screen or writes data to .vtu file for use in paraview
(xml 0.1 format)
Notes
-----
D :
dimension of coordinate space
N :
# of vertices in the mesh represented in Verts
Ndof :
# of dof (= ldof * N)
- simply color different points with different colors. This works
best with classical AMG.
- writes a file (or opens a window) for each dof
- for Ndof>1, they are assumed orderd [...dof1..., ...dof2..., etc]
Examples
--------
>>> import numpy as np
>>> from pyamg.vis.vis_coarse import vis_splitting
>>> Verts = np.array([[0.0,0.0],
... [1.0,0.0],
... [0.0,1.0],
... [1.0,1.0]])
>>> splitting = np.array([0,1,0,1,1,0,1,0]) # two variables
>>> vis_splitting(Verts,splitting,output='vtk',fname='output.vtu')
>>> from pyamg.classical import RS
>>> from pyamg.vis.vis_coarse import vis_splitting
>>> from pyamg.gallery import load_example
>>> data = load_example('unit_square')
>>> A = data['A'].tocsr()
>>> V = data['vertices']
>>> E2V = data['elements']
>>> splitting = RS(A)
>>> vis_splitting(Verts=V,splitting=splitting,output='vtk',
fname='output.vtu')
"""
check_input(Verts, splitting)
N = Verts.shape[0]
Ndof = int(len(splitting) / N)
E2V = np.arange(0, N, dtype=int)
# adjust name in case of multiple variables
a = fname.split('.')
if len(a) < 2:
fname1 = a[0]
fname2 = '.vtu' # depends on [control=['if'], data=[]]
elif len(a) >= 2:
fname1 = ''.join(a[:-1])
fname2 = a[-1] # depends on [control=['if'], data=[]]
else:
raise ValueError('problem with fname')
new_fname = fname
for d in range(0, Ndof):
# for each variables, write a file or open a figure
if Ndof > 1:
new_fname = fname1 + '_%d.' % (d + 1) + fname2 # depends on [control=['if'], data=[]]
cdata = splitting[d * N:(d + 1) * N]
if output == 'vtk':
write_basic_mesh(Verts=Verts, E2V=E2V, mesh_type='vertex', cdata=cdata, fname=new_fname) # depends on [control=['if'], data=[]]
elif output == 'matplotlib':
from pylab import figure, show, plot, xlabel, ylabel, title, axis
cdataF = np.where(cdata == 0)[0]
cdataC = np.where(cdata == 1)[0]
xC = Verts[cdataC, 0]
yC = Verts[cdataC, 1]
xF = Verts[cdataF, 0]
yF = Verts[cdataF, 1]
figure()
plot(xC, yC, 'r.', xF, yF, 'b.', clip_on=True)
title('C/F splitting (red=coarse, blue=fine)')
xlabel('x')
ylabel('y')
axis('off')
show() # depends on [control=['if'], data=[]]
else:
raise ValueError('problem with outputtype') # depends on [control=['for'], data=['d']] |
def read_uic_image_property(fh):
    """Read UIC ImagePropertyEx tag from file and return as dict.

    Record layout (little-endian):
      uint8   length of the property name (including trailing byte)
      bytes   name (trailing byte stripped)
      uint32  flags
      uint8   property type
      type 1: two uint32 forming a rational value (numerator/denominator)
      other:  uint8 length followed by a byte-string value
    """
    size = struct.unpack('B', fh.read(1))[0]
    name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
    flags, prop = struct.unpack('<IB', fh.read(5))
    if prop == 1:
        # BUGFIX: use explicit little-endian '<II'. The bare 'II' format
        # read in native byte order, which is inconsistent with the
        # '<IB' unpack above and would misread the rational on
        # big-endian hosts.
        numerator, denominator = struct.unpack('<II', fh.read(8))
        value = numerator / denominator
    else:
        size = struct.unpack('B', fh.read(1))[0]
        value = struct.unpack('%is' % size, fh.read(size))[0]
    return dict(name=name, flags=flags, value=value)
constant[Read UIC ImagePropertyEx tag from file and return as dict.]
variable[size] assign[=] call[call[name[struct].unpack, parameter[constant[B], call[name[fh].read, parameter[constant[1]]]]]][constant[0]]
variable[name] assign[=] call[call[call[name[struct].unpack, parameter[binary_operation[constant[%is] <ast.Mod object at 0x7da2590d6920> name[size]], call[name[fh].read, parameter[name[size]]]]]][constant[0]]][<ast.Slice object at 0x7da1b1972170>]
<ast.Tuple object at 0x7da1b19f3250> assign[=] call[name[struct].unpack, parameter[constant[<IB], call[name[fh].read, parameter[constant[5]]]]]
if compare[name[prop] equal[==] constant[1]] begin[:]
variable[value] assign[=] call[name[struct].unpack, parameter[constant[II], call[name[fh].read, parameter[constant[8]]]]]
variable[value] assign[=] binary_operation[call[name[value]][constant[0]] / call[name[value]][constant[1]]]
return[call[name[dict], parameter[]]] | keyword[def] identifier[read_uic_image_property] ( identifier[fh] ):
literal[string]
identifier[size] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[fh] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[name] = identifier[struct] . identifier[unpack] ( literal[string] % identifier[size] , identifier[fh] . identifier[read] ( identifier[size] ))[ literal[int] ][:- literal[int] ]
identifier[flags] , identifier[prop] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[fh] . identifier[read] ( literal[int] ))
keyword[if] identifier[prop] == literal[int] :
identifier[value] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[fh] . identifier[read] ( literal[int] ))
identifier[value] = identifier[value] [ literal[int] ]/ identifier[value] [ literal[int] ]
keyword[else] :
identifier[size] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[fh] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[value] = identifier[struct] . identifier[unpack] ( literal[string] % identifier[size] , identifier[fh] . identifier[read] ( identifier[size] ))[ literal[int] ]
keyword[return] identifier[dict] ( identifier[name] = identifier[name] , identifier[flags] = identifier[flags] , identifier[value] = identifier[value] ) | def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
(flags, prop) = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1] # depends on [control=['if'], data=[]]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value) |
def _delete_partition(self, org_name, partition_name):
"""Send partition delete request to DCNM.
:param org_name: name of organization
:param partition_name: name of partition
"""
url = self._del_part % (org_name, partition_name)
return self._send_request('DELETE', url, '', 'partition') | def function[_delete_partition, parameter[self, org_name, partition_name]]:
constant[Send partition delete request to DCNM.
:param org_name: name of organization
:param partition_name: name of partition
]
variable[url] assign[=] binary_operation[name[self]._del_part <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1c63070>, <ast.Name object at 0x7da1b1c60d90>]]]
return[call[name[self]._send_request, parameter[constant[DELETE], name[url], constant[], constant[partition]]]] | keyword[def] identifier[_delete_partition] ( identifier[self] , identifier[org_name] , identifier[partition_name] ):
literal[string]
identifier[url] = identifier[self] . identifier[_del_part] %( identifier[org_name] , identifier[partition_name] )
keyword[return] identifier[self] . identifier[_send_request] ( literal[string] , identifier[url] , literal[string] , literal[string] ) | def _delete_partition(self, org_name, partition_name):
"""Send partition delete request to DCNM.
:param org_name: name of organization
:param partition_name: name of partition
"""
url = self._del_part % (org_name, partition_name)
return self._send_request('DELETE', url, '', 'partition') |
def get_schedules(profile='pagerduty', subdomain=None, api_key=None):
'''
List schedules belonging to this account
CLI Example:
salt myminion pagerduty.get_schedules
'''
return _list_items(
'schedules',
'id',
profile=profile,
subdomain=subdomain,
api_key=api_key,
) | def function[get_schedules, parameter[profile, subdomain, api_key]]:
constant[
List schedules belonging to this account
CLI Example:
salt myminion pagerduty.get_schedules
]
return[call[name[_list_items], parameter[constant[schedules], constant[id]]]] | keyword[def] identifier[get_schedules] ( identifier[profile] = literal[string] , identifier[subdomain] = keyword[None] , identifier[api_key] = keyword[None] ):
literal[string]
keyword[return] identifier[_list_items] (
literal[string] ,
literal[string] ,
identifier[profile] = identifier[profile] ,
identifier[subdomain] = identifier[subdomain] ,
identifier[api_key] = identifier[api_key] ,
) | def get_schedules(profile='pagerduty', subdomain=None, api_key=None):
"""
List schedules belonging to this account
CLI Example:
salt myminion pagerduty.get_schedules
"""
return _list_items('schedules', 'id', profile=profile, subdomain=subdomain, api_key=api_key) |
def get_proteininfo_fields(poolnames=False, genecentric=False):
"""Returns header fields for protein (group) information."""
allfields = OrderedDict()
basefields = [peptabledata.HEADER_PROTEINS,
peptabledata.HEADER_GENES,
peptabledata.HEADER_ASSOCIATED,
peptabledata.HEADER_DESCRIPTIONS,
peptabledata.HEADER_COVERAGES,
peptabledata.HEADER_NO_CONTENTPROTEINS,
]
for field in basefields:
allfields[field] = False
allfields[peptabledata.HEADER_NO_PSM] = poolnames
return allfields | def function[get_proteininfo_fields, parameter[poolnames, genecentric]]:
constant[Returns header fields for protein (group) information.]
variable[allfields] assign[=] call[name[OrderedDict], parameter[]]
variable[basefields] assign[=] list[[<ast.Attribute object at 0x7da1b24b0d90>, <ast.Attribute object at 0x7da1b24b2d40>, <ast.Attribute object at 0x7da1b24b2140>, <ast.Attribute object at 0x7da1b24b2770>, <ast.Attribute object at 0x7da1b24b2590>, <ast.Attribute object at 0x7da1b24b1ed0>]]
for taget[name[field]] in starred[name[basefields]] begin[:]
call[name[allfields]][name[field]] assign[=] constant[False]
call[name[allfields]][name[peptabledata].HEADER_NO_PSM] assign[=] name[poolnames]
return[name[allfields]] | keyword[def] identifier[get_proteininfo_fields] ( identifier[poolnames] = keyword[False] , identifier[genecentric] = keyword[False] ):
literal[string]
identifier[allfields] = identifier[OrderedDict] ()
identifier[basefields] =[ identifier[peptabledata] . identifier[HEADER_PROTEINS] ,
identifier[peptabledata] . identifier[HEADER_GENES] ,
identifier[peptabledata] . identifier[HEADER_ASSOCIATED] ,
identifier[peptabledata] . identifier[HEADER_DESCRIPTIONS] ,
identifier[peptabledata] . identifier[HEADER_COVERAGES] ,
identifier[peptabledata] . identifier[HEADER_NO_CONTENTPROTEINS] ,
]
keyword[for] identifier[field] keyword[in] identifier[basefields] :
identifier[allfields] [ identifier[field] ]= keyword[False]
identifier[allfields] [ identifier[peptabledata] . identifier[HEADER_NO_PSM] ]= identifier[poolnames]
keyword[return] identifier[allfields] | def get_proteininfo_fields(poolnames=False, genecentric=False):
"""Returns header fields for protein (group) information."""
allfields = OrderedDict()
basefields = [peptabledata.HEADER_PROTEINS, peptabledata.HEADER_GENES, peptabledata.HEADER_ASSOCIATED, peptabledata.HEADER_DESCRIPTIONS, peptabledata.HEADER_COVERAGES, peptabledata.HEADER_NO_CONTENTPROTEINS]
for field in basefields:
allfields[field] = False # depends on [control=['for'], data=['field']]
allfields[peptabledata.HEADER_NO_PSM] = poolnames
return allfields |
def get_learning_objectives(self):
""" This method also mirrors that in the Item."""
# This is pretty much identicial to the method in assessment.Item!
mgr = self._get_provider_manager('LEARNING')
lookup_session = mgr.get_objective_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_objective_bank_view()
return lookup_session.get_objectives_by_ids(self.get_learning_objective_ids()) | def function[get_learning_objectives, parameter[self]]:
constant[ This method also mirrors that in the Item.]
variable[mgr] assign[=] call[name[self]._get_provider_manager, parameter[constant[LEARNING]]]
variable[lookup_session] assign[=] call[name[mgr].get_objective_lookup_session, parameter[]]
call[name[lookup_session].use_federated_objective_bank_view, parameter[]]
return[call[name[lookup_session].get_objectives_by_ids, parameter[call[name[self].get_learning_objective_ids, parameter[]]]]] | keyword[def] identifier[get_learning_objectives] ( identifier[self] ):
literal[string]
identifier[mgr] = identifier[self] . identifier[_get_provider_manager] ( literal[string] )
identifier[lookup_session] = identifier[mgr] . identifier[get_objective_lookup_session] ( identifier[proxy] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ))
identifier[lookup_session] . identifier[use_federated_objective_bank_view] ()
keyword[return] identifier[lookup_session] . identifier[get_objectives_by_ids] ( identifier[self] . identifier[get_learning_objective_ids] ()) | def get_learning_objectives(self):
""" This method also mirrors that in the Item."""
# This is pretty much identicial to the method in assessment.Item!
mgr = self._get_provider_manager('LEARNING')
lookup_session = mgr.get_objective_lookup_session(proxy=getattr(self, '_proxy', None))
lookup_session.use_federated_objective_bank_view()
return lookup_session.get_objectives_by_ids(self.get_learning_objective_ids()) |
def background_reader(stream, loop: asyncio.AbstractEventLoop, callback):
"""
Reads a stream and forwards each line to an async callback.
"""
for line in iter(stream.readline, b''):
loop.call_soon_threadsafe(loop.create_task, callback(line)) | def function[background_reader, parameter[stream, loop, callback]]:
constant[
Reads a stream and forwards each line to an async callback.
]
for taget[name[line]] in starred[call[name[iter], parameter[name[stream].readline, constant[b'']]]] begin[:]
call[name[loop].call_soon_threadsafe, parameter[name[loop].create_task, call[name[callback], parameter[name[line]]]]] | keyword[def] identifier[background_reader] ( identifier[stream] , identifier[loop] : identifier[asyncio] . identifier[AbstractEventLoop] , identifier[callback] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[iter] ( identifier[stream] . identifier[readline] , literal[string] ):
identifier[loop] . identifier[call_soon_threadsafe] ( identifier[loop] . identifier[create_task] , identifier[callback] ( identifier[line] )) | def background_reader(stream, loop: asyncio.AbstractEventLoop, callback):
"""
Reads a stream and forwards each line to an async callback.
"""
for line in iter(stream.readline, b''):
loop.call_soon_threadsafe(loop.create_task, callback(line)) # depends on [control=['for'], data=['line']] |
def clipPolygons(self, polygons):
"""
Recursively remove all polygons in `polygons` that are inside this BSP
tree.
"""
if not self.plane:
return polygons[:]
front = []
back = []
for poly in polygons:
self.plane.splitPolygon(poly, front, back, front, back)
if self.front:
front = self.front.clipPolygons(front)
if self.back:
back = self.back.clipPolygons(back)
else:
back = []
front.extend(back)
return front | def function[clipPolygons, parameter[self, polygons]]:
constant[
Recursively remove all polygons in `polygons` that are inside this BSP
tree.
]
if <ast.UnaryOp object at 0x7da1b10a4c40> begin[:]
return[call[name[polygons]][<ast.Slice object at 0x7da1b10a67a0>]]
variable[front] assign[=] list[[]]
variable[back] assign[=] list[[]]
for taget[name[poly]] in starred[name[polygons]] begin[:]
call[name[self].plane.splitPolygon, parameter[name[poly], name[front], name[back], name[front], name[back]]]
if name[self].front begin[:]
variable[front] assign[=] call[name[self].front.clipPolygons, parameter[name[front]]]
if name[self].back begin[:]
variable[back] assign[=] call[name[self].back.clipPolygons, parameter[name[back]]]
call[name[front].extend, parameter[name[back]]]
return[name[front]] | keyword[def] identifier[clipPolygons] ( identifier[self] , identifier[polygons] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[plane] :
keyword[return] identifier[polygons] [:]
identifier[front] =[]
identifier[back] =[]
keyword[for] identifier[poly] keyword[in] identifier[polygons] :
identifier[self] . identifier[plane] . identifier[splitPolygon] ( identifier[poly] , identifier[front] , identifier[back] , identifier[front] , identifier[back] )
keyword[if] identifier[self] . identifier[front] :
identifier[front] = identifier[self] . identifier[front] . identifier[clipPolygons] ( identifier[front] )
keyword[if] identifier[self] . identifier[back] :
identifier[back] = identifier[self] . identifier[back] . identifier[clipPolygons] ( identifier[back] )
keyword[else] :
identifier[back] =[]
identifier[front] . identifier[extend] ( identifier[back] )
keyword[return] identifier[front] | def clipPolygons(self, polygons):
"""
Recursively remove all polygons in `polygons` that are inside this BSP
tree.
"""
if not self.plane:
return polygons[:] # depends on [control=['if'], data=[]]
front = []
back = []
for poly in polygons:
self.plane.splitPolygon(poly, front, back, front, back) # depends on [control=['for'], data=['poly']]
if self.front:
front = self.front.clipPolygons(front) # depends on [control=['if'], data=[]]
if self.back:
back = self.back.clipPolygons(back) # depends on [control=['if'], data=[]]
else:
back = []
front.extend(back)
return front |
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler) | def function[logger_file, parameter[self, value]]:
constant[The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
]
name[self].__logger_file assign[=] name[value]
if name[self].__logger_file begin[:]
name[self].logger_file_handler assign[=] call[name[logging].FileHandler, parameter[name[self].__logger_file]]
call[name[self].logger_file_handler.setFormatter, parameter[name[self].logger_formatter]]
for taget[tuple[[<ast.Name object at 0x7da18f09e6b0>, <ast.Name object at 0x7da18f09e050>]]] in starred[call[name[six].iteritems, parameter[name[self].logger]]] begin[:]
call[name[logger].addHandler, parameter[name[self].logger_file_handler]] | keyword[def] identifier[logger_file] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[__logger_file] = identifier[value]
keyword[if] identifier[self] . identifier[__logger_file] :
identifier[self] . identifier[logger_file_handler] = identifier[logging] . identifier[FileHandler] ( identifier[self] . identifier[__logger_file] )
identifier[self] . identifier[logger_file_handler] . identifier[setFormatter] ( identifier[self] . identifier[logger_formatter] )
keyword[for] identifier[_] , identifier[logger] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[logger] ):
identifier[logger] . identifier[addHandler] ( identifier[self] . identifier[logger_file_handler] ) | def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for (_, logger) in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def add_entry(self, path_object):
"""Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
"""
if (not is_root() and not self.st_mode & PERM_WRITE and
not self.filesystem.is_windows_fs):
exception = IOError if IS_PY2 else OSError
raise exception(errno.EACCES, 'Permission Denied', self.path)
if path_object.name in self.contents:
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self.contents[path_object.name] = path_object
path_object.parent_dir = self
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if path_object.st_nlink == 1:
self.filesystem.change_disk_usage(
path_object.size, path_object.name, self.st_dev) | def function[add_entry, parameter[self, path_object]]:
constant[Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
]
if <ast.BoolOp object at 0x7da18dc041f0> begin[:]
variable[exception] assign[=] <ast.IfExp object at 0x7da18f00d900>
<ast.Raise object at 0x7da18f00de70>
if compare[name[path_object].name in name[self].contents] begin[:]
call[name[self].filesystem.raise_os_error, parameter[name[errno].EEXIST, name[self].path]]
call[name[self].contents][name[path_object].name] assign[=] name[path_object]
name[path_object].parent_dir assign[=] name[self]
<ast.AugAssign object at 0x7da18dc066e0>
<ast.AugAssign object at 0x7da18dc04f40>
name[path_object].st_dev assign[=] name[self].st_dev
if compare[name[path_object].st_nlink equal[==] constant[1]] begin[:]
call[name[self].filesystem.change_disk_usage, parameter[name[path_object].size, name[path_object].name, name[self].st_dev]] | keyword[def] identifier[add_entry] ( identifier[self] , identifier[path_object] ):
literal[string]
keyword[if] ( keyword[not] identifier[is_root] () keyword[and] keyword[not] identifier[self] . identifier[st_mode] & identifier[PERM_WRITE] keyword[and]
keyword[not] identifier[self] . identifier[filesystem] . identifier[is_windows_fs] ):
identifier[exception] = identifier[IOError] keyword[if] identifier[IS_PY2] keyword[else] identifier[OSError]
keyword[raise] identifier[exception] ( identifier[errno] . identifier[EACCES] , literal[string] , identifier[self] . identifier[path] )
keyword[if] identifier[path_object] . identifier[name] keyword[in] identifier[self] . identifier[contents] :
identifier[self] . identifier[filesystem] . identifier[raise_os_error] ( identifier[errno] . identifier[EEXIST] , identifier[self] . identifier[path] )
identifier[self] . identifier[contents] [ identifier[path_object] . identifier[name] ]= identifier[path_object]
identifier[path_object] . identifier[parent_dir] = identifier[self]
identifier[self] . identifier[st_nlink] += literal[int]
identifier[path_object] . identifier[st_nlink] += literal[int]
identifier[path_object] . identifier[st_dev] = identifier[self] . identifier[st_dev]
keyword[if] identifier[path_object] . identifier[st_nlink] == literal[int] :
identifier[self] . identifier[filesystem] . identifier[change_disk_usage] (
identifier[path_object] . identifier[size] , identifier[path_object] . identifier[name] , identifier[self] . identifier[st_dev] ) | def add_entry(self, path_object):
"""Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
"""
if not is_root() and (not self.st_mode & PERM_WRITE) and (not self.filesystem.is_windows_fs):
exception = IOError if IS_PY2 else OSError
raise exception(errno.EACCES, 'Permission Denied', self.path) # depends on [control=['if'], data=[]]
if path_object.name in self.contents:
self.filesystem.raise_os_error(errno.EEXIST, self.path) # depends on [control=['if'], data=[]]
self.contents[path_object.name] = path_object
path_object.parent_dir = self
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if path_object.st_nlink == 1:
self.filesystem.change_disk_usage(path_object.size, path_object.name, self.st_dev) # depends on [control=['if'], data=[]] |
def dataframes_to_variant_collection(
dataframes,
source_path,
info_parser=None,
only_passing=True,
max_variants=None,
sample_names=None,
sample_info_parser=None,
variant_kwargs={},
variant_collection_kwargs={}):
"""
Load a VariantCollection from an iterable of pandas dataframes.
This takes an iterable of dataframes instead of a single dataframe to avoid
having to load huge dataframes at once into memory. If you have a single
dataframe, just pass it in a single-element list.
Parameters
----------
dataframes
Iterable of dataframes (e.g. a generator). Expected columns are:
["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"]
and 'INFO' if `info_parser` is not Null. Columns must be in this
order.
source_path : str
Path of VCF file from which DataFrame chunks were generated.
info_parser : string -> object, optional
Callable to parse INFO strings.
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
max_variants : int, optional
If specified, return only the first max_variants variants.
sample_names : list of strings, optional
Sample names. The final columns of the dataframe should match these.
If specified, the per-sample info columns will be parsed. You must
also specify sample_info_parser.
sample_info_parser : string list * string -> dict, optional
Callable to parse per-sample info columns.
variant_kwargs : dict, optional
Additional keyword paramters to pass to Variant.__init__
variant_collection_kwargs : dict, optional
Additional keyword parameters to pass to VariantCollection.__init__.
"""
expected_columns = (
["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"] +
(["INFO"] if info_parser else []))
if info_parser and sample_names:
if sample_info_parser is None:
raise TypeError(
"Must specify sample_info_parser if specifying sample_names")
expected_columns.append("FORMAT")
expected_columns.extend(sample_names)
variants = []
metadata = {}
try:
for chunk in dataframes:
assert chunk.columns.tolist() == expected_columns,\
"dataframe columns (%s) do not match expected columns (%s)" % (
chunk.columns, expected_columns)
for tpl in chunk.itertuples():
(i, chrom, pos, id_, ref, alts, qual, flter) = tpl[:8]
if flter == ".":
flter = None
elif flter == "PASS":
flter = []
elif only_passing:
continue
else:
flter = flter.split(';')
if id_ == ".":
id_ = None
qual = float(qual) if qual != "." else None
alt_num = 0
info = sample_info = None
for alt in alts.split(","):
if alt != ".":
if info_parser is not None and info is None:
info = info_parser(tpl[8]) # INFO column
if sample_names:
# Sample name -> field -> value dict.
sample_info = sample_info_parser(
list(tpl[10:]), # sample info columns
tpl[9], # FORMAT column
)
variant = Variant(
chrom,
int(pos), # want a Python int not numpy.int64
ref,
alt,
**variant_kwargs)
variants.append(variant)
metadata[variant] = {
'id': id_,
'qual': qual,
'filter': flter,
'info': info,
'sample_info': sample_info,
'alt_allele_index': alt_num,
}
if max_variants and len(variants) > max_variants:
raise StopIteration
alt_num += 1
except StopIteration:
pass
return VariantCollection(
variants=variants,
source_to_metadata_dict={source_path: metadata},
**variant_collection_kwargs) | def function[dataframes_to_variant_collection, parameter[dataframes, source_path, info_parser, only_passing, max_variants, sample_names, sample_info_parser, variant_kwargs, variant_collection_kwargs]]:
constant[
Load a VariantCollection from an iterable of pandas dataframes.
This takes an iterable of dataframes instead of a single dataframe to avoid
having to load huge dataframes at once into memory. If you have a single
dataframe, just pass it in a single-element list.
Parameters
----------
dataframes
Iterable of dataframes (e.g. a generator). Expected columns are:
["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"]
and 'INFO' if `info_parser` is not Null. Columns must be in this
order.
source_path : str
Path of VCF file from which DataFrame chunks were generated.
info_parser : string -> object, optional
Callable to parse INFO strings.
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
max_variants : int, optional
If specified, return only the first max_variants variants.
sample_names : list of strings, optional
Sample names. The final columns of the dataframe should match these.
If specified, the per-sample info columns will be parsed. You must
also specify sample_info_parser.
sample_info_parser : string list * string -> dict, optional
Callable to parse per-sample info columns.
variant_kwargs : dict, optional
Additional keyword paramters to pass to Variant.__init__
variant_collection_kwargs : dict, optional
Additional keyword parameters to pass to VariantCollection.__init__.
]
variable[expected_columns] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b04d3460>, <ast.Constant object at 0x7da1b04d0fa0>, <ast.Constant object at 0x7da1b04d3a60>, <ast.Constant object at 0x7da1b04d2fe0>, <ast.Constant object at 0x7da1b04d01f0>, <ast.Constant object at 0x7da1b04d1a50>, <ast.Constant object at 0x7da1b04d3010>]] + <ast.IfExp object at 0x7da1b04d0d60>]
if <ast.BoolOp object at 0x7da1b04f9030> begin[:]
if compare[name[sample_info_parser] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b04f8790>
call[name[expected_columns].append, parameter[constant[FORMAT]]]
call[name[expected_columns].extend, parameter[name[sample_names]]]
variable[variants] assign[=] list[[]]
variable[metadata] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b04fa260>
return[call[name[VariantCollection], parameter[]]] | keyword[def] identifier[dataframes_to_variant_collection] (
identifier[dataframes] ,
identifier[source_path] ,
identifier[info_parser] = keyword[None] ,
identifier[only_passing] = keyword[True] ,
identifier[max_variants] = keyword[None] ,
identifier[sample_names] = keyword[None] ,
identifier[sample_info_parser] = keyword[None] ,
identifier[variant_kwargs] ={},
identifier[variant_collection_kwargs] ={}):
literal[string]
identifier[expected_columns] =(
[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]+
([ literal[string] ] keyword[if] identifier[info_parser] keyword[else] []))
keyword[if] identifier[info_parser] keyword[and] identifier[sample_names] :
keyword[if] identifier[sample_info_parser] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] (
literal[string] )
identifier[expected_columns] . identifier[append] ( literal[string] )
identifier[expected_columns] . identifier[extend] ( identifier[sample_names] )
identifier[variants] =[]
identifier[metadata] ={}
keyword[try] :
keyword[for] identifier[chunk] keyword[in] identifier[dataframes] :
keyword[assert] identifier[chunk] . identifier[columns] . identifier[tolist] ()== identifier[expected_columns] , literal[string] %(
identifier[chunk] . identifier[columns] , identifier[expected_columns] )
keyword[for] identifier[tpl] keyword[in] identifier[chunk] . identifier[itertuples] ():
( identifier[i] , identifier[chrom] , identifier[pos] , identifier[id_] , identifier[ref] , identifier[alts] , identifier[qual] , identifier[flter] )= identifier[tpl] [: literal[int] ]
keyword[if] identifier[flter] == literal[string] :
identifier[flter] = keyword[None]
keyword[elif] identifier[flter] == literal[string] :
identifier[flter] =[]
keyword[elif] identifier[only_passing] :
keyword[continue]
keyword[else] :
identifier[flter] = identifier[flter] . identifier[split] ( literal[string] )
keyword[if] identifier[id_] == literal[string] :
identifier[id_] = keyword[None]
identifier[qual] = identifier[float] ( identifier[qual] ) keyword[if] identifier[qual] != literal[string] keyword[else] keyword[None]
identifier[alt_num] = literal[int]
identifier[info] = identifier[sample_info] = keyword[None]
keyword[for] identifier[alt] keyword[in] identifier[alts] . identifier[split] ( literal[string] ):
keyword[if] identifier[alt] != literal[string] :
keyword[if] identifier[info_parser] keyword[is] keyword[not] keyword[None] keyword[and] identifier[info] keyword[is] keyword[None] :
identifier[info] = identifier[info_parser] ( identifier[tpl] [ literal[int] ])
keyword[if] identifier[sample_names] :
identifier[sample_info] = identifier[sample_info_parser] (
identifier[list] ( identifier[tpl] [ literal[int] :]),
identifier[tpl] [ literal[int] ],
)
identifier[variant] = identifier[Variant] (
identifier[chrom] ,
identifier[int] ( identifier[pos] ),
identifier[ref] ,
identifier[alt] ,
** identifier[variant_kwargs] )
identifier[variants] . identifier[append] ( identifier[variant] )
identifier[metadata] [ identifier[variant] ]={
literal[string] : identifier[id_] ,
literal[string] : identifier[qual] ,
literal[string] : identifier[flter] ,
literal[string] : identifier[info] ,
literal[string] : identifier[sample_info] ,
literal[string] : identifier[alt_num] ,
}
keyword[if] identifier[max_variants] keyword[and] identifier[len] ( identifier[variants] )> identifier[max_variants] :
keyword[raise] identifier[StopIteration]
identifier[alt_num] += literal[int]
keyword[except] identifier[StopIteration] :
keyword[pass]
keyword[return] identifier[VariantCollection] (
identifier[variants] = identifier[variants] ,
identifier[source_to_metadata_dict] ={ identifier[source_path] : identifier[metadata] },
** identifier[variant_collection_kwargs] ) | def dataframes_to_variant_collection(dataframes, source_path, info_parser=None, only_passing=True, max_variants=None, sample_names=None, sample_info_parser=None, variant_kwargs={}, variant_collection_kwargs={}):
"""
Load a VariantCollection from an iterable of pandas dataframes.
This takes an iterable of dataframes instead of a single dataframe to avoid
having to load huge dataframes at once into memory. If you have a single
dataframe, just pass it in a single-element list.
Parameters
----------
dataframes
Iterable of dataframes (e.g. a generator). Expected columns are:
["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"]
and 'INFO' if `info_parser` is not Null. Columns must be in this
order.
source_path : str
Path of VCF file from which DataFrame chunks were generated.
info_parser : string -> object, optional
Callable to parse INFO strings.
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
max_variants : int, optional
If specified, return only the first max_variants variants.
sample_names : list of strings, optional
Sample names. The final columns of the dataframe should match these.
If specified, the per-sample info columns will be parsed. You must
also specify sample_info_parser.
sample_info_parser : string list * string -> dict, optional
Callable to parse per-sample info columns.
variant_kwargs : dict, optional
Additional keyword paramters to pass to Variant.__init__
variant_collection_kwargs : dict, optional
Additional keyword parameters to pass to VariantCollection.__init__.
"""
expected_columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER'] + (['INFO'] if info_parser else [])
if info_parser and sample_names:
if sample_info_parser is None:
raise TypeError('Must specify sample_info_parser if specifying sample_names') # depends on [control=['if'], data=[]]
expected_columns.append('FORMAT')
expected_columns.extend(sample_names) # depends on [control=['if'], data=[]]
variants = []
metadata = {}
try:
for chunk in dataframes:
assert chunk.columns.tolist() == expected_columns, 'dataframe columns (%s) do not match expected columns (%s)' % (chunk.columns, expected_columns)
for tpl in chunk.itertuples():
(i, chrom, pos, id_, ref, alts, qual, flter) = tpl[:8]
if flter == '.':
flter = None # depends on [control=['if'], data=['flter']]
elif flter == 'PASS':
flter = [] # depends on [control=['if'], data=['flter']]
elif only_passing:
continue # depends on [control=['if'], data=[]]
else:
flter = flter.split(';')
if id_ == '.':
id_ = None # depends on [control=['if'], data=['id_']]
qual = float(qual) if qual != '.' else None
alt_num = 0
info = sample_info = None
for alt in alts.split(','):
if alt != '.':
if info_parser is not None and info is None:
info = info_parser(tpl[8]) # INFO column
if sample_names:
# Sample name -> field -> value dict.
# sample info columns
# FORMAT column
sample_info = sample_info_parser(list(tpl[10:]), tpl[9]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # want a Python int not numpy.int64
variant = Variant(chrom, int(pos), ref, alt, **variant_kwargs)
variants.append(variant)
metadata[variant] = {'id': id_, 'qual': qual, 'filter': flter, 'info': info, 'sample_info': sample_info, 'alt_allele_index': alt_num}
if max_variants and len(variants) > max_variants:
raise StopIteration # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['alt']]
alt_num += 1 # depends on [control=['for'], data=['alt']] # depends on [control=['for'], data=['tpl']] # depends on [control=['for'], data=['chunk']] # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]]
return VariantCollection(variants=variants, source_to_metadata_dict={source_path: metadata}, **variant_collection_kwargs) |
def start(self):
"""Start the background emulation loop."""
if self._started is True:
raise ArgumentError("EmulationLoop.start() called multiple times")
self._thread = threading.Thread(target=self._loop_thread_main)
self._thread.start()
self._started = True | def function[start, parameter[self]]:
constant[Start the background emulation loop.]
if compare[name[self]._started is constant[True]] begin[:]
<ast.Raise object at 0x7da18f721150>
name[self]._thread assign[=] call[name[threading].Thread, parameter[]]
call[name[self]._thread.start, parameter[]]
name[self]._started assign[=] constant[True] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_started] keyword[is] keyword[True] :
keyword[raise] identifier[ArgumentError] ( literal[string] )
identifier[self] . identifier[_thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[_loop_thread_main] )
identifier[self] . identifier[_thread] . identifier[start] ()
identifier[self] . identifier[_started] = keyword[True] | def start(self):
"""Start the background emulation loop."""
if self._started is True:
raise ArgumentError('EmulationLoop.start() called multiple times') # depends on [control=['if'], data=[]]
self._thread = threading.Thread(target=self._loop_thread_main)
self._thread.start()
self._started = True |
def make_tasker(func):
"""make_tasker takes a callable (function, method, etc.) and returns
a new factory function for generating tasks. Each factory function is
designed to consume its arguments and return a task that, when executed,
will call the function upon the arguments.
TODO: deprecate this and just use FuncTask, which is easier to
understand--must change a number of programs first.
"""
def anonFunc(*args, **kwdargs):
class anonTask(Task):
def execute(self):
self.logger.debug("Executing fn %s" % func)
try:
val = func(*args, **kwdargs)
self.logger.debug("Done executing fn %s" % func)
return val
except Exception as e:
# Log error message and re-raise exception.
self.logger.error("fn %s raised exception: %s" % (
func, str(e)))
raise e
return anonTask()
return anonFunc | def function[make_tasker, parameter[func]]:
constant[make_tasker takes a callable (function, method, etc.) and returns
a new factory function for generating tasks. Each factory function is
designed to consume its arguments and return a task that, when executed,
will call the function upon the arguments.
TODO: deprecate this and just use FuncTask, which is easier to
understand--must change a number of programs first.
]
def function[anonFunc, parameter[]]:
class class[anonTask, parameter[]] begin[:]
def function[execute, parameter[self]]:
call[name[self].logger.debug, parameter[binary_operation[constant[Executing fn %s] <ast.Mod object at 0x7da2590d6920> name[func]]]]
<ast.Try object at 0x7da1b0c27460>
return[call[name[anonTask], parameter[]]]
return[name[anonFunc]] | keyword[def] identifier[make_tasker] ( identifier[func] ):
literal[string]
keyword[def] identifier[anonFunc] (* identifier[args] ,** identifier[kwdargs] ):
keyword[class] identifier[anonTask] ( identifier[Task] ):
keyword[def] identifier[execute] ( identifier[self] ):
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[func] )
keyword[try] :
identifier[val] = identifier[func] (* identifier[args] ,** identifier[kwdargs] )
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[func] )
keyword[return] identifier[val]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] %(
identifier[func] , identifier[str] ( identifier[e] )))
keyword[raise] identifier[e]
keyword[return] identifier[anonTask] ()
keyword[return] identifier[anonFunc] | def make_tasker(func):
"""make_tasker takes a callable (function, method, etc.) and returns
a new factory function for generating tasks. Each factory function is
designed to consume its arguments and return a task that, when executed,
will call the function upon the arguments.
TODO: deprecate this and just use FuncTask, which is easier to
understand--must change a number of programs first.
"""
def anonFunc(*args, **kwdargs):
class anonTask(Task):
def execute(self):
self.logger.debug('Executing fn %s' % func)
try:
val = func(*args, **kwdargs)
self.logger.debug('Done executing fn %s' % func)
return val # depends on [control=['try'], data=[]]
except Exception as e:
# Log error message and re-raise exception.
self.logger.error('fn %s raised exception: %s' % (func, str(e)))
raise e # depends on [control=['except'], data=['e']]
return anonTask()
return anonFunc |
def to_html(self, wrap_slash=False):
"""Render a Text MessageElement as html.
:param wrap_slash: Whether to replace slashes with the slash plus the
html <wbr> tag which will help to e.g. wrap html in small cells if
it contains a long filename. Disabled by default as it may cause
side effects if the text contains html markup.
:type wrap_slash: bool
:returns: Html representation of the Text MessageElement.
:rtype: str
"""
if self.text is None:
return
else:
text = ''
for t in self.text:
text += t.to_html() + ' '
text = ' '.join(text.split())
if wrap_slash:
# This is a hack to make text wrappable with long filenames TS 3.3
text = text.replace('/', '/<wbr>')
text = text.replace('\\', '\\<wbr>')
return text | def function[to_html, parameter[self, wrap_slash]]:
constant[Render a Text MessageElement as html.
:param wrap_slash: Whether to replace slashes with the slash plus the
html <wbr> tag which will help to e.g. wrap html in small cells if
it contains a long filename. Disabled by default as it may cause
side effects if the text contains html markup.
:type wrap_slash: bool
:returns: Html representation of the Text MessageElement.
:rtype: str
]
if compare[name[self].text is constant[None]] begin[:]
return[None]
if name[wrap_slash] begin[:]
variable[text] assign[=] call[name[text].replace, parameter[constant[/], constant[/<wbr>]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[\], constant[\<wbr>]]]
return[name[text]] | keyword[def] identifier[to_html] ( identifier[self] , identifier[wrap_slash] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[text] keyword[is] keyword[None] :
keyword[return]
keyword[else] :
identifier[text] = literal[string]
keyword[for] identifier[t] keyword[in] identifier[self] . identifier[text] :
identifier[text] += identifier[t] . identifier[to_html] ()+ literal[string]
identifier[text] = literal[string] . identifier[join] ( identifier[text] . identifier[split] ())
keyword[if] identifier[wrap_slash] :
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[text] | def to_html(self, wrap_slash=False):
"""Render a Text MessageElement as html.
:param wrap_slash: Whether to replace slashes with the slash plus the
html <wbr> tag which will help to e.g. wrap html in small cells if
it contains a long filename. Disabled by default as it may cause
side effects if the text contains html markup.
:type wrap_slash: bool
:returns: Html representation of the Text MessageElement.
:rtype: str
"""
if self.text is None:
return # depends on [control=['if'], data=[]]
else:
text = ''
for t in self.text:
text += t.to_html() + ' ' # depends on [control=['for'], data=['t']]
text = ' '.join(text.split())
if wrap_slash:
# This is a hack to make text wrappable with long filenames TS 3.3
text = text.replace('/', '/<wbr>')
text = text.replace('\\', '\\<wbr>') # depends on [control=['if'], data=[]]
return text |
def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has not install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | def function[set_install_id, parameter[filename, install_id]]:
constant[ Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has not install id
]
if compare[call[name[get_install_id], parameter[name[filename]]] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f58eda0>
call[name[back_tick], parameter[list[[<ast.Constant object at 0x7da18f58ed70>, <ast.Constant object at 0x7da18f58e170>, <ast.Name object at 0x7da18f58ece0>, <ast.Name object at 0x7da18f58f250>]]]] | keyword[def] identifier[set_install_id] ( identifier[filename] , identifier[install_id] ):
literal[string]
keyword[if] identifier[get_install_id] ( identifier[filename] ) keyword[is] keyword[None] :
keyword[raise] identifier[InstallNameError] ( literal[string] . identifier[format] ( identifier[filename] ))
identifier[back_tick] ([ literal[string] , literal[string] , identifier[install_id] , identifier[filename] ]) | def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has not install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename)) # depends on [control=['if'], data=[]]
back_tick(['install_name_tool', '-id', install_id, filename]) |
def run(self, host, port, debug=True, validate_requests=True):
"""Utility method to quickly get a server up and running.
:param debug: turns on Werkzeug debugger, code reloading, and full
logging.
:param validate_requests: whether or not to ensure that requests are
sent by Amazon. This can be usefulfor manually testing the server.
"""
if debug:
# Turn on all alexandra log output
logging.basicConfig(level=logging.DEBUG)
app = self.create_wsgi_app(validate_requests)
run_simple(host, port, app, use_reloader=debug, use_debugger=debug) | def function[run, parameter[self, host, port, debug, validate_requests]]:
constant[Utility method to quickly get a server up and running.
:param debug: turns on Werkzeug debugger, code reloading, and full
logging.
:param validate_requests: whether or not to ensure that requests are
sent by Amazon. This can be usefulfor manually testing the server.
]
if name[debug] begin[:]
call[name[logging].basicConfig, parameter[]]
variable[app] assign[=] call[name[self].create_wsgi_app, parameter[name[validate_requests]]]
call[name[run_simple], parameter[name[host], name[port], name[app]]] | keyword[def] identifier[run] ( identifier[self] , identifier[host] , identifier[port] , identifier[debug] = keyword[True] , identifier[validate_requests] = keyword[True] ):
literal[string]
keyword[if] identifier[debug] :
identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[DEBUG] )
identifier[app] = identifier[self] . identifier[create_wsgi_app] ( identifier[validate_requests] )
identifier[run_simple] ( identifier[host] , identifier[port] , identifier[app] , identifier[use_reloader] = identifier[debug] , identifier[use_debugger] = identifier[debug] ) | def run(self, host, port, debug=True, validate_requests=True):
"""Utility method to quickly get a server up and running.
:param debug: turns on Werkzeug debugger, code reloading, and full
logging.
:param validate_requests: whether or not to ensure that requests are
sent by Amazon. This can be usefulfor manually testing the server.
"""
if debug:
# Turn on all alexandra log output
logging.basicConfig(level=logging.DEBUG) # depends on [control=['if'], data=[]]
app = self.create_wsgi_app(validate_requests)
run_simple(host, port, app, use_reloader=debug, use_debugger=debug) |
def merge_ids(self, token, channel, ids, delete=False):
"""
Call the restful endpoint to merge two RAMON objects into one.
Arguments:
token (str): The token to inspect
channel (str): The channel to inspect
ids (int[]): the list of the IDs to merge
delete (bool : False): Whether to delete after merging.
Returns:
json: The ID as returned by ndstore
"""
url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids]))
req = self.remote_utils.get_url(url)
if req.status_code is not 200:
raise RemoteDataUploadError('Could not merge ids {}'.format(
','.join([str(i) for i in ids])))
if delete:
self.delete_ramon(token, channel, ids[1:])
return True | def function[merge_ids, parameter[self, token, channel, ids, delete]]:
constant[
Call the restful endpoint to merge two RAMON objects into one.
Arguments:
token (str): The token to inspect
channel (str): The channel to inspect
ids (int[]): the list of the IDs to merge
delete (bool : False): Whether to delete after merging.
Returns:
json: The ID as returned by ndstore
]
variable[url] assign[=] binary_operation[call[name[self].url, parameter[]] + call[constant[/merge/{}/].format, parameter[call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b02b8f70>]]]]]
variable[req] assign[=] call[name[self].remote_utils.get_url, parameter[name[url]]]
if compare[name[req].status_code is_not constant[200]] begin[:]
<ast.Raise object at 0x7da1b020c220>
if name[delete] begin[:]
call[name[self].delete_ramon, parameter[name[token], name[channel], call[name[ids]][<ast.Slice object at 0x7da1b020e1d0>]]]
return[constant[True]] | keyword[def] identifier[merge_ids] ( identifier[self] , identifier[token] , identifier[channel] , identifier[ids] , identifier[delete] = keyword[False] ):
literal[string]
identifier[url] = identifier[self] . identifier[url] ()+ literal[string] . identifier[format] ( literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[ids] ]))
identifier[req] = identifier[self] . identifier[remote_utils] . identifier[get_url] ( identifier[url] )
keyword[if] identifier[req] . identifier[status_code] keyword[is] keyword[not] literal[int] :
keyword[raise] identifier[RemoteDataUploadError] ( literal[string] . identifier[format] (
literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[ids] ])))
keyword[if] identifier[delete] :
identifier[self] . identifier[delete_ramon] ( identifier[token] , identifier[channel] , identifier[ids] [ literal[int] :])
keyword[return] keyword[True] | def merge_ids(self, token, channel, ids, delete=False):
"""
Call the restful endpoint to merge two RAMON objects into one.
Arguments:
token (str): The token to inspect
channel (str): The channel to inspect
ids (int[]): the list of the IDs to merge
delete (bool : False): Whether to delete after merging.
Returns:
json: The ID as returned by ndstore
"""
url = self.url() + '/merge/{}/'.format(','.join([str(i) for i in ids]))
req = self.remote_utils.get_url(url)
if req.status_code is not 200:
raise RemoteDataUploadError('Could not merge ids {}'.format(','.join([str(i) for i in ids]))) # depends on [control=['if'], data=[]]
if delete:
self.delete_ramon(token, channel, ids[1:]) # depends on [control=['if'], data=[]]
return True |
def fill_gaps(lat,lon,sla,mask,remove_edges=False):
"""
# FILL_GAPS
# @summary: This function allow interpolating data in gaps, depending on gap size. Data must be regularly gridded
# @param lat {type:numeric} : latitude
# @param lon {type:numeric} : longitude
# @param sla {type:numeric} : data
# @return:
# outdst : resampled distance
# outlon : resampled longitude
# outlat : resampled latitude
# outsla : resampled data
# gaplen : length of the longest gap in data
# ngaps : number of detected gaps in data
# dx : average spatial sampling
# interpolated : True when data was interpolated (empty bin)
#
# @author: Renaud DUSSURGET (RD) - LER/PAC, Ifremer
# @change: Created by RD, July 2012
# 29/08/2012 : Major change -> number of output variables changes (added INTERPOLATED), and rebinning modified
# 06/11/2012 : Included in alti_tools lib
"""
dst=calcul_distance(lat,lon)
#Find gaps in data
dx = dst[1:] - dst[:-1]
mn_dx = np.median(dx)
nx=len(sla)
flag=~mask
#Get filled bins indices
outsla = sla.copy()
outlon = lon.copy()
outlat = lat.copy()
outind = np.arange(nx)
#Replace missing data on edges by the latest valid point
first=np.where((flag))[0].min()
last=np.where((flag))[0].max()
if remove_edges :
outsla=outsla[first:last+1]
outlon=outlon[first:last+1]
outlat=outlat[first:last+1]
outind=outind[first:last+1]
mask=mask[first:last+1]
flag=flag[first:last+1]
else:
outsla[0:first] = outsla[first]
outsla[last:] = outsla[last]
#Get gap properties
hist=np.ones(nx,dtype=int)
hist[outsla.mask]=0
while hist[0] == 0 :
hist=np.delete(hist,[0])
while hist[-1] == 0 :
hist=np.delete(hist,[len(hist)-1])
ind=np.arange(len(hist))
dhist=(hist[1:] - hist[:-1])
st=ind.compress(dhist==-1)+1
en=ind.compress(dhist==1)
gaplen=(en-st) + 1
ngaps=len(st)
gapedges=np.array([st,en])
ok = np.where(flag)[0]
empty = np.where(mask)[0]
#Fill the gaps if there are some
if len(empty) > 0 :
#Interpolate lon,lat @ empty positions
outsla[empty] = interp1d(ok, outsla[ok], empty)
#Get empty bin flag
interpolated=~hist.astype('bool')
return outsla, outlon, outlat, outind, ngaps, gapedges, gaplen, interpolated | def function[fill_gaps, parameter[lat, lon, sla, mask, remove_edges]]:
constant[
# FILL_GAPS
# @summary: This function allow interpolating data in gaps, depending on gap size. Data must be regularly gridded
# @param lat {type:numeric} : latitude
# @param lon {type:numeric} : longitude
# @param sla {type:numeric} : data
# @return:
# outdst : resampled distance
# outlon : resampled longitude
# outlat : resampled latitude
# outsla : resampled data
# gaplen : length of the longest gap in data
# ngaps : number of detected gaps in data
# dx : average spatial sampling
# interpolated : True when data was interpolated (empty bin)
#
# @author: Renaud DUSSURGET (RD) - LER/PAC, Ifremer
# @change: Created by RD, July 2012
# 29/08/2012 : Major change -> number of output variables changes (added INTERPOLATED), and rebinning modified
# 06/11/2012 : Included in alti_tools lib
]
variable[dst] assign[=] call[name[calcul_distance], parameter[name[lat], name[lon]]]
variable[dx] assign[=] binary_operation[call[name[dst]][<ast.Slice object at 0x7da1b094b5b0>] - call[name[dst]][<ast.Slice object at 0x7da1b094b490>]]
variable[mn_dx] assign[=] call[name[np].median, parameter[name[dx]]]
variable[nx] assign[=] call[name[len], parameter[name[sla]]]
variable[flag] assign[=] <ast.UnaryOp object at 0x7da1b094b730>
variable[outsla] assign[=] call[name[sla].copy, parameter[]]
variable[outlon] assign[=] call[name[lon].copy, parameter[]]
variable[outlat] assign[=] call[name[lat].copy, parameter[]]
variable[outind] assign[=] call[name[np].arange, parameter[name[nx]]]
variable[first] assign[=] call[call[call[name[np].where, parameter[name[flag]]]][constant[0]].min, parameter[]]
variable[last] assign[=] call[call[call[name[np].where, parameter[name[flag]]]][constant[0]].max, parameter[]]
if name[remove_edges] begin[:]
variable[outsla] assign[=] call[name[outsla]][<ast.Slice object at 0x7da1b094a350>]
variable[outlon] assign[=] call[name[outlon]][<ast.Slice object at 0x7da1b0948400>]
variable[outlat] assign[=] call[name[outlat]][<ast.Slice object at 0x7da1b0949b10>]
variable[outind] assign[=] call[name[outind]][<ast.Slice object at 0x7da1b0949ab0>]
variable[mask] assign[=] call[name[mask]][<ast.Slice object at 0x7da1b0949390>]
variable[flag] assign[=] call[name[flag]][<ast.Slice object at 0x7da1b0949540>]
variable[hist] assign[=] call[name[np].ones, parameter[name[nx]]]
call[name[hist]][name[outsla].mask] assign[=] constant[0]
while compare[call[name[hist]][constant[0]] equal[==] constant[0]] begin[:]
variable[hist] assign[=] call[name[np].delete, parameter[name[hist], list[[<ast.Constant object at 0x7da1b094a5c0>]]]]
while compare[call[name[hist]][<ast.UnaryOp object at 0x7da1b0948ac0>] equal[==] constant[0]] begin[:]
variable[hist] assign[=] call[name[np].delete, parameter[name[hist], list[[<ast.BinOp object at 0x7da1b0948880>]]]]
variable[ind] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[hist]]]]]
variable[dhist] assign[=] binary_operation[call[name[hist]][<ast.Slice object at 0x7da1b09488e0>] - call[name[hist]][<ast.Slice object at 0x7da1b0949120>]]
variable[st] assign[=] binary_operation[call[name[ind].compress, parameter[compare[name[dhist] equal[==] <ast.UnaryOp object at 0x7da1b094aa10>]]] + constant[1]]
variable[en] assign[=] call[name[ind].compress, parameter[compare[name[dhist] equal[==] constant[1]]]]
variable[gaplen] assign[=] binary_operation[binary_operation[name[en] - name[st]] + constant[1]]
variable[ngaps] assign[=] call[name[len], parameter[name[st]]]
variable[gapedges] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b09498a0>, <ast.Name object at 0x7da1b0949900>]]]]
variable[ok] assign[=] call[call[name[np].where, parameter[name[flag]]]][constant[0]]
variable[empty] assign[=] call[call[name[np].where, parameter[name[mask]]]][constant[0]]
if compare[call[name[len], parameter[name[empty]]] greater[>] constant[0]] begin[:]
call[name[outsla]][name[empty]] assign[=] call[name[interp1d], parameter[name[ok], call[name[outsla]][name[ok]], name[empty]]]
variable[interpolated] assign[=] <ast.UnaryOp object at 0x7da1b0807e20>
return[tuple[[<ast.Name object at 0x7da1b0804160>, <ast.Name object at 0x7da1b08048b0>, <ast.Name object at 0x7da1b0805030>, <ast.Name object at 0x7da1b0807bb0>, <ast.Name object at 0x7da1b0804c10>, <ast.Name object at 0x7da1b0805270>, <ast.Name object at 0x7da1b0804b50>, <ast.Name object at 0x7da1b0805210>]]] | keyword[def] identifier[fill_gaps] ( identifier[lat] , identifier[lon] , identifier[sla] , identifier[mask] , identifier[remove_edges] = keyword[False] ):
literal[string]
identifier[dst] = identifier[calcul_distance] ( identifier[lat] , identifier[lon] )
identifier[dx] = identifier[dst] [ literal[int] :]- identifier[dst] [:- literal[int] ]
identifier[mn_dx] = identifier[np] . identifier[median] ( identifier[dx] )
identifier[nx] = identifier[len] ( identifier[sla] )
identifier[flag] =~ identifier[mask]
identifier[outsla] = identifier[sla] . identifier[copy] ()
identifier[outlon] = identifier[lon] . identifier[copy] ()
identifier[outlat] = identifier[lat] . identifier[copy] ()
identifier[outind] = identifier[np] . identifier[arange] ( identifier[nx] )
identifier[first] = identifier[np] . identifier[where] (( identifier[flag] ))[ literal[int] ]. identifier[min] ()
identifier[last] = identifier[np] . identifier[where] (( identifier[flag] ))[ literal[int] ]. identifier[max] ()
keyword[if] identifier[remove_edges] :
identifier[outsla] = identifier[outsla] [ identifier[first] : identifier[last] + literal[int] ]
identifier[outlon] = identifier[outlon] [ identifier[first] : identifier[last] + literal[int] ]
identifier[outlat] = identifier[outlat] [ identifier[first] : identifier[last] + literal[int] ]
identifier[outind] = identifier[outind] [ identifier[first] : identifier[last] + literal[int] ]
identifier[mask] = identifier[mask] [ identifier[first] : identifier[last] + literal[int] ]
identifier[flag] = identifier[flag] [ identifier[first] : identifier[last] + literal[int] ]
keyword[else] :
identifier[outsla] [ literal[int] : identifier[first] ]= identifier[outsla] [ identifier[first] ]
identifier[outsla] [ identifier[last] :]= identifier[outsla] [ identifier[last] ]
identifier[hist] = identifier[np] . identifier[ones] ( identifier[nx] , identifier[dtype] = identifier[int] )
identifier[hist] [ identifier[outsla] . identifier[mask] ]= literal[int]
keyword[while] identifier[hist] [ literal[int] ]== literal[int] :
identifier[hist] = identifier[np] . identifier[delete] ( identifier[hist] ,[ literal[int] ])
keyword[while] identifier[hist] [- literal[int] ]== literal[int] :
identifier[hist] = identifier[np] . identifier[delete] ( identifier[hist] ,[ identifier[len] ( identifier[hist] )- literal[int] ])
identifier[ind] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[hist] ))
identifier[dhist] =( identifier[hist] [ literal[int] :]- identifier[hist] [:- literal[int] ])
identifier[st] = identifier[ind] . identifier[compress] ( identifier[dhist] ==- literal[int] )+ literal[int]
identifier[en] = identifier[ind] . identifier[compress] ( identifier[dhist] == literal[int] )
identifier[gaplen] =( identifier[en] - identifier[st] )+ literal[int]
identifier[ngaps] = identifier[len] ( identifier[st] )
identifier[gapedges] = identifier[np] . identifier[array] ([ identifier[st] , identifier[en] ])
identifier[ok] = identifier[np] . identifier[where] ( identifier[flag] )[ literal[int] ]
identifier[empty] = identifier[np] . identifier[where] ( identifier[mask] )[ literal[int] ]
keyword[if] identifier[len] ( identifier[empty] )> literal[int] :
identifier[outsla] [ identifier[empty] ]= identifier[interp1d] ( identifier[ok] , identifier[outsla] [ identifier[ok] ], identifier[empty] )
identifier[interpolated] =~ identifier[hist] . identifier[astype] ( literal[string] )
keyword[return] identifier[outsla] , identifier[outlon] , identifier[outlat] , identifier[outind] , identifier[ngaps] , identifier[gapedges] , identifier[gaplen] , identifier[interpolated] | def fill_gaps(lat, lon, sla, mask, remove_edges=False):
"""
# FILL_GAPS
# @summary: This function allow interpolating data in gaps, depending on gap size. Data must be regularly gridded
# @param lat {type:numeric} : latitude
# @param lon {type:numeric} : longitude
# @param sla {type:numeric} : data
# @return:
# outdst : resampled distance
# outlon : resampled longitude
# outlat : resampled latitude
# outsla : resampled data
# gaplen : length of the longest gap in data
# ngaps : number of detected gaps in data
# dx : average spatial sampling
# interpolated : True when data was interpolated (empty bin)
#
# @author: Renaud DUSSURGET (RD) - LER/PAC, Ifremer
# @change: Created by RD, July 2012
# 29/08/2012 : Major change -> number of output variables changes (added INTERPOLATED), and rebinning modified
# 06/11/2012 : Included in alti_tools lib
"""
dst = calcul_distance(lat, lon) #Find gaps in data
dx = dst[1:] - dst[:-1]
mn_dx = np.median(dx)
nx = len(sla)
flag = ~mask #Get filled bins indices
outsla = sla.copy()
outlon = lon.copy()
outlat = lat.copy()
outind = np.arange(nx) #Replace missing data on edges by the latest valid point
first = np.where(flag)[0].min()
last = np.where(flag)[0].max()
if remove_edges:
outsla = outsla[first:last + 1]
outlon = outlon[first:last + 1]
outlat = outlat[first:last + 1]
outind = outind[first:last + 1]
mask = mask[first:last + 1]
flag = flag[first:last + 1] # depends on [control=['if'], data=[]]
else:
outsla[0:first] = outsla[first]
outsla[last:] = outsla[last] #Get gap properties
hist = np.ones(nx, dtype=int)
hist[outsla.mask] = 0
while hist[0] == 0:
hist = np.delete(hist, [0]) # depends on [control=['while'], data=[]]
while hist[-1] == 0:
hist = np.delete(hist, [len(hist) - 1]) # depends on [control=['while'], data=[]]
ind = np.arange(len(hist))
dhist = hist[1:] - hist[:-1]
st = ind.compress(dhist == -1) + 1
en = ind.compress(dhist == 1)
gaplen = en - st + 1
ngaps = len(st)
gapedges = np.array([st, en])
ok = np.where(flag)[0]
empty = np.where(mask)[0] #Fill the gaps if there are some
if len(empty) > 0: #Interpolate lon,lat @ empty positions
outsla[empty] = interp1d(ok, outsla[ok], empty) # depends on [control=['if'], data=[]] #Get empty bin flag
interpolated = ~hist.astype('bool')
return (outsla, outlon, outlat, outind, ngaps, gapedges, gaplen, interpolated) |
def fetchPageInfo(self, *page_ids):
"""
Get pages' info from IDs, unordered
.. warning::
Sends two requests, to fetch all available info!
:param page_ids: One or more page ID(s) to query
:return: :class:`models.Page` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed
"""
threads = self.fetchThreadInfo(*page_ids)
pages = {}
for id_, thread in threads.items():
if thread.type == ThreadType.PAGE:
pages[id_] = thread
else:
raise FBchatUserError("Thread {} was not a page".format(thread))
return pages | def function[fetchPageInfo, parameter[self]]:
constant[
Get pages' info from IDs, unordered
.. warning::
Sends two requests, to fetch all available info!
:param page_ids: One or more page ID(s) to query
:return: :class:`models.Page` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed
]
variable[threads] assign[=] call[name[self].fetchThreadInfo, parameter[<ast.Starred object at 0x7da1b19cdb70>]]
variable[pages] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b19cd6c0>, <ast.Name object at 0x7da1b19cc1c0>]]] in starred[call[name[threads].items, parameter[]]] begin[:]
if compare[name[thread].type equal[==] name[ThreadType].PAGE] begin[:]
call[name[pages]][name[id_]] assign[=] name[thread]
return[name[pages]] | keyword[def] identifier[fetchPageInfo] ( identifier[self] ,* identifier[page_ids] ):
literal[string]
identifier[threads] = identifier[self] . identifier[fetchThreadInfo] (* identifier[page_ids] )
identifier[pages] ={}
keyword[for] identifier[id_] , identifier[thread] keyword[in] identifier[threads] . identifier[items] ():
keyword[if] identifier[thread] . identifier[type] == identifier[ThreadType] . identifier[PAGE] :
identifier[pages] [ identifier[id_] ]= identifier[thread]
keyword[else] :
keyword[raise] identifier[FBchatUserError] ( literal[string] . identifier[format] ( identifier[thread] ))
keyword[return] identifier[pages] | def fetchPageInfo(self, *page_ids):
"""
Get pages' info from IDs, unordered
.. warning::
Sends two requests, to fetch all available info!
:param page_ids: One or more page ID(s) to query
:return: :class:`models.Page` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed
"""
threads = self.fetchThreadInfo(*page_ids)
pages = {}
for (id_, thread) in threads.items():
if thread.type == ThreadType.PAGE:
pages[id_] = thread # depends on [control=['if'], data=[]]
else:
raise FBchatUserError('Thread {} was not a page'.format(thread)) # depends on [control=['for'], data=[]]
return pages |
def enable_host_svc_notifications(self, host):
"""Enable services notifications for a host
Format of the line that triggers function call::
ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for service_id in host.services:
if service_id in self.daemon.services:
service = self.daemon.services[service_id]
self.enable_svc_notifications(service)
self.send_an_element(service.get_update_status_brok()) | def function[enable_host_svc_notifications, parameter[self, host]]:
constant[Enable services notifications for a host
Format of the line that triggers function call::
ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
]
for taget[name[service_id]] in starred[name[host].services] begin[:]
if compare[name[service_id] in name[self].daemon.services] begin[:]
variable[service] assign[=] call[name[self].daemon.services][name[service_id]]
call[name[self].enable_svc_notifications, parameter[name[service]]]
call[name[self].send_an_element, parameter[call[name[service].get_update_status_brok, parameter[]]]] | keyword[def] identifier[enable_host_svc_notifications] ( identifier[self] , identifier[host] ):
literal[string]
keyword[for] identifier[service_id] keyword[in] identifier[host] . identifier[services] :
keyword[if] identifier[service_id] keyword[in] identifier[self] . identifier[daemon] . identifier[services] :
identifier[service] = identifier[self] . identifier[daemon] . identifier[services] [ identifier[service_id] ]
identifier[self] . identifier[enable_svc_notifications] ( identifier[service] )
identifier[self] . identifier[send_an_element] ( identifier[service] . identifier[get_update_status_brok] ()) | def enable_host_svc_notifications(self, host):
"""Enable services notifications for a host
Format of the line that triggers function call::
ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for service_id in host.services:
if service_id in self.daemon.services:
service = self.daemon.services[service_id]
self.enable_svc_notifications(service)
self.send_an_element(service.get_update_status_brok()) # depends on [control=['if'], data=['service_id']] # depends on [control=['for'], data=['service_id']] |
def _save_cache(self, filename, section_number_of_pages, page_references):
"""Save the current state of the page references to `<filename>.rtc`"""
cache_path = Path(filename).with_suffix(self.CACHE_EXTENSION)
with cache_path.open('wb') as file:
cache = (section_number_of_pages, page_references)
pickle.dump(cache, file) | def function[_save_cache, parameter[self, filename, section_number_of_pages, page_references]]:
constant[Save the current state of the page references to `<filename>.rtc`]
variable[cache_path] assign[=] call[call[name[Path], parameter[name[filename]]].with_suffix, parameter[name[self].CACHE_EXTENSION]]
with call[name[cache_path].open, parameter[constant[wb]]] begin[:]
variable[cache] assign[=] tuple[[<ast.Name object at 0x7da18f58d1e0>, <ast.Name object at 0x7da18f58ebc0>]]
call[name[pickle].dump, parameter[name[cache], name[file]]] | keyword[def] identifier[_save_cache] ( identifier[self] , identifier[filename] , identifier[section_number_of_pages] , identifier[page_references] ):
literal[string]
identifier[cache_path] = identifier[Path] ( identifier[filename] ). identifier[with_suffix] ( identifier[self] . identifier[CACHE_EXTENSION] )
keyword[with] identifier[cache_path] . identifier[open] ( literal[string] ) keyword[as] identifier[file] :
identifier[cache] =( identifier[section_number_of_pages] , identifier[page_references] )
identifier[pickle] . identifier[dump] ( identifier[cache] , identifier[file] ) | def _save_cache(self, filename, section_number_of_pages, page_references):
"""Save the current state of the page references to `<filename>.rtc`"""
cache_path = Path(filename).with_suffix(self.CACHE_EXTENSION)
with cache_path.open('wb') as file:
cache = (section_number_of_pages, page_references)
pickle.dump(cache, file) # depends on [control=['with'], data=['file']] |
def rs(self):
    """
    Return the radius of each element (vertices or centers, depending on
    the mesh setting) measured from the center of the star.

    NOTE: unscaled

    (ComputedColumn)
    """
    # Euclidean distance of every coordinate row from the origin.
    radii = np.linalg.norm(self.coords_for_computations, axis=1)
    return ComputedColumn(self, radii)
constant[
Return the radius of each element (either vertices or centers
depending on the setting in the mesh) with respect to the center of
the star.
NOTE: unscaled
(ComputedColumn)
]
variable[rs] assign[=] call[name[np].linalg.norm, parameter[name[self].coords_for_computations]]
return[call[name[ComputedColumn], parameter[name[self], name[rs]]]] | keyword[def] identifier[rs] ( identifier[self] ):
literal[string]
identifier[rs] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[self] . identifier[coords_for_computations] , identifier[axis] = literal[int] )
keyword[return] identifier[ComputedColumn] ( identifier[self] , identifier[rs] ) | def rs(self):
"""
Return the radius of each element (either vertices or centers
depending on the setting in the mesh) with respect to the center of
the star.
NOTE: unscaled
(ComputedColumn)
"""
rs = np.linalg.norm(self.coords_for_computations, axis=1)
return ComputedColumn(self, rs) |
def coerce_value(
    value: Any, type_: GraphQLInputType, blame_node: Node = None, path: Path = None
) -> CoercedValue:
    """Coerce a Python value given a GraphQL Type.

    Recursively walks the input type (non-null, scalar, enum, list, input
    object) and returns a ``CoercedValue`` that carries either a value which
    is valid for the provided type, or a list of encountered coercion errors.

    :param value: the Python value to coerce (``None``/``INVALID`` mean
        "no value provided").
    :param type_: the GraphQL input type to coerce against.
    :param blame_node: optional AST node used to attribute errors.
    :param path: optional path into the input, extended via ``at_path`` as
        the recursion descends into lists and input-object fields.
    """
    # A value must be provided if the type is non-null.
    if is_non_null_type(type_):
        if value is None or value is INVALID:
            return of_errors(
                [
                    coercion_error(
                        f"Expected non-nullable type {type_} not to be null",
                        blame_node,
                        path,
                    )
                ]
            )
        # Unwrap the non-null and coerce against the inner type.
        type_ = cast(GraphQLNonNull, type_)
        return coerce_value(value, type_.of_type, blame_node, path)
    if value is None or value is INVALID:
        # Explicitly return the value null.
        return of_value(None)
    if is_scalar_type(type_):
        # Scalars determine if a value is valid via `parse_value()`, which can throw to
        # indicate failure. If it throws, maintain a reference to the original error.
        type_ = cast(GraphQLScalarType, type_)
        try:
            parse_result = type_.parse_value(value)
            if is_invalid(parse_result):
                return of_errors(
                    [coercion_error(f"Expected type {type_.name}", blame_node, path)]
                )
            return of_value(parse_result)
        except (TypeError, ValueError) as error:
            # Attach both the stringified message and the original exception.
            return of_errors(
                [
                    coercion_error(
                        f"Expected type {type_.name}",
                        blame_node,
                        path,
                        str(error),
                        error,
                    )
                ]
            )
    if is_enum_type(type_):
        type_ = cast(GraphQLEnumType, type_)
        values = type_.values
        if isinstance(value, str):
            enum_value = values.get(value)
            if enum_value:
                # Use the enum's internal value unless it is None, in which
                # case the name itself is the value.
                return of_value(value if enum_value.value is None else enum_value.value)
        # Unknown enum name: suggest close matches in the error message.
        suggestions = suggestion_list(str(value), values)
        did_you_mean = f"did you mean {or_list(suggestions)}?" if suggestions else None
        return of_errors(
            [
                coercion_error(
                    f"Expected type {type_.name}", blame_node, path, did_you_mean
                )
            ]
        )
    if is_list_type(type_):
        type_ = cast(GraphQLList, type_)
        item_type = type_.of_type
        # Strings are iterable but must not be treated as lists of chars.
        if isinstance(value, Iterable) and not isinstance(value, str):
            errors = None
            coerced_value_list: List[Any] = []
            append_item = coerced_value_list.append
            for index, item_value in enumerate(value):
                coerced_item = coerce_value(
                    item_value, item_type, blame_node, at_path(path, index)
                )
                if coerced_item.errors:
                    errors = add(errors, *coerced_item.errors)
                elif not errors:
                    # Only keep building the result while no error occurred.
                    append_item(coerced_item.value)
            return of_errors(errors) if errors else of_value(coerced_value_list)
        # Lists accept a non-list value as a list of one.
        coerced_item = coerce_value(value, item_type, blame_node)
        return coerced_item if coerced_item.errors else of_value([coerced_item.value])
    if is_input_object_type(type_):
        type_ = cast(GraphQLInputObjectType, type_)
        if not isinstance(value, dict):
            return of_errors(
                [
                    coercion_error(
                        f"Expected type {type_.name} to be a dict", blame_node, path
                    )
                ]
            )
        errors = None
        coerced_value_dict: Dict[str, Any] = {}
        fields = type_.fields
        # Ensure every defined field is valid.
        for field_name, field in fields.items():
            field_value = value.get(field_name, INVALID)
            if is_invalid(field_value):
                if not is_invalid(field.default_value):
                    # Missing field with a default: use the default.
                    coerced_value_dict[field_name] = field.default_value
                elif is_non_null_type(field.type):
                    # Missing field without default on a non-null type: error.
                    errors = add(
                        errors,
                        coercion_error(
                            f"Field {print_path(at_path(path, field_name))}"
                            f" of required type {field.type} was not provided",
                            blame_node,
                        ),
                    )
            else:
                coerced_field = coerce_value(
                    field_value, field.type, blame_node, at_path(path, field_name)
                )
                if coerced_field.errors:
                    errors = add(errors, *coerced_field.errors)
                else:
                    coerced_value_dict[field_name] = coerced_field.value
        # Ensure every provided field is defined.
        for field_name in value:
            if field_name not in fields:
                suggestions = suggestion_list(field_name, fields)
                did_you_mean = (
                    f"did you mean {or_list(suggestions)}?" if suggestions else None
                )
                errors = add(
                    errors,
                    coercion_error(
                        f"Field '{field_name}' is not defined by type {type_.name}",
                        blame_node,
                        path,
                        did_you_mean,
                    ),
                )
        return of_errors(errors) if errors else of_value(coerced_value_dict)
    # Not reachable. All possible input types have been considered.
    raise TypeError(f"Unexpected input type: '{inspect(type_)}'.")
constant[Coerce a Python value given a GraphQL Type.
Returns either a value which is valid for the provided type or a list of encountered
coercion errors.
]
if call[name[is_non_null_type], parameter[name[type_]]] begin[:]
if <ast.BoolOp object at 0x7da1b1d83a60> begin[:]
return[call[name[of_errors], parameter[list[[<ast.Call object at 0x7da1b1d81db0>]]]]]
variable[type_] assign[=] call[name[cast], parameter[name[GraphQLNonNull], name[type_]]]
return[call[name[coerce_value], parameter[name[value], name[type_].of_type, name[blame_node], name[path]]]]
if <ast.BoolOp object at 0x7da1b1d81bd0> begin[:]
return[call[name[of_value], parameter[constant[None]]]]
if call[name[is_scalar_type], parameter[name[type_]]] begin[:]
variable[type_] assign[=] call[name[cast], parameter[name[GraphQLScalarType], name[type_]]]
<ast.Try object at 0x7da1b1d82da0>
if call[name[is_enum_type], parameter[name[type_]]] begin[:]
variable[type_] assign[=] call[name[cast], parameter[name[GraphQLEnumType], name[type_]]]
variable[values] assign[=] name[type_].values
if call[name[isinstance], parameter[name[value], name[str]]] begin[:]
variable[enum_value] assign[=] call[name[values].get, parameter[name[value]]]
if name[enum_value] begin[:]
return[call[name[of_value], parameter[<ast.IfExp object at 0x7da1b1d83100>]]]
variable[suggestions] assign[=] call[name[suggestion_list], parameter[call[name[str], parameter[name[value]]], name[values]]]
variable[did_you_mean] assign[=] <ast.IfExp object at 0x7da1b1d804c0>
return[call[name[of_errors], parameter[list[[<ast.Call object at 0x7da1b1d83610>]]]]]
if call[name[is_list_type], parameter[name[type_]]] begin[:]
variable[type_] assign[=] call[name[cast], parameter[name[GraphQLList], name[type_]]]
variable[item_type] assign[=] name[type_].of_type
if <ast.BoolOp object at 0x7da1b1d83d90> begin[:]
variable[errors] assign[=] constant[None]
<ast.AnnAssign object at 0x7da1b1d81720>
variable[append_item] assign[=] name[coerced_value_list].append
for taget[tuple[[<ast.Name object at 0x7da1b1d81b10>, <ast.Name object at 0x7da1b1d82500>]]] in starred[call[name[enumerate], parameter[name[value]]]] begin[:]
variable[coerced_item] assign[=] call[name[coerce_value], parameter[name[item_value], name[item_type], name[blame_node], call[name[at_path], parameter[name[path], name[index]]]]]
if name[coerced_item].errors begin[:]
variable[errors] assign[=] call[name[add], parameter[name[errors], <ast.Starred object at 0x7da1b1d823b0>]]
return[<ast.IfExp object at 0x7da1b1d80250>]
variable[coerced_item] assign[=] call[name[coerce_value], parameter[name[value], name[item_type], name[blame_node]]]
return[<ast.IfExp object at 0x7da1b2234af0>]
if call[name[is_input_object_type], parameter[name[type_]]] begin[:]
variable[type_] assign[=] call[name[cast], parameter[name[GraphQLInputObjectType], name[type_]]]
if <ast.UnaryOp object at 0x7da1b2237f70> begin[:]
return[call[name[of_errors], parameter[list[[<ast.Call object at 0x7da1b2234f70>]]]]]
variable[errors] assign[=] constant[None]
<ast.AnnAssign object at 0x7da1b2234be0>
variable[fields] assign[=] name[type_].fields
for taget[tuple[[<ast.Name object at 0x7da1b22356f0>, <ast.Name object at 0x7da1b22348b0>]]] in starred[call[name[fields].items, parameter[]]] begin[:]
variable[field_value] assign[=] call[name[value].get, parameter[name[field_name], name[INVALID]]]
if call[name[is_invalid], parameter[name[field_value]]] begin[:]
if <ast.UnaryOp object at 0x7da1b2234ee0> begin[:]
call[name[coerced_value_dict]][name[field_name]] assign[=] name[field].default_value
for taget[name[field_name]] in starred[name[value]] begin[:]
if compare[name[field_name] <ast.NotIn object at 0x7da2590d7190> name[fields]] begin[:]
variable[suggestions] assign[=] call[name[suggestion_list], parameter[name[field_name], name[fields]]]
variable[did_you_mean] assign[=] <ast.IfExp object at 0x7da1b1ddac80>
variable[errors] assign[=] call[name[add], parameter[name[errors], call[name[coercion_error], parameter[<ast.JoinedStr object at 0x7da1b1dd9b70>, name[blame_node], name[path], name[did_you_mean]]]]]
return[<ast.IfExp object at 0x7da1b1dd9c90>]
<ast.Raise object at 0x7da1b1dda740> | keyword[def] identifier[coerce_value] (
identifier[value] : identifier[Any] , identifier[type_] : identifier[GraphQLInputType] , identifier[blame_node] : identifier[Node] = keyword[None] , identifier[path] : identifier[Path] = keyword[None]
)-> identifier[CoercedValue] :
literal[string]
keyword[if] identifier[is_non_null_type] ( identifier[type_] ):
keyword[if] identifier[value] keyword[is] keyword[None] keyword[or] identifier[value] keyword[is] identifier[INVALID] :
keyword[return] identifier[of_errors] (
[
identifier[coercion_error] (
literal[string] ,
identifier[blame_node] ,
identifier[path] ,
)
]
)
identifier[type_] = identifier[cast] ( identifier[GraphQLNonNull] , identifier[type_] )
keyword[return] identifier[coerce_value] ( identifier[value] , identifier[type_] . identifier[of_type] , identifier[blame_node] , identifier[path] )
keyword[if] identifier[value] keyword[is] keyword[None] keyword[or] identifier[value] keyword[is] identifier[INVALID] :
keyword[return] identifier[of_value] ( keyword[None] )
keyword[if] identifier[is_scalar_type] ( identifier[type_] ):
identifier[type_] = identifier[cast] ( identifier[GraphQLScalarType] , identifier[type_] )
keyword[try] :
identifier[parse_result] = identifier[type_] . identifier[parse_value] ( identifier[value] )
keyword[if] identifier[is_invalid] ( identifier[parse_result] ):
keyword[return] identifier[of_errors] (
[ identifier[coercion_error] ( literal[string] , identifier[blame_node] , identifier[path] )]
)
keyword[return] identifier[of_value] ( identifier[parse_result] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[error] :
keyword[return] identifier[of_errors] (
[
identifier[coercion_error] (
literal[string] ,
identifier[blame_node] ,
identifier[path] ,
identifier[str] ( identifier[error] ),
identifier[error] ,
)
]
)
keyword[if] identifier[is_enum_type] ( identifier[type_] ):
identifier[type_] = identifier[cast] ( identifier[GraphQLEnumType] , identifier[type_] )
identifier[values] = identifier[type_] . identifier[values]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[enum_value] = identifier[values] . identifier[get] ( identifier[value] )
keyword[if] identifier[enum_value] :
keyword[return] identifier[of_value] ( identifier[value] keyword[if] identifier[enum_value] . identifier[value] keyword[is] keyword[None] keyword[else] identifier[enum_value] . identifier[value] )
identifier[suggestions] = identifier[suggestion_list] ( identifier[str] ( identifier[value] ), identifier[values] )
identifier[did_you_mean] = literal[string] keyword[if] identifier[suggestions] keyword[else] keyword[None]
keyword[return] identifier[of_errors] (
[
identifier[coercion_error] (
literal[string] , identifier[blame_node] , identifier[path] , identifier[did_you_mean]
)
]
)
keyword[if] identifier[is_list_type] ( identifier[type_] ):
identifier[type_] = identifier[cast] ( identifier[GraphQLList] , identifier[type_] )
identifier[item_type] = identifier[type_] . identifier[of_type]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Iterable] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[errors] = keyword[None]
identifier[coerced_value_list] : identifier[List] [ identifier[Any] ]=[]
identifier[append_item] = identifier[coerced_value_list] . identifier[append]
keyword[for] identifier[index] , identifier[item_value] keyword[in] identifier[enumerate] ( identifier[value] ):
identifier[coerced_item] = identifier[coerce_value] (
identifier[item_value] , identifier[item_type] , identifier[blame_node] , identifier[at_path] ( identifier[path] , identifier[index] )
)
keyword[if] identifier[coerced_item] . identifier[errors] :
identifier[errors] = identifier[add] ( identifier[errors] ,* identifier[coerced_item] . identifier[errors] )
keyword[elif] keyword[not] identifier[errors] :
identifier[append_item] ( identifier[coerced_item] . identifier[value] )
keyword[return] identifier[of_errors] ( identifier[errors] ) keyword[if] identifier[errors] keyword[else] identifier[of_value] ( identifier[coerced_value_list] )
identifier[coerced_item] = identifier[coerce_value] ( identifier[value] , identifier[item_type] , identifier[blame_node] )
keyword[return] identifier[coerced_item] keyword[if] identifier[coerced_item] . identifier[errors] keyword[else] identifier[of_value] ([ identifier[coerced_item] . identifier[value] ])
keyword[if] identifier[is_input_object_type] ( identifier[type_] ):
identifier[type_] = identifier[cast] ( identifier[GraphQLInputObjectType] , identifier[type_] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[return] identifier[of_errors] (
[
identifier[coercion_error] (
literal[string] , identifier[blame_node] , identifier[path]
)
]
)
identifier[errors] = keyword[None]
identifier[coerced_value_dict] : identifier[Dict] [ identifier[str] , identifier[Any] ]={}
identifier[fields] = identifier[type_] . identifier[fields]
keyword[for] identifier[field_name] , identifier[field] keyword[in] identifier[fields] . identifier[items] ():
identifier[field_value] = identifier[value] . identifier[get] ( identifier[field_name] , identifier[INVALID] )
keyword[if] identifier[is_invalid] ( identifier[field_value] ):
keyword[if] keyword[not] identifier[is_invalid] ( identifier[field] . identifier[default_value] ):
identifier[coerced_value_dict] [ identifier[field_name] ]= identifier[field] . identifier[default_value]
keyword[elif] identifier[is_non_null_type] ( identifier[field] . identifier[type] ):
identifier[errors] = identifier[add] (
identifier[errors] ,
identifier[coercion_error] (
literal[string]
literal[string] ,
identifier[blame_node] ,
),
)
keyword[else] :
identifier[coerced_field] = identifier[coerce_value] (
identifier[field_value] , identifier[field] . identifier[type] , identifier[blame_node] , identifier[at_path] ( identifier[path] , identifier[field_name] )
)
keyword[if] identifier[coerced_field] . identifier[errors] :
identifier[errors] = identifier[add] ( identifier[errors] ,* identifier[coerced_field] . identifier[errors] )
keyword[else] :
identifier[coerced_value_dict] [ identifier[field_name] ]= identifier[coerced_field] . identifier[value]
keyword[for] identifier[field_name] keyword[in] identifier[value] :
keyword[if] identifier[field_name] keyword[not] keyword[in] identifier[fields] :
identifier[suggestions] = identifier[suggestion_list] ( identifier[field_name] , identifier[fields] )
identifier[did_you_mean] =(
literal[string] keyword[if] identifier[suggestions] keyword[else] keyword[None]
)
identifier[errors] = identifier[add] (
identifier[errors] ,
identifier[coercion_error] (
literal[string] ,
identifier[blame_node] ,
identifier[path] ,
identifier[did_you_mean] ,
),
)
keyword[return] identifier[of_errors] ( identifier[errors] ) keyword[if] identifier[errors] keyword[else] identifier[of_value] ( identifier[coerced_value_dict] )
keyword[raise] identifier[TypeError] ( literal[string] ) | def coerce_value(value: Any, type_: GraphQLInputType, blame_node: Node=None, path: Path=None) -> CoercedValue:
"""Coerce a Python value given a GraphQL Type.
Returns either a value which is valid for the provided type or a list of encountered
coercion errors.
"""
# A value must be provided if the type is non-null.
if is_non_null_type(type_):
if value is None or value is INVALID:
return of_errors([coercion_error(f'Expected non-nullable type {type_} not to be null', blame_node, path)]) # depends on [control=['if'], data=[]]
type_ = cast(GraphQLNonNull, type_)
return coerce_value(value, type_.of_type, blame_node, path) # depends on [control=['if'], data=[]]
if value is None or value is INVALID:
# Explicitly return the value null.
return of_value(None) # depends on [control=['if'], data=[]]
if is_scalar_type(type_):
# Scalars determine if a value is valid via `parse_value()`, which can throw to
# indicate failure. If it throws, maintain a reference to the original error.
type_ = cast(GraphQLScalarType, type_)
try:
parse_result = type_.parse_value(value)
if is_invalid(parse_result):
return of_errors([coercion_error(f'Expected type {type_.name}', blame_node, path)]) # depends on [control=['if'], data=[]]
return of_value(parse_result) # depends on [control=['try'], data=[]]
except (TypeError, ValueError) as error:
return of_errors([coercion_error(f'Expected type {type_.name}', blame_node, path, str(error), error)]) # depends on [control=['except'], data=['error']] # depends on [control=['if'], data=[]]
if is_enum_type(type_):
type_ = cast(GraphQLEnumType, type_)
values = type_.values
if isinstance(value, str):
enum_value = values.get(value)
if enum_value:
return of_value(value if enum_value.value is None else enum_value.value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
suggestions = suggestion_list(str(value), values)
did_you_mean = f'did you mean {or_list(suggestions)}?' if suggestions else None
return of_errors([coercion_error(f'Expected type {type_.name}', blame_node, path, did_you_mean)]) # depends on [control=['if'], data=[]]
if is_list_type(type_):
type_ = cast(GraphQLList, type_)
item_type = type_.of_type
if isinstance(value, Iterable) and (not isinstance(value, str)):
errors = None
coerced_value_list: List[Any] = []
append_item = coerced_value_list.append
for (index, item_value) in enumerate(value):
coerced_item = coerce_value(item_value, item_type, blame_node, at_path(path, index))
if coerced_item.errors:
errors = add(errors, *coerced_item.errors) # depends on [control=['if'], data=[]]
elif not errors:
append_item(coerced_item.value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return of_errors(errors) if errors else of_value(coerced_value_list) # depends on [control=['if'], data=[]]
# Lists accept a non-list value as a list of one.
coerced_item = coerce_value(value, item_type, blame_node)
return coerced_item if coerced_item.errors else of_value([coerced_item.value]) # depends on [control=['if'], data=[]]
if is_input_object_type(type_):
type_ = cast(GraphQLInputObjectType, type_)
if not isinstance(value, dict):
return of_errors([coercion_error(f'Expected type {type_.name} to be a dict', blame_node, path)]) # depends on [control=['if'], data=[]]
errors = None
coerced_value_dict: Dict[str, Any] = {}
fields = type_.fields
# Ensure every defined field is valid.
for (field_name, field) in fields.items():
field_value = value.get(field_name, INVALID)
if is_invalid(field_value):
if not is_invalid(field.default_value):
coerced_value_dict[field_name] = field.default_value # depends on [control=['if'], data=[]]
elif is_non_null_type(field.type):
errors = add(errors, coercion_error(f'Field {print_path(at_path(path, field_name))} of required type {field.type} was not provided', blame_node)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
coerced_field = coerce_value(field_value, field.type, blame_node, at_path(path, field_name))
if coerced_field.errors:
errors = add(errors, *coerced_field.errors) # depends on [control=['if'], data=[]]
else:
coerced_value_dict[field_name] = coerced_field.value # depends on [control=['for'], data=[]]
# Ensure every provided field is defined.
for field_name in value:
if field_name not in fields:
suggestions = suggestion_list(field_name, fields)
did_you_mean = f'did you mean {or_list(suggestions)}?' if suggestions else None
errors = add(errors, coercion_error(f"Field '{field_name}' is not defined by type {type_.name}", blame_node, path, did_you_mean)) # depends on [control=['if'], data=['field_name', 'fields']] # depends on [control=['for'], data=['field_name']]
return of_errors(errors) if errors else of_value(coerced_value_dict) # depends on [control=['if'], data=[]]
# Not reachable. All possible input types have been considered.
raise TypeError(f"Unexpected input type: '{inspect(type_)}'.") |
def scroll(self, scroll_id, params=None, callback=None, **kwargs):
    """
    Scroll a search request created by specifying the scroll parameter.
    `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_

    :arg scroll_id: The scroll ID
    :arg params: optional dict of query parameters; only ``scroll`` is kept
    :arg callback: passed through to the HTTP client's ``fetch``
    :arg scroll: Specify how long a consistent view of the index should be
        maintained for scrolled search
    """
    # Use None as the default instead of a shared mutable dict ({} as a
    # default argument is a classic Python pitfall); build a fresh dict
    # per call.
    if params is None:
        params = {}
    query_params = ('scroll',)
    params = self._filter_params(query_params, params)
    url = self.mk_url(*['/_search/scroll'], **params)
    self.client.fetch(
        self.mk_req(url, method='GET', body=scroll_id, **kwargs),
        callback=callback,
    )
) | def function[scroll, parameter[self, scroll_id, params, callback]]:
constant[
Scroll a search request created by specifying the scroll parameter.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
:arg scroll_id: The scroll ID
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
]
variable[query_params] assign[=] tuple[[<ast.Constant object at 0x7da18dc040a0>]]
variable[params] assign[=] call[name[self]._filter_params, parameter[name[query_params], name[params]]]
variable[url] assign[=] call[name[self].mk_url, parameter[<ast.Starred object at 0x7da18dc04df0>]]
call[name[self].client.fetch, parameter[call[name[self].mk_req, parameter[name[url]]]]] | keyword[def] identifier[scroll] ( identifier[self] , identifier[scroll_id] , identifier[params] ={}, identifier[callback] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[query_params] =( literal[string] ,)
identifier[params] = identifier[self] . identifier[_filter_params] ( identifier[query_params] , identifier[params] )
identifier[url] = identifier[self] . identifier[mk_url] (*[ literal[string] ],** identifier[params] )
identifier[self] . identifier[client] . identifier[fetch] (
identifier[self] . identifier[mk_req] ( identifier[url] , identifier[method] = literal[string] , identifier[body] = identifier[scroll_id] ,** identifier[kwargs] ),
identifier[callback] = identifier[callback]
) | def scroll(self, scroll_id, params={}, callback=None, **kwargs):
"""
Scroll a search request created by specifying the scroll parameter.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
:arg scroll_id: The scroll ID
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
"""
query_params = ('scroll',)
params = self._filter_params(query_params, params)
url = self.mk_url(*['/_search/scroll'], **params)
self.client.fetch(self.mk_req(url, method='GET', body=scroll_id, **kwargs), callback=callback) |
def base_args(parser):
    """Add the generic command line options"""
    generic_args(parser)
    # Declare the flags as data, then register them in order (argument
    # order matters for --help output).
    flag_specs = [
        ('--monochrome', dict(dest='monochrome',
                              help='Whether or not to use colors',
                              action='store_true')),
        ('--metadata', dict(dest='metadata',
                            help='A series of key=value pairs for token metadata.',
                            default='')),
        ('--lease', dict(dest='lease',
                         help='Lease time for intermediary token.',
                         default='10s')),
        ('--reuse-token', dict(dest='reuse_token',
                               help='Whether to reuse the existing token. Note'
                                    ' this will cause metadata to not be preserved',
                               action='store_true')),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
constant[Add the generic command line options]
call[name[generic_args], parameter[name[parser]]]
call[name[parser].add_argument, parameter[constant[--monochrome]]]
call[name[parser].add_argument, parameter[constant[--metadata]]]
call[name[parser].add_argument, parameter[constant[--lease]]]
call[name[parser].add_argument, parameter[constant[--reuse-token]]] | keyword[def] identifier[base_args] ( identifier[parser] ):
literal[string]
identifier[generic_args] ( identifier[parser] )
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[dest] = literal[string] ,
identifier[help] = literal[string]
literal[string] ,
identifier[action] = literal[string] ) | def base_args(parser):
"""Add the generic command line options"""
generic_args(parser)
parser.add_argument('--monochrome', dest='monochrome', help='Whether or not to use colors', action='store_true')
parser.add_argument('--metadata', dest='metadata', help='A series of key=value pairs for token metadata.', default='')
parser.add_argument('--lease', dest='lease', help='Lease time for intermediary token.', default='10s')
parser.add_argument('--reuse-token', dest='reuse_token', help='Whether to reuse the existing token. Note this will cause metadata to not be preserved', action='store_true') |
def get_changesets(self, start=None, end=None, start_date=None,
                   end_date=None, branch_name=None, reverse=False):
    """
    Returns an iterator of ``MercurialChangeset`` objects from `start` to
    `end`, not inclusive. This should behave just like a list, i.e. `end`
    is not inclusive.

    :param start: None or str
    :param end: None or str
    :param start_date: presumably a lower bound on commit date — confirm in
        concrete backends
    :param end_date: presumably an upper bound on commit date — confirm in
        concrete backends
    :param branch_name: presumably restricts iteration to one branch —
        confirm in concrete backends
    :param reverse: iterate in reverse order if True
    :raise NotImplementedError: abstract method; concrete backends must
        override it
    """
    raise NotImplementedError
constant[
Returns iterator of ``MercurialChangeset`` objects from start to end
not inclusive This should behave just like a list, ie. end is not
inclusive
:param start: None or str
:param end: None or str
:param start_date:
:param end_date:
:param branch_name:
:param reversed:
]
<ast.Raise object at 0x7da18bccae30> | keyword[def] identifier[get_changesets] ( identifier[self] , identifier[start] = keyword[None] , identifier[end] = keyword[None] , identifier[start_date] = keyword[None] ,
identifier[end_date] = keyword[None] , identifier[branch_name] = keyword[None] , identifier[reverse] = keyword[False] ):
literal[string]
keyword[raise] identifier[NotImplementedError] | def get_changesets(self, start=None, end=None, start_date=None, end_date=None, branch_name=None, reverse=False):
"""
Returns iterator of ``MercurialChangeset`` objects from start to end
not inclusive This should behave just like a list, ie. end is not
inclusive
:param start: None or str
:param end: None or str
:param start_date:
:param end_date:
:param branch_name:
:param reversed:
"""
raise NotImplementedError |
def predict(self, X):
    """
    Apply transforms to the data, and predict with the final estimator.

    Parameters
    ----------
    X : iterable
        Data to predict on. Must fulfill input requirements of first step
        of the pipeline.

    Returns
    -------
    yp : array-like
        Predicted transformed target
    """
    # Run the transform stages; only the transformed data is needed here.
    transformed, _ignored_a, _ignored_b = self._transform(X)
    return self._final_estimator.predict(transformed)
constant[
Apply transforms to the data, and predict with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
yp : array-like
Predicted transformed target
]
<ast.Tuple object at 0x7da1b1791d20> assign[=] call[name[self]._transform, parameter[name[X]]]
return[call[name[self]._final_estimator.predict, parameter[name[Xt]]]] | keyword[def] identifier[predict] ( identifier[self] , identifier[X] ):
literal[string]
identifier[Xt] , identifier[_] , identifier[_] = identifier[self] . identifier[_transform] ( identifier[X] )
keyword[return] identifier[self] . identifier[_final_estimator] . identifier[predict] ( identifier[Xt] ) | def predict(self, X):
"""
Apply transforms to the data, and predict with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
yp : array-like
Predicted transformed target
"""
(Xt, _, _) = self._transform(X)
return self._final_estimator.predict(Xt) |
def get_sql(self):
    """
    Build the JOIN clause for this join's table and condition.

    :rtype: str
    :return: the JOIN sql for the join tables and join condition
    """
    clause_parts = (self.join_type, self.right_table.get_sql(), self.get_condition())
    return '{0} {1} ON {2}'.format(*clause_parts)
constant[
Generates the JOIN sql for the join tables and join condition
:rtype: str
:return: the JOIN sql for the join tables and join condition
]
return[call[constant[{0} {1} ON {2}].format, parameter[name[self].join_type, call[name[self].right_table.get_sql, parameter[]], call[name[self].get_condition, parameter[]]]]] | keyword[def] identifier[get_sql] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[join_type] , identifier[self] . identifier[right_table] . identifier[get_sql] (), identifier[self] . identifier[get_condition] ()) | def get_sql(self):
"""
Generates the JOIN sql for the join tables and join condition
:rtype: str
:return: the JOIN sql for the join tables and join condition
"""
return '{0} {1} ON {2}'.format(self.join_type, self.right_table.get_sql(), self.get_condition()) |
def process_stream_error(self, error):
    """Process stream error element received.

    The default implementation only logs the error at debug level;
    subclasses may override for real handling.

    :Parameters:
        - `error`: error received
    :Types:
        - `error`: `StreamErrorElement`
    """
    # pylint: disable-msg=R0201
    message = "Unhandled stream error: condition: {0} {1!r}".format(
        error.condition_name, error.serialize())
    logger.debug(message)
constant[Process stream error element received.
:Parameters:
- `error`: error received
:Types:
- `error`: `StreamErrorElement`
]
call[name[logger].debug, parameter[call[constant[Unhandled stream error: condition: {0} {1!r}].format, parameter[name[error].condition_name, call[name[error].serialize, parameter[]]]]]] | keyword[def] identifier[process_stream_error] ( identifier[self] , identifier[error] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string]
. identifier[format] ( identifier[error] . identifier[condition_name] , identifier[error] . identifier[serialize] ())) | def process_stream_error(self, error):
"""Process stream error element received.
:Parameters:
- `error`: error received
:Types:
- `error`: `StreamErrorElement`
"""
# pylint: disable-msg=R0201
logger.debug('Unhandled stream error: condition: {0} {1!r}'.format(error.condition_name, error.serialize())) |
def otp(password, seed, sequence):
    """
    Calculates a one-time password hash using the given password, seed, and
    sequence number and returns it.
    Uses the MD4/sixword algorithm as supported by TACACS+ servers.
    :type password: str
    :param password: A password (4 to 63 characters long).
    :type seed: str
    :param seed: A cryptographic seed (1 to 16 characters, drawn from
        `_VALIDSEEDCHARACTERS`).
    :type sequence: int
    :param sequence: A non-negative sequence number.
    :rtype: string
    :return: A hash, rendered as six English words.
    """
    # Validate inputs before doing any hashing; same bounds as the
    # original list(range(...)) membership tests, without building lists.
    if not 4 <= len(password) <= 63:
        raise ValueError('passphrase length')
    if not 1 <= len(seed) <= 16:
        raise ValueError('seed length')
    for x in seed:
        if x not in _VALIDSEEDCHARACTERS:
            raise ValueError('seed composition')
    if sequence < 0:
        raise ValueError('sequence')
    # Pycryptodome only supports byte strings.
    seed = seed.encode('utf-8')
    password = password.encode('utf-8')
    # Discard the first <sequence> keys: hash once, then fold the MD4
    # digest and re-hash <sequence> more times.
    thehash = MD4.new(seed + password).digest()
    thehash = _fold_md4_or_md5(thehash)
    for _ in range(sequence):
        thehash = _fold_md4_or_md5(MD4.new(thehash).digest())
    # Generate the result as a six-word phrase.
    return _sixword_from_raw(thehash)
constant[
Calculates a one-time password hash using the given password, seed, and
sequence number and returns it.
Uses the MD4/sixword algorithm as supported by TACACS+ servers.
:type password: str
:param password: A password.
:type seed: str
:param seed: A cryptographic seed.
:type sequence: int
:param sequence: A sequence number.
:rtype: string
:return: A hash.
]
if compare[call[name[len], parameter[name[password]]] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[range], parameter[constant[4], constant[64]]]]]] begin[:]
<ast.Raise object at 0x7da1b07144f0>
if compare[call[name[len], parameter[name[seed]]] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[range], parameter[constant[1], constant[17]]]]]] begin[:]
<ast.Raise object at 0x7da1b0716ce0>
for taget[name[x]] in starred[name[seed]] begin[:]
if <ast.UnaryOp object at 0x7da1b0716e60> begin[:]
<ast.Raise object at 0x7da1b0715360>
if compare[name[sequence] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0715180>
variable[seed] assign[=] call[name[seed].encode, parameter[constant[utf-8]]]
variable[password] assign[=] call[name[password].encode, parameter[constant[utf-8]]]
variable[thehash] assign[=] call[call[name[MD4].new, parameter[binary_operation[name[seed] + name[password]]]].digest, parameter[]]
variable[thehash] assign[=] call[name[_fold_md4_or_md5], parameter[name[thehash]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[sequence]]]] begin[:]
variable[thehash] assign[=] call[name[_fold_md4_or_md5], parameter[call[call[name[MD4].new, parameter[name[thehash]]].digest, parameter[]]]]
return[call[name[_sixword_from_raw], parameter[name[thehash]]]] | keyword[def] identifier[otp] ( identifier[password] , identifier[seed] , identifier[sequence] ):
literal[string]
keyword[if] identifier[len] ( identifier[password] ) keyword[not] keyword[in] identifier[list] ( identifier[range] ( literal[int] , literal[int] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[seed] ) keyword[not] keyword[in] identifier[list] ( identifier[range] ( literal[int] , literal[int] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[x] keyword[in] identifier[seed] :
keyword[if] keyword[not] identifier[x] keyword[in] identifier[_VALIDSEEDCHARACTERS] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[sequence] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[seed] = identifier[seed] . identifier[encode] ( literal[string] )
identifier[password] = identifier[password] . identifier[encode] ( literal[string] )
identifier[thehash] = identifier[MD4] . identifier[new] ( identifier[seed] + identifier[password] ). identifier[digest] ()
identifier[thehash] = identifier[_fold_md4_or_md5] ( identifier[thehash] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[sequence] ):
identifier[thehash] = identifier[_fold_md4_or_md5] ( identifier[MD4] . identifier[new] ( identifier[thehash] ). identifier[digest] ())
keyword[return] identifier[_sixword_from_raw] ( identifier[thehash] ) | def otp(password, seed, sequence):
"""
Calculates a one-time password hash using the given password, seed, and
sequence number and returns it.
Uses the MD4/sixword algorithm as supported by TACACS+ servers.
:type password: str
:param password: A password.
:type seed: str
:param seed: A cryptographic seed.
:type sequence: int
:param sequence: A sequence number.
:rtype: string
:return: A hash.
"""
if len(password) not in list(range(4, 64)):
raise ValueError('passphrase length') # depends on [control=['if'], data=[]]
if len(seed) not in list(range(1, 17)):
raise ValueError('seed length') # depends on [control=['if'], data=[]]
for x in seed:
if not x in _VALIDSEEDCHARACTERS:
raise ValueError('seed composition') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']]
if sequence < 0:
raise ValueError('sequence') # depends on [control=['if'], data=[]]
# Pycryptodome only supports byte strings.
seed = seed.encode('utf-8')
password = password.encode('utf-8')
# Discard the first <sequence> keys
thehash = MD4.new(seed + password).digest()
thehash = _fold_md4_or_md5(thehash)
for i in range(0, sequence):
thehash = _fold_md4_or_md5(MD4.new(thehash).digest()) # depends on [control=['for'], data=[]]
# Generate the result
return _sixword_from_raw(thehash) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.