code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def reftrack_elementgrp_data(rt, role):
"""Return the data for the elementgrp (e.g. the Assettype or Sequence)
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the elementgrp
:rtype: depending on role
:raises: TypeError
"""
element = rt.get_element()
# Shots are grouped by their Sequence; Assets by their Assettype.
if isinstance(element, djadapter.models.Shot):
egrp = element.sequence
elif isinstance(element, djadapter.models.Asset):
egrp = element.atype
elif element is not None:
# Any other non-None element type is unsupported.
raise TypeError("Expected the element to be either Asset or Shot. Got %s" % type(element))
else:
# No element set on the reftrack -> nothing to report (implicit None).
return
# Only display/edit roles carry data; all other roles fall through to an implicit None.
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return egrp.name | def function[reftrack_elementgrp_data, parameter[rt, role]]:
constant[Return the data for the elementgrp (e.g. the Assettype or Sequence)
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the elementgrp
:rtype: depending on role
:raises: TypeError
]
variable[element] assign[=] call[name[rt].get_element, parameter[]]
if call[name[isinstance], parameter[name[element], name[djadapter].models.Shot]] begin[:]
variable[egrp] assign[=] name[element].sequence
if <ast.BoolOp object at 0x7da1b1641060> begin[:]
return[name[egrp].name] | keyword[def] identifier[reftrack_elementgrp_data] ( identifier[rt] , identifier[role] ):
literal[string]
identifier[element] = identifier[rt] . identifier[get_element] ()
keyword[if] identifier[isinstance] ( identifier[element] , identifier[djadapter] . identifier[models] . identifier[Shot] ):
identifier[egrp] = identifier[element] . identifier[sequence]
keyword[elif] identifier[isinstance] ( identifier[element] , identifier[djadapter] . identifier[models] . identifier[Asset] ):
identifier[egrp] = identifier[element] . identifier[atype]
keyword[elif] identifier[element] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[element] ))
keyword[else] :
keyword[return]
keyword[if] identifier[role] == identifier[QtCore] . identifier[Qt] . identifier[DisplayRole] keyword[or] identifier[role] == identifier[QtCore] . identifier[Qt] . identifier[EditRole] :
keyword[return] identifier[egrp] . identifier[name] | def reftrack_elementgrp_data(rt, role):
"""Return the data for the elementgrp (e.g. the Assettype or Sequence)
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the elementgrp
:rtype: depending on role
:raises: TypeError
"""
element = rt.get_element()
if isinstance(element, djadapter.models.Shot):
egrp = element.sequence # depends on [control=['if'], data=[]]
elif isinstance(element, djadapter.models.Asset):
egrp = element.atype # depends on [control=['if'], data=[]]
elif element is not None:
raise TypeError('Expected the element to be either Asset or Shot. Got %s' % type(element)) # depends on [control=['if'], data=['element']]
else:
return
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return egrp.name # depends on [control=['if'], data=[]] |
def tune(self):
"""
Tell all step methods to tune themselves.
"""
if self.verbose > 0:
print_('\tTuning at iteration', self._current_iter)
# Initialize counter for number of tuning stochastics
tuning_count = 0
for step_method in self.step_methods:
# Per-step verbosity overrides the sampler-wide setting when set (> -1).
verbose = self.verbose
if step_method.verbose > -1:
verbose = step_method.verbose
# Tune step methods
# NOTE(review): the locally computed `verbose` is not used on the next
# line -- tune() always receives self.verbose; looks like it should be
# verbose=verbose. Confirm before changing.
tuning_count += step_method.tune(verbose=self.verbose)
if verbose > 1:
# NOTE(review): this prints the cumulative tuning_count, not the
# return value of this particular step method.
print_(
'\t\tTuning step method %s, returned %i\n' % (
step_method._id, tuning_count
)
)
sys.stdout.flush()
if self._burn_till_tuned:
if not tuning_count:
# If no step methods needed tuning, increment count
self._tuned_count += 1
else:
# Otherwise re-initialize count
self._tuned_count = 0
# n consecutive clean intervals removed tuning
# n is equal to self._stop_tuning_after
if self._tuned_count == self._stop_tuning_after:
if self.verbose > 0:
print_('\nFinished tuning')
# Disable further tuning for the remainder of the run.
self._tuning = False | def function[tune, parameter[self]]:
constant[
Tell all step methods to tune themselves.
]
if compare[name[self].verbose greater[>] constant[0]] begin[:]
call[name[print_], parameter[constant[ Tuning at iteration], name[self]._current_iter]]
variable[tuning_count] assign[=] constant[0]
for taget[name[step_method]] in starred[name[self].step_methods] begin[:]
variable[verbose] assign[=] name[self].verbose
if compare[name[step_method].verbose greater[>] <ast.UnaryOp object at 0x7da20e957df0>] begin[:]
variable[verbose] assign[=] name[step_method].verbose
<ast.AugAssign object at 0x7da20e954af0>
if compare[name[verbose] greater[>] constant[1]] begin[:]
call[name[print_], parameter[binary_operation[constant[ Tuning step method %s, returned %i
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20e954520>, <ast.Name object at 0x7da20e954580>]]]]]
call[name[sys].stdout.flush, parameter[]]
if name[self]._burn_till_tuned begin[:]
if <ast.UnaryOp object at 0x7da20e9552a0> begin[:]
<ast.AugAssign object at 0x7da20e956560>
if compare[name[self]._tuned_count equal[==] name[self]._stop_tuning_after] begin[:]
if compare[name[self].verbose greater[>] constant[0]] begin[:]
call[name[print_], parameter[constant[
Finished tuning]]]
name[self]._tuning assign[=] constant[False] | keyword[def] identifier[tune] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[verbose] > literal[int] :
identifier[print_] ( literal[string] , identifier[self] . identifier[_current_iter] )
identifier[tuning_count] = literal[int]
keyword[for] identifier[step_method] keyword[in] identifier[self] . identifier[step_methods] :
identifier[verbose] = identifier[self] . identifier[verbose]
keyword[if] identifier[step_method] . identifier[verbose] >- literal[int] :
identifier[verbose] = identifier[step_method] . identifier[verbose]
identifier[tuning_count] += identifier[step_method] . identifier[tune] ( identifier[verbose] = identifier[self] . identifier[verbose] )
keyword[if] identifier[verbose] > literal[int] :
identifier[print_] (
literal[string] %(
identifier[step_method] . identifier[_id] , identifier[tuning_count]
)
)
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[if] identifier[self] . identifier[_burn_till_tuned] :
keyword[if] keyword[not] identifier[tuning_count] :
identifier[self] . identifier[_tuned_count] += literal[int]
keyword[else] :
identifier[self] . identifier[_tuned_count] = literal[int]
keyword[if] identifier[self] . identifier[_tuned_count] == identifier[self] . identifier[_stop_tuning_after] :
keyword[if] identifier[self] . identifier[verbose] > literal[int] :
identifier[print_] ( literal[string] )
identifier[self] . identifier[_tuning] = keyword[False] | def tune(self):
"""
Tell all step methods to tune themselves.
"""
if self.verbose > 0:
print_('\tTuning at iteration', self._current_iter) # depends on [control=['if'], data=[]]
# Initialize counter for number of tuning stochastics
tuning_count = 0
for step_method in self.step_methods:
verbose = self.verbose
if step_method.verbose > -1:
verbose = step_method.verbose # depends on [control=['if'], data=[]]
# Tune step methods
tuning_count += step_method.tune(verbose=self.verbose)
if verbose > 1:
print_('\t\tTuning step method %s, returned %i\n' % (step_method._id, tuning_count))
sys.stdout.flush() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['step_method']]
if self._burn_till_tuned:
if not tuning_count:
# If no step methods needed tuning, increment count
self._tuned_count += 1 # depends on [control=['if'], data=[]]
else:
# Otherwise re-initialize count
self._tuned_count = 0
# n consecutive clean intervals removed tuning
# n is equal to self._stop_tuning_after
if self._tuned_count == self._stop_tuning_after:
if self.verbose > 0:
print_('\nFinished tuning') # depends on [control=['if'], data=[]]
self._tuning = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def run(self, *args):
"""Invoke the wrapped script.
Returns:
Return code of the command, or 0 if the command is not run.
"""
# Fall back to the suite-wide prefix char when none was set on this instance.
if self.prefix_char is None:
prefix_char = config.suite_alias_prefix_char
else:
prefix_char = self.prefix_char
if prefix_char == '':
# empty prefix char means we don't support the '+' args
return self._run_no_args(args)
else:
return self._run(prefix_char, args) | def function[run, parameter[self]]:
constant[Invoke the wrapped script.
Returns:
Return code of the command, or 0 if the command is not run.
]
if compare[name[self].prefix_char is constant[None]] begin[:]
variable[prefix_char] assign[=] name[config].suite_alias_prefix_char
if compare[name[prefix_char] equal[==] constant[]] begin[:]
return[call[name[self]._run_no_args, parameter[name[args]]]] | keyword[def] identifier[run] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[self] . identifier[prefix_char] keyword[is] keyword[None] :
identifier[prefix_char] = identifier[config] . identifier[suite_alias_prefix_char]
keyword[else] :
identifier[prefix_char] = identifier[self] . identifier[prefix_char]
keyword[if] identifier[prefix_char] == literal[string] :
keyword[return] identifier[self] . identifier[_run_no_args] ( identifier[args] )
keyword[else] :
keyword[return] identifier[self] . identifier[_run] ( identifier[prefix_char] , identifier[args] ) | def run(self, *args):
"""Invoke the wrapped script.
Returns:
Return code of the command, or 0 if the command is not run.
"""
if self.prefix_char is None:
prefix_char = config.suite_alias_prefix_char # depends on [control=['if'], data=[]]
else:
prefix_char = self.prefix_char
if prefix_char == '':
# empty prefix char means we don't support the '+' args
return self._run_no_args(args) # depends on [control=['if'], data=[]]
else:
return self._run(prefix_char, args) |
def date_range(start=None, end=None, periods=None, freq=None, tz=None,
normalize=False, name=None, closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : integer, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
:ref:`here <timeseries.offset_aliases>` for a list of
frequency aliases.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
timedelta_range : Return a fixed frequency TimedeltaIndex.
period_range : Return a fixed frequency PeriodIndex.
interval_range : Return a fixed frequency IntervalIndex.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> pd.date_range(start='1/1/2018', end='1/08/2018')
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `start` and `periods`, the number of periods (days).
>>> pd.date_range(start='1/1/2018', periods=8)
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `end` and `periods`, the number of periods (days).
>>> pd.date_range(end='1/1/2018', periods=8)
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> pd.date_range(start='1/1/2018', periods=5, freq='M')
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq='M')
Multiples are allowed
>>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
`freq` can also be specified as an Offset object.
>>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
Specify `tz` to set the timezone.
>>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
'2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
'2018-01-05 00:00:00+09:00'],
dtype='datetime64[ns, Asia/Tokyo]', freq='D')
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
dtype='datetime64[ns]', freq='D')
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
"""
# Default to daily frequency only when fewer than three of
# start/end/periods were supplied; when all three are given, freq stays
# None and the range is linearly spaced between start and end.
if freq is None and com._any_none(periods, start, end):
freq = 'D'
# Delegate range generation to the array implementation.
dtarr = DatetimeArray._generate_range(
start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize,
closed=closed, **kwargs)
# Wrap the generated array in an index without re-validating the data.
return DatetimeIndex._simple_new(
dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name) | def function[date_range, parameter[start, end, periods, freq, tz, normalize, name, closed]]:
constant[
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : integer, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
:ref:`here <timeseries.offset_aliases>` for a list of
frequency aliases.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
timedelta_range : Return a fixed frequency TimedeltaIndex.
period_range : Return a fixed frequency PeriodIndex.
interval_range : Return a fixed frequency IntervalIndex.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> pd.date_range(start='1/1/2018', end='1/08/2018')
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `start` and `periods`, the number of periods (days).
>>> pd.date_range(start='1/1/2018', periods=8)
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `end` and `periods`, the number of periods (days).
>>> pd.date_range(end='1/1/2018', periods=8)
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> pd.date_range(start='1/1/2018', periods=5, freq='M')
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq='M')
Multiples are allowed
>>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
`freq` can also be specified as an Offset object.
>>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
Specify `tz` to set the timezone.
>>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
'2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
'2018-01-05 00:00:00+09:00'],
dtype='datetime64[ns, Asia/Tokyo]', freq='D')
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
dtype='datetime64[ns]', freq='D')
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
]
if <ast.BoolOp object at 0x7da1b26ad0f0> begin[:]
variable[freq] assign[=] constant[D]
variable[dtarr] assign[=] call[name[DatetimeArray]._generate_range, parameter[]]
return[call[name[DatetimeIndex]._simple_new, parameter[name[dtarr]]]] | keyword[def] identifier[date_range] ( identifier[start] = keyword[None] , identifier[end] = keyword[None] , identifier[periods] = keyword[None] , identifier[freq] = keyword[None] , identifier[tz] = keyword[None] ,
identifier[normalize] = keyword[False] , identifier[name] = keyword[None] , identifier[closed] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[freq] keyword[is] keyword[None] keyword[and] identifier[com] . identifier[_any_none] ( identifier[periods] , identifier[start] , identifier[end] ):
identifier[freq] = literal[string]
identifier[dtarr] = identifier[DatetimeArray] . identifier[_generate_range] (
identifier[start] = identifier[start] , identifier[end] = identifier[end] , identifier[periods] = identifier[periods] ,
identifier[freq] = identifier[freq] , identifier[tz] = identifier[tz] , identifier[normalize] = identifier[normalize] ,
identifier[closed] = identifier[closed] ,** identifier[kwargs] )
keyword[return] identifier[DatetimeIndex] . identifier[_simple_new] (
identifier[dtarr] , identifier[tz] = identifier[dtarr] . identifier[tz] , identifier[freq] = identifier[dtarr] . identifier[freq] , identifier[name] = identifier[name] ) | def date_range(start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : integer, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
:ref:`here <timeseries.offset_aliases>` for a list of
frequency aliases.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
timedelta_range : Return a fixed frequency TimedeltaIndex.
period_range : Return a fixed frequency PeriodIndex.
interval_range : Return a fixed frequency IntervalIndex.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> pd.date_range(start='1/1/2018', end='1/08/2018')
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `start` and `periods`, the number of periods (days).
>>> pd.date_range(start='1/1/2018', periods=8)
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `end` and `periods`, the number of periods (days).
>>> pd.date_range(end='1/1/2018', periods=8)
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> pd.date_range(start='1/1/2018', periods=5, freq='M')
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq='M')
Multiples are allowed
>>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
`freq` can also be specified as an Offset object.
>>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
Specify `tz` to set the timezone.
>>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
'2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
'2018-01-05 00:00:00+09:00'],
dtype='datetime64[ns, Asia/Tokyo]', freq='D')
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
dtype='datetime64[ns]', freq='D')
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
"""
if freq is None and com._any_none(periods, start, end):
freq = 'D' # depends on [control=['if'], data=[]]
dtarr = DatetimeArray._generate_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, closed=closed, **kwargs)
return DatetimeIndex._simple_new(dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name) |
def get_grades_by_regid_and_term(regid, term):
"""
Returns a StudentGrades model for the regid and term.
"""
# Resource path has the form <prefix>/<year>,<quarter>,<regid>.json
url = "{}/{},{},{}.json".format(enrollment_res_url_prefix,
term.year,
term.quarter,
regid)
# Fetch the resource and convert the JSON payload into the model.
return _json_to_grades(get_resource(url), regid, term) | def function[get_grades_by_regid_and_term, parameter[regid, term]]:
constant[
Returns a StudentGrades model for the regid and term.
]
variable[url] assign[=] call[constant[{}/{},{},{}.json].format, parameter[name[enrollment_res_url_prefix], name[term].year, name[term].quarter, name[regid]]]
return[call[name[_json_to_grades], parameter[call[name[get_resource], parameter[name[url]]], name[regid], name[term]]]] | keyword[def] identifier[get_grades_by_regid_and_term] ( identifier[regid] , identifier[term] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[enrollment_res_url_prefix] ,
identifier[term] . identifier[year] ,
identifier[term] . identifier[quarter] ,
identifier[regid] )
keyword[return] identifier[_json_to_grades] ( identifier[get_resource] ( identifier[url] ), identifier[regid] , identifier[term] ) | def get_grades_by_regid_and_term(regid, term):
"""
Returns a StudentGrades model for the regid and term.
"""
url = '{}/{},{},{}.json'.format(enrollment_res_url_prefix, term.year, term.quarter, regid)
return _json_to_grades(get_resource(url), regid, term) |
def construct_settings(
        service_name, client_config, config_override,
        retry_names, bundle_descriptors=None, page_descriptors=None,
        metrics_headers=(), kwargs=None):
    """Constructs a dictionary mapping method names to _CallSettings.

    The ``client_config`` parameter is parsed from a client configuration JSON
    file of the form:

    .. code-block:: json

        {
          "interfaces": {
            "google.fake.v1.ServiceName": {
              "retry_codes": {
                "idempotent": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
                "non_idempotent": []
              },
              "retry_params": {
                "default": {
                  "initial_retry_delay_millis": 100,
                  "retry_delay_multiplier": 1.2,
                  "max_retry_delay_millis": 1000,
                  "initial_rpc_timeout_millis": 2000,
                  "rpc_timeout_multiplier": 1.5,
                  "max_rpc_timeout_millis": 30000,
                  "total_timeout_millis": 45000
                }
              },
              "methods": {
                "CreateFoo": {
                  "retry_codes_name": "idempotent",
                  "retry_params_name": "default",
                  "timeout_millis": 30000
                },
                "Publish": {
                  "retry_codes_name": "non_idempotent",
                  "retry_params_name": "default",
                  "bundling": {
                    "element_count_threshold": 40,
                    "element_count_limit": 200,
                    "request_byte_threshold": 90000,
                    "request_byte_limit": 100000,
                    "delay_threshold_millis": 100
                  }
                }
              }
            }
          }
        }

    Args:
        service_name (str): The fully-qualified name of this service, used as a
            key into the client config file (in the example above, this value
            would be ``google.fake.v1.ServiceName``).
        client_config (dict): A dictionary parsed from the standard API client
            config file.
        bundle_descriptors (Mapping[str, BundleDescriptor]): A dictionary of
            method names to BundleDescriptor objects for methods that are
            bundling-enabled.
        page_descriptors (Mapping[str, PageDescriptor]): A dictionary of method
            names to PageDescriptor objects for methods that are page
            streaming-enabled.
        config_override (str): A dictionary in the same structure of
            client_config to override the settings. Usually client_config is
            supplied from the default config and config_override will be
            specified by users.
        retry_names (Mapping[str, object]): A dictionary mapping the strings
            referring to response status codes to the Python objects
            representing those codes.
        metrics_headers (Mapping[str, str]): Dictionary of headers to be passed
            for analytics. Sent as a dictionary; eventually becomes a
            space-separated string (e.g. 'foo/1.0.0 bar/3.14.1').
        kwargs (dict): The keyword arguments to be passed to the API calls.

    Returns:
        dict: A dictionary mapping method names to _CallSettings.

    Raises:
        KeyError: If the configuration for the service in question cannot be
            located in the provided ``client_config``.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=protected-access
    defaults = {}
    bundle_descriptors = bundle_descriptors or {}
    page_descriptors = page_descriptors or {}
    kwargs = kwargs or {}

    # Sanity check: It is possible that we got this far but some headers
    # were specified with an older library, which sends them as...
    # kwargs={'metadata': [('x-goog-api-client', 'foo/1.0 bar/3.0')]}
    #
    # Note: This is the final format we will send down to GRPC shortly.
    #
    # Remove any x-goog-api-client header that may have been present
    # in the metadata list.
    if 'metadata' in kwargs:
        kwargs['metadata'] = [value for value in kwargs['metadata']
                              if value[0].lower() != 'x-goog-api-client']

    # Fill out the metrics headers with GAX and GRPC info, and convert
    # to a string in the format that the GRPC layer expects.
    kwargs.setdefault('metadata', [])
    kwargs['metadata'].append(
        ('x-goog-api-client', metrics.stringify(metrics.fill(metrics_headers)))
    )

    try:
        service_config = client_config['interfaces'][service_name]
    except KeyError:
        raise KeyError('Client configuration not found for service: {}'
                       .format(service_name))

    # User-supplied overrides mirror the structure of client_config; missing
    # levels simply fall back to empty dicts so the defaults win below.
    overrides = config_override.get('interfaces', {}).get(service_name, {})

    for method in service_config.get('methods'):
        method_config = service_config['methods'][method]
        overriding_method = overrides.get('methods', {}).get(method, {})
        snake_name = _upper_camel_to_lower_under(method)

        # An override timeout takes precedence over the default config.
        if overriding_method and overriding_method.get('timeout_millis'):
            timeout = overriding_method['timeout_millis']
        else:
            timeout = method_config['timeout_millis']
        # Config timeouts are in milliseconds; _CallSettings expects seconds.
        timeout /= _MILLIS_PER_SECOND

        bundle_descriptor = bundle_descriptors.get(snake_name)
        bundling_config = method_config.get('bundling', None)
        # An explicit 'bundling' key in the override (even None) replaces
        # the default bundling configuration entirely.
        if overriding_method and 'bundling' in overriding_method:
            bundling_config = overriding_method['bundling']
        bundler = _construct_bundling(bundling_config, bundle_descriptor)

        # Build retry options from the defaults and the overrides, then
        # merge them so the override wins where both define a value.
        retry_options = _merge_retry_options(
            _construct_retry(method_config, service_config['retry_codes'],
                             service_config['retry_params'], retry_names),
            _construct_retry(overriding_method, overrides.get('retry_codes'),
                             overrides.get('retry_params'), retry_names))

        defaults[snake_name] = gax._CallSettings(
            timeout=timeout, retry=retry_options,
            page_descriptor=page_descriptors.get(snake_name),
            bundler=bundler, bundle_descriptor=bundle_descriptor,
            kwargs=kwargs)
    return defaults
constant[Constructs a dictionary mapping method names to _CallSettings.
The ``client_config`` parameter is parsed from a client configuration JSON
file of the form:
.. code-block:: json
{
"interfaces": {
"google.fake.v1.ServiceName": {
"retry_codes": {
"idempotent": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.2,
"max_retry_delay_millis": 1000,
"initial_rpc_timeout_millis": 2000,
"rpc_timeout_multiplier": 1.5,
"max_rpc_timeout_millis": 30000,
"total_timeout_millis": 45000
}
},
"methods": {
"CreateFoo": {
"retry_codes_name": "idempotent",
"retry_params_name": "default",
"timeout_millis": 30000
},
"Publish": {
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
"bundling": {
"element_count_threshold": 40,
"element_count_limit": 200,
"request_byte_threshold": 90000,
"request_byte_limit": 100000,
"delay_threshold_millis": 100
}
}
}
}
}
}
Args:
service_name (str): The fully-qualified name of this service, used as a
key into the client config file (in the example above, this value
would be ``google.fake.v1.ServiceName``).
client_config (dict): A dictionary parsed from the standard API client
config file.
bundle_descriptors (Mapping[str, BundleDescriptor]): A dictionary of
method names to BundleDescriptor objects for methods that are
bundling-enabled.
page_descriptors (Mapping[str, PageDescriptor]): A dictionary of method
names to PageDescriptor objects for methods that are page
streaming-enabled.
config_override (str): A dictionary in the same structure of
client_config to override the settings. Usually client_config is
supplied from the default config and config_override will be
specified by users.
retry_names (Mapping[str, object]): A dictionary mapping the strings
referring to response status codes to the Python objects representing
those codes.
metrics_headers (Mapping[str, str]): Dictionary of headers to be passed
for analytics. Sent as a dictionary; eventually becomes a
space-separated string (e.g. 'foo/1.0.0 bar/3.14.1').
kwargs (dict): The keyword arguments to be passed to the API calls.
Returns:
dict: A dictionary mapping method names to _CallSettings.
Raises:
KeyError: If the configuration for the service in question cannot be
located in the provided ``client_config``.
]
variable[defaults] assign[=] dictionary[[], []]
variable[bundle_descriptors] assign[=] <ast.BoolOp object at 0x7da1b28bc340>
variable[page_descriptors] assign[=] <ast.BoolOp object at 0x7da1b28bc4f0>
variable[kwargs] assign[=] <ast.BoolOp object at 0x7da1b28bf190>
if compare[constant[metadata] in name[kwargs]] begin[:]
call[name[kwargs]][constant[metadata]] assign[=] <ast.ListComp object at 0x7da1b2608310>
call[name[kwargs].setdefault, parameter[constant[metadata], list[[]]]]
call[call[name[kwargs]][constant[metadata]].append, parameter[tuple[[<ast.Constant object at 0x7da1b2866c20>, <ast.Call object at 0x7da1b2864cd0>]]]]
<ast.Try object at 0x7da1b2608dc0>
variable[overrides] assign[=] call[call[name[config_override].get, parameter[constant[interfaces], dictionary[[], []]]].get, parameter[name[service_name], dictionary[[], []]]]
for taget[name[method]] in starred[call[name[service_config].get, parameter[constant[methods]]]] begin[:]
variable[method_config] assign[=] call[call[name[service_config]][constant[methods]]][name[method]]
variable[overriding_method] assign[=] call[call[name[overrides].get, parameter[constant[methods], dictionary[[], []]]].get, parameter[name[method], dictionary[[], []]]]
variable[snake_name] assign[=] call[name[_upper_camel_to_lower_under], parameter[name[method]]]
if <ast.BoolOp object at 0x7da1b260b160> begin[:]
variable[timeout] assign[=] call[name[overriding_method]][constant[timeout_millis]]
<ast.AugAssign object at 0x7da1b2609810>
variable[bundle_descriptor] assign[=] call[name[bundle_descriptors].get, parameter[name[snake_name]]]
variable[bundling_config] assign[=] call[name[method_config].get, parameter[constant[bundling], constant[None]]]
if <ast.BoolOp object at 0x7da1b2609960> begin[:]
variable[bundling_config] assign[=] call[name[overriding_method]][constant[bundling]]
variable[bundler] assign[=] call[name[_construct_bundling], parameter[name[bundling_config], name[bundle_descriptor]]]
variable[retry_options] assign[=] call[name[_merge_retry_options], parameter[call[name[_construct_retry], parameter[name[method_config], call[name[service_config]][constant[retry_codes]], call[name[service_config]][constant[retry_params]], name[retry_names]]], call[name[_construct_retry], parameter[name[overriding_method], call[name[overrides].get, parameter[constant[retry_codes]]], call[name[overrides].get, parameter[constant[retry_params]]], name[retry_names]]]]]
call[name[defaults]][name[snake_name]] assign[=] call[name[gax]._CallSettings, parameter[]]
return[name[defaults]] | keyword[def] identifier[construct_settings] (
identifier[service_name] , identifier[client_config] , identifier[config_override] ,
identifier[retry_names] , identifier[bundle_descriptors] = keyword[None] , identifier[page_descriptors] = keyword[None] ,
identifier[metrics_headers] =(), identifier[kwargs] = keyword[None] ):
literal[string]
identifier[defaults] ={}
identifier[bundle_descriptors] = identifier[bundle_descriptors] keyword[or] {}
identifier[page_descriptors] = identifier[page_descriptors] keyword[or] {}
identifier[kwargs] = identifier[kwargs] keyword[or] {}
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]=[ identifier[value] keyword[for] identifier[value] keyword[in] identifier[kwargs] [ literal[string] ]
keyword[if] identifier[value] [ literal[int] ]. identifier[lower] ()!= literal[string] ]
identifier[kwargs] . identifier[setdefault] ( literal[string] ,[])
identifier[kwargs] [ literal[string] ]. identifier[append] (
( literal[string] , identifier[metrics] . identifier[stringify] ( identifier[metrics] . identifier[fill] ( identifier[metrics_headers] )))
)
keyword[try] :
identifier[service_config] = identifier[client_config] [ literal[string] ][ identifier[service_name] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ( literal[string]
. identifier[format] ( identifier[service_name] ))
identifier[overrides] = identifier[config_override] . identifier[get] ( literal[string] ,{}). identifier[get] ( identifier[service_name] ,{})
keyword[for] identifier[method] keyword[in] identifier[service_config] . identifier[get] ( literal[string] ):
identifier[method_config] = identifier[service_config] [ literal[string] ][ identifier[method] ]
identifier[overriding_method] = identifier[overrides] . identifier[get] ( literal[string] ,{}). identifier[get] ( identifier[method] ,{})
identifier[snake_name] = identifier[_upper_camel_to_lower_under] ( identifier[method] )
keyword[if] identifier[overriding_method] keyword[and] identifier[overriding_method] . identifier[get] ( literal[string] ):
identifier[timeout] = identifier[overriding_method] [ literal[string] ]
keyword[else] :
identifier[timeout] = identifier[method_config] [ literal[string] ]
identifier[timeout] /= identifier[_MILLIS_PER_SECOND]
identifier[bundle_descriptor] = identifier[bundle_descriptors] . identifier[get] ( identifier[snake_name] )
identifier[bundling_config] = identifier[method_config] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[overriding_method] keyword[and] literal[string] keyword[in] identifier[overriding_method] :
identifier[bundling_config] = identifier[overriding_method] [ literal[string] ]
identifier[bundler] = identifier[_construct_bundling] ( identifier[bundling_config] , identifier[bundle_descriptor] )
identifier[retry_options] = identifier[_merge_retry_options] (
identifier[_construct_retry] ( identifier[method_config] , identifier[service_config] [ literal[string] ],
identifier[service_config] [ literal[string] ], identifier[retry_names] ),
identifier[_construct_retry] ( identifier[overriding_method] , identifier[overrides] . identifier[get] ( literal[string] ),
identifier[overrides] . identifier[get] ( literal[string] ), identifier[retry_names] ))
identifier[defaults] [ identifier[snake_name] ]= identifier[gax] . identifier[_CallSettings] (
identifier[timeout] = identifier[timeout] , identifier[retry] = identifier[retry_options] ,
identifier[page_descriptor] = identifier[page_descriptors] . identifier[get] ( identifier[snake_name] ),
identifier[bundler] = identifier[bundler] , identifier[bundle_descriptor] = identifier[bundle_descriptor] ,
identifier[kwargs] = identifier[kwargs] )
keyword[return] identifier[defaults] | def construct_settings(service_name, client_config, config_override, retry_names, bundle_descriptors=None, page_descriptors=None, metrics_headers=(), kwargs=None):
"""Constructs a dictionary mapping method names to _CallSettings.
The ``client_config`` parameter is parsed from a client configuration JSON
file of the form:
.. code-block:: json
{
"interfaces": {
"google.fake.v1.ServiceName": {
"retry_codes": {
"idempotent": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.2,
"max_retry_delay_millis": 1000,
"initial_rpc_timeout_millis": 2000,
"rpc_timeout_multiplier": 1.5,
"max_rpc_timeout_millis": 30000,
"total_timeout_millis": 45000
}
},
"methods": {
"CreateFoo": {
"retry_codes_name": "idempotent",
"retry_params_name": "default",
"timeout_millis": 30000
},
"Publish": {
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
"bundling": {
"element_count_threshold": 40,
"element_count_limit": 200,
"request_byte_threshold": 90000,
"request_byte_limit": 100000,
"delay_threshold_millis": 100
}
}
}
}
}
}
Args:
service_name (str): The fully-qualified name of this service, used as a
key into the client config file (in the example above, this value
would be ``google.fake.v1.ServiceName``).
client_config (dict): A dictionary parsed from the standard API client
config file.
bundle_descriptors (Mapping[str, BundleDescriptor]): A dictionary of
method names to BundleDescriptor objects for methods that are
bundling-enabled.
page_descriptors (Mapping[str, PageDescriptor]): A dictionary of method
names to PageDescriptor objects for methods that are page
streaming-enabled.
config_override (str): A dictionary in the same structure of
client_config to override the settings. Usually client_config is
supplied from the default config and config_override will be
specified by users.
retry_names (Mapping[str, object]): A dictionary mapping the strings
referring to response status codes to the Python objects representing
those codes.
metrics_headers (Mapping[str, str]): Dictionary of headers to be passed
for analytics. Sent as a dictionary; eventually becomes a
space-separated string (e.g. 'foo/1.0.0 bar/3.14.1').
kwargs (dict): The keyword arguments to be passed to the API calls.
Returns:
dict: A dictionary mapping method names to _CallSettings.
Raises:
KeyError: If the configuration for the service in question cannot be
located in the provided ``client_config``.
"""
# pylint: disable=too-many-locals
# pylint: disable=protected-access
defaults = {}
bundle_descriptors = bundle_descriptors or {}
page_descriptors = page_descriptors or {}
kwargs = kwargs or {}
# Sanity check: It is possible that we got this far but some headers
# were specified with an older library, which sends them as...
# kwargs={'metadata': [('x-goog-api-client', 'foo/1.0 bar/3.0')]}
#
# Note: This is the final format we will send down to GRPC shortly.
#
# Remove any x-goog-api-client header that may have been present
# in the metadata list.
if 'metadata' in kwargs:
kwargs['metadata'] = [value for value in kwargs['metadata'] if value[0].lower() != 'x-goog-api-client'] # depends on [control=['if'], data=['kwargs']]
# Fill out the metrics headers with GAX and GRPC info, and convert
# to a string in the format that the GRPC layer expects.
kwargs.setdefault('metadata', [])
kwargs['metadata'].append(('x-goog-api-client', metrics.stringify(metrics.fill(metrics_headers))))
try:
service_config = client_config['interfaces'][service_name] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError('Client configuration not found for service: {}'.format(service_name)) # depends on [control=['except'], data=[]]
overrides = config_override.get('interfaces', {}).get(service_name, {})
for method in service_config.get('methods'):
method_config = service_config['methods'][method]
overriding_method = overrides.get('methods', {}).get(method, {})
snake_name = _upper_camel_to_lower_under(method)
if overriding_method and overriding_method.get('timeout_millis'):
timeout = overriding_method['timeout_millis'] # depends on [control=['if'], data=[]]
else:
timeout = method_config['timeout_millis']
timeout /= _MILLIS_PER_SECOND
bundle_descriptor = bundle_descriptors.get(snake_name)
bundling_config = method_config.get('bundling', None)
if overriding_method and 'bundling' in overriding_method:
bundling_config = overriding_method['bundling'] # depends on [control=['if'], data=[]]
bundler = _construct_bundling(bundling_config, bundle_descriptor)
retry_options = _merge_retry_options(_construct_retry(method_config, service_config['retry_codes'], service_config['retry_params'], retry_names), _construct_retry(overriding_method, overrides.get('retry_codes'), overrides.get('retry_params'), retry_names))
defaults[snake_name] = gax._CallSettings(timeout=timeout, retry=retry_options, page_descriptor=page_descriptors.get(snake_name), bundler=bundler, bundle_descriptor=bundle_descriptor, kwargs=kwargs) # depends on [control=['for'], data=['method']]
return defaults |
def check_target_api(api, arch):
    """Warn if the user's target API is less than the current minimum
    recommendation, and abort for armeabi builds above its last
    supported API level.
    """
    if arch == 'armeabi' and api >= ARMEABI_MAX_TARGET_API:
        # armeabi cannot be built at or beyond this API level; stop the
        # build and point the user at armeabi-v7a.
        message = (
            'Asked to build for armeabi architecture with API '
            '{}, but API {} or greater does not support armeabi'.format(
                api, ARMEABI_MAX_TARGET_API))
        raise BuildInterruptingException(
            message,
            instructions='You probably want to build with --arch=armeabi-v7a instead')
    if api < MIN_TARGET_API:
        warning('Target API {} < {}'.format(api, MIN_TARGET_API))
        warning(OLD_API_MESSAGE)
constant[Warn if the user's target API is less than the current minimum
recommendation
]
if <ast.BoolOp object at 0x7da18f09c130> begin[:]
<ast.Raise object at 0x7da1b1c485e0>
if compare[name[api] less[<] name[MIN_TARGET_API]] begin[:]
call[name[warning], parameter[call[constant[Target API {} < {}].format, parameter[name[api], name[MIN_TARGET_API]]]]]
call[name[warning], parameter[name[OLD_API_MESSAGE]]] | keyword[def] identifier[check_target_api] ( identifier[api] , identifier[arch] ):
literal[string]
keyword[if] identifier[api] >= identifier[ARMEABI_MAX_TARGET_API] keyword[and] identifier[arch] == literal[string] :
keyword[raise] identifier[BuildInterruptingException] (
literal[string]
literal[string] . identifier[format] (
identifier[api] , identifier[ARMEABI_MAX_TARGET_API] ),
identifier[instructions] = literal[string] )
keyword[if] identifier[api] < identifier[MIN_TARGET_API] :
identifier[warning] ( literal[string] . identifier[format] ( identifier[api] , identifier[MIN_TARGET_API] ))
identifier[warning] ( identifier[OLD_API_MESSAGE] ) | def check_target_api(api, arch):
"""Warn if the user's target API is less than the current minimum
recommendation
"""
if api >= ARMEABI_MAX_TARGET_API and arch == 'armeabi':
raise BuildInterruptingException('Asked to build for armeabi architecture with API {}, but API {} or greater does not support armeabi'.format(api, ARMEABI_MAX_TARGET_API), instructions='You probably want to build with --arch=armeabi-v7a instead') # depends on [control=['if'], data=[]]
if api < MIN_TARGET_API:
warning('Target API {} < {}'.format(api, MIN_TARGET_API))
warning(OLD_API_MESSAGE) # depends on [control=['if'], data=['api', 'MIN_TARGET_API']] |
def merge(self, indict):
    """
    A recursive update - useful for merging config files.
    >>> a = '''[section1]
    ... option1 = True
    ... [[subsection]]
    ... more_options = False
    ... # end of file'''.splitlines()
    >>> b = '''# File is user.ini
    ... [section1]
    ... option1 = False
    ... # end of file'''.splitlines()
    >>> c1 = ConfigObj(b)
    >>> c2 = ConfigObj(a)
    >>> c2.merge(c1)
    >>> c2
    ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
    """
    # Snapshot the items so mutating self cannot disturb iteration when
    # indict aliases (part of) self.
    for key, incoming in list(indict.items()):
        both_are_sections = (key in self
                             and isinstance(self[key], dict)
                             and isinstance(incoming, dict))
        if both_are_sections:
            # Two sections: merge their contents recursively.
            self[key].merge(incoming)
        else:
            # Scalar value (or type mismatch): the incoming value wins.
            self[key] = incoming
constant[
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
]
for taget[tuple[[<ast.Name object at 0x7da1b0f507f0>, <ast.Name object at 0x7da1b0f500a0>]]] in starred[call[name[list], parameter[call[name[indict].items, parameter[]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b0f50df0> begin[:]
call[call[name[self]][name[key]].merge, parameter[name[val]]] | keyword[def] identifier[merge] ( identifier[self] , identifier[indict] ):
literal[string]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[list] ( identifier[indict] . identifier[items] ()):
keyword[if] ( identifier[key] keyword[in] identifier[self] keyword[and] identifier[isinstance] ( identifier[self] [ identifier[key] ], identifier[dict] ) keyword[and]
identifier[isinstance] ( identifier[val] , identifier[dict] )):
identifier[self] [ identifier[key] ]. identifier[merge] ( identifier[val] )
keyword[else] :
identifier[self] [ identifier[key] ]= identifier[val] | def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for (key, val) in list(indict.items()):
if key in self and isinstance(self[key], dict) and isinstance(val, dict):
self[key].merge(val) # depends on [control=['if'], data=[]]
else:
self[key] = val # depends on [control=['for'], data=[]] |
def rotate(self):
    """Rotate the files to disk.
    This is done by calling `store.close()` on each store, bumping the
    batchno and reopening the stores using their factories.
    """
    next_batch = self.batchno + 1
    self._logger.info('Rotating data files. New batch number will be: %s',
                      next_batch)
    # Close and drop the current store before reopening so the factory
    # never sees two live stores at once.
    self.estore.close()
    self.estore = None
    self.batchno = next_batch
    self.estore = self._open_event_store()
constant[Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories.
]
call[name[self]._logger.info, parameter[constant[Rotating data files. New batch number will be: %s], binary_operation[name[self].batchno + constant[1]]]]
call[name[self].estore.close, parameter[]]
name[self].estore assign[=] constant[None]
<ast.AugAssign object at 0x7da1b2487910>
name[self].estore assign[=] call[name[self]._open_event_store, parameter[]] | keyword[def] identifier[rotate] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] ,
identifier[self] . identifier[batchno] + literal[int] )
identifier[self] . identifier[estore] . identifier[close] ()
identifier[self] . identifier[estore] = keyword[None]
identifier[self] . identifier[batchno] += literal[int]
identifier[self] . identifier[estore] = identifier[self] . identifier[_open_event_store] () | def rotate(self):
"""Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories.
"""
self._logger.info('Rotating data files. New batch number will be: %s', self.batchno + 1)
self.estore.close()
self.estore = None
self.batchno += 1
self.estore = self._open_event_store() |
def timing(name=''):
    """Log start, end, and duration.

    Generator intended for use as a context manager: logs the start
    timestamp, yields to the wrapped block, then logs the end timestamp
    and the total duration in whole minutes.

    Args:
        name: Label included in every log line to identify the job.
    """
    start = datetime.datetime.now()
    tf.logging.info('Starting job [%s] at %s', name, start.strftime('%H:%M'))
    try:
        yield
    finally:
        # Log completion even when the wrapped block raises, so every
        # started job leaves an end/duration record in the logs.
        end = datetime.datetime.now()
        tf.logging.info('Finished job [%s] at %s', name, end.strftime('%H:%M'))
        duration_mins = (end - start).total_seconds() / 60
        tf.logging.info('Total time [%s] (m): %d', name, int(duration_mins))
constant[Log start, end, and duration.]
variable[start] assign[=] call[name[datetime].datetime.now, parameter[]]
variable[timestamp] assign[=] call[name[start].strftime, parameter[constant[%H:%M]]]
call[name[tf].logging.info, parameter[constant[Starting job [%s] at %s], name[name], name[timestamp]]]
<ast.Yield object at 0x7da1b1e124a0>
variable[end] assign[=] call[name[datetime].datetime.now, parameter[]]
variable[timestamp] assign[=] call[name[end].strftime, parameter[constant[%H:%M]]]
call[name[tf].logging.info, parameter[constant[Finished job [%s] at %s], name[name], name[timestamp]]]
variable[duration] assign[=] binary_operation[name[end] - name[start]]
variable[duration_mins] assign[=] binary_operation[call[name[duration].total_seconds, parameter[]] / constant[60]]
call[name[tf].logging.info, parameter[constant[Total time [%s] (m): %d], name[name], call[name[int], parameter[name[duration_mins]]]]] | keyword[def] identifier[timing] ( identifier[name] = literal[string] ):
literal[string]
identifier[start] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[timestamp] = identifier[start] . identifier[strftime] ( literal[string] )
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] , identifier[name] , identifier[timestamp] )
keyword[yield]
identifier[end] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[timestamp] = identifier[end] . identifier[strftime] ( literal[string] )
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] , identifier[name] , identifier[timestamp] )
identifier[duration] = identifier[end] - identifier[start]
identifier[duration_mins] = identifier[duration] . identifier[total_seconds] ()/ literal[int]
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] , identifier[name] , identifier[int] ( identifier[duration_mins] )) | def timing(name=''):
"""Log start, end, and duration."""
start = datetime.datetime.now()
timestamp = start.strftime('%H:%M')
tf.logging.info('Starting job [%s] at %s', name, timestamp)
yield
end = datetime.datetime.now()
timestamp = end.strftime('%H:%M')
tf.logging.info('Finished job [%s] at %s', name, timestamp)
duration = end - start
duration_mins = duration.total_seconds() / 60
tf.logging.info('Total time [%s] (m): %d', name, int(duration_mins)) |
def queueTypeUpgrade(self, oldtype):
    """
    Queue a type upgrade for C{oldtype}.

    The append is skipped when the type is already queued, so each old
    type appears in the queue at most once.
    """
    already_queued = oldtype in self._oldTypesRemaining
    if not already_queued:
        self._oldTypesRemaining.append(oldtype)
constant[
Queue a type upgrade for C{oldtype}.
]
if compare[name[oldtype] <ast.NotIn object at 0x7da2590d7190> name[self]._oldTypesRemaining] begin[:]
call[name[self]._oldTypesRemaining.append, parameter[name[oldtype]]] | keyword[def] identifier[queueTypeUpgrade] ( identifier[self] , identifier[oldtype] ):
literal[string]
keyword[if] identifier[oldtype] keyword[not] keyword[in] identifier[self] . identifier[_oldTypesRemaining] :
identifier[self] . identifier[_oldTypesRemaining] . identifier[append] ( identifier[oldtype] ) | def queueTypeUpgrade(self, oldtype):
"""
Queue a type upgrade for C{oldtype}.
"""
if oldtype not in self._oldTypesRemaining:
self._oldTypesRemaining.append(oldtype) # depends on [control=['if'], data=['oldtype']] |
def matches(self, path):
    """Tests if the given path matches the pattern.
    Note that the unicode translation of the path is matched, so
    replacement characters might have been added.
    """
    prepared = self._prepare_path(path)
    found = self.full_regex.search(prepared)
    return found is not None
constant[Tests if the given path matches the pattern.
Note that the unicode translation of the patch is matched, so
replacement characters might have been added.
]
variable[path] assign[=] call[name[self]._prepare_path, parameter[name[path]]]
return[compare[call[name[self].full_regex.search, parameter[name[path]]] is_not constant[None]]] | keyword[def] identifier[matches] ( identifier[self] , identifier[path] ):
literal[string]
identifier[path] = identifier[self] . identifier[_prepare_path] ( identifier[path] )
keyword[return] identifier[self] . identifier[full_regex] . identifier[search] ( identifier[path] ) keyword[is] keyword[not] keyword[None] | def matches(self, path):
"""Tests if the given path matches the pattern.
Note that the unicode translation of the patch is matched, so
replacement characters might have been added.
"""
path = self._prepare_path(path)
return self.full_regex.search(path) is not None |
def check_input(input):
    """
    :param input: ndarray or list of ndarray or JTensor or list of JTensor.
    :return: (list of JTensor, isTable)
    """
    def _as_jtensor(item):
        # Wrap raw ndarrays; pass existing JTensors through untouched.
        if isinstance(item, np.ndarray):
            return JTensor.from_ndarray(item)
        if isinstance(item, JTensor):
            return item
        raise Exception("Error unknown input type %s" % type(item))

    if type(input) is list:
        if len(input) == 0:
            raise Exception('Error when checking: empty input')
        # A list input maps to a Table on the JVM side.
        return [_as_jtensor(item) for item in input], True
    return [_as_jtensor(input)], False
constant[
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: (list of JTensor, isTable)
]
def function[to_jtensor, parameter[i]]:
if call[name[isinstance], parameter[name[i], name[np].ndarray]] begin[:]
return[call[name[JTensor].from_ndarray, parameter[name[i]]]]
if compare[call[name[type], parameter[name[input]]] is name[list]] begin[:]
if compare[call[name[len], parameter[name[input]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20e963910>
return[tuple[[<ast.Call object at 0x7da20e961420>, <ast.Constant object at 0x7da20e961f60>]]] | keyword[def] identifier[check_input] ( identifier[input] ):
literal[string]
keyword[def] identifier[to_jtensor] ( identifier[i] ):
keyword[if] identifier[isinstance] ( identifier[i] , identifier[np] . identifier[ndarray] ):
keyword[return] identifier[JTensor] . identifier[from_ndarray] ( identifier[i] )
keyword[elif] identifier[isinstance] ( identifier[i] , identifier[JTensor] ):
keyword[return] identifier[i]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[type] ( identifier[i] ))
keyword[if] identifier[type] ( identifier[input] ) keyword[is] identifier[list] :
keyword[if] identifier[len] ( identifier[input] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[list] ( identifier[map] ( keyword[lambda] identifier[i] : identifier[to_jtensor] ( identifier[i] ), identifier[input] )), keyword[True]
keyword[else] :
keyword[return] [ identifier[to_jtensor] ( identifier[input] )], keyword[False] | def check_input(input):
"""
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: (list of JTensor, isTable)
"""
def to_jtensor(i):
if isinstance(i, np.ndarray):
return JTensor.from_ndarray(i) # depends on [control=['if'], data=[]]
elif isinstance(i, JTensor):
return i # depends on [control=['if'], data=[]]
else:
raise Exception('Error unknown input type %s' % type(i))
if type(input) is list:
if len(input) == 0:
raise Exception('Error when checking: empty input') # depends on [control=['if'], data=[]]
return (list(map(lambda i: to_jtensor(i), input)), True) # depends on [control=['if'], data=['list']]
else:
return ([to_jtensor(input)], False) |
def getComment(self, repo_user, repo_name, comment_id):
    """
    GET /repos/:owner/:repo/pull/comments/:number
    :param comment_id: The review comment's ID.
    """
    # Build the API path segments; the comment ID must be a string.
    segments = ['repos', repo_user, repo_name,
                'pulls', 'comments', str(comment_id)]
    return self.api.makeRequest(segments)
constant[
GET /repos/:owner/:repo/pull/comments/:number
:param comment_id: The review comment's ID.
]
return[call[name[self].api.makeRequest, parameter[list[[<ast.Constant object at 0x7da2041d8ee0>, <ast.Name object at 0x7da2041d85b0>, <ast.Name object at 0x7da2041db520>, <ast.Constant object at 0x7da2041db850>, <ast.Constant object at 0x7da2041db0d0>, <ast.Call object at 0x7da2041d8100>]]]]] | keyword[def] identifier[getComment] ( identifier[self] , identifier[repo_user] , identifier[repo_name] , identifier[comment_id] ):
literal[string]
keyword[return] identifier[self] . identifier[api] . identifier[makeRequest] (
[ literal[string] , identifier[repo_user] , identifier[repo_name] ,
literal[string] , literal[string] , identifier[str] ( identifier[comment_id] )]) | def getComment(self, repo_user, repo_name, comment_id):
"""
GET /repos/:owner/:repo/pull/comments/:number
:param comment_id: The review comment's ID.
"""
return self.api.makeRequest(['repos', repo_user, repo_name, 'pulls', 'comments', str(comment_id)]) |
def reset(self):
    """
    Clear all cell and segment activity.
    Also discards the state carried over from the previous timestep so
    the next compute starts from a blank context.
    """
    super(ApicalTiebreakSequenceMemory, self).reset()
    # Each attribute gets its own fresh empty array.
    for attr in ("prevApicalInput",
                 "prevApicalGrowthCandidates",
                 "prevPredictedCells"):
        setattr(self, attr, np.empty(0, dtype="uint32"))
constant[
Clear all cell and segment activity.
]
call[call[name[super], parameter[name[ApicalTiebreakSequenceMemory], name[self]]].reset, parameter[]]
name[self].prevApicalInput assign[=] call[name[np].empty, parameter[constant[0]]]
name[self].prevApicalGrowthCandidates assign[=] call[name[np].empty, parameter[constant[0]]]
name[self].prevPredictedCells assign[=] call[name[np].empty, parameter[constant[0]]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[ApicalTiebreakSequenceMemory] , identifier[self] ). identifier[reset] ()
identifier[self] . identifier[prevApicalInput] = identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = literal[string] )
identifier[self] . identifier[prevApicalGrowthCandidates] = identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = literal[string] )
identifier[self] . identifier[prevPredictedCells] = identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = literal[string] ) | def reset(self):
"""
Clear all cell and segment activity.
"""
super(ApicalTiebreakSequenceMemory, self).reset()
self.prevApicalInput = np.empty(0, dtype='uint32')
self.prevApicalGrowthCandidates = np.empty(0, dtype='uint32')
self.prevPredictedCells = np.empty(0, dtype='uint32') |
def sed(path,
        before,
        after,
        limit='',
        backup='.bak',
        options='-r -e',
        flags='g',
        escape_all=False,
        negate_match=False):
    '''
    .. deprecated:: 0.17.0
        Use :py:func:`~salt.modules.file.replace` instead.
    Make a simple edit to a file
    Equivalent to:
    .. code-block:: bash
        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"
    path
        The full path to the file to be edited
    before
        A pattern to find in order to replace with ``after``
    after
        Text that will replace ``before``
    limit : ``''``
        An initial pattern to search for before searching for ``before``
    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup
    options : ``-r -e``
        Options to pass to sed
    flags : ``g``
        Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
        matching
    escape_all : False
        Passed through to ``_sed_esc`` when escaping ``before``/``after``/
        ``limit``; presumably escapes all regex special characters instead of
        just slashes and quotes -- confirm against ``_sed_esc``
    negate_match : False
        Negate the search command (``!``)
        .. versionadded:: 0.17.0
    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.
    CLI Example:
    .. code-block:: bash
        salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
    '''
    # Largely inspired by Fabric's contrib.files.sed()
    # XXX:dc: Do we really want to always force escaping?
    #
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        # Nothing to edit: historical behaviour is to return False, not raise.
        return False
    # Mandate that before and after are strings
    before = six.text_type(before)
    after = six.text_type(after)
    # Escape characters that would break the generated sed expression.
    before = _sed_esc(before, escape_all)
    after = _sed_esc(after, escape_all)
    limit = _sed_esc(limit, escape_all)
    if sys.platform == 'darwin':
        # BSD sed (macOS) spells "extended regex" as -E instead of GNU's -r.
        options = options.replace('-r', '-E')
    cmd = ['sed']
    # -i<ext> edits in place keeping a backup with that extension;
    # bare -i edits in place with no backup.
    cmd.append('-i{0}'.format(backup) if backup else '-i')
    cmd.extend(salt.utils.args.shlex_split(options))
    cmd.append(
        # Assemble the sed program: [/limit/ ][!]s/before/after/flags
        r'{limit}{negate_match}s/{before}/{after}/{flags}'.format(
            limit='/{0}/ '.format(limit) if limit else '',
            negate_match='!' if negate_match else '',
            before=before,
            after=after,
            flags=flags
        )
    )
    cmd.append(path)
    # python_shell=False: cmd is already an argv list, no shell quoting wanted.
    return __salt__['cmd.run_all'](cmd, python_shell=False)
constant[
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Make a simple edit to a file
Equivalent to:
.. code-block:: bash
sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"
path
The full path to the file to be edited
before
A pattern to find in order to replace with ``after``
after
Text that will replace ``before``
limit : ``''``
An initial pattern to search for before searching for ``before``
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
options : ``-r -e``
Options to pass to sed
flags : ``g``
Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
matching
negate_match : False
Negate the search command (``!``)
.. versionadded:: 0.17.0
Forward slashes and single quotes will be escaped automatically in the
``before`` and ``after`` patterns.
CLI Example:
.. code-block:: bash
salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da2054a7430> begin[:]
return[constant[False]]
variable[before] assign[=] call[name[six].text_type, parameter[name[before]]]
variable[after] assign[=] call[name[six].text_type, parameter[name[after]]]
variable[before] assign[=] call[name[_sed_esc], parameter[name[before], name[escape_all]]]
variable[after] assign[=] call[name[_sed_esc], parameter[name[after], name[escape_all]]]
variable[limit] assign[=] call[name[_sed_esc], parameter[name[limit], name[escape_all]]]
if compare[name[sys].platform equal[==] constant[darwin]] begin[:]
variable[options] assign[=] call[name[options].replace, parameter[constant[-r], constant[-E]]]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b20413c0>]]
call[name[cmd].append, parameter[<ast.IfExp object at 0x7da1b2042020>]]
call[name[cmd].extend, parameter[call[name[salt].utils.args.shlex_split, parameter[name[options]]]]]
call[name[cmd].append, parameter[call[constant[{limit}{negate_match}s/{before}/{after}/{flags}].format, parameter[]]]]
call[name[cmd].append, parameter[name[path]]]
return[call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]] | keyword[def] identifier[sed] ( identifier[path] ,
identifier[before] ,
identifier[after] ,
identifier[limit] = literal[string] ,
identifier[backup] = literal[string] ,
identifier[options] = literal[string] ,
identifier[flags] = literal[string] ,
identifier[escape_all] = keyword[False] ,
identifier[negate_match] = keyword[False] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[return] keyword[False]
identifier[before] = identifier[six] . identifier[text_type] ( identifier[before] )
identifier[after] = identifier[six] . identifier[text_type] ( identifier[after] )
identifier[before] = identifier[_sed_esc] ( identifier[before] , identifier[escape_all] )
identifier[after] = identifier[_sed_esc] ( identifier[after] , identifier[escape_all] )
identifier[limit] = identifier[_sed_esc] ( identifier[limit] , identifier[escape_all] )
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[options] = identifier[options] . identifier[replace] ( literal[string] , literal[string] )
identifier[cmd] =[ literal[string] ]
identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[backup] ) keyword[if] identifier[backup] keyword[else] literal[string] )
identifier[cmd] . identifier[extend] ( identifier[salt] . identifier[utils] . identifier[args] . identifier[shlex_split] ( identifier[options] ))
identifier[cmd] . identifier[append] (
literal[string] . identifier[format] (
identifier[limit] = literal[string] . identifier[format] ( identifier[limit] ) keyword[if] identifier[limit] keyword[else] literal[string] ,
identifier[negate_match] = literal[string] keyword[if] identifier[negate_match] keyword[else] literal[string] ,
identifier[before] = identifier[before] ,
identifier[after] = identifier[after] ,
identifier[flags] = identifier[flags]
)
)
identifier[cmd] . identifier[append] ( identifier[path] )
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] ) | def sed(path, before, after, limit='', backup='.bak', options='-r -e', flags='g', escape_all=False, negate_match=False):
"""
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Make a simple edit to a file
Equivalent to:
.. code-block:: bash
sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"
path
The full path to the file to be edited
before
A pattern to find in order to replace with ``after``
after
Text that will replace ``before``
limit : ``''``
An initial pattern to search for before searching for ``before``
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
options : ``-r -e``
Options to pass to sed
flags : ``g``
Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
matching
negate_match : False
Negate the search command (``!``)
.. versionadded:: 0.17.0
Forward slashes and single quotes will be escaped automatically in the
``before`` and ``after`` patterns.
CLI Example:
.. code-block:: bash
salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
"""
# Largely inspired by Fabric's contrib.files.sed()
# XXX:dc: Do we really want to always force escaping?
#
path = os.path.expanduser(path)
if not os.path.exists(path):
return False # depends on [control=['if'], data=[]]
# Mandate that before and after are strings
before = six.text_type(before)
after = six.text_type(after)
before = _sed_esc(before, escape_all)
after = _sed_esc(after, escape_all)
limit = _sed_esc(limit, escape_all)
if sys.platform == 'darwin':
options = options.replace('-r', '-E') # depends on [control=['if'], data=[]]
cmd = ['sed']
cmd.append('-i{0}'.format(backup) if backup else '-i')
cmd.extend(salt.utils.args.shlex_split(options))
cmd.append('{limit}{negate_match}s/{before}/{after}/{flags}'.format(limit='/{0}/ '.format(limit) if limit else '', negate_match='!' if negate_match else '', before=before, after=after, flags=flags))
cmd.append(path)
return __salt__['cmd.run_all'](cmd, python_shell=False) |
def get_draft(self, layer_id, expand=[]):
    """
    Get the current draft version of a layer.
    :raises NotFound: if there is no draft version.
    """
    # NOTE(review): mutable default for ``expand`` is harmless here because
    # it is only forwarded, never mutated.
    url = self.client.get_url('VERSION', 'GET', 'draft', {'layer_id': layer_id})
    return self._get(url, expand=expand)
constant[
Get the current draft version of a layer.
:raises NotFound: if there is no draft version.
]
variable[target_url] assign[=] call[name[self].client.get_url, parameter[constant[VERSION], constant[GET], constant[draft], dictionary[[<ast.Constant object at 0x7da1b1046140>], [<ast.Name object at 0x7da1b1047910>]]]]
return[call[name[self]._get, parameter[name[target_url]]]] | keyword[def] identifier[get_draft] ( identifier[self] , identifier[layer_id] , identifier[expand] =[]):
literal[string]
identifier[target_url] = identifier[self] . identifier[client] . identifier[get_url] ( literal[string] , literal[string] , literal[string] ,{ literal[string] : identifier[layer_id] })
keyword[return] identifier[self] . identifier[_get] ( identifier[target_url] , identifier[expand] = identifier[expand] ) | def get_draft(self, layer_id, expand=[]):
"""
Get the current draft version of a layer.
:raises NotFound: if there is no draft version.
"""
target_url = self.client.get_url('VERSION', 'GET', 'draft', {'layer_id': layer_id})
return self._get(target_url, expand=expand) |
def Forster_Zuber(rhol, rhog, mul, kl, Cpl, Hvap, sigma, dPsat, Te=None, q=None):
    r'''Calculates heat transfer coefficient for a evaporator operating
    in the nucleate boiling regime according to [2]_ as presented in [1]_.
    Either heat flux or excess temperature is required.
    With `Te` specified:
    .. math::
        h = 0.00122\left(\frac{k_L^{0.79} C_{p,l}^{0.45}\rho_L^{0.49}}
        {\sigma^{0.5}\mu_L^{0.29} H_{vap}^{0.24} \rho_V^{0.24}}\right)
        \Delta T_e^{0.24} \Delta P_{sat}^{0.75}
    With `q` specified:
    .. math::
        h = \left[0.00122\left(\frac{k_L^{0.79} C_{p,l}^{0.45}\rho_L^{0.49}}
        {\sigma^{0.5}\mu_L^{0.29} H_{vap}^{0.24} \rho_V^{0.24}}\right) \Delta
        P_{sat}^{0.75} q^{0.24}\right]^{\frac{1}{1.24}}
    Parameters
    ----------
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the produced gas [kg/m^3]
    mul : float
        Viscosity of liquid [Pa*s]
    kl : float
        Thermal conductivity of liquid [W/m/K]
    Cpl : float
        Heat capacity of liquid [J/kg/K]
    Hvap : float
        Heat of vaporization of the fluid at P, [J/kg]
    sigma : float
        Surface tension of liquid [N/m]
    dPsat : float
        Difference in saturation pressure of the fluid at Te and T, [Pa]
    Te : float, optional
        Excess wall temperature, [K]
    q : float, optional
        Heat flux, [W/m^2]
    Returns
    -------
    h : float
        Heat transfer coefficient [W/m^2/K]
    Raises
    ------
    ValueError
        If neither `Te` nor `q` is given (a falsy `Te`, e.g. 0, falls
        through to the `q` form, preserving the historical behavior).
    Notes
    -----
    Examples have been found in [1]_ and [3]_ and match exactly.
    Examples
    --------
    Water boiling, with excess temperature of 4.3K from [1]_.
    >>> Forster_Zuber(Te=4.3, dPsat=3906*4.3, Cpl=4180., kl=0.688,
    ... mul=0.275E-3, sigma=0.0588, Hvap=2.25E6, rhol=958., rhog=0.597)
    3519.9239897462644
    References
    ----------
    .. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
       McGraw Hill Professional, 2009.
    .. [2] Forster, H. K., and N. Zuber. "Dynamics of Vapor Bubbles and Boiling
       Heat Transfer." AIChE Journal 1, no. 4 (December 1, 1955): 531-35.
       doi:10.1002/aic.690010425.
    .. [3] Serth, R. W., Process Heat Transfer: Principles,
       Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
    '''
    # Forster-Zuber prefactor, common to both forms of the correlation.
    h0 = 0.00122*(kl**0.79*Cpl**0.45*rhol**0.49/sigma**0.5/mul**0.29/Hvap**0.24/rhog**0.24)
    if Te:
        return h0*Te**0.24*dPsat**0.75
    elif q:
        # Solve h = h0*(h*Te_eff)**0.24*dPsat**0.75 implicitly via q = h*Te.
        return (h0*q**0.24*dPsat**0.75)**(1/1.24)
    else:
        # ValueError is a subclass of Exception, so existing callers that
        # caught the old generic Exception still work.
        raise ValueError('Either q or Te is needed for this correlation')
constant[Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Either heat flux or excess temperature is required.
With `Te` specified:
.. math::
h = 0.00122\left(\frac{k_L^{0.79} C_{p,l}^{0.45}\rho_L^{0.49}}
{\sigma^{0.5}\mu_L^{0.29} H_{vap}^{0.24} \rho_V^{0.24}}\right)
\Delta T_e^{0.24} \Delta P_{sat}^{0.75}
With `q` specified:
.. math::
h = \left[0.00122\left(\frac{k_L^{0.79} C_{p,l}^{0.45}\rho_L^{0.49}}
{\sigma^{0.5}\mu_L^{0.29} H_{vap}^{0.24} \rho_V^{0.24}}\right) \Delta
P_{sat}^{0.75} q^{0.24}\right]^{\frac{1}{1.24}}
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
dPsat : float
Difference in saturation pressure of the fluid at Te and T, [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Examples have been found in [1]_ and [3]_ and match exactly.
Examples
--------
Water boiling, with excess temperature of 4.3K from [1]_.
>>> Forster_Zuber(Te=4.3, dPsat=3906*4.3, Cpl=4180., kl=0.688,
... mul=0.275E-3, sigma=0.0588, Hvap=2.25E6, rhol=958., rhog=0.597)
3519.9239897462644
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] Forster, H. K., and N. Zuber. "Dynamics of Vapor Bubbles and Boiling
Heat Transfer." AIChE Journal 1, no. 4 (December 1, 1955): 531-35.
doi:10.1002/aic.690010425.
.. [3] Serth, R. W., Process Heat Transfer: Principles,
Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
]
if name[Te] begin[:]
return[binary_operation[binary_operation[binary_operation[constant[0.00122] * binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[kl] ** constant[0.79]] * binary_operation[name[Cpl] ** constant[0.45]]] * binary_operation[name[rhol] ** constant[0.49]]] / binary_operation[name[sigma] ** constant[0.5]]] / binary_operation[name[mul] ** constant[0.29]]] / binary_operation[name[Hvap] ** constant[0.24]]] / binary_operation[name[rhog] ** constant[0.24]]]] * binary_operation[name[Te] ** constant[0.24]]] * binary_operation[name[dPsat] ** constant[0.75]]]] | keyword[def] identifier[Forster_Zuber] ( identifier[rhol] , identifier[rhog] , identifier[mul] , identifier[kl] , identifier[Cpl] , identifier[Hvap] , identifier[sigma] , identifier[dPsat] , identifier[Te] = keyword[None] , identifier[q] = keyword[None] ):
literal[string]
keyword[if] identifier[Te] :
keyword[return] literal[int] *( identifier[kl] ** literal[int] * identifier[Cpl] ** literal[int] * identifier[rhol] ** literal[int] / identifier[sigma] ** literal[int] / identifier[mul] ** literal[int] / identifier[Hvap] ** literal[int] / identifier[rhog] ** literal[int] )* identifier[Te] ** literal[int] * identifier[dPsat] ** literal[int]
keyword[elif] identifier[q] :
keyword[return] ( literal[int] *( identifier[kl] ** literal[int] * identifier[Cpl] ** literal[int] * identifier[rhol] ** literal[int] / identifier[sigma] ** literal[int] / identifier[mul] ** literal[int] / identifier[Hvap] ** literal[int] / identifier[rhog] ** literal[int] )* identifier[q] ** literal[int] * identifier[dPsat] ** literal[int] )**( literal[int] / literal[int] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] ) | def Forster_Zuber(rhol, rhog, mul, kl, Cpl, Hvap, sigma, dPsat, Te=None, q=None):
"""Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Either heat flux or excess temperature is required.
With `Te` specified:
.. math::
h = 0.00122\\left(\\frac{k_L^{0.79} C_{p,l}^{0.45}\\rho_L^{0.49}}
{\\sigma^{0.5}\\mu_L^{0.29} H_{vap}^{0.24} \\rho_V^{0.24}}\\right)
\\Delta T_e^{0.24} \\Delta P_{sat}^{0.75}
With `q` specified:
.. math::
h = \\left[0.00122\\left(\\frac{k_L^{0.79} C_{p,l}^{0.45}\\rho_L^{0.49}}
{\\sigma^{0.5}\\mu_L^{0.29} H_{vap}^{0.24} \\rho_V^{0.24}}\\right) \\Delta
P_{sat}^{0.75} q^{0.24}\\right]^{\\frac{1}{1.24}}
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
dPsat : float
Difference in saturation pressure of the fluid at Te and T, [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Examples have been found in [1]_ and [3]_ and match exactly.
Examples
--------
Water boiling, with excess temperature of 4.3K from [1]_.
>>> Forster_Zuber(Te=4.3, dPsat=3906*4.3, Cpl=4180., kl=0.688,
... mul=0.275E-3, sigma=0.0588, Hvap=2.25E6, rhol=958., rhog=0.597)
3519.9239897462644
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] Forster, H. K., and N. Zuber. "Dynamics of Vapor Bubbles and Boiling
Heat Transfer." AIChE Journal 1, no. 4 (December 1, 1955): 531-35.
doi:10.1002/aic.690010425.
.. [3] Serth, R. W., Process Heat Transfer: Principles,
Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
"""
if Te:
return 0.00122 * (kl ** 0.79 * Cpl ** 0.45 * rhol ** 0.49 / sigma ** 0.5 / mul ** 0.29 / Hvap ** 0.24 / rhog ** 0.24) * Te ** 0.24 * dPsat ** 0.75 # depends on [control=['if'], data=[]]
elif q:
return (0.00122 * (kl ** 0.79 * Cpl ** 0.45 * rhol ** 0.49 / sigma ** 0.5 / mul ** 0.29 / Hvap ** 0.24 / rhog ** 0.24) * q ** 0.24 * dPsat ** 0.75) ** (1 / 1.24) # depends on [control=['if'], data=[]]
else:
raise Exception('Either q or Te is needed for this correlation') |
def list(self):
    """
    Print the existing CloudFormation stacks in the indicated region,
    walking the paginated ``list_stacks`` results and skipping stacks in
    the ``DELETE_COMPLETE`` state.
    Args:
        None
    Returns:
        True if True
    Todo:
        Figure out what could go wrong and take steps
        to hanlde problems.
    """
    # NOTE: the method name shadows the builtin ``list``; kept for API
    # compatibility with existing callers.
    self._initialize_list()
    response = self._cloudFormation.list_stacks()
    print('Stack(s):')
    while True:
        for summary in response.get('StackSummaries', []):
            status = summary['StackStatus']
            if status != 'DELETE_COMPLETE':
                print(' [{}] - {}'.format(status, summary['StackName']))
        token = response.get('NextToken', None)
        if not token:
            break
        response = self._cloudFormation.list_stacks(NextToken=token)
    return True
constant[
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
]
call[name[self]._initialize_list, parameter[]]
variable[interested] assign[=] constant[True]
variable[response] assign[=] call[name[self]._cloudFormation.list_stacks, parameter[]]
call[name[print], parameter[constant[Stack(s):]]]
while name[interested] begin[:]
if compare[constant[StackSummaries] in name[response]] begin[:]
for taget[name[stack]] in starred[call[name[response]][constant[StackSummaries]]] begin[:]
variable[stack_status] assign[=] call[name[stack]][constant[StackStatus]]
if compare[name[stack_status] not_equal[!=] constant[DELETE_COMPLETE]] begin[:]
call[name[print], parameter[call[constant[ [{}] - {}].format, parameter[call[name[stack]][constant[StackStatus]], call[name[stack]][constant[StackName]]]]]]
variable[next_token] assign[=] call[name[response].get, parameter[constant[NextToken], constant[None]]]
if name[next_token] begin[:]
variable[response] assign[=] call[name[self]._cloudFormation.list_stacks, parameter[]]
return[constant[True]] | keyword[def] identifier[list] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_initialize_list] ()
identifier[interested] = keyword[True]
identifier[response] = identifier[self] . identifier[_cloudFormation] . identifier[list_stacks] ()
identifier[print] ( literal[string] )
keyword[while] identifier[interested] :
keyword[if] literal[string] keyword[in] identifier[response] :
keyword[for] identifier[stack] keyword[in] identifier[response] [ literal[string] ]:
identifier[stack_status] = identifier[stack] [ literal[string] ]
keyword[if] identifier[stack_status] != literal[string] :
identifier[print] ( literal[string] . identifier[format] ( identifier[stack] [ literal[string] ], identifier[stack] [ literal[string] ]))
identifier[next_token] = identifier[response] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[next_token] :
identifier[response] = identifier[self] . identifier[_cloudFormation] . identifier[list_stacks] ( identifier[NextToken] = identifier[next_token] )
keyword[else] :
identifier[interested] = keyword[False]
keyword[return] keyword[True] | def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName'])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stack']] # depends on [control=['if'], data=['response']]
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token) # depends on [control=['if'], data=[]]
else:
interested = False # depends on [control=['while'], data=[]]
return True |
def _query(cls, **kwargs):
    """
    Generic query implementation that is used
    by the resources.
    Pops ``api`` and ``url`` out of the keyword arguments, performs the
    GET with the remaining kwargs as query parameters, and wraps the
    paginated response in a :class:`Collection`.
    """
    from sevenbridges.models.link import Link
    from sevenbridges.meta.collection import Collection
    api = kwargs.pop('api', cls._API)
    url = kwargs.pop('url')
    logger.info(
        'Querying {} resource'.format(cls),
        extra={'resource': cls.__name__, 'query': kwargs},
    )
    response = api.get(url=url, params=kwargs)
    payload = response.json()
    return Collection(
        resource=cls,
        href=payload['href'],
        total=response.headers['x-total-matching-query'],
        items=[cls(api=api, **entry) for entry in payload['items']],
        links=[Link(**entry) for entry in payload['links']],
        api=api,
    )
constant[
Generic query implementation that is used
by the resources.
]
from relative_module[sevenbridges.models.link] import module[Link]
from relative_module[sevenbridges.meta.collection] import module[Collection]
variable[api] assign[=] call[name[kwargs].pop, parameter[constant[api], name[cls]._API]]
variable[url] assign[=] call[name[kwargs].pop, parameter[constant[url]]]
variable[extra] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9bfd0>, <ast.Constant object at 0x7da18dc9bd60>], [<ast.Attribute object at 0x7da18dc98f70>, <ast.Name object at 0x7da18dc9af50>]]
call[name[logger].info, parameter[call[constant[Querying {} resource].format, parameter[name[cls]]]]]
variable[response] assign[=] call[name[api].get, parameter[]]
variable[data] assign[=] call[name[response].json, parameter[]]
variable[total] assign[=] call[name[response].headers][constant[x-total-matching-query]]
variable[items] assign[=] <ast.ListComp object at 0x7da18ede7d90>
variable[links] assign[=] <ast.ListComp object at 0x7da18ede7850>
variable[href] assign[=] call[name[data]][constant[href]]
return[call[name[Collection], parameter[]]] | keyword[def] identifier[_query] ( identifier[cls] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[sevenbridges] . identifier[models] . identifier[link] keyword[import] identifier[Link]
keyword[from] identifier[sevenbridges] . identifier[meta] . identifier[collection] keyword[import] identifier[Collection]
identifier[api] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[cls] . identifier[_API] )
identifier[url] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[extra] ={ literal[string] : identifier[cls] . identifier[__name__] , literal[string] : identifier[kwargs] }
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[cls] ), identifier[extra] = identifier[extra] )
identifier[response] = identifier[api] . identifier[get] ( identifier[url] = identifier[url] , identifier[params] = identifier[kwargs] )
identifier[data] = identifier[response] . identifier[json] ()
identifier[total] = identifier[response] . identifier[headers] [ literal[string] ]
identifier[items] =[ identifier[cls] ( identifier[api] = identifier[api] ,** identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[data] [ literal[string] ]]
identifier[links] =[ identifier[Link] (** identifier[link] ) keyword[for] identifier[link] keyword[in] identifier[data] [ literal[string] ]]
identifier[href] = identifier[data] [ literal[string] ]
keyword[return] identifier[Collection] (
identifier[resource] = identifier[cls] , identifier[href] = identifier[href] , identifier[total] = identifier[total] , identifier[items] = identifier[items] ,
identifier[links] = identifier[links] , identifier[api] = identifier[api]
) | def _query(cls, **kwargs):
"""
Generic query implementation that is used
by the resources.
"""
from sevenbridges.models.link import Link
from sevenbridges.meta.collection import Collection
api = kwargs.pop('api', cls._API)
url = kwargs.pop('url')
extra = {'resource': cls.__name__, 'query': kwargs}
logger.info('Querying {} resource'.format(cls), extra=extra)
response = api.get(url=url, params=kwargs)
data = response.json()
total = response.headers['x-total-matching-query']
items = [cls(api=api, **item) for item in data['items']]
links = [Link(**link) for link in data['links']]
href = data['href']
return Collection(resource=cls, href=href, total=total, items=items, links=links, api=api) |
def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
  """Build the path of the pickle file that stores the HyperSearch JobID.

  Parameters:
  ----------------------------------------------------------------------
  permWorkDir: Directory path for saved jobID file
  outputLabel: Label string for incorporating into file name for saved jobID
  retval:      Filepath where to store HyperSearch JobID
  """
  # The jobID pickle lives directly inside the permanent work directory;
  # the label is embedded in the file name so runs don't collide.
  jobIDFileName = "%s_HyperSearchJobID.pkl" % (outputLabel,)
  return os.path.join(permWorkDir, jobIDFileName)
constant[Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID
]
variable[basePath] assign[=] name[permWorkDir]
variable[filename] assign[=] binary_operation[constant[%s_HyperSearchJobID.pkl] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bc71480>]]]
variable[filepath] assign[=] call[name[os].path.join, parameter[name[basePath], name[filename]]]
return[name[filepath]] | keyword[def] identifier[__getHyperSearchJobIDFilePath] ( identifier[cls] , identifier[permWorkDir] , identifier[outputLabel] ):
literal[string]
identifier[basePath] = identifier[permWorkDir]
identifier[filename] = literal[string] %( identifier[outputLabel] ,)
identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[basePath] , identifier[filename] )
keyword[return] identifier[filepath] | def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
"""Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID
"""
# Get the base path and figure out the path of the report file.
basePath = permWorkDir
# Form the name of the output csv file that will contain all the results
filename = '%s_HyperSearchJobID.pkl' % (outputLabel,)
filepath = os.path.join(basePath, filename)
return filepath |
def _get_mechanism(self, rup, C):
    """
    Return the style-of-faulting term (fifth term of equation 1).

    The fault-type dummy variables (see Table 1) select which of the
    f1/f2/f3 coefficients contributes to the result.
    """
    dummies = self._get_fault_type_dummy_variables(rup)
    _unspecified, strike_slip, normal, reverse = dummies
    return (C['f1'] * normal +
            C['f2'] * reverse +
            C['f3'] * strike_slip)
constant[
Compute the fifth term of the equation 1 described on paragraph :
Get fault type dummy variables, see Table 1
]
<ast.Tuple object at 0x7da20c993040> assign[=] call[name[self]._get_fault_type_dummy_variables, parameter[name[rup]]]
return[binary_operation[binary_operation[binary_operation[call[name[C]][constant[f1]] * name[NS]] + binary_operation[call[name[C]][constant[f2]] * name[RS]]] + binary_operation[call[name[C]][constant[f3]] * name[SS]]]] | keyword[def] identifier[_get_mechanism] ( identifier[self] , identifier[rup] , identifier[C] ):
literal[string]
identifier[U] , identifier[SS] , identifier[NS] , identifier[RS] = identifier[self] . identifier[_get_fault_type_dummy_variables] ( identifier[rup] )
keyword[return] identifier[C] [ literal[string] ]* identifier[NS] + identifier[C] [ literal[string] ]* identifier[RS] + identifier[C] [ literal[string] ]* identifier[SS] | def _get_mechanism(self, rup, C):
"""
Compute the fifth term of the equation 1 described on paragraph :
Get fault type dummy variables, see Table 1
"""
(U, SS, NS, RS) = self._get_fault_type_dummy_variables(rup)
return C['f1'] * NS + C['f2'] * RS + C['f3'] * SS |
def err(msg):
    """Pretty-print an error."""
    # Style the message in bold red, then emit it via click's echo.
    styled = click.style(msg, fg="red", bold=True)
    click.echo(styled)
constant[Pretty-print an error.]
call[name[click].echo, parameter[call[name[click].style, parameter[name[msg]]]]] | keyword[def] identifier[err] ( identifier[msg] ):
literal[string]
identifier[click] . identifier[echo] ( identifier[click] . identifier[style] ( identifier[msg] , identifier[fg] = literal[string] , identifier[bold] = keyword[True] )) | def err(msg):
"""Pretty-print an error."""
click.echo(click.style(msg, fg='red', bold=True)) |
def form(self):
    """
    This attribute points to default form.
    If form was not selected manually then select the form
    which has the biggest number of input elements.
    The form value is just an `lxml.html` form element.
    Example::
        g.go('some URL')
        # Choose form automatically
        print g.form
        # And now choose form manually
        g.choose_form(1)
        print g.form
    """
    if self._lxml_form is None:
        # Count the input fields of every form in the document.
        field_counts = []
        for num, frm in enumerate(self.tree.forms):
            field_counts.append((num, len(list(frm.fields))))
        if not field_counts:
            raise DataNotFound('Response does not contains any form')
        # Pick the form with the most fields (first one wins on ties,
        # matching the stable reverse sort).
        best = sorted(field_counts, key=lambda pair: pair[1],
                      reverse=True)[0]
        self.choose_form(best[0])
    return self._lxml_form
constant[
This attribute points to default form.
If form was not selected manually then select the form
which has the biggest number of input elements.
The form value is just an `lxml.html` form element.
Example::
g.go('some URL')
# Choose form automatically
print g.form
# And now choose form manually
g.choose_form(1)
print g.form
]
if compare[name[self]._lxml_form is constant[None]] begin[:]
variable[forms] assign[=] <ast.ListComp object at 0x7da1b18dccd0>
if name[forms] begin[:]
variable[idx] assign[=] call[call[call[name[sorted], parameter[name[forms]]]][constant[0]]][constant[0]]
call[name[self].choose_form, parameter[name[idx]]]
return[name[self]._lxml_form] | keyword[def] identifier[form] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_lxml_form] keyword[is] keyword[None] :
identifier[forms] =[( identifier[idx] , identifier[len] ( identifier[list] ( identifier[x] . identifier[fields] )))
keyword[for] identifier[idx] , identifier[x] keyword[in] identifier[enumerate] ( identifier[self] . identifier[tree] . identifier[forms] )]
keyword[if] identifier[forms] :
identifier[idx] = identifier[sorted] ( identifier[forms] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] )[ literal[int] ][ literal[int] ]
identifier[self] . identifier[choose_form] ( identifier[idx] )
keyword[else] :
keyword[raise] identifier[DataNotFound] ( literal[string] )
keyword[return] identifier[self] . identifier[_lxml_form] | def form(self):
"""
This attribute points to default form.
If form was not selected manually then select the form
which has the biggest number of input elements.
The form value is just an `lxml.html` form element.
Example::
g.go('some URL')
# Choose form automatically
print g.form
# And now choose form manually
g.choose_form(1)
print g.form
"""
if self._lxml_form is None:
forms = [(idx, len(list(x.fields))) for (idx, x) in enumerate(self.tree.forms)]
if forms:
idx = sorted(forms, key=lambda x: x[1], reverse=True)[0][0]
self.choose_form(idx) # depends on [control=['if'], data=[]]
else:
raise DataNotFound('Response does not contains any form') # depends on [control=['if'], data=[]]
return self._lxml_form |
def _ParseAbstractInteger(text, is_long=False):
  """Parses an integer without checking size/signedness.

  Args:
    text: The text to parse.
    is_long: True if the value should be returned as a long integer.

  Returns:
    The integer value.

  Raises:
    ValueError: Thrown Iff the text is not a valid integer.
  """
  # Base 0 lets the radix be inferred from the literal's prefix.
  # 64-bit values are forced to long so alternate implementations where
  # the distinction matters (e.g. the C++ one) stay simple.
  converter = long if is_long else int
  try:
    return converter(text, 0)
  except ValueError:
    raise ValueError('Couldn\'t parse integer: %s' % text)
constant[Parses an integer without checking size/signedness.
Args:
text: The text to parse.
is_long: True if the value should be returned as a long integer.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
]
<ast.Try object at 0x7da1b1f19870> | keyword[def] identifier[_ParseAbstractInteger] ( identifier[text] , identifier[is_long] = keyword[False] ):
literal[string]
keyword[try] :
keyword[if] identifier[is_long] :
keyword[return] identifier[long] ( identifier[text] , literal[int] )
keyword[else] :
keyword[return] identifier[int] ( identifier[text] , literal[int] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[text] ) | def _ParseAbstractInteger(text, is_long=False):
"""Parses an integer without checking size/signedness.
Args:
text: The text to parse.
is_long: True if the value should be returned as a long integer.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
"""
# Do the actual parsing. Exception handling is propagated to caller.
try:
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if is_long:
return long(text, 0) # depends on [control=['if'], data=[]]
else:
return int(text, 0) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError("Couldn't parse integer: %s" % text) # depends on [control=['except'], data=[]] |
def del_password(name, root=None):
    '''
    .. versionadded:: 2014.7.0
    Delete the password from name user
    name
        User to delete
    root
        Directory to chroot into
    CLI Example:
    .. code-block:: bash
        salt '*' shadow.del_password username
    '''
    # Build the passwd invocation; -R chroots, -d deletes the password.
    cmd = ['passwd']
    if root is not None:
        cmd += ['-R', root]
    cmd += ['-d', name]
    __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet')
    # Success means the shadow entry now has an empty password field.
    uinfo = info(name, root=root)
    cleared = not uinfo['passwd']
    return cleared and uinfo['name'] == name
constant[
.. versionadded:: 2014.7.0
Delete the password from name user
name
User to delete
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' shadow.del_password username
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18f811ff0>]]
if compare[name[root] is_not constant[None]] begin[:]
call[name[cmd].extend, parameter[tuple[[<ast.Constant object at 0x7da18f810a00>, <ast.Name object at 0x7da18f813d30>]]]]
call[name[cmd].extend, parameter[tuple[[<ast.Constant object at 0x7da18f811d50>, <ast.Name object at 0x7da18f811ab0>]]]]
call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]]
variable[uinfo] assign[=] call[name[info], parameter[name[name]]]
return[<ast.BoolOp object at 0x7da18f811660>] | keyword[def] identifier[del_password] ( identifier[name] , identifier[root] = keyword[None] ):
literal[string]
identifier[cmd] =[ literal[string] ]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
identifier[cmd] . identifier[extend] (( literal[string] , identifier[root] ))
identifier[cmd] . identifier[extend] (( literal[string] , identifier[name] ))
identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] , identifier[output_loglevel] = literal[string] )
identifier[uinfo] = identifier[info] ( identifier[name] , identifier[root] = identifier[root] )
keyword[return] keyword[not] identifier[uinfo] [ literal[string] ] keyword[and] identifier[uinfo] [ literal[string] ]== identifier[name] | def del_password(name, root=None):
"""
.. versionadded:: 2014.7.0
Delete the password from name user
name
User to delete
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' shadow.del_password username
"""
cmd = ['passwd']
if root is not None:
cmd.extend(('-R', root)) # depends on [control=['if'], data=['root']]
cmd.extend(('-d', name))
__salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet')
uinfo = info(name, root=root)
return not uinfo['passwd'] and uinfo['name'] == name |
def iphexval(ip):
    '''
    Retrieve the hexadecimal representation of an IP address
    .. versionadded:: 2016.11.0
    CLI Example:
    .. code-block:: bash
        salt '*' network.iphexval 10.0.0.1
    '''
    # Each dotted-quad octet becomes a zero-padded uppercase hex byte.
    return ''.join('%02X' % int(octet) for octet in ip.split('.'))
constant[
Retrieve the hexadecimal representation of an IP address
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt '*' network.iphexval 10.0.0.1
]
variable[a] assign[=] call[name[ip].split, parameter[constant[.]]]
variable[hexval] assign[=] <ast.ListComp object at 0x7da204961d80>
return[call[constant[].join, parameter[name[hexval]]]] | keyword[def] identifier[iphexval] ( identifier[ip] ):
literal[string]
identifier[a] = identifier[ip] . identifier[split] ( literal[string] )
identifier[hexval] =[ literal[string] % identifier[int] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[a] ]
keyword[return] literal[string] . identifier[join] ( identifier[hexval] ) | def iphexval(ip):
"""
Retrieve the hexadecimal representation of an IP address
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt '*' network.iphexval 10.0.0.1
"""
a = ip.split('.')
hexval = ['%02X' % int(x) for x in a] # pylint: disable=E1321
return ''.join(hexval) |
def encode(self):
    """Encodes matrix values in place with a label encoder.

    :return: Encoder used
    """
    label_encoder = LabelEncoder()
    # Flatten, encode, then re-slice back into the original row shape.
    flat_encoded = label_encoder.fit_transform(self.get_as_list())
    width = len(self.matrix[0])
    height = len(self.matrix)
    rows = []
    for start in range(0, height * width, width):
        rows.append(flat_encoded[start: start + width])
    self.matrix = rows
    return label_encoder
constant[Encodes matrix
:return: Encoder used
]
variable[encoder] assign[=] call[name[LabelEncoder], parameter[]]
variable[values] assign[=] call[name[self].get_as_list, parameter[]]
variable[encoded] assign[=] call[name[encoder].fit_transform, parameter[name[values]]]
variable[n_columns] assign[=] call[name[len], parameter[call[name[self].matrix][constant[0]]]]
variable[n_rows] assign[=] call[name[len], parameter[name[self].matrix]]
name[self].matrix assign[=] <ast.ListComp object at 0x7da204622e90>
return[name[encoder]] | keyword[def] identifier[encode] ( identifier[self] ):
literal[string]
identifier[encoder] = identifier[LabelEncoder] ()
identifier[values] = identifier[self] . identifier[get_as_list] ()
identifier[encoded] = identifier[encoder] . identifier[fit_transform] ( identifier[values] )
identifier[n_columns] = identifier[len] ( identifier[self] . identifier[matrix] [ literal[int] ])
identifier[n_rows] = identifier[len] ( identifier[self] . identifier[matrix] )
identifier[self] . identifier[matrix] =[
identifier[encoded] [ identifier[i] : identifier[i] + identifier[n_columns] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n_rows] * identifier[n_columns] , identifier[n_columns] )
]
keyword[return] identifier[encoder] | def encode(self):
"""Encodes matrix
:return: Encoder used
"""
encoder = LabelEncoder() # encoder
values = self.get_as_list()
encoded = encoder.fit_transform(values) # long list of encoded
n_columns = len(self.matrix[0])
n_rows = len(self.matrix)
self.matrix = [encoded[i:i + n_columns] for i in range(0, n_rows * n_columns, n_columns)]
return encoder |
def convert_items(data, mapping):
    """
    Convert a list of item records to use new column names.

    Parameters
    ----------
    data : list of dict
        Each dict is the record for one item.
    mapping : dict
        Column names to swap into the records.

    Returns
    -------
    list of dict
        The updated records (a new list; input records are not reused).
    """
    # map_magic.mapping builds a new record with the renamed columns;
    # a comprehension replaces the manual append loop (same order, same size).
    return [map_magic.mapping(rec, mapping) for rec in data]
constant[
Input: list of dicts (each dict a record for one item),
mapping with column names to swap into the records.
Output: updated list of dicts.
]
variable[new_recs] assign[=] list[[]]
for taget[name[rec]] in starred[name[data]] begin[:]
variable[new_rec] assign[=] call[name[map_magic].mapping, parameter[name[rec], name[mapping]]]
call[name[new_recs].append, parameter[name[new_rec]]]
return[name[new_recs]] | keyword[def] identifier[convert_items] ( identifier[data] , identifier[mapping] ):
literal[string]
identifier[new_recs] =[]
keyword[for] identifier[rec] keyword[in] identifier[data] :
identifier[new_rec] = identifier[map_magic] . identifier[mapping] ( identifier[rec] , identifier[mapping] )
identifier[new_recs] . identifier[append] ( identifier[new_rec] )
keyword[return] identifier[new_recs] | def convert_items(data, mapping):
"""
Input: list of dicts (each dict a record for one item),
mapping with column names to swap into the records.
Output: updated list of dicts.
"""
new_recs = []
for rec in data:
new_rec = map_magic.mapping(rec, mapping)
new_recs.append(new_rec) # depends on [control=['for'], data=['rec']]
return new_recs |
def to_list(self):
    '''convert an actions bitmask into a list of action strings'''
    klass = self.__class__
    # Pair each action name with its bitmask, then keep the ones whose
    # bits are all set in this bitmask (order follows ACTIONS).
    pairs = ((action, klass.action_bitmask(action))
             for action in klass.ACTIONS)
    return [action for action, mask in pairs if (self & mask) == mask]
constant[convert an actions bitmask into a list of action strings]
variable[res] assign[=] list[[]]
for taget[name[a]] in starred[name[self].__class__.ACTIONS] begin[:]
variable[aBit] assign[=] call[name[self].__class__.action_bitmask, parameter[name[a]]]
if compare[binary_operation[name[self] <ast.BitAnd object at 0x7da2590d6b60> name[aBit]] equal[==] name[aBit]] begin[:]
call[name[res].append, parameter[name[a]]]
return[name[res]] | keyword[def] identifier[to_list] ( identifier[self] ):
literal[string]
identifier[res] =[]
keyword[for] identifier[a] keyword[in] identifier[self] . identifier[__class__] . identifier[ACTIONS] :
identifier[aBit] = identifier[self] . identifier[__class__] . identifier[action_bitmask] ( identifier[a] )
keyword[if] (( identifier[self] & identifier[aBit] )== identifier[aBit] ):
identifier[res] . identifier[append] ( identifier[a] )
keyword[return] identifier[res] | def to_list(self):
"""convert an actions bitmask into a list of action strings"""
res = []
for a in self.__class__.ACTIONS:
aBit = self.__class__.action_bitmask(a)
if self & aBit == aBit:
res.append(a) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
return res |
def replace_refs_factory(references, use_cleveref_default, use_eqref,
                         plusname, starname, target):
    """Returns replace_refs(key, value, fmt, meta) action that replaces
    references with format-specific content. The content is determined using
    the 'references' dict, which associates reference labels with numbers or
    string tags (e.g., { 'fig:1':1, 'fig:2':2, ...}). If 'use_cleveref_default'
    is True, or if "modifier" in the reference's attributes is "+" or "*", then
    clever referencing is used; i.e., a name is placed in front of the number
    or string tag. The 'plusname' and 'starname' lists give the singular
    and plural names for "+" and "*" clever references, respectively. The
    'target' is the LaTeX type for clever referencing (e.g., "figure",
    "equation", "table", ...)."""
    # Shared module-level flag: True while the cleveref TeX preamble still
    # needs to be inserted into the document.
    global _cleveref_tex_flag  # pylint: disable=global-statement
    # Update global if clever referencing is required by default
    _cleveref_tex_flag = _cleveref_tex_flag or use_cleveref_default
    def _insert_cleveref_fakery(key, value, meta):
        r"""Inserts TeX to support clever referencing in LaTeX documents
        if the key isn't a RawBlock. If the key is a RawBlock, then check
        the value to see if the TeX was already inserted.
        The \providecommand macro is used to fake the cleveref package's
        behaviour if it is not provided in the template via
        \usepackage{cleveref}.
        TeX is inserted into the value. Replacement elements are returned.
        """
        global _cleveref_tex_flag  # pylint: disable=global-statement
        comment1 = '% pandoc-xnos: cleveref formatting'
        # \crefformat/\Crefformat install the "+" and "*" names for `target`.
        tex1 = [comment1,
                r'\crefformat{%s}{%s~#2#1#3}'%(target, plusname[0]),
                r'\Crefformat{%s}{%s~#2#1#3}'%(target, starname[0])]
        if key == 'RawBlock':  # Check for existing cleveref TeX
            if value[1].startswith(comment1):
                # Append the new portion
                value[1] = value[1] + '\n' + '\n'.join(tex1[1:])
                _cleveref_tex_flag = False  # Cleveref fakery already installed
        elif key != 'RawBlock':  # Write the cleveref TeX
            # NOTE(review): this `elif` condition is the exact negation of the
            # `if` above, so it is equivalent to a plain `else`.
            _cleveref_tex_flag = False  # Cancels further attempts
            ret = []
            # Check first to see if fakery is turned off
            if not 'xnos-cleveref-fake' in meta or \
              check_bool(get_meta(meta, 'xnos-cleveref-fake')):
                # Cleveref fakery: \providecommand only defines \cref/\Cref
                # when the real cleveref package was not loaded.
                tex2 = [
                    r'% pandoc-xnos: cleveref fakery',
                    r'\newcommand{\plusnamesingular}{}',
                    r'\newcommand{\starnamesingular}{}',
                    r'\newcommand{\xrefname}[1]{'\
                    r'\protect\renewcommand{\plusnamesingular}{#1}}',
                    r'\newcommand{\Xrefname}[1]{'\
                    r'\protect\renewcommand{\starnamesingular}{#1}}',
                    r'\providecommand{\cref}{\plusnamesingular~\ref}',
                    r'\providecommand{\Cref}{\starnamesingular~\ref}',
                    r'\providecommand{\crefformat}[2]{}',
                    r'\providecommand{\Crefformat}[2]{}']
                ret.append(RawBlock('tex', '\n'.join(tex2)))
            ret.append(RawBlock('tex', '\n'.join(tex1)))
            return ret
        return None
    def _cite_replacement(key, value, fmt, meta):
        """Returns context-dependent content to replace a Cite element."""
        assert key == 'Cite'
        attrs, label = value[0], _get_label(key, value)
        attrs = PandocAttributes(attrs, 'pandoc')
        assert label in references
        # Get the replacement value
        text = str(references[label])
        # Choose between \Cref, \cref and \ref
        # ("modifier" attr overrides the default; '+' singular, '*' capitalized)
        use_cleveref = attrs['modifier'] in ['*', '+'] \
            if 'modifier' in attrs.kvs else use_cleveref_default
        plus = attrs['modifier'] == '+' if 'modifier' in attrs.kvs \
            else use_cleveref_default
        name = plusname[0] if plus else starname[0]  # Name used by cref
        # The replacement depends on the output format
        if fmt == 'latex':
            if use_cleveref:
                # Renew commands needed for cleveref fakery
                if not 'xnos-cleveref-fake' in meta or \
                  check_bool(get_meta(meta, 'xnos-cleveref-fake')):
                    faketex = (r'\xrefname' if plus else r'\Xrefname') + \
                      '{%s}' % name
                else:
                    faketex = ''
                macro = r'\cref' if plus else r'\Cref'
                ret = RawInline('tex', r'%s%s{%s}'%(faketex, macro, label))
            elif use_eqref:
                ret = RawInline('tex', r'\eqref{%s}'%label)
            else:
                ret = RawInline('tex', r'\ref{%s}'%label)
        else:
            if use_eqref:
                text = '(' + text + ')'
            # $-delimited replacement text is emitted as inline math.
            linktext = [Math({"t":"InlineMath", "c":[]}, text[1:-1]) \
                        if text.startswith('$') and text.endswith('$') \
                        else Str(text)]
            # NOTE(review): lexicographic version compare — '1.9' > '1.16';
            # confirm _PANDOCVERSION is normalized upstream.
            link = elt('Link', 2)(linktext, ['#%s' % label, '']) \
              if _PANDOCVERSION < '1.16' else \
              Link(['', [], []], linktext, ['#%s' % label, ''])
            ret = ([Str(name), Space()] if use_cleveref else []) + [link]
        return ret
    def replace_refs(key, value, fmt, meta):  # pylint: disable=unused-argument
        """Replaces references with format-specific content."""
        if fmt == 'latex' and _cleveref_tex_flag:
            # Put the cleveref TeX fakery in front of the first block element
            # that isn't a RawBlock.
            if not key in ['Plain', 'Para', 'CodeBlock', 'RawBlock',
                           'BlockQuote', 'OrderedList', 'BulletList',
                           'DefinitionList', 'Header', 'HorizontalRule',
                           'Table', 'Div', 'Null']:
                return None
            # Reconstruct the block element
            el = _getel(key, value)
            # Insert cleveref TeX in front of the block element
            tex = _insert_cleveref_fakery(key, value, meta)
            if tex:
                return tex + [el]
        elif key == 'Cite' and len(value) == 3:  # Replace the reference
            return _cite_replacement(key, value, fmt, meta)
        return None
    # The returned closure is the pandoc filter action.
    return replace_refs
constant[Returns replace_refs(key, value, fmt, meta) action that replaces
references with format-specific content. The content is determined using
the 'references' dict, which associates reference labels with numbers or
string tags (e.g., { 'fig:1':1, 'fig:2':2, ...}). If 'use_cleveref_default'
is True, or if "modifier" in the reference's attributes is "+" or "*", then
clever referencing is used; i.e., a name is placed in front of the number
or string tag. The 'plusname' and 'starname' lists give the singular
and plural names for "+" and "*" clever references, respectively. The
'target' is the LaTeX type for clever referencing (e.g., "figure",
"equation", "table", ...).]
<ast.Global object at 0x7da1b26ae590>
variable[_cleveref_tex_flag] assign[=] <ast.BoolOp object at 0x7da1b26aeb90>
def function[_insert_cleveref_fakery, parameter[key, value, meta]]:
constant[Inserts TeX to support clever referencing in LaTeX documents
if the key isn't a RawBlock. If the key is a RawBlock, then check
the value to see if the TeX was already inserted.
The \providecommand macro is used to fake the cleveref package's
behaviour if it is not provided in the template via
\usepackage{cleveref}.
TeX is inserted into the value. Replacement elements are returned.
]
<ast.Global object at 0x7da1b26ae1d0>
variable[comment1] assign[=] constant[% pandoc-xnos: cleveref formatting]
variable[tex1] assign[=] list[[<ast.Name object at 0x7da1b26aeef0>, <ast.BinOp object at 0x7da1b26aed10>, <ast.BinOp object at 0x7da1b26ad480>]]
if compare[name[key] equal[==] constant[RawBlock]] begin[:]
if call[call[name[value]][constant[1]].startswith, parameter[name[comment1]]] begin[:]
call[name[value]][constant[1]] assign[=] binary_operation[binary_operation[call[name[value]][constant[1]] + constant[
]] + call[constant[
].join, parameter[call[name[tex1]][<ast.Slice object at 0x7da1b26acc40>]]]]
variable[_cleveref_tex_flag] assign[=] constant[False]
return[constant[None]]
def function[_cite_replacement, parameter[key, value, fmt, meta]]:
constant[Returns context-dependent content to replace a Cite element.]
assert[compare[name[key] equal[==] constant[Cite]]]
<ast.Tuple object at 0x7da1b26ac490> assign[=] tuple[[<ast.Subscript object at 0x7da1b26ace50>, <ast.Call object at 0x7da1b26ad7b0>]]
variable[attrs] assign[=] call[name[PandocAttributes], parameter[name[attrs], constant[pandoc]]]
assert[compare[name[label] in name[references]]]
variable[text] assign[=] call[name[str], parameter[call[name[references]][name[label]]]]
variable[use_cleveref] assign[=] <ast.IfExp object at 0x7da1b26af280>
variable[plus] assign[=] <ast.IfExp object at 0x7da1b26ad1e0>
variable[name] assign[=] <ast.IfExp object at 0x7da1b26ad300>
if compare[name[fmt] equal[==] constant[latex]] begin[:]
if name[use_cleveref] begin[:]
if <ast.BoolOp object at 0x7da1b26aead0> begin[:]
variable[faketex] assign[=] binary_operation[<ast.IfExp object at 0x7da1b26ae320> + binary_operation[constant[{%s}] <ast.Mod object at 0x7da2590d6920> name[name]]]
variable[macro] assign[=] <ast.IfExp object at 0x7da1b26af760>
variable[ret] assign[=] call[name[RawInline], parameter[constant[tex], binary_operation[constant[%s%s{%s}] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ae4a0>, <ast.Name object at 0x7da1b26ae260>, <ast.Name object at 0x7da1b26af100>]]]]]
return[name[ret]]
def function[replace_refs, parameter[key, value, fmt, meta]]:
constant[Replaces references with format-specific content.]
if <ast.BoolOp object at 0x7da207f9a2c0> begin[:]
if <ast.UnaryOp object at 0x7da207f99cc0> begin[:]
return[constant[None]]
variable[el] assign[=] call[name[_getel], parameter[name[key], name[value]]]
variable[tex] assign[=] call[name[_insert_cleveref_fakery], parameter[name[key], name[value], name[meta]]]
if name[tex] begin[:]
return[binary_operation[name[tex] + list[[<ast.Name object at 0x7da207f9b4f0>]]]]
return[constant[None]]
return[name[replace_refs]] | keyword[def] identifier[replace_refs_factory] ( identifier[references] , identifier[use_cleveref_default] , identifier[use_eqref] ,
identifier[plusname] , identifier[starname] , identifier[target] ):
literal[string]
keyword[global] identifier[_cleveref_tex_flag]
identifier[_cleveref_tex_flag] = identifier[_cleveref_tex_flag] keyword[or] identifier[use_cleveref_default]
keyword[def] identifier[_insert_cleveref_fakery] ( identifier[key] , identifier[value] , identifier[meta] ):
literal[string]
keyword[global] identifier[_cleveref_tex_flag]
identifier[comment1] = literal[string]
identifier[tex1] =[ identifier[comment1] ,
literal[string] %( identifier[target] , identifier[plusname] [ literal[int] ]),
literal[string] %( identifier[target] , identifier[starname] [ literal[int] ])]
keyword[if] identifier[key] == literal[string] :
keyword[if] identifier[value] [ literal[int] ]. identifier[startswith] ( identifier[comment1] ):
identifier[value] [ literal[int] ]= identifier[value] [ literal[int] ]+ literal[string] + literal[string] . identifier[join] ( identifier[tex1] [ literal[int] :])
identifier[_cleveref_tex_flag] = keyword[False]
keyword[elif] identifier[key] != literal[string] :
identifier[_cleveref_tex_flag] = keyword[False]
identifier[ret] =[]
keyword[if] keyword[not] literal[string] keyword[in] identifier[meta] keyword[or] identifier[check_bool] ( identifier[get_meta] ( identifier[meta] , literal[string] )):
identifier[tex2] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] literal[string] ,
literal[string] literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
identifier[ret] . identifier[append] ( identifier[RawBlock] ( literal[string] , literal[string] . identifier[join] ( identifier[tex2] )))
identifier[ret] . identifier[append] ( identifier[RawBlock] ( literal[string] , literal[string] . identifier[join] ( identifier[tex1] )))
keyword[return] identifier[ret]
keyword[return] keyword[None]
keyword[def] identifier[_cite_replacement] ( identifier[key] , identifier[value] , identifier[fmt] , identifier[meta] ):
literal[string]
keyword[assert] identifier[key] == literal[string]
identifier[attrs] , identifier[label] = identifier[value] [ literal[int] ], identifier[_get_label] ( identifier[key] , identifier[value] )
identifier[attrs] = identifier[PandocAttributes] ( identifier[attrs] , literal[string] )
keyword[assert] identifier[label] keyword[in] identifier[references]
identifier[text] = identifier[str] ( identifier[references] [ identifier[label] ])
identifier[use_cleveref] = identifier[attrs] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ] keyword[if] literal[string] keyword[in] identifier[attrs] . identifier[kvs] keyword[else] identifier[use_cleveref_default]
identifier[plus] = identifier[attrs] [ literal[string] ]== literal[string] keyword[if] literal[string] keyword[in] identifier[attrs] . identifier[kvs] keyword[else] identifier[use_cleveref_default]
identifier[name] = identifier[plusname] [ literal[int] ] keyword[if] identifier[plus] keyword[else] identifier[starname] [ literal[int] ]
keyword[if] identifier[fmt] == literal[string] :
keyword[if] identifier[use_cleveref] :
keyword[if] keyword[not] literal[string] keyword[in] identifier[meta] keyword[or] identifier[check_bool] ( identifier[get_meta] ( identifier[meta] , literal[string] )):
identifier[faketex] =( literal[string] keyword[if] identifier[plus] keyword[else] literal[string] )+ literal[string] % identifier[name]
keyword[else] :
identifier[faketex] = literal[string]
identifier[macro] = literal[string] keyword[if] identifier[plus] keyword[else] literal[string]
identifier[ret] = identifier[RawInline] ( literal[string] , literal[string] %( identifier[faketex] , identifier[macro] , identifier[label] ))
keyword[elif] identifier[use_eqref] :
identifier[ret] = identifier[RawInline] ( literal[string] , literal[string] % identifier[label] )
keyword[else] :
identifier[ret] = identifier[RawInline] ( literal[string] , literal[string] % identifier[label] )
keyword[else] :
keyword[if] identifier[use_eqref] :
identifier[text] = literal[string] + identifier[text] + literal[string]
identifier[linktext] =[ identifier[Math] ({ literal[string] : literal[string] , literal[string] :[]}, identifier[text] [ literal[int] :- literal[int] ]) keyword[if] identifier[text] . identifier[startswith] ( literal[string] ) keyword[and] identifier[text] . identifier[endswith] ( literal[string] ) keyword[else] identifier[Str] ( identifier[text] )]
identifier[link] = identifier[elt] ( literal[string] , literal[int] )( identifier[linktext] ,[ literal[string] % identifier[label] , literal[string] ]) keyword[if] identifier[_PANDOCVERSION] < literal[string] keyword[else] identifier[Link] ([ literal[string] ,[],[]], identifier[linktext] ,[ literal[string] % identifier[label] , literal[string] ])
identifier[ret] =([ identifier[Str] ( identifier[name] ), identifier[Space] ()] keyword[if] identifier[use_cleveref] keyword[else] [])+[ identifier[link] ]
keyword[return] identifier[ret]
keyword[def] identifier[replace_refs] ( identifier[key] , identifier[value] , identifier[fmt] , identifier[meta] ):
literal[string]
keyword[if] identifier[fmt] == literal[string] keyword[and] identifier[_cleveref_tex_flag] :
keyword[if] keyword[not] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]:
keyword[return] keyword[None]
identifier[el] = identifier[_getel] ( identifier[key] , identifier[value] )
identifier[tex] = identifier[_insert_cleveref_fakery] ( identifier[key] , identifier[value] , identifier[meta] )
keyword[if] identifier[tex] :
keyword[return] identifier[tex] +[ identifier[el] ]
keyword[elif] identifier[key] == literal[string] keyword[and] identifier[len] ( identifier[value] )== literal[int] :
keyword[return] identifier[_cite_replacement] ( identifier[key] , identifier[value] , identifier[fmt] , identifier[meta] )
keyword[return] keyword[None]
keyword[return] identifier[replace_refs] | def replace_refs_factory(references, use_cleveref_default, use_eqref, plusname, starname, target):
"""Returns replace_refs(key, value, fmt, meta) action that replaces
references with format-specific content. The content is determined using
the 'references' dict, which associates reference labels with numbers or
string tags (e.g., { 'fig:1':1, 'fig:2':2, ...}). If 'use_cleveref_default'
is True, or if "modifier" in the reference's attributes is "+" or "*", then
clever referencing is used; i.e., a name is placed in front of the number
or string tag. The 'plusname' and 'starname' lists give the singular
and plural names for "+" and "*" clever references, respectively. The
'target' is the LaTeX type for clever referencing (e.g., "figure",
"equation", "table", ...)."""
global _cleveref_tex_flag # pylint: disable=global-statement
# Update global if clever referencing is required by default
_cleveref_tex_flag = _cleveref_tex_flag or use_cleveref_default
def _insert_cleveref_fakery(key, value, meta):
"""Inserts TeX to support clever referencing in LaTeX documents
if the key isn't a RawBlock. If the key is a RawBlock, then check
the value to see if the TeX was already inserted.
The \\providecommand macro is used to fake the cleveref package's
behaviour if it is not provided in the template via
\\usepackage{cleveref}.
TeX is inserted into the value. Replacement elements are returned.
"""
global _cleveref_tex_flag # pylint: disable=global-statement
comment1 = '% pandoc-xnos: cleveref formatting'
tex1 = [comment1, '\\crefformat{%s}{%s~#2#1#3}' % (target, plusname[0]), '\\Crefformat{%s}{%s~#2#1#3}' % (target, starname[0])]
if key == 'RawBlock': # Check for existing cleveref TeX
if value[1].startswith(comment1):
# Append the new portion
value[1] = value[1] + '\n' + '\n'.join(tex1[1:])
_cleveref_tex_flag = False # Cleveref fakery already installed # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif key != 'RawBlock': # Write the cleveref TeX
_cleveref_tex_flag = False # Cancels further attempts
ret = []
# Check first to see if fakery is turned off
if not 'xnos-cleveref-fake' in meta or check_bool(get_meta(meta, 'xnos-cleveref-fake')):
# Cleveref fakery
tex2 = ['% pandoc-xnos: cleveref fakery', '\\newcommand{\\plusnamesingular}{}', '\\newcommand{\\starnamesingular}{}', '\\newcommand{\\xrefname}[1]{\\protect\\renewcommand{\\plusnamesingular}{#1}}', '\\newcommand{\\Xrefname}[1]{\\protect\\renewcommand{\\starnamesingular}{#1}}', '\\providecommand{\\cref}{\\plusnamesingular~\\ref}', '\\providecommand{\\Cref}{\\starnamesingular~\\ref}', '\\providecommand{\\crefformat}[2]{}', '\\providecommand{\\Crefformat}[2]{}']
ret.append(RawBlock('tex', '\n'.join(tex2))) # depends on [control=['if'], data=[]]
ret.append(RawBlock('tex', '\n'.join(tex1)))
return ret # depends on [control=['if'], data=[]]
return None
def _cite_replacement(key, value, fmt, meta):
"""Returns context-dependent content to replace a Cite element."""
assert key == 'Cite'
(attrs, label) = (value[0], _get_label(key, value))
attrs = PandocAttributes(attrs, 'pandoc')
assert label in references
# Get the replacement value
text = str(references[label])
# Choose between \Cref, \cref and \ref
use_cleveref = attrs['modifier'] in ['*', '+'] if 'modifier' in attrs.kvs else use_cleveref_default
plus = attrs['modifier'] == '+' if 'modifier' in attrs.kvs else use_cleveref_default
name = plusname[0] if plus else starname[0] # Name used by cref
# The replacement depends on the output format
if fmt == 'latex':
if use_cleveref:
# Renew commands needed for cleveref fakery
if not 'xnos-cleveref-fake' in meta or check_bool(get_meta(meta, 'xnos-cleveref-fake')):
faketex = ('\\xrefname' if plus else '\\Xrefname') + '{%s}' % name # depends on [control=['if'], data=[]]
else:
faketex = ''
macro = '\\cref' if plus else '\\Cref'
ret = RawInline('tex', '%s%s{%s}' % (faketex, macro, label)) # depends on [control=['if'], data=[]]
elif use_eqref:
ret = RawInline('tex', '\\eqref{%s}' % label) # depends on [control=['if'], data=[]]
else:
ret = RawInline('tex', '\\ref{%s}' % label) # depends on [control=['if'], data=[]]
else:
if use_eqref:
text = '(' + text + ')' # depends on [control=['if'], data=[]]
linktext = [Math({'t': 'InlineMath', 'c': []}, text[1:-1]) if text.startswith('$') and text.endswith('$') else Str(text)]
link = elt('Link', 2)(linktext, ['#%s' % label, '']) if _PANDOCVERSION < '1.16' else Link(['', [], []], linktext, ['#%s' % label, ''])
ret = ([Str(name), Space()] if use_cleveref else []) + [link]
return ret
def replace_refs(key, value, fmt, meta): # pylint: disable=unused-argument
'Replaces references with format-specific content.'
if fmt == 'latex' and _cleveref_tex_flag:
# Put the cleveref TeX fakery in front of the first block element
# that isn't a RawBlock.
if not key in ['Plain', 'Para', 'CodeBlock', 'RawBlock', 'BlockQuote', 'OrderedList', 'BulletList', 'DefinitionList', 'Header', 'HorizontalRule', 'Table', 'Div', 'Null']:
return None # depends on [control=['if'], data=[]]
# Reconstruct the block element
el = _getel(key, value)
# Insert cleveref TeX in front of the block element
tex = _insert_cleveref_fakery(key, value, meta)
if tex:
return tex + [el] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif key == 'Cite' and len(value) == 3: # Replace the reference
return _cite_replacement(key, value, fmt, meta) # depends on [control=['if'], data=[]]
return None
return replace_refs |
def sel_entries(self):
    """Generator which returns all SEL entries.

    Walks the BMC's System Event Log one record at a time, shrinking the
    per-request read length whenever the BMC reports it cannot return the
    requested number of bytes, and yields one ``SelEntry`` per assembled
    record.
    """
    # 0xff asks the BMC for the entire record in a single response.
    ENTIRE_RECORD = 0xff
    rsp = self.send_message_with_name('GetSelInfo')
    if rsp.entries == 0:
        # SEL is empty: nothing to yield.
        return
    # Reserve the SEL so record ids stay consistent while we iterate.
    reservation_id = self.get_sel_reservation_id()
    next_record_id = 0
    while True:
        req = create_request_by_name('GetSelEntry')
        req.reservation_id = reservation_id
        req.record_id = next_record_id
        req.offset = 0
        # Reset to optimistic "whole record" for each new record.
        self.max_req_len = ENTIRE_RECORD
        record_data = ByteBuffer()
        while True:
            req.length = self.max_req_len
            # NOTE(review): 16 looks like the fixed SEL record size here
            # (consistent with IPMI) -- confirm against the spec.
            if (self.max_req_len != 0xff
                    and (req.offset + req.length) > 16):
                # Clamp so we never read past the end of the record.
                req.length = 16 - req.offset
            rsp = self.send_message(req)
            if rsp.completion_code == constants.CC_CANT_RET_NUM_REQ_BYTES:
                # BMC refused this read size: back off (first from
                # "entire record" to 16, then byte by byte) and retry.
                if self.max_req_len == 0xff:
                    self.max_req_len = 16
                else:
                    self.max_req_len -= 1
                continue
            else:
                check_completion_code(rsp.completion_code)
            # Accumulate the partial read and advance the offset.
            record_data.extend(rsp.record_data)
            req.offset = len(record_data)
            if len(record_data) >= 16:
                # Full record assembled.
                break
        next_record_id = rsp.next_record_id
        yield SelEntry(record_data)
        # 0xffff marks the last record in the SEL.
        if next_record_id == 0xffff:
            break
constant[Generator which returns all SEL entries.]
variable[ENTIRE_RECORD] assign[=] constant[255]
variable[rsp] assign[=] call[name[self].send_message_with_name, parameter[constant[GetSelInfo]]]
if compare[name[rsp].entries equal[==] constant[0]] begin[:]
return[None]
variable[reservation_id] assign[=] call[name[self].get_sel_reservation_id, parameter[]]
variable[next_record_id] assign[=] constant[0]
while constant[True] begin[:]
variable[req] assign[=] call[name[create_request_by_name], parameter[constant[GetSelEntry]]]
name[req].reservation_id assign[=] name[reservation_id]
name[req].record_id assign[=] name[next_record_id]
name[req].offset assign[=] constant[0]
name[self].max_req_len assign[=] name[ENTIRE_RECORD]
variable[record_data] assign[=] call[name[ByteBuffer], parameter[]]
while constant[True] begin[:]
name[req].length assign[=] name[self].max_req_len
if <ast.BoolOp object at 0x7da1b07784c0> begin[:]
name[req].length assign[=] binary_operation[constant[16] - name[req].offset]
variable[rsp] assign[=] call[name[self].send_message, parameter[name[req]]]
if compare[name[rsp].completion_code equal[==] name[constants].CC_CANT_RET_NUM_REQ_BYTES] begin[:]
if compare[name[self].max_req_len equal[==] constant[255]] begin[:]
name[self].max_req_len assign[=] constant[16]
continue
call[name[record_data].extend, parameter[name[rsp].record_data]]
name[req].offset assign[=] call[name[len], parameter[name[record_data]]]
if compare[call[name[len], parameter[name[record_data]]] greater_or_equal[>=] constant[16]] begin[:]
break
variable[next_record_id] assign[=] name[rsp].next_record_id
<ast.Yield object at 0x7da1b077b460>
if compare[name[next_record_id] equal[==] constant[65535]] begin[:]
break | keyword[def] identifier[sel_entries] ( identifier[self] ):
literal[string]
identifier[ENTIRE_RECORD] = literal[int]
identifier[rsp] = identifier[self] . identifier[send_message_with_name] ( literal[string] )
keyword[if] identifier[rsp] . identifier[entries] == literal[int] :
keyword[return]
identifier[reservation_id] = identifier[self] . identifier[get_sel_reservation_id] ()
identifier[next_record_id] = literal[int]
keyword[while] keyword[True] :
identifier[req] = identifier[create_request_by_name] ( literal[string] )
identifier[req] . identifier[reservation_id] = identifier[reservation_id]
identifier[req] . identifier[record_id] = identifier[next_record_id]
identifier[req] . identifier[offset] = literal[int]
identifier[self] . identifier[max_req_len] = identifier[ENTIRE_RECORD]
identifier[record_data] = identifier[ByteBuffer] ()
keyword[while] keyword[True] :
identifier[req] . identifier[length] = identifier[self] . identifier[max_req_len]
keyword[if] ( identifier[self] . identifier[max_req_len] != literal[int]
keyword[and] ( identifier[req] . identifier[offset] + identifier[req] . identifier[length] )> literal[int] ):
identifier[req] . identifier[length] = literal[int] - identifier[req] . identifier[offset]
identifier[rsp] = identifier[self] . identifier[send_message] ( identifier[req] )
keyword[if] identifier[rsp] . identifier[completion_code] == identifier[constants] . identifier[CC_CANT_RET_NUM_REQ_BYTES] :
keyword[if] identifier[self] . identifier[max_req_len] == literal[int] :
identifier[self] . identifier[max_req_len] = literal[int]
keyword[else] :
identifier[self] . identifier[max_req_len] -= literal[int]
keyword[continue]
keyword[else] :
identifier[check_completion_code] ( identifier[rsp] . identifier[completion_code] )
identifier[record_data] . identifier[extend] ( identifier[rsp] . identifier[record_data] )
identifier[req] . identifier[offset] = identifier[len] ( identifier[record_data] )
keyword[if] identifier[len] ( identifier[record_data] )>= literal[int] :
keyword[break]
identifier[next_record_id] = identifier[rsp] . identifier[next_record_id]
keyword[yield] identifier[SelEntry] ( identifier[record_data] )
keyword[if] identifier[next_record_id] == literal[int] :
keyword[break] | def sel_entries(self):
"""Generator which returns all SEL entries."""
ENTIRE_RECORD = 255
rsp = self.send_message_with_name('GetSelInfo')
if rsp.entries == 0:
return # depends on [control=['if'], data=[]]
reservation_id = self.get_sel_reservation_id()
next_record_id = 0
while True:
req = create_request_by_name('GetSelEntry')
req.reservation_id = reservation_id
req.record_id = next_record_id
req.offset = 0
self.max_req_len = ENTIRE_RECORD
record_data = ByteBuffer()
while True:
req.length = self.max_req_len
if self.max_req_len != 255 and req.offset + req.length > 16:
req.length = 16 - req.offset # depends on [control=['if'], data=[]]
rsp = self.send_message(req)
if rsp.completion_code == constants.CC_CANT_RET_NUM_REQ_BYTES:
if self.max_req_len == 255:
self.max_req_len = 16 # depends on [control=['if'], data=[]]
else:
self.max_req_len -= 1
continue # depends on [control=['if'], data=[]]
else:
check_completion_code(rsp.completion_code)
record_data.extend(rsp.record_data)
req.offset = len(record_data)
if len(record_data) >= 16:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
next_record_id = rsp.next_record_id
yield SelEntry(record_data)
if next_record_id == 65535:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def set_token(self, token):
    """
    Set token in authentification for next requests
    :param token: str. token to set in auth. If None, reinit auth
    """
    if not token:
        # No token given: clear any previous authentication state.
        self._token = None
        self.authenticated = False
        self.session.auth = None
        logger.debug("Session token/auth reinitialised")
        return
    self._token = token
    self.authenticated = True  # TODO: Remove this parameter
    self.session.auth = HTTPBasicAuth(token, '')
    logger.debug("Using session token: %s", token)
constant[
Set token in authentification for next requests
:param token: str. token to set in auth. If None, reinit auth
]
if name[token] begin[:]
variable[auth] assign[=] call[name[HTTPBasicAuth], parameter[name[token], constant[]]]
name[self]._token assign[=] name[token]
name[self].authenticated assign[=] constant[True]
name[self].session.auth assign[=] name[auth]
call[name[logger].debug, parameter[constant[Using session token: %s], name[token]]] | keyword[def] identifier[set_token] ( identifier[self] , identifier[token] ):
literal[string]
keyword[if] identifier[token] :
identifier[auth] = identifier[HTTPBasicAuth] ( identifier[token] , literal[string] )
identifier[self] . identifier[_token] = identifier[token]
identifier[self] . identifier[authenticated] = keyword[True]
identifier[self] . identifier[session] . identifier[auth] = identifier[auth]
identifier[logger] . identifier[debug] ( literal[string] , identifier[token] )
keyword[else] :
identifier[self] . identifier[_token] = keyword[None]
identifier[self] . identifier[authenticated] = keyword[False]
identifier[self] . identifier[session] . identifier[auth] = keyword[None]
identifier[logger] . identifier[debug] ( literal[string] ) | def set_token(self, token):
"""
Set token in authentification for next requests
:param token: str. token to set in auth. If None, reinit auth
"""
if token:
auth = HTTPBasicAuth(token, '')
self._token = token
self.authenticated = True # TODO: Remove this parameter
self.session.auth = auth
logger.debug('Using session token: %s', token) # depends on [control=['if'], data=[]]
else:
self._token = None
self.authenticated = False
self.session.auth = None
logger.debug('Session token/auth reinitialised') |
def pay(self, predecessor):
    """Give the predecessor match set its share of this match set's
    expected future payoff.

    ``predecessor`` must be either None (first situation in the scenario)
    or the MatchSet whose selected action led directly to this match
    set's situation; when it is None, nothing happens.

    Usage:
        match_set = model.match(situation)
        match_set.pay(previous_match_set)

    Arguments:
        predecessor: The preceding MatchSet instance, or None.
    Return: None
    """
    assert predecessor is None or isinstance(predecessor, MatchSet)
    if predecessor is None:
        return
    # Credit the predecessor with the discounted expectation of this
    # match set's future payoff.
    expectation = self._algorithm.get_future_expectation(self)
    predecessor.payoff += expectation
constant[If the predecessor is not None, gives the appropriate amount of
payoff to the predecessor in payment for its contribution to this
match set's expected future payoff. The predecessor argument should
be either None or a MatchSet instance whose selected action led
directly to this match set's situation.
Usage:
match_set = model.match(situation)
match_set.pay(previous_match_set)
Arguments:
predecessor: The MatchSet instance which was produced by the
same classifier set in response to the immediately
preceding situation, or None if this is the first situation
in the scenario.
Return: None
]
assert[<ast.BoolOp object at 0x7da1b0f5aaa0>]
if compare[name[predecessor] is_not constant[None]] begin[:]
variable[expectation] assign[=] call[name[self]._algorithm.get_future_expectation, parameter[name[self]]]
<ast.AugAssign object at 0x7da1b0fe9e70> | keyword[def] identifier[pay] ( identifier[self] , identifier[predecessor] ):
literal[string]
keyword[assert] identifier[predecessor] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[predecessor] , identifier[MatchSet] )
keyword[if] identifier[predecessor] keyword[is] keyword[not] keyword[None] :
identifier[expectation] = identifier[self] . identifier[_algorithm] . identifier[get_future_expectation] ( identifier[self] )
identifier[predecessor] . identifier[payoff] += identifier[expectation] | def pay(self, predecessor):
"""If the predecessor is not None, gives the appropriate amount of
payoff to the predecessor in payment for its contribution to this
match set's expected future payoff. The predecessor argument should
be either None or a MatchSet instance whose selected action led
directly to this match set's situation.
Usage:
match_set = model.match(situation)
match_set.pay(previous_match_set)
Arguments:
predecessor: The MatchSet instance which was produced by the
same classifier set in response to the immediately
preceding situation, or None if this is the first situation
in the scenario.
Return: None
"""
assert predecessor is None or isinstance(predecessor, MatchSet)
if predecessor is not None:
expectation = self._algorithm.get_future_expectation(self)
predecessor.payoff += expectation # depends on [control=['if'], data=['predecessor']] |
def open(cls, filename, crs=None):
    """Create a FileCollection from a file on disk.

    Parameters
    ----------
    filename : str
        Path of the file to read.
    crs : CRS
        Overrides the crs of the collection; this function does not
        reproject.
    """
    # Read only the metadata we need, then close the datasource.
    with fiona.Env(), fiona.open(filename, 'r') as source:
        source_crs = CRS(source.crs)
        schema = source.schema
        length = len(source)
    return cls(filename, crs if crs else source_crs, schema, length)
constant[Creates a FileCollection from a file in disk.
Parameters
----------
filename : str
Path of the file to read.
crs : CRS
overrides the crs of the collection, this funtion will not reprojects
]
with call[name[fiona].Env, parameter[]] begin[:]
with call[name[fiona].open, parameter[name[filename], constant[r]]] begin[:]
variable[original_crs] assign[=] call[name[CRS], parameter[name[source].crs]]
variable[schema] assign[=] name[source].schema
variable[length] assign[=] call[name[len], parameter[name[source]]]
variable[crs] assign[=] <ast.BoolOp object at 0x7da18f09ce20>
variable[ret_val] assign[=] call[name[cls], parameter[name[filename], name[crs], name[schema], name[length]]]
return[name[ret_val]] | keyword[def] identifier[open] ( identifier[cls] , identifier[filename] , identifier[crs] = keyword[None] ):
literal[string]
keyword[with] identifier[fiona] . identifier[Env] ():
keyword[with] identifier[fiona] . identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[source] :
identifier[original_crs] = identifier[CRS] ( identifier[source] . identifier[crs] )
identifier[schema] = identifier[source] . identifier[schema]
identifier[length] = identifier[len] ( identifier[source] )
identifier[crs] = identifier[crs] keyword[or] identifier[original_crs]
identifier[ret_val] = identifier[cls] ( identifier[filename] , identifier[crs] , identifier[schema] , identifier[length] )
keyword[return] identifier[ret_val] | def open(cls, filename, crs=None):
"""Creates a FileCollection from a file in disk.
Parameters
----------
filename : str
Path of the file to read.
crs : CRS
overrides the crs of the collection, this funtion will not reprojects
"""
with fiona.Env():
with fiona.open(filename, 'r') as source:
original_crs = CRS(source.crs)
schema = source.schema
length = len(source) # depends on [control=['with'], data=['source']] # depends on [control=['with'], data=[]]
crs = crs or original_crs
ret_val = cls(filename, crs, schema, length)
return ret_val |
def attach_vpngw(self, req, id, driver):
    """Attach network to VPN gateway

    :Param req
    :Type object Request
    """
    params = req.params
    vpngw = driver.get_vnpgw(params, id)
    if vpngw is None:
        # No gateway exists yet for this network; create one first.
        vpngw = driver.create_vpngw(params, id)
    response = driver.attach_vpngw(params, vpngw)
    return {
        'action': 'attach_igw',
        'controller': 'network',
        'id': id,
        'cloud': req.environ['calplus.cloud'],
        'response': response,
    }
constant[Attach network to VPN gateway
:Param req
:Type object Request
]
variable[vpngw] assign[=] call[name[driver].get_vnpgw, parameter[name[req].params, name[id]]]
if compare[name[vpngw] is constant[None]] begin[:]
variable[vpngw] assign[=] call[name[driver].create_vpngw, parameter[name[req].params, name[id]]]
variable[response] assign[=] call[name[driver].attach_vpngw, parameter[name[req].params, name[vpngw]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2876680>, <ast.Constant object at 0x7da1b2877c40>, <ast.Constant object at 0x7da1b2877e80>, <ast.Constant object at 0x7da1b2875bd0>, <ast.Constant object at 0x7da1b2877eb0>], [<ast.Constant object at 0x7da1b2876f20>, <ast.Constant object at 0x7da1b28769b0>, <ast.Name object at 0x7da1b28767d0>, <ast.Subscript object at 0x7da1b2876d40>, <ast.Name object at 0x7da1b2874a90>]]
return[name[data]] | keyword[def] identifier[attach_vpngw] ( identifier[self] , identifier[req] , identifier[id] , identifier[driver] ):
literal[string]
identifier[vpngw] = identifier[driver] . identifier[get_vnpgw] ( identifier[req] . identifier[params] , identifier[id] )
keyword[if] identifier[vpngw] keyword[is] keyword[None] :
identifier[vpngw] = identifier[driver] . identifier[create_vpngw] ( identifier[req] . identifier[params] , identifier[id] )
identifier[response] = identifier[driver] . identifier[attach_vpngw] ( identifier[req] . identifier[params] , identifier[vpngw] )
identifier[data] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[id] ,
literal[string] : identifier[req] . identifier[environ] [ literal[string] ],
literal[string] : identifier[response]
}
keyword[return] identifier[data] | def attach_vpngw(self, req, id, driver):
"""Attach network to VPN gateway
:Param req
:Type object Request
"""
vpngw = driver.get_vnpgw(req.params, id)
if vpngw is None:
vpngw = driver.create_vpngw(req.params, id) # depends on [control=['if'], data=['vpngw']]
response = driver.attach_vpngw(req.params, vpngw)
data = {'action': 'attach_igw', 'controller': 'network', 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response}
return data |
def _delete_nodes(self, features):
""" Removes the node corresponding to each item in 'features'.
"""
graph = self._graph
if graph is not None:
for feature in features:
graph.delete_node( id(feature) )
graph.arrange_all() | def function[_delete_nodes, parameter[self, features]]:
constant[ Removes the node corresponding to each item in 'features'.
]
variable[graph] assign[=] name[self]._graph
if compare[name[graph] is_not constant[None]] begin[:]
for taget[name[feature]] in starred[name[features]] begin[:]
call[name[graph].delete_node, parameter[call[name[id], parameter[name[feature]]]]]
call[name[graph].arrange_all, parameter[]] | keyword[def] identifier[_delete_nodes] ( identifier[self] , identifier[features] ):
literal[string]
identifier[graph] = identifier[self] . identifier[_graph]
keyword[if] identifier[graph] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[feature] keyword[in] identifier[features] :
identifier[graph] . identifier[delete_node] ( identifier[id] ( identifier[feature] ))
identifier[graph] . identifier[arrange_all] () | def _delete_nodes(self, features):
""" Removes the node corresponding to each item in 'features'.
"""
graph = self._graph
if graph is not None:
for feature in features:
graph.delete_node(id(feature)) # depends on [control=['for'], data=['feature']] # depends on [control=['if'], data=['graph']]
graph.arrange_all() |
def set_input(self, input_id):
    """Send Input command."""
    endpoint = ENDPOINTS["setInput"].format(self.ip_address, self.zone_id)
    return request(endpoint, params={"input": input_id})
constant[Send Input command.]
variable[req_url] assign[=] call[call[name[ENDPOINTS]][constant[setInput]].format, parameter[name[self].ip_address, name[self].zone_id]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b24ada50>], [<ast.Name object at 0x7da1b24ad810>]]
return[call[name[request], parameter[name[req_url]]]] | keyword[def] identifier[set_input] ( identifier[self] , identifier[input_id] ):
literal[string]
identifier[req_url] = identifier[ENDPOINTS] [ literal[string] ]. identifier[format] ( identifier[self] . identifier[ip_address] , identifier[self] . identifier[zone_id] )
identifier[params] ={ literal[string] : identifier[input_id] }
keyword[return] identifier[request] ( identifier[req_url] , identifier[params] = identifier[params] ) | def set_input(self, input_id):
"""Send Input command."""
req_url = ENDPOINTS['setInput'].format(self.ip_address, self.zone_id)
params = {'input': input_id}
return request(req_url, params=params) |
def actual_causation():
    """The actual causation example network, consisting of an ``OR`` and
    ``AND`` gate with self-loops.
    """
    # Deterministic state-by-state transition matrix for (OR, AND).
    tpm = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ])
    # Fully connected: each node feeds itself and the other node.
    cm = np.ones((2, 2), dtype=int)
    return Network(tpm, cm, node_labels=('OR', 'AND'))
constant[The actual causation example network, consisting of an ``OR`` and
``AND`` gate with self-loops.
]
variable[tpm] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da18c4ce890>, <ast.List object at 0x7da18c4cd1e0>, <ast.List object at 0x7da18c4ce050>, <ast.List object at 0x7da18c4cf0a0>]]]]
variable[cm] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da18c4ce290>, <ast.List object at 0x7da18c4cd1b0>]]]]
return[call[name[Network], parameter[name[tpm], name[cm]]]] | keyword[def] identifier[actual_causation] ():
literal[string]
identifier[tpm] = identifier[np] . identifier[array] ([
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ]
])
identifier[cm] = identifier[np] . identifier[array] ([
[ literal[int] , literal[int] ],
[ literal[int] , literal[int] ]
])
keyword[return] identifier[Network] ( identifier[tpm] , identifier[cm] , identifier[node_labels] =( literal[string] , literal[string] )) | def actual_causation():
"""The actual causation example network, consisting of an ``OR`` and
``AND`` gate with self-loops.
"""
tpm = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
cm = np.array([[1, 1], [1, 1]])
return Network(tpm, cm, node_labels=('OR', 'AND')) |
def get_vcl(self, service_id, version_number, name, include_content=True):
    """Get the uploaded VCL for a particular service and version."""
    url = "/service/%s/version/%d/vcl/%s?include_content=%d" % (
        service_id, version_number, name, int(include_content))
    return FastlyVCL(self, self._fetch(url))
constant[Get the uploaded VCL for a particular service and version.]
variable[content] assign[=] call[name[self]._fetch, parameter[binary_operation[constant[/service/%s/version/%d/vcl/%s?include_content=%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f434c0>, <ast.Name object at 0x7da1b0f40fd0>, <ast.Name object at 0x7da1b0f41f90>, <ast.Call object at 0x7da1b0f404c0>]]]]]
return[call[name[FastlyVCL], parameter[name[self], name[content]]]] | keyword[def] identifier[get_vcl] ( identifier[self] , identifier[service_id] , identifier[version_number] , identifier[name] , identifier[include_content] = keyword[True] ):
literal[string]
identifier[content] = identifier[self] . identifier[_fetch] ( literal[string] %( identifier[service_id] , identifier[version_number] , identifier[name] , identifier[int] ( identifier[include_content] )))
keyword[return] identifier[FastlyVCL] ( identifier[self] , identifier[content] ) | def get_vcl(self, service_id, version_number, name, include_content=True):
"""Get the uploaded VCL for a particular service and version."""
content = self._fetch('/service/%s/version/%d/vcl/%s?include_content=%d' % (service_id, version_number, name, int(include_content)))
return FastlyVCL(self, content) |
def send(client, sender, targets, msg_name, dest_name=None, block=None):
    """Send a message from one engine to one-or-more engines.

    Parameters
    ----------
    client : parallel client
        Client used to address the engines.
    sender : engine id
        Engine that sends the message.
    targets : engine id(s)
        Engine(s) that receive the message.
    msg_name : str
        Name of the global on the sender holding the message.
    dest_name : str, optional
        Name under which receivers store the message; defaults to
        ``msg_name``.
    block : bool, optional
        Whether the receiving ``execute`` call blocks.
    """
    dest_name = msg_name if dest_name is None else dest_name

    def _send(targets, m_name):
        # Runs on the sender engine: resolve the message by name there.
        msg = globals()[m_name]
        return com.send(targets, msg)

    client[sender].apply_async(_send, targets, msg_name)
    # BUG FIX: propagate the caller's ``block`` argument; the original
    # hardcoded block=None, silently ignoring the parameter.
    return client[targets].execute('%s=com.recv()' % dest_name, block=block)
constant[send a message from one to one-or-more engines.]
variable[dest_name] assign[=] <ast.IfExp object at 0x7da1b26ae470>
def function[_send, parameter[targets, m_name]]:
variable[msg] assign[=] call[call[name[globals], parameter[]]][name[m_name]]
return[call[name[com].send, parameter[name[targets], name[msg]]]]
call[call[name[client]][name[sender]].apply_async, parameter[name[_send], name[targets], name[msg_name]]]
return[call[call[name[client]][name[targets]].execute, parameter[binary_operation[constant[%s=com.recv()] <ast.Mod object at 0x7da2590d6920> name[dest_name]]]]] | keyword[def] identifier[send] ( identifier[client] , identifier[sender] , identifier[targets] , identifier[msg_name] , identifier[dest_name] = keyword[None] , identifier[block] = keyword[None] ):
literal[string]
identifier[dest_name] = identifier[msg_name] keyword[if] identifier[dest_name] keyword[is] keyword[None] keyword[else] identifier[dest_name]
keyword[def] identifier[_send] ( identifier[targets] , identifier[m_name] ):
identifier[msg] = identifier[globals] ()[ identifier[m_name] ]
keyword[return] identifier[com] . identifier[send] ( identifier[targets] , identifier[msg] )
identifier[client] [ identifier[sender] ]. identifier[apply_async] ( identifier[_send] , identifier[targets] , identifier[msg_name] )
keyword[return] identifier[client] [ identifier[targets] ]. identifier[execute] ( literal[string] % identifier[dest_name] , identifier[block] = keyword[None] ) | def send(client, sender, targets, msg_name, dest_name=None, block=None):
"""send a message from one to one-or-more engines."""
dest_name = msg_name if dest_name is None else dest_name
def _send(targets, m_name):
msg = globals()[m_name]
return com.send(targets, msg)
client[sender].apply_async(_send, targets, msg_name)
return client[targets].execute('%s=com.recv()' % dest_name, block=None) |
def main(log_files):
    """ Main executor of the trimmomatic_report template.

    Parameters
    ----------
    log_files : list
        List of paths to the trimmomatic log files.
    """
    suffix = "_trimlog.txt"
    log_storage = OrderedDict()
    for log in log_files:
        # BUG FIX: str.rstrip("_trimlog.txt") strips any trailing run of
        # those *characters*, which can eat the end of the sample name
        # (e.g. "sample_m_trimlog.txt" -> "sample"). Strip the literal
        # suffix instead.
        log_id = log[:-len(suffix)] if log.endswith(suffix) else log
        # Populate storage of current sample
        log_storage[log_id] = parse_log(log)
        # Remove temporary trim log file
        os.remove(log)
    # NOTE(review): log_id here is the id of the *last* log processed,
    # mirroring the original behaviour -- confirm that is intended.
    write_report(log_storage, "trimmomatic_report.csv", log_id)
write_report(log_storage, "trimmomatic_report.csv", log_id) | def function[main, parameter[log_files]]:
constant[ Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
]
variable[log_storage] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[log]] in starred[name[log_files]] begin[:]
variable[log_id] assign[=] call[name[log].rstrip, parameter[constant[_trimlog.txt]]]
call[name[log_storage]][name[log_id]] assign[=] call[name[parse_log], parameter[name[log]]]
call[name[os].remove, parameter[name[log]]]
call[name[write_report], parameter[name[log_storage], constant[trimmomatic_report.csv], name[log_id]]] | keyword[def] identifier[main] ( identifier[log_files] ):
literal[string]
identifier[log_storage] = identifier[OrderedDict] ()
keyword[for] identifier[log] keyword[in] identifier[log_files] :
identifier[log_id] = identifier[log] . identifier[rstrip] ( literal[string] )
identifier[log_storage] [ identifier[log_id] ]= identifier[parse_log] ( identifier[log] )
identifier[os] . identifier[remove] ( identifier[log] )
identifier[write_report] ( identifier[log_storage] , literal[string] , identifier[log_id] ) | def main(log_files):
""" Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
"""
log_storage = OrderedDict()
for log in log_files:
log_id = log.rstrip('_trimlog.txt')
# Populate storage of current sample
log_storage[log_id] = parse_log(log)
# Remove temporary trim log file
os.remove(log) # depends on [control=['for'], data=['log']]
write_report(log_storage, 'trimmomatic_report.csv', log_id) |
def initialize():
    """
    Initializes the cauldron library by confirming that it can be imported
    by the importlib library. If the attempt to import it fails, the system
    path will be modified and the attempt retried. If both attempts fail, an
    import error will be raised.
    """
    cauldron_module = get_cauldron_module()
    if cauldron_module is not None:
        return cauldron_module

    # Not importable as-is: make the project root visible and retry.
    sys.path.append(ROOT_DIRECTORY)
    cauldron_module = get_cauldron_module()
    if cauldron_module is not None:
        return cauldron_module

    # BUG FIX: the original passed ONE implicitly-concatenated string to
    # ' '.join(...) (missing comma in the tuple), which joined its
    # *characters* with spaces ("U n a b l e ..."). Join the sentences.
    raise ImportError(' '.join((
        'Unable to import cauldron.',
        'The package was not installed in a known location.',
    )))
constant[
Initializes the cauldron library by confirming that it can be imported
by the importlib library. If the attempt to import it fails, the system
path will be modified and the attempt retried. If both attempts fail, an
import error will be raised.
]
variable[cauldron_module] assign[=] call[name[get_cauldron_module], parameter[]]
if compare[name[cauldron_module] is_not constant[None]] begin[:]
return[name[cauldron_module]]
call[name[sys].path.append, parameter[name[ROOT_DIRECTORY]]]
variable[cauldron_module] assign[=] call[name[get_cauldron_module], parameter[]]
if compare[name[cauldron_module] is_not constant[None]] begin[:]
return[name[cauldron_module]]
<ast.Raise object at 0x7da20cabda80> | keyword[def] identifier[initialize] ():
literal[string]
identifier[cauldron_module] = identifier[get_cauldron_module] ()
keyword[if] identifier[cauldron_module] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[cauldron_module]
identifier[sys] . identifier[path] . identifier[append] ( identifier[ROOT_DIRECTORY] )
identifier[cauldron_module] = identifier[get_cauldron_module] ()
keyword[if] identifier[cauldron_module] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[cauldron_module]
keyword[raise] identifier[ImportError] ( literal[string] . identifier[join] ((
literal[string]
literal[string]
))) | def initialize():
"""
Initializes the cauldron library by confirming that it can be imported
by the importlib library. If the attempt to import it fails, the system
path will be modified and the attempt retried. If both attempts fail, an
import error will be raised.
"""
cauldron_module = get_cauldron_module()
if cauldron_module is not None:
return cauldron_module # depends on [control=['if'], data=['cauldron_module']]
sys.path.append(ROOT_DIRECTORY)
cauldron_module = get_cauldron_module()
if cauldron_module is not None:
return cauldron_module # depends on [control=['if'], data=['cauldron_module']]
raise ImportError(' '.join('Unable to import cauldron.The package was not installed in a known location.')) |
def simxGetOutMessageInfo(clientID, infoType):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Out-parameter filled by the C call; its value is returned alongside
    # the remote API return code.
    out_info = ct.c_int()
    return_code = c_GetOutMessageInfo(clientID, infoType, ct.byref(out_info))
    return return_code, out_info.value
constant[
Please have a look at the function description/documentation in the V-REP user manual
]
variable[info] assign[=] call[name[ct].c_int, parameter[]]
return[tuple[[<ast.Call object at 0x7da1b133fa90>, <ast.Attribute object at 0x7da1b133cd90>]]] | keyword[def] identifier[simxGetOutMessageInfo] ( identifier[clientID] , identifier[infoType] ):
literal[string]
identifier[info] = identifier[ct] . identifier[c_int] ()
keyword[return] identifier[c_GetOutMessageInfo] ( identifier[clientID] , identifier[infoType] , identifier[ct] . identifier[byref] ( identifier[info] )), identifier[info] . identifier[value] | def simxGetOutMessageInfo(clientID, infoType):
"""
Please have a look at the function description/documentation in the V-REP user manual
"""
info = ct.c_int()
return (c_GetOutMessageInfo(clientID, infoType, ct.byref(info)), info.value) |
def fetch_messages(self):
    """Sends FetchRequests for all topic/partitions set for consumption
    Returns:
        Generator that yields KafkaMessage structs
        after deserializing with the configured `deserializer_class`
    Note:
        Refreshes metadata on errors, and resets fetch offset on
        OffsetOutOfRange, per the configured `auto_offset_reset` policy
    See Also:
        Key KafkaConsumer configuration parameters:
        * `fetch_message_max_bytes`
        * `fetch_max_wait_ms`
        * `fetch_min_bytes`
        * `deserializer_class`
        * `auto_offset_reset`
    """
    # Fetch tuning parameters come straight from the consumer configuration
    max_bytes = self._config['fetch_message_max_bytes']
    max_wait_time = self._config['fetch_wait_max_ms']
    min_bytes = self._config['fetch_min_bytes']
    if not self._topics:
        raise KafkaConfigurationError('No topics or partitions configured')
    if not self._offsets.fetch:
        raise KafkaConfigurationError(
            'No fetch offsets found when calling fetch_messages'
        )
    # One FetchRequest per assigned topic/partition, resuming from the
    # currently tracked fetch offset for that partition
    fetches = [FetchRequest(topic, partition,
                            self._offsets.fetch[(topic, partition)],
                            max_bytes)
               for (topic, partition) in self._topics]
    # send_fetch_request will batch topic/partition requests by leader
    # fail_on_error=False: broker-level errors come back as per-partition
    # response objects so each one can be handled individually below
    responses = self._client.send_fetch_request(
        fetches,
        max_wait_time=max_wait_time,
        min_bytes=min_bytes,
        fail_on_error=False
    )
    for resp in responses:
        if isinstance(resp, FailedPayloadsError):
            logger.warning('FailedPayloadsError attempting to fetch data')
            self._refresh_metadata_on_error()
            continue
        topic = kafka_bytestring(resp.topic)
        partition = resp.partition
        try:
            check_error(resp)
        except OffsetOutOfRangeError:
            logger.warning('OffsetOutOfRange: topic %s, partition %d, '
                           'offset %d (Highwatermark: %d)',
                           topic, partition,
                           self._offsets.fetch[(topic, partition)],
                           resp.highwaterMark)
            # Reset offset per the configured auto_offset_reset policy,
            # then skip this partition for the rest of the pass
            self._offsets.fetch[(topic, partition)] = (
                self._reset_partition_offset((topic, partition))
            )
            continue
        except NotLeaderForPartitionError:
            logger.warning("NotLeaderForPartitionError for %s - %d. "
                           "Metadata may be out of date",
                           topic, partition)
            self._refresh_metadata_on_error()
            continue
        except RequestTimedOutError:
            logger.warning("RequestTimedOutError for %s - %d",
                           topic, partition)
            continue
        # Track server highwater mark
        self._offsets.highwater[(topic, partition)] = resp.highwaterMark
        # Yield each message
        # Kafka-python could raise an exception during iteration
        # we are not catching -- user will need to address
        for (offset, message) in resp.messages:
            # deserializer_class could raise an exception here
            val = self._config['deserializer_class'](message.value)
            msg = KafkaMessage(topic, partition, offset, message.key, val)
            # in some cases the server will return earlier messages
            # than we requested. skip them per kafka spec
            if offset < self._offsets.fetch[(topic, partition)]:
                logger.debug('message offset less than fetched offset '
                             'skipping: %s', msg)
                continue
            # Only increment fetch offset
            # if we safely got the message and deserialized
            self._offsets.fetch[(topic, partition)] = offset + 1
            # Then yield to user
            yield msg
constant[Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
* `fetch_max_wait_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset`
]
variable[max_bytes] assign[=] call[name[self]._config][constant[fetch_message_max_bytes]]
variable[max_wait_time] assign[=] call[name[self]._config][constant[fetch_wait_max_ms]]
variable[min_bytes] assign[=] call[name[self]._config][constant[fetch_min_bytes]]
if <ast.UnaryOp object at 0x7da1b19b1240> begin[:]
<ast.Raise object at 0x7da1b19b2140>
if <ast.UnaryOp object at 0x7da1b19b1630> begin[:]
<ast.Raise object at 0x7da1b19b1b40>
variable[fetches] assign[=] <ast.ListComp object at 0x7da1b19b3df0>
variable[responses] assign[=] call[name[self]._client.send_fetch_request, parameter[name[fetches]]]
for taget[name[resp]] in starred[name[responses]] begin[:]
if call[name[isinstance], parameter[name[resp], name[FailedPayloadsError]]] begin[:]
call[name[logger].warning, parameter[constant[FailedPayloadsError attempting to fetch data]]]
call[name[self]._refresh_metadata_on_error, parameter[]]
continue
variable[topic] assign[=] call[name[kafka_bytestring], parameter[name[resp].topic]]
variable[partition] assign[=] name[resp].partition
<ast.Try object at 0x7da1b19427a0>
call[name[self]._offsets.highwater][tuple[[<ast.Name object at 0x7da1b1940490>, <ast.Name object at 0x7da1b1942c50>]]] assign[=] name[resp].highwaterMark
for taget[tuple[[<ast.Name object at 0x7da1b1942da0>, <ast.Name object at 0x7da1b19422f0>]]] in starred[name[resp].messages] begin[:]
variable[val] assign[=] call[call[name[self]._config][constant[deserializer_class]], parameter[name[message].value]]
variable[msg] assign[=] call[name[KafkaMessage], parameter[name[topic], name[partition], name[offset], name[message].key, name[val]]]
if compare[name[offset] less[<] call[name[self]._offsets.fetch][tuple[[<ast.Name object at 0x7da1b1943790>, <ast.Name object at 0x7da1b1942e00>]]]] begin[:]
call[name[logger].debug, parameter[constant[message offset less than fetched offset skipping: %s], name[msg]]]
continue
call[name[self]._offsets.fetch][tuple[[<ast.Name object at 0x7da1b1943880>, <ast.Name object at 0x7da1b1942a40>]]] assign[=] binary_operation[name[offset] + constant[1]]
<ast.Yield object at 0x7da1b1801e70> | keyword[def] identifier[fetch_messages] ( identifier[self] ):
literal[string]
identifier[max_bytes] = identifier[self] . identifier[_config] [ literal[string] ]
identifier[max_wait_time] = identifier[self] . identifier[_config] [ literal[string] ]
identifier[min_bytes] = identifier[self] . identifier[_config] [ literal[string] ]
keyword[if] keyword[not] identifier[self] . identifier[_topics] :
keyword[raise] identifier[KafkaConfigurationError] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[_offsets] . identifier[fetch] :
keyword[raise] identifier[KafkaConfigurationError] (
literal[string]
)
identifier[fetches] =[ identifier[FetchRequest] ( identifier[topic] , identifier[partition] ,
identifier[self] . identifier[_offsets] . identifier[fetch] [( identifier[topic] , identifier[partition] )],
identifier[max_bytes] )
keyword[for] ( identifier[topic] , identifier[partition] ) keyword[in] identifier[self] . identifier[_topics] ]
identifier[responses] = identifier[self] . identifier[_client] . identifier[send_fetch_request] (
identifier[fetches] ,
identifier[max_wait_time] = identifier[max_wait_time] ,
identifier[min_bytes] = identifier[min_bytes] ,
identifier[fail_on_error] = keyword[False]
)
keyword[for] identifier[resp] keyword[in] identifier[responses] :
keyword[if] identifier[isinstance] ( identifier[resp] , identifier[FailedPayloadsError] ):
identifier[logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[_refresh_metadata_on_error] ()
keyword[continue]
identifier[topic] = identifier[kafka_bytestring] ( identifier[resp] . identifier[topic] )
identifier[partition] = identifier[resp] . identifier[partition]
keyword[try] :
identifier[check_error] ( identifier[resp] )
keyword[except] identifier[OffsetOutOfRangeError] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string] ,
identifier[topic] , identifier[partition] ,
identifier[self] . identifier[_offsets] . identifier[fetch] [( identifier[topic] , identifier[partition] )],
identifier[resp] . identifier[highwaterMark] )
identifier[self] . identifier[_offsets] . identifier[fetch] [( identifier[topic] , identifier[partition] )]=(
identifier[self] . identifier[_reset_partition_offset] (( identifier[topic] , identifier[partition] ))
)
keyword[continue]
keyword[except] identifier[NotLeaderForPartitionError] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string] ,
identifier[topic] , identifier[partition] )
identifier[self] . identifier[_refresh_metadata_on_error] ()
keyword[continue]
keyword[except] identifier[RequestTimedOutError] :
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[topic] , identifier[partition] )
keyword[continue]
identifier[self] . identifier[_offsets] . identifier[highwater] [( identifier[topic] , identifier[partition] )]= identifier[resp] . identifier[highwaterMark]
keyword[for] ( identifier[offset] , identifier[message] ) keyword[in] identifier[resp] . identifier[messages] :
identifier[val] = identifier[self] . identifier[_config] [ literal[string] ]( identifier[message] . identifier[value] )
identifier[msg] = identifier[KafkaMessage] ( identifier[topic] , identifier[partition] , identifier[offset] , identifier[message] . identifier[key] , identifier[val] )
keyword[if] identifier[offset] < identifier[self] . identifier[_offsets] . identifier[fetch] [( identifier[topic] , identifier[partition] )]:
identifier[logger] . identifier[debug] ( literal[string]
literal[string] , identifier[msg] )
keyword[continue]
identifier[self] . identifier[_offsets] . identifier[fetch] [( identifier[topic] , identifier[partition] )]= identifier[offset] + literal[int]
keyword[yield] identifier[msg] | def fetch_messages(self):
"""Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
* `fetch_max_wait_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset`
"""
max_bytes = self._config['fetch_message_max_bytes']
max_wait_time = self._config['fetch_wait_max_ms']
min_bytes = self._config['fetch_min_bytes']
if not self._topics:
raise KafkaConfigurationError('No topics or partitions configured') # depends on [control=['if'], data=[]]
if not self._offsets.fetch:
raise KafkaConfigurationError('No fetch offsets found when calling fetch_messages') # depends on [control=['if'], data=[]]
fetches = [FetchRequest(topic, partition, self._offsets.fetch[topic, partition], max_bytes) for (topic, partition) in self._topics]
# send_fetch_request will batch topic/partition requests by leader
responses = self._client.send_fetch_request(fetches, max_wait_time=max_wait_time, min_bytes=min_bytes, fail_on_error=False)
for resp in responses:
if isinstance(resp, FailedPayloadsError):
logger.warning('FailedPayloadsError attempting to fetch data')
self._refresh_metadata_on_error()
continue # depends on [control=['if'], data=[]]
topic = kafka_bytestring(resp.topic)
partition = resp.partition
try:
check_error(resp) # depends on [control=['try'], data=[]]
except OffsetOutOfRangeError:
logger.warning('OffsetOutOfRange: topic %s, partition %d, offset %d (Highwatermark: %d)', topic, partition, self._offsets.fetch[topic, partition], resp.highwaterMark)
# Reset offset
self._offsets.fetch[topic, partition] = self._reset_partition_offset((topic, partition))
continue # depends on [control=['except'], data=[]]
except NotLeaderForPartitionError:
logger.warning('NotLeaderForPartitionError for %s - %d. Metadata may be out of date', topic, partition)
self._refresh_metadata_on_error()
continue # depends on [control=['except'], data=[]]
except RequestTimedOutError:
logger.warning('RequestTimedOutError for %s - %d', topic, partition)
continue # depends on [control=['except'], data=[]]
# Track server highwater mark
self._offsets.highwater[topic, partition] = resp.highwaterMark
# Yield each message
# Kafka-python could raise an exception during iteration
# we are not catching -- user will need to address
for (offset, message) in resp.messages:
# deserializer_class could raise an exception here
val = self._config['deserializer_class'](message.value)
msg = KafkaMessage(topic, partition, offset, message.key, val)
# in some cases the server will return earlier messages
# than we requested. skip them per kafka spec
if offset < self._offsets.fetch[topic, partition]:
logger.debug('message offset less than fetched offset skipping: %s', msg)
continue # depends on [control=['if'], data=[]]
# Only increment fetch offset
# if we safely got the message and deserialized
self._offsets.fetch[topic, partition] = offset + 1
# Then yield to user
yield msg # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['resp']] |
def remove_all_locks(self):
    """Removes all locks and ensures their content is written to disk."""
    # Flush in order of last access (oldest first); sorted() is stable,
    # matching the previous in-place sort behaviour.
    by_last_access = sorted(
        self._locks.items(),
        key=lambda item: item[1].get_last_access())
    for key, _lock in by_last_access:
        self._remove_lock(key)
constant[Removes all locks and ensures their content is written to disk.]
variable[locks] assign[=] call[name[list], parameter[call[name[self]._locks.items, parameter[]]]]
call[name[locks].sort, parameter[]]
for taget[name[l]] in starred[name[locks]] begin[:]
call[name[self]._remove_lock, parameter[call[name[l]][constant[0]]]] | keyword[def] identifier[remove_all_locks] ( identifier[self] ):
literal[string]
identifier[locks] = identifier[list] ( identifier[self] . identifier[_locks] . identifier[items] ())
identifier[locks] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[l] : identifier[l] [ literal[int] ]. identifier[get_last_access] ())
keyword[for] identifier[l] keyword[in] identifier[locks] :
identifier[self] . identifier[_remove_lock] ( identifier[l] [ literal[int] ]) | def remove_all_locks(self):
"""Removes all locks and ensures their content is written to disk."""
locks = list(self._locks.items())
locks.sort(key=lambda l: l[1].get_last_access())
for l in locks:
self._remove_lock(l[0]) # depends on [control=['for'], data=['l']] |
def _print_tabulate_data(headers, data, table_format):  # pragma: no cover
    """
    Shows the tabulate data on the screen and returns None.

    :param headers: A list of column headers.
    :param data: A list of tabular data to display.
    :param table_format: Name of the tabulate table format to render with.
    :returns: None
    """
    rendered = tabulate.tabulate(data, headers, tablefmt=table_format)
    print(rendered)
constant[
Shows the tabulate data on the screen and returns None.
:param headers: A list of column headers.
:param data: A list of tabular data to display.
:returns: None
]
call[name[print], parameter[call[name[tabulate].tabulate, parameter[name[data], name[headers]]]]] | keyword[def] identifier[_print_tabulate_data] ( identifier[headers] , identifier[data] , identifier[table_format] ):
literal[string]
identifier[print] ( identifier[tabulate] . identifier[tabulate] ( identifier[data] , identifier[headers] , identifier[tablefmt] = identifier[table_format] )) | def _print_tabulate_data(headers, data, table_format): # pragma: no cover
'\n Shows the tabulate data on the screen and returns None.\n\n :param headers: A list of column headers.\n :param data: A list of tabular data to display.\n :returns: None\n '
print(tabulate.tabulate(data, headers, tablefmt=table_format)) |
def _create_entry(self, url, title, tags):
    """
    Create an entry

    :param url: url to save
    :param title: title to set
    :param tags: tags to set
    :return: status - True if the pocket entry was created, False otherwise
    """
    try:
        self.pocket.add(url=url, title=title, tags=tags)
    except Exception as e:
        # Record the failure against the trigger so the error state is
        # visible, then report it to the caller.
        logger.critical(e)
        update_result(self.trigger_id, msg=e, status=False)
        return False
    # FIX: dropped the redundant str() wrapper around the literal, and
    # moved success logging out of the try block so it is only the pocket
    # call that is guarded.
    sentence = 'pocket {} created'.format(url)
    logger.debug(sentence)
    return True
constant[
Create an entry
:param url: url to save
:param title: title to set
:param tags: tags to set
:return: status
]
<ast.Try object at 0x7da18eb560e0>
return[name[status]] | keyword[def] identifier[_create_entry] ( identifier[self] , identifier[url] , identifier[title] , identifier[tags] ):
literal[string]
keyword[try] :
identifier[self] . identifier[pocket] . identifier[add] ( identifier[url] = identifier[url] , identifier[title] = identifier[title] , identifier[tags] = identifier[tags] )
identifier[sentence] = identifier[str] ( literal[string] ). identifier[format] ( identifier[url] )
identifier[logger] . identifier[debug] ( identifier[sentence] )
identifier[status] = keyword[True]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[critical] ( identifier[e] )
identifier[update_result] ( identifier[self] . identifier[trigger_id] , identifier[msg] = identifier[e] , identifier[status] = keyword[False] )
identifier[status] = keyword[False]
keyword[return] identifier[status] | def _create_entry(self, url, title, tags):
"""
Create an entry
:param url: url to save
:param title: title to set
:param tags: tags to set
:return: status
"""
try:
self.pocket.add(url=url, title=title, tags=tags)
sentence = str('pocket {} created').format(url)
logger.debug(sentence)
status = True # depends on [control=['try'], data=[]]
except Exception as e:
logger.critical(e)
update_result(self.trigger_id, msg=e, status=False)
status = False # depends on [control=['except'], data=['e']]
return status |
def CopyToStatTimeTuple(self):
    """Copies the date time value to a stat timestamp tuple.
    Returns:
      tuple[int, int]: a POSIX timestamp in seconds and the remainder in
      100 nano seconds or (None, None) on error.
    """
    timestamp = self._GetNormalizedTimestamp()
    if timestamp is None:
        return None, None

    # Precisions fine enough to carry a sub-second component.
    sub_second_precisions = (
        definitions.PRECISION_1_NANOSECOND,
        definitions.PRECISION_100_NANOSECONDS,
        definitions.PRECISION_1_MICROSECOND,
        definitions.PRECISION_1_MILLISECOND,
        definitions.PRECISION_100_MILLISECONDS)
    if self._precision not in sub_second_precisions:
        return int(timestamp), None

    # Express the fractional second in 100-nanosecond ticks.
    remainder = int((timestamp % 1) * self._100NS_PER_SECOND)
    return int(timestamp), remainder
constant[Copies the date time value to a stat timestamp tuple.
Returns:
tuple[int, int]: a POSIX timestamp in seconds and the remainder in
100 nano seconds or (None, None) on error.
]
variable[normalized_timestamp] assign[=] call[name[self]._GetNormalizedTimestamp, parameter[]]
if compare[name[normalized_timestamp] is constant[None]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18f720a60>, <ast.Constant object at 0x7da18f721150>]]]
if compare[name[self]._precision in tuple[[<ast.Attribute object at 0x7da18f7209a0>, <ast.Attribute object at 0x7da18f721a50>, <ast.Attribute object at 0x7da18f7200d0>, <ast.Attribute object at 0x7da18f721e10>, <ast.Attribute object at 0x7da18f723010>]]] begin[:]
variable[remainder] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[normalized_timestamp] <ast.Mod object at 0x7da2590d6920> constant[1]] * name[self]._100NS_PER_SECOND]]]
return[tuple[[<ast.Call object at 0x7da18f7222f0>, <ast.Name object at 0x7da18f720b80>]]]
return[tuple[[<ast.Call object at 0x7da18f723610>, <ast.Constant object at 0x7da18f721cf0>]]] | keyword[def] identifier[CopyToStatTimeTuple] ( identifier[self] ):
literal[string]
identifier[normalized_timestamp] = identifier[self] . identifier[_GetNormalizedTimestamp] ()
keyword[if] identifier[normalized_timestamp] keyword[is] keyword[None] :
keyword[return] keyword[None] , keyword[None]
keyword[if] identifier[self] . identifier[_precision] keyword[in] (
identifier[definitions] . identifier[PRECISION_1_NANOSECOND] ,
identifier[definitions] . identifier[PRECISION_100_NANOSECONDS] ,
identifier[definitions] . identifier[PRECISION_1_MICROSECOND] ,
identifier[definitions] . identifier[PRECISION_1_MILLISECOND] ,
identifier[definitions] . identifier[PRECISION_100_MILLISECONDS] ):
identifier[remainder] = identifier[int] (( identifier[normalized_timestamp] % literal[int] )* identifier[self] . identifier[_100NS_PER_SECOND] )
keyword[return] identifier[int] ( identifier[normalized_timestamp] ), identifier[remainder]
keyword[return] identifier[int] ( identifier[normalized_timestamp] ), keyword[None] | def CopyToStatTimeTuple(self):
"""Copies the date time value to a stat timestamp tuple.
Returns:
tuple[int, int]: a POSIX timestamp in seconds and the remainder in
100 nano seconds or (None, None) on error.
"""
normalized_timestamp = self._GetNormalizedTimestamp()
if normalized_timestamp is None:
return (None, None) # depends on [control=['if'], data=[]]
if self._precision in (definitions.PRECISION_1_NANOSECOND, definitions.PRECISION_100_NANOSECONDS, definitions.PRECISION_1_MICROSECOND, definitions.PRECISION_1_MILLISECOND, definitions.PRECISION_100_MILLISECONDS):
remainder = int(normalized_timestamp % 1 * self._100NS_PER_SECOND)
return (int(normalized_timestamp), remainder) # depends on [control=['if'], data=[]]
return (int(normalized_timestamp), None) |
def __getDummyDateList():
    """
    Generate a dummy date list for testing without
    hitting the server

    :returns: list of 'AYYYYDDD' composite date strings for years
        2001-2009, day-of-year 1-364
    """
    # BUG FIX: ``xrange`` is Python-2-only and raises NameError under
    # Python 3; ``range`` iterates over the same values on both versions.
    # NOTE(review): the upper bound excludes day 365 (and 366), matching
    # the original code -- confirm this truncation is intended.
    return [
        'A%04d%03d' % (year, day)
        for year in range(2001, 2010)
        for day in range(1, 365)
    ]
constant[
Generate a dummy date list for testing without
hitting the server
]
variable[D] assign[=] list[[]]
for taget[name[y]] in starred[call[name[xrange], parameter[constant[2001], constant[2010]]]] begin[:]
for taget[name[d]] in starred[call[name[xrange], parameter[constant[1], constant[365], constant[1]]]] begin[:]
call[name[D].append, parameter[binary_operation[constant[A%04d%03d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc9a980>, <ast.Name object at 0x7da18dc98af0>]]]]]
return[name[D]] | keyword[def] identifier[__getDummyDateList] ():
literal[string]
identifier[D] =[]
keyword[for] identifier[y] keyword[in] identifier[xrange] ( literal[int] , literal[int] ):
keyword[for] identifier[d] keyword[in] identifier[xrange] ( literal[int] , literal[int] , literal[int] ):
identifier[D] . identifier[append] ( literal[string] %( identifier[y] , identifier[d] ))
keyword[return] identifier[D] | def __getDummyDateList():
"""
Generate a dummy date list for testing without
hitting the server
"""
D = []
for y in xrange(2001, 2010):
for d in xrange(1, 365, 1):
D.append('A%04d%03d' % (y, d)) # depends on [control=['for'], data=['d']] # depends on [control=['for'], data=['y']]
return D |
def update_health(self, reporter, info):
    # type: (object, HealthInfo) -> None
    """Set the health attribute. Called from part"""
    with self.changes_squashed:
        reported_alarm = info.alarm
        if reported_alarm.is_ok():
            # Reporter recovered: forget any fault it previously raised
            self._faults.pop(reporter, None)
        else:
            self._faults[reporter] = reported_alarm
        if not self._faults:
            self.health.set_value("OK", alarm=None)
        else:
            # Publish the worst outstanding fault: last element after a
            # stable ascending sort by severity
            ranked = sorted(self._faults.values(),
                            key=lambda fault: fault.severity.value)
            worst = ranked[-1]
            self.health.set_value(worst.message, alarm=worst)
constant[Set the health attribute. Called from part]
with name[self].changes_squashed begin[:]
variable[alarm] assign[=] name[info].alarm
if call[name[alarm].is_ok, parameter[]] begin[:]
call[name[self]._faults.pop, parameter[name[reporter], constant[None]]]
if name[self]._faults begin[:]
variable[faults] assign[=] call[name[sorted], parameter[call[name[self]._faults.values, parameter[]]]]
variable[alarm] assign[=] call[name[faults]][<ast.UnaryOp object at 0x7da18f720490>]
variable[text] assign[=] call[name[faults]][<ast.UnaryOp object at 0x7da18f722890>].message
call[name[self].health.set_value, parameter[name[text]]] | keyword[def] identifier[update_health] ( identifier[self] , identifier[reporter] , identifier[info] ):
literal[string]
keyword[with] identifier[self] . identifier[changes_squashed] :
identifier[alarm] = identifier[info] . identifier[alarm]
keyword[if] identifier[alarm] . identifier[is_ok] ():
identifier[self] . identifier[_faults] . identifier[pop] ( identifier[reporter] , keyword[None] )
keyword[else] :
identifier[self] . identifier[_faults] [ identifier[reporter] ]= identifier[alarm]
keyword[if] identifier[self] . identifier[_faults] :
identifier[faults] = identifier[sorted] ( identifier[self] . identifier[_faults] . identifier[values] (),
identifier[key] = keyword[lambda] identifier[a] : identifier[a] . identifier[severity] . identifier[value] )
identifier[alarm] = identifier[faults] [- literal[int] ]
identifier[text] = identifier[faults] [- literal[int] ]. identifier[message]
keyword[else] :
identifier[alarm] = keyword[None]
identifier[text] = literal[string]
identifier[self] . identifier[health] . identifier[set_value] ( identifier[text] , identifier[alarm] = identifier[alarm] ) | def update_health(self, reporter, info):
# type: (object, HealthInfo) -> None
'Set the health attribute. Called from part'
with self.changes_squashed:
alarm = info.alarm
if alarm.is_ok():
self._faults.pop(reporter, None) # depends on [control=['if'], data=[]]
else:
self._faults[reporter] = alarm
if self._faults:
# Sort them by severity
faults = sorted(self._faults.values(), key=lambda a: a.severity.value)
alarm = faults[-1]
text = faults[-1].message # depends on [control=['if'], data=[]]
else:
alarm = None
text = 'OK'
self.health.set_value(text, alarm=alarm) # depends on [control=['with'], data=[]] |
def dot(self, other_tf):
    """Compose this rigid transform with another.
    This transform is on the left-hand side of the composition.
    Parameters
    ----------
    other_tf : :obj:`RigidTransform`
        The other RigidTransform to compose with this one.
    Returns
    -------
    :obj:`RigidTransform`
        A RigidTransform that represents the composition.
    Raises
    ------
    ValueError
        If the to_frame of other_tf is not identical to this transform's
        from_frame.
    """
    # Frames must chain: (other.from -> other.to == self.from -> self.to)
    if self.from_frame != other_tf.to_frame:
        raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))

    composed_matrix = self.matrix.dot(other_tf.matrix)
    rotation, translation = RigidTransform.rotation_and_translation_from_matrix(composed_matrix)

    if isinstance(other_tf, SimilarityTransform):
        # Promote the left-hand side to a similarity transform (scale 1)
        # and delegate composition to SimilarityTransform.__mul__
        lhs = SimilarityTransform(self.rotation, self.translation, scale=1.0,
                                  from_frame=self.from_frame,
                                  to_frame=self.to_frame)
        return lhs * other_tf
    return RigidTransform(rotation, translation,
                          from_frame=other_tf.from_frame,
                          to_frame=self.to_frame)
constant[Compose this rigid transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`RigidTransform`
The other RigidTransform to compose with this one.
Returns
-------
:obj:`RigidTransform`
A RigidTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame.
]
if compare[name[other_tf].to_frame not_equal[!=] name[self].from_frame] begin[:]
<ast.Raise object at 0x7da1b1241420>
variable[pose_tf] assign[=] call[name[self].matrix.dot, parameter[name[other_tf].matrix]]
<ast.Tuple object at 0x7da1b12436a0> assign[=] call[name[RigidTransform].rotation_and_translation_from_matrix, parameter[name[pose_tf]]]
if call[name[isinstance], parameter[name[other_tf], name[SimilarityTransform]]] begin[:]
return[binary_operation[call[name[SimilarityTransform], parameter[name[self].rotation, name[self].translation]] * name[other_tf]]]
return[call[name[RigidTransform], parameter[name[rotation], name[translation]]]] | keyword[def] identifier[dot] ( identifier[self] , identifier[other_tf] ):
literal[string]
keyword[if] identifier[other_tf] . identifier[to_frame] != identifier[self] . identifier[from_frame] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[other_tf] . identifier[to_frame] , identifier[self] . identifier[from_frame] ))
identifier[pose_tf] = identifier[self] . identifier[matrix] . identifier[dot] ( identifier[other_tf] . identifier[matrix] )
identifier[rotation] , identifier[translation] = identifier[RigidTransform] . identifier[rotation_and_translation_from_matrix] ( identifier[pose_tf] )
keyword[if] identifier[isinstance] ( identifier[other_tf] , identifier[SimilarityTransform] ):
keyword[return] identifier[SimilarityTransform] ( identifier[self] . identifier[rotation] , identifier[self] . identifier[translation] , identifier[scale] = literal[int] ,
identifier[from_frame] = identifier[self] . identifier[from_frame] ,
identifier[to_frame] = identifier[self] . identifier[to_frame] )* identifier[other_tf]
keyword[return] identifier[RigidTransform] ( identifier[rotation] , identifier[translation] ,
identifier[from_frame] = identifier[other_tf] . identifier[from_frame] ,
identifier[to_frame] = identifier[self] . identifier[to_frame] ) | def dot(self, other_tf):
"""Compose this rigid transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`RigidTransform`
The other RigidTransform to compose with this one.
Returns
-------
:obj:`RigidTransform`
A RigidTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame.
"""
if other_tf.to_frame != self.from_frame:
raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame)) # depends on [control=['if'], data=[]]
pose_tf = self.matrix.dot(other_tf.matrix)
(rotation, translation) = RigidTransform.rotation_and_translation_from_matrix(pose_tf)
if isinstance(other_tf, SimilarityTransform):
return SimilarityTransform(self.rotation, self.translation, scale=1.0, from_frame=self.from_frame, to_frame=self.to_frame) * other_tf # depends on [control=['if'], data=[]]
return RigidTransform(rotation, translation, from_frame=other_tf.from_frame, to_frame=self.to_frame) |
def is_event_loop_running_qt4(app=None):
"""Is the qt4 event loop running."""
if app is None:
app = get_app_qt4([''])
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
# Does qt4 provide a other way to detect this?
return False | def function[is_event_loop_running_qt4, parameter[app]]:
constant[Is the qt4 event loop running.]
if compare[name[app] is constant[None]] begin[:]
variable[app] assign[=] call[name[get_app_qt4], parameter[list[[<ast.Constant object at 0x7da20c6c6530>]]]]
if call[name[hasattr], parameter[name[app], constant[_in_event_loop]]] begin[:]
return[name[app]._in_event_loop] | keyword[def] identifier[is_event_loop_running_qt4] ( identifier[app] = keyword[None] ):
literal[string]
keyword[if] identifier[app] keyword[is] keyword[None] :
identifier[app] = identifier[get_app_qt4] ([ literal[string] ])
keyword[if] identifier[hasattr] ( identifier[app] , literal[string] ):
keyword[return] identifier[app] . identifier[_in_event_loop]
keyword[else] :
keyword[return] keyword[False] | def is_event_loop_running_qt4(app=None):
"""Is the qt4 event loop running."""
if app is None:
app = get_app_qt4(['']) # depends on [control=['if'], data=['app']]
if hasattr(app, '_in_event_loop'):
return app._in_event_loop # depends on [control=['if'], data=[]]
else:
# Does qt4 provide a other way to detect this?
return False |
def remove_group_role(request, role, group, domain=None, project=None):
"""Removes a given single role for a group from a domain or project."""
manager = keystoneclient(request, admin=True).roles
return manager.revoke(role=role, group=group, project=project,
domain=domain) | def function[remove_group_role, parameter[request, role, group, domain, project]]:
constant[Removes a given single role for a group from a domain or project.]
variable[manager] assign[=] call[name[keystoneclient], parameter[name[request]]].roles
return[call[name[manager].revoke, parameter[]]] | keyword[def] identifier[remove_group_role] ( identifier[request] , identifier[role] , identifier[group] , identifier[domain] = keyword[None] , identifier[project] = keyword[None] ):
literal[string]
identifier[manager] = identifier[keystoneclient] ( identifier[request] , identifier[admin] = keyword[True] ). identifier[roles]
keyword[return] identifier[manager] . identifier[revoke] ( identifier[role] = identifier[role] , identifier[group] = identifier[group] , identifier[project] = identifier[project] ,
identifier[domain] = identifier[domain] ) | def remove_group_role(request, role, group, domain=None, project=None):
"""Removes a given single role for a group from a domain or project."""
manager = keystoneclient(request, admin=True).roles
return manager.revoke(role=role, group=group, project=project, domain=domain) |
def register_lazy_provider_method(self, cls, method):
"""
Register a class method lazily as a provider.
"""
if 'provides' not in getattr(method, '__di__', {}):
raise DiayException('method %r is not a provider' % method)
@functools.wraps(method)
def wrapper(*args, **kwargs):
return getattr(self.get(cls), method.__name__)(*args, **kwargs)
self.factories[method.__di__['provides']] = wrapper | def function[register_lazy_provider_method, parameter[self, cls, method]]:
constant[
Register a class method lazily as a provider.
]
if compare[constant[provides] <ast.NotIn object at 0x7da2590d7190> call[name[getattr], parameter[name[method], constant[__di__], dictionary[[], []]]]] begin[:]
<ast.Raise object at 0x7da2041da0b0>
def function[wrapper, parameter[]]:
return[call[call[name[getattr], parameter[call[name[self].get, parameter[name[cls]]], name[method].__name__]], parameter[<ast.Starred object at 0x7da2041d9330>]]]
call[name[self].factories][call[name[method].__di__][constant[provides]]] assign[=] name[wrapper] | keyword[def] identifier[register_lazy_provider_method] ( identifier[self] , identifier[cls] , identifier[method] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[getattr] ( identifier[method] , literal[string] ,{}):
keyword[raise] identifier[DiayException] ( literal[string] % identifier[method] )
@ identifier[functools] . identifier[wraps] ( identifier[method] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[getattr] ( identifier[self] . identifier[get] ( identifier[cls] ), identifier[method] . identifier[__name__] )(* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[factories] [ identifier[method] . identifier[__di__] [ literal[string] ]]= identifier[wrapper] | def register_lazy_provider_method(self, cls, method):
"""
Register a class method lazily as a provider.
"""
if 'provides' not in getattr(method, '__di__', {}):
raise DiayException('method %r is not a provider' % method) # depends on [control=['if'], data=[]]
@functools.wraps(method)
def wrapper(*args, **kwargs):
return getattr(self.get(cls), method.__name__)(*args, **kwargs)
self.factories[method.__di__['provides']] = wrapper |
def marvcli_run(ctx, datasets, deps, excluded_nodes, force, force_dependent,
force_deps, keep, keep_going, list_nodes,
list_dependent, selected_nodes, update_detail,
update_listing, cachesize, collections):
"""Run nodes for selected datasets.
Datasets are specified by a list of set ids, or --collection
<name>, use --collection=* to run for all collections. --node in
conjunction with --collection=* will pick those collections for
which the selected nodes are configured.
Set ids may be abbreviated to any uniquely identifying
prefix. Suffix a prefix by '+' to match multiple.
"""
if collections and datasets:
ctx.fail('--collection and DATASETS are mutually exclusive')
if list_dependent and not selected_nodes:
ctx.fail('--list-dependent needs at least one selected --node')
if not any([datasets, collections, list_nodes]):
click.echo(ctx.get_help())
ctx.exit(1)
deps = 'force' if force_deps else deps
force = force_deps or force
site = create_app().site
if '*' in collections:
if selected_nodes:
collections = [k for k, v in site.collections.items()
if set(v.nodes).issuperset(selected_nodes)]
if not collections:
ctx.fail('No collections have all selected nodes')
else:
collections = None
else:
for col in collections:
if col not in site.collections:
ctx.fail('Unknown collection: {}'.format(col))
if list_nodes:
for col in (collections or sorted(site.collections.keys())):
click.echo('{}:'.format(col))
for name in sorted(site.collections[col].nodes):
if name == 'dataset':
continue
click.echo(' {}'.format(name))
return
if list_dependent:
for col in (collections or sorted(site.collections.keys())):
click.echo('{}:'.format(col))
dependent = {x for name in selected_nodes
for x in site.collections[col].nodes[name].dependent}
for name in sorted(x.name for x in dependent):
click.echo(' {}'.format(name))
return
errors = []
setids = [SetID(x) for x in parse_setids(datasets)]
if not setids:
query = db.session.query(Dataset.setid)\
.filter(Dataset.discarded.isnot(True))\
.filter(Dataset.status.op('&')(STATUS_MISSING) == 0)
if collections is not None:
query = query.filter(Dataset.collection.in_(collections))
setids = (SetID(x[0]) for x in query)
for setid in setids:
if IPDB:
site.run(setid, selected_nodes, deps, force, keep,
force_dependent, update_detail, update_listing,
excluded_nodes, cachesize=cachesize)
else:
try:
site.run(setid, selected_nodes, deps, force, keep,
force_dependent, update_detail, update_listing,
excluded_nodes, cachesize=cachesize)
except UnknownNode as e:
ctx.fail('Collection {} has no node {}'.format(*e.args))
except NoResultFound:
click.echo('ERROR: unknown {!r}'.format(setid), err=True)
if not keep_going:
raise
except BaseException as e:
errors.append(setid)
if isinstance(e, KeyboardInterrupt):
log.warn('KeyboardInterrupt: aborting')
raise
elif isinstance(e, DirectoryAlreadyExists):
click.echo("""
ERROR: Directory for node run already exists:
{!r}
In case no other node run is in progress, this is a bug which you are kindly
asked to report, providing information regarding any previous, failed node runs.
""".format(e.args[0]), err=True)
if not keep_going:
ctx.abort()
else:
log.error('Exception occured for dataset %s:', setid, exc_info=True)
log.error('Error occured for dataset %s: %s', setid, e)
if not keep_going:
ctx.exit(1)
if errors:
log.error('There were errors for %r', errors) | def function[marvcli_run, parameter[ctx, datasets, deps, excluded_nodes, force, force_dependent, force_deps, keep, keep_going, list_nodes, list_dependent, selected_nodes, update_detail, update_listing, cachesize, collections]]:
constant[Run nodes for selected datasets.
Datasets are specified by a list of set ids, or --collection
<name>, use --collection=* to run for all collections. --node in
conjunction with --collection=* will pick those collections for
which the selected nodes are configured.
Set ids may be abbreviated to any uniquely identifying
prefix. Suffix a prefix by '+' to match multiple.
]
if <ast.BoolOp object at 0x7da1b265f9a0> begin[:]
call[name[ctx].fail, parameter[constant[--collection and DATASETS are mutually exclusive]]]
if <ast.BoolOp object at 0x7da1b265fa30> begin[:]
call[name[ctx].fail, parameter[constant[--list-dependent needs at least one selected --node]]]
if <ast.UnaryOp object at 0x7da1b265e290> begin[:]
call[name[click].echo, parameter[call[name[ctx].get_help, parameter[]]]]
call[name[ctx].exit, parameter[constant[1]]]
variable[deps] assign[=] <ast.IfExp object at 0x7da1b265f3d0>
variable[force] assign[=] <ast.BoolOp object at 0x7da1b265dd20>
variable[site] assign[=] call[name[create_app], parameter[]].site
if compare[constant[*] in name[collections]] begin[:]
if name[selected_nodes] begin[:]
variable[collections] assign[=] <ast.ListComp object at 0x7da1b265e680>
if <ast.UnaryOp object at 0x7da1b265f4f0> begin[:]
call[name[ctx].fail, parameter[constant[No collections have all selected nodes]]]
if name[list_nodes] begin[:]
for taget[name[col]] in starred[<ast.BoolOp object at 0x7da2044c3ca0>] begin[:]
call[name[click].echo, parameter[call[constant[{}:].format, parameter[name[col]]]]]
for taget[name[name]] in starred[call[name[sorted], parameter[call[name[site].collections][name[col]].nodes]]] begin[:]
if compare[name[name] equal[==] constant[dataset]] begin[:]
continue
call[name[click].echo, parameter[call[constant[ {}].format, parameter[name[name]]]]]
return[None]
if name[list_dependent] begin[:]
for taget[name[col]] in starred[<ast.BoolOp object at 0x7da2044c3d00>] begin[:]
call[name[click].echo, parameter[call[constant[{}:].format, parameter[name[col]]]]]
variable[dependent] assign[=] <ast.SetComp object at 0x7da2044c3e50>
for taget[name[name]] in starred[call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da2044c1690>]]] begin[:]
call[name[click].echo, parameter[call[constant[ {}].format, parameter[name[name]]]]]
return[None]
variable[errors] assign[=] list[[]]
variable[setids] assign[=] <ast.ListComp object at 0x7da2044c3f40>
if <ast.UnaryOp object at 0x7da2044c2800> begin[:]
variable[query] assign[=] call[call[call[name[db].session.query, parameter[name[Dataset].setid]].filter, parameter[call[name[Dataset].discarded.isnot, parameter[constant[True]]]]].filter, parameter[compare[call[call[name[Dataset].status.op, parameter[constant[&]]], parameter[name[STATUS_MISSING]]] equal[==] constant[0]]]]
if compare[name[collections] is_not constant[None]] begin[:]
variable[query] assign[=] call[name[query].filter, parameter[call[name[Dataset].collection.in_, parameter[name[collections]]]]]
variable[setids] assign[=] <ast.GeneratorExp object at 0x7da20c6c7400>
for taget[name[setid]] in starred[name[setids]] begin[:]
if name[IPDB] begin[:]
call[name[site].run, parameter[name[setid], name[selected_nodes], name[deps], name[force], name[keep], name[force_dependent], name[update_detail], name[update_listing], name[excluded_nodes]]]
if name[errors] begin[:]
call[name[log].error, parameter[constant[There were errors for %r], name[errors]]] | keyword[def] identifier[marvcli_run] ( identifier[ctx] , identifier[datasets] , identifier[deps] , identifier[excluded_nodes] , identifier[force] , identifier[force_dependent] ,
identifier[force_deps] , identifier[keep] , identifier[keep_going] , identifier[list_nodes] ,
identifier[list_dependent] , identifier[selected_nodes] , identifier[update_detail] ,
identifier[update_listing] , identifier[cachesize] , identifier[collections] ):
literal[string]
keyword[if] identifier[collections] keyword[and] identifier[datasets] :
identifier[ctx] . identifier[fail] ( literal[string] )
keyword[if] identifier[list_dependent] keyword[and] keyword[not] identifier[selected_nodes] :
identifier[ctx] . identifier[fail] ( literal[string] )
keyword[if] keyword[not] identifier[any] ([ identifier[datasets] , identifier[collections] , identifier[list_nodes] ]):
identifier[click] . identifier[echo] ( identifier[ctx] . identifier[get_help] ())
identifier[ctx] . identifier[exit] ( literal[int] )
identifier[deps] = literal[string] keyword[if] identifier[force_deps] keyword[else] identifier[deps]
identifier[force] = identifier[force_deps] keyword[or] identifier[force]
identifier[site] = identifier[create_app] (). identifier[site]
keyword[if] literal[string] keyword[in] identifier[collections] :
keyword[if] identifier[selected_nodes] :
identifier[collections] =[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[site] . identifier[collections] . identifier[items] ()
keyword[if] identifier[set] ( identifier[v] . identifier[nodes] ). identifier[issuperset] ( identifier[selected_nodes] )]
keyword[if] keyword[not] identifier[collections] :
identifier[ctx] . identifier[fail] ( literal[string] )
keyword[else] :
identifier[collections] = keyword[None]
keyword[else] :
keyword[for] identifier[col] keyword[in] identifier[collections] :
keyword[if] identifier[col] keyword[not] keyword[in] identifier[site] . identifier[collections] :
identifier[ctx] . identifier[fail] ( literal[string] . identifier[format] ( identifier[col] ))
keyword[if] identifier[list_nodes] :
keyword[for] identifier[col] keyword[in] ( identifier[collections] keyword[or] identifier[sorted] ( identifier[site] . identifier[collections] . identifier[keys] ())):
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[col] ))
keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[site] . identifier[collections] [ identifier[col] ]. identifier[nodes] ):
keyword[if] identifier[name] == literal[string] :
keyword[continue]
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[return]
keyword[if] identifier[list_dependent] :
keyword[for] identifier[col] keyword[in] ( identifier[collections] keyword[or] identifier[sorted] ( identifier[site] . identifier[collections] . identifier[keys] ())):
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[col] ))
identifier[dependent] ={ identifier[x] keyword[for] identifier[name] keyword[in] identifier[selected_nodes]
keyword[for] identifier[x] keyword[in] identifier[site] . identifier[collections] [ identifier[col] ]. identifier[nodes] [ identifier[name] ]. identifier[dependent] }
keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[x] . identifier[name] keyword[for] identifier[x] keyword[in] identifier[dependent] ):
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[return]
identifier[errors] =[]
identifier[setids] =[ identifier[SetID] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[parse_setids] ( identifier[datasets] )]
keyword[if] keyword[not] identifier[setids] :
identifier[query] = identifier[db] . identifier[session] . identifier[query] ( identifier[Dataset] . identifier[setid] ). identifier[filter] ( identifier[Dataset] . identifier[discarded] . identifier[isnot] ( keyword[True] )). identifier[filter] ( identifier[Dataset] . identifier[status] . identifier[op] ( literal[string] )( identifier[STATUS_MISSING] )== literal[int] )
keyword[if] identifier[collections] keyword[is] keyword[not] keyword[None] :
identifier[query] = identifier[query] . identifier[filter] ( identifier[Dataset] . identifier[collection] . identifier[in_] ( identifier[collections] ))
identifier[setids] =( identifier[SetID] ( identifier[x] [ literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[query] )
keyword[for] identifier[setid] keyword[in] identifier[setids] :
keyword[if] identifier[IPDB] :
identifier[site] . identifier[run] ( identifier[setid] , identifier[selected_nodes] , identifier[deps] , identifier[force] , identifier[keep] ,
identifier[force_dependent] , identifier[update_detail] , identifier[update_listing] ,
identifier[excluded_nodes] , identifier[cachesize] = identifier[cachesize] )
keyword[else] :
keyword[try] :
identifier[site] . identifier[run] ( identifier[setid] , identifier[selected_nodes] , identifier[deps] , identifier[force] , identifier[keep] ,
identifier[force_dependent] , identifier[update_detail] , identifier[update_listing] ,
identifier[excluded_nodes] , identifier[cachesize] = identifier[cachesize] )
keyword[except] identifier[UnknownNode] keyword[as] identifier[e] :
identifier[ctx] . identifier[fail] ( literal[string] . identifier[format] (* identifier[e] . identifier[args] ))
keyword[except] identifier[NoResultFound] :
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[setid] ), identifier[err] = keyword[True] )
keyword[if] keyword[not] identifier[keep_going] :
keyword[raise]
keyword[except] identifier[BaseException] keyword[as] identifier[e] :
identifier[errors] . identifier[append] ( identifier[setid] )
keyword[if] identifier[isinstance] ( identifier[e] , identifier[KeyboardInterrupt] ):
identifier[log] . identifier[warn] ( literal[string] )
keyword[raise]
keyword[elif] identifier[isinstance] ( identifier[e] , identifier[DirectoryAlreadyExists] ):
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[e] . identifier[args] [ literal[int] ]), identifier[err] = keyword[True] )
keyword[if] keyword[not] identifier[keep_going] :
identifier[ctx] . identifier[abort] ()
keyword[else] :
identifier[log] . identifier[error] ( literal[string] , identifier[setid] , identifier[exc_info] = keyword[True] )
identifier[log] . identifier[error] ( literal[string] , identifier[setid] , identifier[e] )
keyword[if] keyword[not] identifier[keep_going] :
identifier[ctx] . identifier[exit] ( literal[int] )
keyword[if] identifier[errors] :
identifier[log] . identifier[error] ( literal[string] , identifier[errors] ) | def marvcli_run(ctx, datasets, deps, excluded_nodes, force, force_dependent, force_deps, keep, keep_going, list_nodes, list_dependent, selected_nodes, update_detail, update_listing, cachesize, collections):
"""Run nodes for selected datasets.
Datasets are specified by a list of set ids, or --collection
<name>, use --collection=* to run for all collections. --node in
conjunction with --collection=* will pick those collections for
which the selected nodes are configured.
Set ids may be abbreviated to any uniquely identifying
prefix. Suffix a prefix by '+' to match multiple.
"""
if collections and datasets:
ctx.fail('--collection and DATASETS are mutually exclusive') # depends on [control=['if'], data=[]]
if list_dependent and (not selected_nodes):
ctx.fail('--list-dependent needs at least one selected --node') # depends on [control=['if'], data=[]]
if not any([datasets, collections, list_nodes]):
click.echo(ctx.get_help())
ctx.exit(1) # depends on [control=['if'], data=[]]
deps = 'force' if force_deps else deps
force = force_deps or force
site = create_app().site
if '*' in collections:
if selected_nodes:
collections = [k for (k, v) in site.collections.items() if set(v.nodes).issuperset(selected_nodes)]
if not collections:
ctx.fail('No collections have all selected nodes') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
collections = None # depends on [control=['if'], data=['collections']]
else:
for col in collections:
if col not in site.collections:
ctx.fail('Unknown collection: {}'.format(col)) # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=['col']]
if list_nodes:
for col in collections or sorted(site.collections.keys()):
click.echo('{}:'.format(col))
for name in sorted(site.collections[col].nodes):
if name == 'dataset':
continue # depends on [control=['if'], data=[]]
click.echo(' {}'.format(name)) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=['col']]
return # depends on [control=['if'], data=[]]
if list_dependent:
for col in collections or sorted(site.collections.keys()):
click.echo('{}:'.format(col))
dependent = {x for name in selected_nodes for x in site.collections[col].nodes[name].dependent}
for name in sorted((x.name for x in dependent)):
click.echo(' {}'.format(name)) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=['col']]
return # depends on [control=['if'], data=[]]
errors = []
setids = [SetID(x) for x in parse_setids(datasets)]
if not setids:
query = db.session.query(Dataset.setid).filter(Dataset.discarded.isnot(True)).filter(Dataset.status.op('&')(STATUS_MISSING) == 0)
if collections is not None:
query = query.filter(Dataset.collection.in_(collections)) # depends on [control=['if'], data=['collections']]
setids = (SetID(x[0]) for x in query) # depends on [control=['if'], data=[]]
for setid in setids:
if IPDB:
site.run(setid, selected_nodes, deps, force, keep, force_dependent, update_detail, update_listing, excluded_nodes, cachesize=cachesize) # depends on [control=['if'], data=[]]
else:
try:
site.run(setid, selected_nodes, deps, force, keep, force_dependent, update_detail, update_listing, excluded_nodes, cachesize=cachesize) # depends on [control=['try'], data=[]]
except UnknownNode as e:
ctx.fail('Collection {} has no node {}'.format(*e.args)) # depends on [control=['except'], data=['e']]
except NoResultFound:
click.echo('ERROR: unknown {!r}'.format(setid), err=True)
if not keep_going:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
except BaseException as e:
errors.append(setid)
if isinstance(e, KeyboardInterrupt):
log.warn('KeyboardInterrupt: aborting')
raise # depends on [control=['if'], data=[]]
elif isinstance(e, DirectoryAlreadyExists):
click.echo('\nERROR: Directory for node run already exists:\n{!r}\nIn case no other node run is in progress, this is a bug which you are kindly\nasked to report, providing information regarding any previous, failed node runs.\n'.format(e.args[0]), err=True)
if not keep_going:
ctx.abort() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
log.error('Exception occured for dataset %s:', setid, exc_info=True)
log.error('Error occured for dataset %s: %s', setid, e)
if not keep_going:
ctx.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['setid']]
if errors:
log.error('There were errors for %r', errors) # depends on [control=['if'], data=[]] |
def find_contiguous(*caches):
"""Separate one or more cache entry lists into time-contiguous sub-lists
Parameters
----------
*caches : `list`
One or more lists of file paths
(`str` or :class:`~lal.utils.CacheEntry`).
Returns
-------
caches : `iter` of `list`
an interable yielding each contiguous cache
"""
flat = flatten(*caches)
for segment in cache_segments(flat):
yield sieve(flat, segment=segment) | def function[find_contiguous, parameter[]]:
constant[Separate one or more cache entry lists into time-contiguous sub-lists
Parameters
----------
*caches : `list`
One or more lists of file paths
(`str` or :class:`~lal.utils.CacheEntry`).
Returns
-------
caches : `iter` of `list`
an interable yielding each contiguous cache
]
variable[flat] assign[=] call[name[flatten], parameter[<ast.Starred object at 0x7da18f09d330>]]
for taget[name[segment]] in starred[call[name[cache_segments], parameter[name[flat]]]] begin[:]
<ast.Yield object at 0x7da18f09f1f0> | keyword[def] identifier[find_contiguous] (* identifier[caches] ):
literal[string]
identifier[flat] = identifier[flatten] (* identifier[caches] )
keyword[for] identifier[segment] keyword[in] identifier[cache_segments] ( identifier[flat] ):
keyword[yield] identifier[sieve] ( identifier[flat] , identifier[segment] = identifier[segment] ) | def find_contiguous(*caches):
"""Separate one or more cache entry lists into time-contiguous sub-lists
Parameters
----------
*caches : `list`
One or more lists of file paths
(`str` or :class:`~lal.utils.CacheEntry`).
Returns
-------
caches : `iter` of `list`
an interable yielding each contiguous cache
"""
flat = flatten(*caches)
for segment in cache_segments(flat):
yield sieve(flat, segment=segment) # depends on [control=['for'], data=['segment']] |
def gen_mul(src1, src2, dst):
"""Return a MUL instruction.
"""
assert src1.size == src2.size
return ReilBuilder.build(ReilMnemonic.MUL, src1, src2, dst) | def function[gen_mul, parameter[src1, src2, dst]]:
constant[Return a MUL instruction.
]
assert[compare[name[src1].size equal[==] name[src2].size]]
return[call[name[ReilBuilder].build, parameter[name[ReilMnemonic].MUL, name[src1], name[src2], name[dst]]]] | keyword[def] identifier[gen_mul] ( identifier[src1] , identifier[src2] , identifier[dst] ):
literal[string]
keyword[assert] identifier[src1] . identifier[size] == identifier[src2] . identifier[size]
keyword[return] identifier[ReilBuilder] . identifier[build] ( identifier[ReilMnemonic] . identifier[MUL] , identifier[src1] , identifier[src2] , identifier[dst] ) | def gen_mul(src1, src2, dst):
"""Return a MUL instruction.
"""
assert src1.size == src2.size
return ReilBuilder.build(ReilMnemonic.MUL, src1, src2, dst) |
def duplicate_verts(script):
""" "Check for every vertex on the mesh: if there are two vertices with
the same coordinates they are merged into a single one.
Args:
script: the FilterScript object or script filename to write
the filter to.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
if script.ml_version == '1.3.4BETA':
filter_xml = ' <filter name="Remove Duplicated Vertex"/>\n'
else:
filter_xml = ' <filter name="Remove Duplicate Vertices"/>\n'
util.write_filter(script, filter_xml)
return None | def function[duplicate_verts, parameter[script]]:
constant[ "Check for every vertex on the mesh: if there are two vertices with
the same coordinates they are merged into a single one.
Args:
script: the FilterScript object or script filename to write
the filter to.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
]
if compare[name[script].ml_version equal[==] constant[1.3.4BETA]] begin[:]
variable[filter_xml] assign[=] constant[ <filter name="Remove Duplicated Vertex"/>
]
call[name[util].write_filter, parameter[name[script], name[filter_xml]]]
return[constant[None]] | keyword[def] identifier[duplicate_verts] ( identifier[script] ):
literal[string]
keyword[if] identifier[script] . identifier[ml_version] == literal[string] :
identifier[filter_xml] = literal[string]
keyword[else] :
identifier[filter_xml] = literal[string]
identifier[util] . identifier[write_filter] ( identifier[script] , identifier[filter_xml] )
keyword[return] keyword[None] | def duplicate_verts(script):
""" "Check for every vertex on the mesh: if there are two vertices with
the same coordinates they are merged into a single one.
Args:
script: the FilterScript object or script filename to write
the filter to.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
if script.ml_version == '1.3.4BETA':
filter_xml = ' <filter name="Remove Duplicated Vertex"/>\n' # depends on [control=['if'], data=[]]
else:
filter_xml = ' <filter name="Remove Duplicate Vertices"/>\n'
util.write_filter(script, filter_xml)
return None |
def ancestors(self):
"""Returns a list of the ancestors of this node."""
ancestors = set([])
self._depth_ascend(self, ancestors)
try:
ancestors.remove(self)
except KeyError:
# we weren't ancestor of ourself, that's ok
pass
return list(ancestors) | def function[ancestors, parameter[self]]:
constant[Returns a list of the ancestors of this node.]
variable[ancestors] assign[=] call[name[set], parameter[list[[]]]]
call[name[self]._depth_ascend, parameter[name[self], name[ancestors]]]
<ast.Try object at 0x7da1b0048e20>
return[call[name[list], parameter[name[ancestors]]]] | keyword[def] identifier[ancestors] ( identifier[self] ):
literal[string]
identifier[ancestors] = identifier[set] ([])
identifier[self] . identifier[_depth_ascend] ( identifier[self] , identifier[ancestors] )
keyword[try] :
identifier[ancestors] . identifier[remove] ( identifier[self] )
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[return] identifier[list] ( identifier[ancestors] ) | def ancestors(self):
"""Returns a list of the ancestors of this node."""
ancestors = set([])
self._depth_ascend(self, ancestors)
try:
ancestors.remove(self) # depends on [control=['try'], data=[]]
except KeyError:
# we weren't ancestor of ourself, that's ok
pass # depends on [control=['except'], data=[]]
return list(ancestors) |
def process_failure(project):
"""
If any scan operations register a failure, sys.exit(1) is called
to allow build to register a failure
"""
if failure:
lists = get_lists.GetLists()
report_url = lists.report_url(project)
if report_url:
print(report_url)
sys.exit(1) | def function[process_failure, parameter[project]]:
constant[
If any scan operations register a failure, sys.exit(1) is called
to allow build to register a failure
]
if name[failure] begin[:]
variable[lists] assign[=] call[name[get_lists].GetLists, parameter[]]
variable[report_url] assign[=] call[name[lists].report_url, parameter[name[project]]]
if name[report_url] begin[:]
call[name[print], parameter[name[report_url]]]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[process_failure] ( identifier[project] ):
literal[string]
keyword[if] identifier[failure] :
identifier[lists] = identifier[get_lists] . identifier[GetLists] ()
identifier[report_url] = identifier[lists] . identifier[report_url] ( identifier[project] )
keyword[if] identifier[report_url] :
identifier[print] ( identifier[report_url] )
identifier[sys] . identifier[exit] ( literal[int] ) | def process_failure(project):
"""
If any scan operations register a failure, sys.exit(1) is called
to allow build to register a failure
"""
if failure:
lists = get_lists.GetLists()
report_url = lists.report_url(project)
if report_url:
print(report_url) # depends on [control=['if'], data=[]]
sys.exit(1) # depends on [control=['if'], data=[]] |
def as_rgb(self):
"""
Returns a tuple (r, g, b) of the color.
"""
html = self.as_html()
return (
int(html[1:3], 16),
int(html[3:5], 16),
int(html[5:7], 16)
) | def function[as_rgb, parameter[self]]:
constant[
Returns a tuple (r, g, b) of the color.
]
variable[html] assign[=] call[name[self].as_html, parameter[]]
return[tuple[[<ast.Call object at 0x7da1b2344bb0>, <ast.Call object at 0x7da1b2346980>, <ast.Call object at 0x7da1b2346da0>]]] | keyword[def] identifier[as_rgb] ( identifier[self] ):
literal[string]
identifier[html] = identifier[self] . identifier[as_html] ()
keyword[return] (
identifier[int] ( identifier[html] [ literal[int] : literal[int] ], literal[int] ),
identifier[int] ( identifier[html] [ literal[int] : literal[int] ], literal[int] ),
identifier[int] ( identifier[html] [ literal[int] : literal[int] ], literal[int] )
) | def as_rgb(self):
"""
Returns a tuple (r, g, b) of the color.
"""
html = self.as_html()
return (int(html[1:3], 16), int(html[3:5], 16), int(html[5:7], 16)) |
def occ_issues_lookup(issue=None, code=None):
'''
Lookup occurrence issue definitions and short codes
:param issue: Full name of issue, e.g, CONTINENT_COUNTRY_MISMATCH
:param code: an issue short code, e.g. ccm
Usage
pygbif.occ_issues_lookup(issue = 'CONTINENT_COUNTRY_MISMATCH')
pygbif.occ_issues_lookup(issue = 'MULTIMEDIA_DATE_INVALID')
pygbif.occ_issues_lookup(issue = 'ZERO_COORDINATE')
pygbif.occ_issues_lookup(code = 'cdiv')
'''
if code is None:
bb = [trymatch(issue, x) for x in gbifissues['issue'] ]
tmp = filter(None, bb)
else:
bb = [trymatch(code, x) for x in gbifissues['code'] ]
tmp = filter(None, bb)
return tmp | def function[occ_issues_lookup, parameter[issue, code]]:
constant[
Lookup occurrence issue definitions and short codes
:param issue: Full name of issue, e.g, CONTINENT_COUNTRY_MISMATCH
:param code: an issue short code, e.g. ccm
Usage
pygbif.occ_issues_lookup(issue = 'CONTINENT_COUNTRY_MISMATCH')
pygbif.occ_issues_lookup(issue = 'MULTIMEDIA_DATE_INVALID')
pygbif.occ_issues_lookup(issue = 'ZERO_COORDINATE')
pygbif.occ_issues_lookup(code = 'cdiv')
]
if compare[name[code] is constant[None]] begin[:]
variable[bb] assign[=] <ast.ListComp object at 0x7da1b10efb20>
variable[tmp] assign[=] call[name[filter], parameter[constant[None], name[bb]]]
return[name[tmp]] | keyword[def] identifier[occ_issues_lookup] ( identifier[issue] = keyword[None] , identifier[code] = keyword[None] ):
literal[string]
keyword[if] identifier[code] keyword[is] keyword[None] :
identifier[bb] =[ identifier[trymatch] ( identifier[issue] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[gbifissues] [ literal[string] ]]
identifier[tmp] = identifier[filter] ( keyword[None] , identifier[bb] )
keyword[else] :
identifier[bb] =[ identifier[trymatch] ( identifier[code] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[gbifissues] [ literal[string] ]]
identifier[tmp] = identifier[filter] ( keyword[None] , identifier[bb] )
keyword[return] identifier[tmp] | def occ_issues_lookup(issue=None, code=None):
"""
Lookup occurrence issue definitions and short codes
:param issue: Full name of issue, e.g, CONTINENT_COUNTRY_MISMATCH
:param code: an issue short code, e.g. ccm
Usage
pygbif.occ_issues_lookup(issue = 'CONTINENT_COUNTRY_MISMATCH')
pygbif.occ_issues_lookup(issue = 'MULTIMEDIA_DATE_INVALID')
pygbif.occ_issues_lookup(issue = 'ZERO_COORDINATE')
pygbif.occ_issues_lookup(code = 'cdiv')
"""
if code is None:
bb = [trymatch(issue, x) for x in gbifissues['issue']]
tmp = filter(None, bb) # depends on [control=['if'], data=[]]
else:
bb = [trymatch(code, x) for x in gbifissues['code']]
tmp = filter(None, bb)
return tmp |
def serve_rpc(self):
"""Launches configured # of workers per loaded plugin."""
if cfg.CONF.QUARK_ASYNC.rpc_workers < 1:
cfg.CONF.set_override('rpc_workers', 1, "QUARK_ASYNC")
try:
rpc = service.RpcWorker(self.plugins)
launcher = common_service.ProcessLauncher(CONF, wait_interval=1.0)
launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers)
return launcher
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unrecoverable error: please check log for '
'details.')) | def function[serve_rpc, parameter[self]]:
constant[Launches configured # of workers per loaded plugin.]
if compare[name[cfg].CONF.QUARK_ASYNC.rpc_workers less[<] constant[1]] begin[:]
call[name[cfg].CONF.set_override, parameter[constant[rpc_workers], constant[1], constant[QUARK_ASYNC]]]
<ast.Try object at 0x7da18f09e590> | keyword[def] identifier[serve_rpc] ( identifier[self] ):
literal[string]
keyword[if] identifier[cfg] . identifier[CONF] . identifier[QUARK_ASYNC] . identifier[rpc_workers] < literal[int] :
identifier[cfg] . identifier[CONF] . identifier[set_override] ( literal[string] , literal[int] , literal[string] )
keyword[try] :
identifier[rpc] = identifier[service] . identifier[RpcWorker] ( identifier[self] . identifier[plugins] )
identifier[launcher] = identifier[common_service] . identifier[ProcessLauncher] ( identifier[CONF] , identifier[wait_interval] = literal[int] )
identifier[launcher] . identifier[launch_service] ( identifier[rpc] , identifier[workers] = identifier[CONF] . identifier[QUARK_ASYNC] . identifier[rpc_workers] )
keyword[return] identifier[launcher]
keyword[except] identifier[Exception] :
keyword[with] identifier[excutils] . identifier[save_and_reraise_exception] ():
identifier[LOG] . identifier[exception] ( identifier[_LE] ( literal[string]
literal[string] )) | def serve_rpc(self):
"""Launches configured # of workers per loaded plugin."""
if cfg.CONF.QUARK_ASYNC.rpc_workers < 1:
cfg.CONF.set_override('rpc_workers', 1, 'QUARK_ASYNC') # depends on [control=['if'], data=[]]
try:
rpc = service.RpcWorker(self.plugins)
launcher = common_service.ProcessLauncher(CONF, wait_interval=1.0)
launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers)
return launcher # depends on [control=['try'], data=[]]
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unrecoverable error: please check log for details.')) # depends on [control=['with'], data=[]] # depends on [control=['except'], data=[]] |
def add_doc(self, doc, index_update=True, label_guesser_update=True):
"""
Add a document to the index
"""
if not self.index_writer and index_update:
self.index_writer = self.index.writer()
if not self.label_guesser_updater and label_guesser_update:
self.label_guesser_updater = self.label_guesser.get_updater()
logger.info("Indexing new doc: %s" % doc)
if index_update:
self._update_doc_in_index(self.index_writer, doc)
if label_guesser_update:
self.label_guesser_updater.add_doc(doc)
if doc.docid not in self._docs_by_id:
self._docs_by_id[doc.docid] = doc | def function[add_doc, parameter[self, doc, index_update, label_guesser_update]]:
constant[
Add a document to the index
]
if <ast.BoolOp object at 0x7da18f58ea40> begin[:]
name[self].index_writer assign[=] call[name[self].index.writer, parameter[]]
if <ast.BoolOp object at 0x7da20e9b1750> begin[:]
name[self].label_guesser_updater assign[=] call[name[self].label_guesser.get_updater, parameter[]]
call[name[logger].info, parameter[binary_operation[constant[Indexing new doc: %s] <ast.Mod object at 0x7da2590d6920> name[doc]]]]
if name[index_update] begin[:]
call[name[self]._update_doc_in_index, parameter[name[self].index_writer, name[doc]]]
if name[label_guesser_update] begin[:]
call[name[self].label_guesser_updater.add_doc, parameter[name[doc]]]
if compare[name[doc].docid <ast.NotIn object at 0x7da2590d7190> name[self]._docs_by_id] begin[:]
call[name[self]._docs_by_id][name[doc].docid] assign[=] name[doc] | keyword[def] identifier[add_doc] ( identifier[self] , identifier[doc] , identifier[index_update] = keyword[True] , identifier[label_guesser_update] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[index_writer] keyword[and] identifier[index_update] :
identifier[self] . identifier[index_writer] = identifier[self] . identifier[index] . identifier[writer] ()
keyword[if] keyword[not] identifier[self] . identifier[label_guesser_updater] keyword[and] identifier[label_guesser_update] :
identifier[self] . identifier[label_guesser_updater] = identifier[self] . identifier[label_guesser] . identifier[get_updater] ()
identifier[logger] . identifier[info] ( literal[string] % identifier[doc] )
keyword[if] identifier[index_update] :
identifier[self] . identifier[_update_doc_in_index] ( identifier[self] . identifier[index_writer] , identifier[doc] )
keyword[if] identifier[label_guesser_update] :
identifier[self] . identifier[label_guesser_updater] . identifier[add_doc] ( identifier[doc] )
keyword[if] identifier[doc] . identifier[docid] keyword[not] keyword[in] identifier[self] . identifier[_docs_by_id] :
identifier[self] . identifier[_docs_by_id] [ identifier[doc] . identifier[docid] ]= identifier[doc] | def add_doc(self, doc, index_update=True, label_guesser_update=True):
"""
Add a document to the index
"""
if not self.index_writer and index_update:
self.index_writer = self.index.writer() # depends on [control=['if'], data=[]]
if not self.label_guesser_updater and label_guesser_update:
self.label_guesser_updater = self.label_guesser.get_updater() # depends on [control=['if'], data=[]]
logger.info('Indexing new doc: %s' % doc)
if index_update:
self._update_doc_in_index(self.index_writer, doc) # depends on [control=['if'], data=[]]
if label_guesser_update:
self.label_guesser_updater.add_doc(doc) # depends on [control=['if'], data=[]]
if doc.docid not in self._docs_by_id:
self._docs_by_id[doc.docid] = doc # depends on [control=['if'], data=[]] |
def get_password(request, mapping) -> None:
"""
Resolve the given credential request in the provided mapping definition.
The result is printed automatically.
Args:
request:
The credential request specified as a dict of key-value pairs.
mapping:
The mapping configuration as a ConfigParser instance.
"""
LOGGER.debug('Received request "%s"', request)
if 'host' not in request:
LOGGER.error('host= entry missing in request. '
'Cannot query without a host')
return
host = request['host']
if 'path' in request:
host = '/'.join([host, request['path']])
def skip(line, skip):
return line[skip:]
LOGGER.debug('Iterating mapping to match against host "%s"', host)
for section in mapping.sections():
if fnmatch.fnmatch(host, section):
LOGGER.debug('Section "%s" matches requested host "%s"',
section, host)
# TODO handle exceptions
pass_target = mapping.get(section, 'target').replace(
"${host}", request['host'])
password_extractor = SpecificLineExtractor(
0, 0, option_suffix='_password')
password_extractor.configure(mapping[section])
# username_extractor = SpecificLineExtractor(
# 1, 0, option_suffix='_username')
username_extractor = _username_extractors[mapping[section].get(
'username_extractor', fallback=_line_extractor_name)]
username_extractor.configure(mapping[section])
LOGGER.debug('Requesting entry "%s" from pass', pass_target)
output = subprocess.check_output(
['pass', 'show', pass_target]).decode('utf-8')
lines = output.splitlines()
password = password_extractor.get_value(pass_target, lines)
username = username_extractor.get_value(pass_target, lines)
if password:
print('password={password}'.format( # noqa: T001
password=password))
if 'username' not in request and username:
print('username={username}'.format( # noqa: T001
username=username))
return
LOGGER.warning('No mapping matched')
sys.exit(1) | def function[get_password, parameter[request, mapping]]:
constant[
Resolve the given credential request in the provided mapping definition.
The result is printed automatically.
Args:
request:
The credential request specified as a dict of key-value pairs.
mapping:
The mapping configuration as a ConfigParser instance.
]
call[name[LOGGER].debug, parameter[constant[Received request "%s"], name[request]]]
if compare[constant[host] <ast.NotIn object at 0x7da2590d7190> name[request]] begin[:]
call[name[LOGGER].error, parameter[constant[host= entry missing in request. Cannot query without a host]]]
return[None]
variable[host] assign[=] call[name[request]][constant[host]]
if compare[constant[path] in name[request]] begin[:]
variable[host] assign[=] call[constant[/].join, parameter[list[[<ast.Name object at 0x7da18f58d9f0>, <ast.Subscript object at 0x7da18f58d150>]]]]
def function[skip, parameter[line, skip]]:
return[call[name[line]][<ast.Slice object at 0x7da18f58e0e0>]]
call[name[LOGGER].debug, parameter[constant[Iterating mapping to match against host "%s"], name[host]]]
for taget[name[section]] in starred[call[name[mapping].sections, parameter[]]] begin[:]
if call[name[fnmatch].fnmatch, parameter[name[host], name[section]]] begin[:]
call[name[LOGGER].debug, parameter[constant[Section "%s" matches requested host "%s"], name[section], name[host]]]
variable[pass_target] assign[=] call[call[name[mapping].get, parameter[name[section], constant[target]]].replace, parameter[constant[${host}], call[name[request]][constant[host]]]]
variable[password_extractor] assign[=] call[name[SpecificLineExtractor], parameter[constant[0], constant[0]]]
call[name[password_extractor].configure, parameter[call[name[mapping]][name[section]]]]
variable[username_extractor] assign[=] call[name[_username_extractors]][call[call[name[mapping]][name[section]].get, parameter[constant[username_extractor]]]]
call[name[username_extractor].configure, parameter[call[name[mapping]][name[section]]]]
call[name[LOGGER].debug, parameter[constant[Requesting entry "%s" from pass], name[pass_target]]]
variable[output] assign[=] call[call[name[subprocess].check_output, parameter[list[[<ast.Constant object at 0x7da18dc05900>, <ast.Constant object at 0x7da18dc06200>, <ast.Name object at 0x7da18dc059c0>]]]].decode, parameter[constant[utf-8]]]
variable[lines] assign[=] call[name[output].splitlines, parameter[]]
variable[password] assign[=] call[name[password_extractor].get_value, parameter[name[pass_target], name[lines]]]
variable[username] assign[=] call[name[username_extractor].get_value, parameter[name[pass_target], name[lines]]]
if name[password] begin[:]
call[name[print], parameter[call[constant[password={password}].format, parameter[]]]]
if <ast.BoolOp object at 0x7da18dc05180> begin[:]
call[name[print], parameter[call[constant[username={username}].format, parameter[]]]]
return[None]
call[name[LOGGER].warning, parameter[constant[No mapping matched]]]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[get_password] ( identifier[request] , identifier[mapping] )-> keyword[None] :
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[request] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[request] :
identifier[LOGGER] . identifier[error] ( literal[string]
literal[string] )
keyword[return]
identifier[host] = identifier[request] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[request] :
identifier[host] = literal[string] . identifier[join] ([ identifier[host] , identifier[request] [ literal[string] ]])
keyword[def] identifier[skip] ( identifier[line] , identifier[skip] ):
keyword[return] identifier[line] [ identifier[skip] :]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[host] )
keyword[for] identifier[section] keyword[in] identifier[mapping] . identifier[sections] ():
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[host] , identifier[section] ):
identifier[LOGGER] . identifier[debug] ( literal[string] ,
identifier[section] , identifier[host] )
identifier[pass_target] = identifier[mapping] . identifier[get] ( identifier[section] , literal[string] ). identifier[replace] (
literal[string] , identifier[request] [ literal[string] ])
identifier[password_extractor] = identifier[SpecificLineExtractor] (
literal[int] , literal[int] , identifier[option_suffix] = literal[string] )
identifier[password_extractor] . identifier[configure] ( identifier[mapping] [ identifier[section] ])
identifier[username_extractor] = identifier[_username_extractors] [ identifier[mapping] [ identifier[section] ]. identifier[get] (
literal[string] , identifier[fallback] = identifier[_line_extractor_name] )]
identifier[username_extractor] . identifier[configure] ( identifier[mapping] [ identifier[section] ])
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[pass_target] )
identifier[output] = identifier[subprocess] . identifier[check_output] (
[ literal[string] , literal[string] , identifier[pass_target] ]). identifier[decode] ( literal[string] )
identifier[lines] = identifier[output] . identifier[splitlines] ()
identifier[password] = identifier[password_extractor] . identifier[get_value] ( identifier[pass_target] , identifier[lines] )
identifier[username] = identifier[username_extractor] . identifier[get_value] ( identifier[pass_target] , identifier[lines] )
keyword[if] identifier[password] :
identifier[print] ( literal[string] . identifier[format] (
identifier[password] = identifier[password] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[request] keyword[and] identifier[username] :
identifier[print] ( literal[string] . identifier[format] (
identifier[username] = identifier[username] ))
keyword[return]
identifier[LOGGER] . identifier[warning] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] ) | def get_password(request, mapping) -> None:
"""
Resolve the given credential request in the provided mapping definition.
The result is printed automatically.
Args:
request:
The credential request specified as a dict of key-value pairs.
mapping:
The mapping configuration as a ConfigParser instance.
"""
LOGGER.debug('Received request "%s"', request)
if 'host' not in request:
LOGGER.error('host= entry missing in request. Cannot query without a host')
return # depends on [control=['if'], data=[]]
host = request['host']
if 'path' in request:
host = '/'.join([host, request['path']]) # depends on [control=['if'], data=['request']]
def skip(line, skip):
return line[skip:]
LOGGER.debug('Iterating mapping to match against host "%s"', host)
for section in mapping.sections():
if fnmatch.fnmatch(host, section):
LOGGER.debug('Section "%s" matches requested host "%s"', section, host)
# TODO handle exceptions
pass_target = mapping.get(section, 'target').replace('${host}', request['host'])
password_extractor = SpecificLineExtractor(0, 0, option_suffix='_password')
password_extractor.configure(mapping[section])
# username_extractor = SpecificLineExtractor(
# 1, 0, option_suffix='_username')
username_extractor = _username_extractors[mapping[section].get('username_extractor', fallback=_line_extractor_name)]
username_extractor.configure(mapping[section])
LOGGER.debug('Requesting entry "%s" from pass', pass_target)
output = subprocess.check_output(['pass', 'show', pass_target]).decode('utf-8')
lines = output.splitlines()
password = password_extractor.get_value(pass_target, lines)
username = username_extractor.get_value(pass_target, lines)
if password: # noqa: T001
print('password={password}'.format(password=password)) # depends on [control=['if'], data=[]]
if 'username' not in request and username: # noqa: T001
print('username={username}'.format(username=username)) # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['section']]
LOGGER.warning('No mapping matched')
sys.exit(1) |
def encodeSpecialChars(self, input):
"""Do a global encoding of a string, replacing the predefined
entities this routine is reentrant, and result must be
deallocated. """
ret = libxml2mod.xmlEncodeSpecialChars(self._o, input)
return ret | def function[encodeSpecialChars, parameter[self, input]]:
constant[Do a global encoding of a string, replacing the predefined
entities this routine is reentrant, and result must be
deallocated. ]
variable[ret] assign[=] call[name[libxml2mod].xmlEncodeSpecialChars, parameter[name[self]._o, name[input]]]
return[name[ret]] | keyword[def] identifier[encodeSpecialChars] ( identifier[self] , identifier[input] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlEncodeSpecialChars] ( identifier[self] . identifier[_o] , identifier[input] )
keyword[return] identifier[ret] | def encodeSpecialChars(self, input):
"""Do a global encoding of a string, replacing the predefined
entities this routine is reentrant, and result must be
deallocated. """
ret = libxml2mod.xmlEncodeSpecialChars(self._o, input)
return ret |
def plot_cumulative_density(self, **kwargs):
"""
Plots a pretty figure of {0}.{1}
Matplotlib plot arguments can be passed in inside the kwargs, plus
Parameters
-----------
show_censors: bool
place markers at censorship events. Default: False
censor_styles: bool
If show_censors, this dictionary will be passed into the plot call.
ci_alpha: bool
the transparency level of the confidence interval. Default: 0.3
ci_force_lines: bool
force the confidence intervals to be line plots (versus default shaded areas). Default: False
ci_show: bool
show confidence intervals. Default: True
ci_legend: bool
if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False
at_risk_counts: bool
show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False
loc: slice
specify a time-based subsection of the curves to plot, ex:
>>> model.plot(loc=slice(0.,10.))
will plot the time values between t=0. and t=10.
iloc: slice
specify a location-based subsection of the curves to plot, ex:
>>> model.plot(iloc=slice(0,10))
will plot the first 10 time points.
invert_y_axis: bool
boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``)
Returns
-------
ax:
a pyplot axis object
"""
return _plot_estimate(
self,
estimate=self.cumulative_density_,
confidence_intervals=self.confidence_interval_cumulative_density_,
**kwargs
) | def function[plot_cumulative_density, parameter[self]]:
constant[
Plots a pretty figure of {0}.{1}
Matplotlib plot arguments can be passed in inside the kwargs, plus
Parameters
-----------
show_censors: bool
place markers at censorship events. Default: False
censor_styles: bool
If show_censors, this dictionary will be passed into the plot call.
ci_alpha: bool
the transparency level of the confidence interval. Default: 0.3
ci_force_lines: bool
force the confidence intervals to be line plots (versus default shaded areas). Default: False
ci_show: bool
show confidence intervals. Default: True
ci_legend: bool
if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False
at_risk_counts: bool
show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False
loc: slice
specify a time-based subsection of the curves to plot, ex:
>>> model.plot(loc=slice(0.,10.))
will plot the time values between t=0. and t=10.
iloc: slice
specify a location-based subsection of the curves to plot, ex:
>>> model.plot(iloc=slice(0,10))
will plot the first 10 time points.
invert_y_axis: bool
boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``)
Returns
-------
ax:
a pyplot axis object
]
return[call[name[_plot_estimate], parameter[name[self]]]] | keyword[def] identifier[plot_cumulative_density] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[_plot_estimate] (
identifier[self] ,
identifier[estimate] = identifier[self] . identifier[cumulative_density_] ,
identifier[confidence_intervals] = identifier[self] . identifier[confidence_interval_cumulative_density_] ,
** identifier[kwargs]
) | def plot_cumulative_density(self, **kwargs):
"""
Plots a pretty figure of {0}.{1}
Matplotlib plot arguments can be passed in inside the kwargs, plus
Parameters
-----------
show_censors: bool
place markers at censorship events. Default: False
censor_styles: bool
If show_censors, this dictionary will be passed into the plot call.
ci_alpha: bool
the transparency level of the confidence interval. Default: 0.3
ci_force_lines: bool
force the confidence intervals to be line plots (versus default shaded areas). Default: False
ci_show: bool
show confidence intervals. Default: True
ci_legend: bool
if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False
at_risk_counts: bool
show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False
loc: slice
specify a time-based subsection of the curves to plot, ex:
>>> model.plot(loc=slice(0.,10.))
will plot the time values between t=0. and t=10.
iloc: slice
specify a location-based subsection of the curves to plot, ex:
>>> model.plot(iloc=slice(0,10))
will plot the first 10 time points.
invert_y_axis: bool
boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``)
Returns
-------
ax:
a pyplot axis object
"""
return _plot_estimate(self, estimate=self.cumulative_density_, confidence_intervals=self.confidence_interval_cumulative_density_, **kwargs) |
def remove_obsolete_items(self):
"""Removing obsolete items"""
self.rdata = [(filename, data) for filename, data in self.rdata
if is_module_or_package(filename)] | def function[remove_obsolete_items, parameter[self]]:
constant[Removing obsolete items]
name[self].rdata assign[=] <ast.ListComp object at 0x7da2045647c0> | keyword[def] identifier[remove_obsolete_items] ( identifier[self] ):
literal[string]
identifier[self] . identifier[rdata] =[( identifier[filename] , identifier[data] ) keyword[for] identifier[filename] , identifier[data] keyword[in] identifier[self] . identifier[rdata]
keyword[if] identifier[is_module_or_package] ( identifier[filename] )] | def remove_obsolete_items(self):
"""Removing obsolete items"""
self.rdata = [(filename, data) for (filename, data) in self.rdata if is_module_or_package(filename)] |
def append(self, filename, content, binary=False):
"""Appends given content to given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
if binary:
flags = 'ab'
else:
flags = 'a'
with open(fn, 'a') as f:
f.write(content)
return True | def function[append, parameter[self, filename, content, binary]]:
constant[Appends given content to given filename.]
call[name[self]._raise_if_none, parameter[]]
variable[fn] assign[=] call[name[path_join], parameter[name[self].path, name[filename]]]
if name[binary] begin[:]
variable[flags] assign[=] constant[ab]
with call[name[open], parameter[name[fn], constant[a]]] begin[:]
call[name[f].write, parameter[name[content]]]
return[constant[True]] | keyword[def] identifier[append] ( identifier[self] , identifier[filename] , identifier[content] , identifier[binary] = keyword[False] ):
literal[string]
identifier[self] . identifier[_raise_if_none] ()
identifier[fn] = identifier[path_join] ( identifier[self] . identifier[path] , identifier[filename] )
keyword[if] identifier[binary] :
identifier[flags] = literal[string]
keyword[else] :
identifier[flags] = literal[string]
keyword[with] identifier[open] ( identifier[fn] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[content] )
keyword[return] keyword[True] | def append(self, filename, content, binary=False):
"""Appends given content to given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
if binary:
flags = 'ab' # depends on [control=['if'], data=[]]
else:
flags = 'a'
with open(fn, 'a') as f:
f.write(content)
return True # depends on [control=['with'], data=['f']] |
def _validate_simple_fault_geometry(self, node, _float_re):
"""
Validates a node representation of a simple fault geometry
"""
try:
# Parse the geometry
coords = split_coords_2d(~node.LineString.posList)
trace = geo.Line([geo.Point(*p) for p in coords])
except ValueError:
# If the geometry cannot be created then use the LogicTreeError
# to point the user to the incorrect node. Hence, if trace is
# compiled successfully then len(trace) is True, otherwise it is
# False
trace = []
if len(trace):
return
raise LogicTreeError(
node, self.filename,
"'simpleFaultGeometry' node is not valid") | def function[_validate_simple_fault_geometry, parameter[self, node, _float_re]]:
constant[
Validates a node representation of a simple fault geometry
]
<ast.Try object at 0x7da20c9907f0>
if call[name[len], parameter[name[trace]]] begin[:]
return[None]
<ast.Raise object at 0x7da2054a7c70> | keyword[def] identifier[_validate_simple_fault_geometry] ( identifier[self] , identifier[node] , identifier[_float_re] ):
literal[string]
keyword[try] :
identifier[coords] = identifier[split_coords_2d] (~ identifier[node] . identifier[LineString] . identifier[posList] )
identifier[trace] = identifier[geo] . identifier[Line] ([ identifier[geo] . identifier[Point] (* identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[coords] ])
keyword[except] identifier[ValueError] :
identifier[trace] =[]
keyword[if] identifier[len] ( identifier[trace] ):
keyword[return]
keyword[raise] identifier[LogicTreeError] (
identifier[node] , identifier[self] . identifier[filename] ,
literal[string] ) | def _validate_simple_fault_geometry(self, node, _float_re):
"""
Validates a node representation of a simple fault geometry
"""
try:
# Parse the geometry
coords = split_coords_2d(~node.LineString.posList)
trace = geo.Line([geo.Point(*p) for p in coords]) # depends on [control=['try'], data=[]]
except ValueError:
# If the geometry cannot be created then use the LogicTreeError
# to point the user to the incorrect node. Hence, if trace is
# compiled successfully then len(trace) is True, otherwise it is
# False
trace = [] # depends on [control=['except'], data=[]]
if len(trace):
return # depends on [control=['if'], data=[]]
raise LogicTreeError(node, self.filename, "'simpleFaultGeometry' node is not valid") |
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes)) | def function[_proc_gnusparse_00, parameter[self, next, pax_headers, buf]]:
constant[Process a GNU tar extended sparse header, version 0.0.
]
variable[offsets] assign[=] list[[]]
for taget[name[match]] in starred[call[name[re].finditer, parameter[constant[b'\\d+ GNU.sparse.offset=(\\d+)\\n'], name[buf]]]] begin[:]
call[name[offsets].append, parameter[call[name[int], parameter[call[name[match].group, parameter[constant[1]]]]]]]
variable[numbytes] assign[=] list[[]]
for taget[name[match]] in starred[call[name[re].finditer, parameter[constant[b'\\d+ GNU.sparse.numbytes=(\\d+)\\n'], name[buf]]]] begin[:]
call[name[numbytes].append, parameter[call[name[int], parameter[call[name[match].group, parameter[constant[1]]]]]]]
name[next].sparse assign[=] call[name[list], parameter[call[name[zip], parameter[name[offsets], name[numbytes]]]]] | keyword[def] identifier[_proc_gnusparse_00] ( identifier[self] , identifier[next] , identifier[pax_headers] , identifier[buf] ):
literal[string]
identifier[offsets] =[]
keyword[for] identifier[match] keyword[in] identifier[re] . identifier[finditer] ( literal[string] , identifier[buf] ):
identifier[offsets] . identifier[append] ( identifier[int] ( identifier[match] . identifier[group] ( literal[int] )))
identifier[numbytes] =[]
keyword[for] identifier[match] keyword[in] identifier[re] . identifier[finditer] ( literal[string] , identifier[buf] ):
identifier[numbytes] . identifier[append] ( identifier[int] ( identifier[match] . identifier[group] ( literal[int] )))
identifier[next] . identifier[sparse] = identifier[list] ( identifier[zip] ( identifier[offsets] , identifier[numbytes] )) | def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(b'\\d+ GNU.sparse.offset=(\\d+)\\n', buf):
offsets.append(int(match.group(1))) # depends on [control=['for'], data=['match']]
numbytes = []
for match in re.finditer(b'\\d+ GNU.sparse.numbytes=(\\d+)\\n', buf):
numbytes.append(int(match.group(1))) # depends on [control=['for'], data=['match']]
next.sparse = list(zip(offsets, numbytes)) |
def restore_state(self, system):
"""Called after unpickling to restore some attributes manually."""
for space in self._spaces.values():
space.restore_state(system) | def function[restore_state, parameter[self, system]]:
constant[Called after unpickling to restore some attributes manually.]
for taget[name[space]] in starred[call[name[self]._spaces.values, parameter[]]] begin[:]
call[name[space].restore_state, parameter[name[system]]] | keyword[def] identifier[restore_state] ( identifier[self] , identifier[system] ):
literal[string]
keyword[for] identifier[space] keyword[in] identifier[self] . identifier[_spaces] . identifier[values] ():
identifier[space] . identifier[restore_state] ( identifier[system] ) | def restore_state(self, system):
"""Called after unpickling to restore some attributes manually."""
for space in self._spaces.values():
space.restore_state(system) # depends on [control=['for'], data=['space']] |
def check(definition, data, *args, **kwargs):
    """Checks if the input follows the definition"""
    # Build the checker for this definition, then apply it to the data.
    return checker_factory(definition)(data, *args, **kwargs)
constant[Checks if the input follows the definition]
variable[checker] assign[=] call[name[checker_factory], parameter[name[definition]]]
return[call[name[checker], parameter[name[data], <ast.Starred object at 0x7da20e963af0>]]] | keyword[def] identifier[check] ( identifier[definition] , identifier[data] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[checker] = identifier[checker_factory] ( identifier[definition] )
keyword[return] identifier[checker] ( identifier[data] ,* identifier[args] ,** identifier[kwargs] ) | def check(definition, data, *args, **kwargs):
"""Checks if the input follows the definition"""
checker = checker_factory(definition)
return checker(data, *args, **kwargs) |
def spline_base1d(length, nr_knots=20, spline_order=5, marginal=None):
    """Computes a 1D spline basis.

    Args:
        length: int, length of each basis function.
        nr_knots: int, number of knots, i.e. number of basis functions.
        spline_order: int, order of the splines.
        marginal: array, optional. Estimate of the marginal distribution
            of the input to be fitted. If given, it is used to position the
            knots so each covers equal probability mass; otherwise knots
            are equally spaced.

    Returns:
        tuple of (basis matrix, knot vector).
    """
    if marginal is not None:
        knot_vector = knots_from_marginal(marginal, nr_knots, spline_order)
    else:
        knot_vector = augknt(np.linspace(0, length + 1, nr_knots), spline_order)
    eval_points = np.arange(1, length + 1).astype(float)
    basis = spcol(eval_points, knot_vector, spline_order)
    return basis, knot_vector
constant[Computes a 1D spline basis
Input:
length: int
length of each basis
nr_knots: int
Number of knots, i.e. number of basis functions.
spline_order: int
Order of the splines.
marginal: array, optional
Estimate of the marginal distribution of the input to be fitted.
If given, it is used to determine the positioning of knots, each
knot will cover the same amount of probability mass. If not given,
knots are equally spaced.
]
if compare[name[marginal] is constant[None]] begin[:]
variable[knots] assign[=] call[name[augknt], parameter[call[name[np].linspace, parameter[constant[0], binary_operation[name[length] + constant[1]], name[nr_knots]]], name[spline_order]]]
variable[x_eval] assign[=] call[call[name[np].arange, parameter[constant[1], binary_operation[name[length] + constant[1]]]].astype, parameter[name[float]]]
variable[Bsplines] assign[=] call[name[spcol], parameter[name[x_eval], name[knots], name[spline_order]]]
return[tuple[[<ast.Name object at 0x7da20c6aba60>, <ast.Name object at 0x7da20c6a90c0>]]] | keyword[def] identifier[spline_base1d] ( identifier[length] , identifier[nr_knots] = literal[int] , identifier[spline_order] = literal[int] , identifier[marginal] = keyword[None] ):
literal[string]
keyword[if] identifier[marginal] keyword[is] keyword[None] :
identifier[knots] = identifier[augknt] ( identifier[np] . identifier[linspace] ( literal[int] , identifier[length] + literal[int] , identifier[nr_knots] ), identifier[spline_order] )
keyword[else] :
identifier[knots] = identifier[knots_from_marginal] ( identifier[marginal] , identifier[nr_knots] , identifier[spline_order] )
identifier[x_eval] = identifier[np] . identifier[arange] ( literal[int] , identifier[length] + literal[int] ). identifier[astype] ( identifier[float] )
identifier[Bsplines] = identifier[spcol] ( identifier[x_eval] , identifier[knots] , identifier[spline_order] )
keyword[return] identifier[Bsplines] , identifier[knots] | def spline_base1d(length, nr_knots=20, spline_order=5, marginal=None):
"""Computes a 1D spline basis
Input:
length: int
length of each basis
nr_knots: int
Number of knots, i.e. number of basis functions.
spline_order: int
Order of the splines.
marginal: array, optional
Estimate of the marginal distribution of the input to be fitted.
If given, it is used to determine the positioning of knots, each
knot will cover the same amount of probability mass. If not given,
knots are equally spaced.
"""
if marginal is None:
knots = augknt(np.linspace(0, length + 1, nr_knots), spline_order) # depends on [control=['if'], data=[]]
else:
knots = knots_from_marginal(marginal, nr_knots, spline_order)
x_eval = np.arange(1, length + 1).astype(float)
Bsplines = spcol(x_eval, knots, spline_order)
return (Bsplines, knots) |
def initdb(self):
    '''initdb will check for writability of the data folder, meaning
    that it is bound to the local machine. If the folder isn't bound,
    expfactory runs in demo mode (not saving data)
    '''
    self.database = EXPFACTORY_DATABASE
    bot.info("DATABASE: %s" %self.database)

    # Fall back to filesystem storage for any unsupported backend prefix.
    supported = ('sqlite', 'postgres', 'mysql', 'filesystem')
    if not self.database.startswith(supported):
        bot.warning('%s is not yet a supported type, saving to filesystem.' % self.database)
        self.database = 'filesystem'

    # Add functions specific to database type
    self.init_db() # uses url in self.database
    bot.log("Data base: %s" % self.database)
constant[initdb will check for writability of the data folder, meaning
that it is bound to the local machine. If the folder isn't bound,
expfactory runs in demo mode (not saving data)
]
name[self].database assign[=] name[EXPFACTORY_DATABASE]
call[name[bot].info, parameter[binary_operation[constant[DATABASE: %s] <ast.Mod object at 0x7da2590d6920> name[self].database]]]
variable[valid] assign[=] tuple[[<ast.Constant object at 0x7da2041d94e0>, <ast.Constant object at 0x7da2041d9240>, <ast.Constant object at 0x7da2041d8940>, <ast.Constant object at 0x7da2041d8bb0>]]
if <ast.UnaryOp object at 0x7da2041d81f0> begin[:]
call[name[bot].warning, parameter[binary_operation[constant[%s is not yet a supported type, saving to filesystem.] <ast.Mod object at 0x7da2590d6920> name[self].database]]]
name[self].database assign[=] constant[filesystem]
call[name[self].init_db, parameter[]]
call[name[bot].log, parameter[binary_operation[constant[Data base: %s] <ast.Mod object at 0x7da2590d6920> name[self].database]]] | keyword[def] identifier[initdb] ( identifier[self] ):
literal[string]
identifier[self] . identifier[database] = identifier[EXPFACTORY_DATABASE]
identifier[bot] . identifier[info] ( literal[string] % identifier[self] . identifier[database] )
identifier[valid] =( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[database] . identifier[startswith] ( identifier[valid] ):
identifier[bot] . identifier[warning] ( literal[string] % identifier[self] . identifier[database] )
identifier[self] . identifier[database] = literal[string]
identifier[self] . identifier[init_db] ()
identifier[bot] . identifier[log] ( literal[string] % identifier[self] . identifier[database] ) | def initdb(self):
"""initdb will check for writability of the data folder, meaning
that it is bound to the local machine. If the folder isn't bound,
expfactory runs in demo mode (not saving data)
"""
self.database = EXPFACTORY_DATABASE
bot.info('DATABASE: %s' % self.database)
# Supported database options
valid = ('sqlite', 'postgres', 'mysql', 'filesystem')
if not self.database.startswith(valid):
bot.warning('%s is not yet a supported type, saving to filesystem.' % self.database)
self.database = 'filesystem' # depends on [control=['if'], data=[]]
# Add functions specific to database type
self.init_db() # uses url in self.database
bot.log('Data base: %s' % self.database) |
def calculate_between_class_scatter_matrix(X, y):
    """Calculates the Between-Class Scatter matrix
    Parameters:
    -----------
    X : array-like, shape (m, n) - the samples
    y : array-like, shape (m, ) - the class labels
    Returns:
    --------
    between_class_scatter_matrix : array-like, shape (n, n)
    """
    class_means = calculate_mean_vectors(X, y)
    n_features = X.shape[1]
    overall_mean = np.mean(X, axis=0).reshape(n_features, 1)
    scatter = np.zeros((n_features, n_features))
    for label, class_mean in zip(np.unique(y), class_means):
        # Deviation of this class mean from the overall mean, as a column.
        deviation = class_mean.reshape(n_features, 1) - overall_mean
        n_samples = X[y == label, :].shape[0]
        # Weight the outer product by the class size.
        scatter += n_samples * deviation @ deviation.T
    return scatter
constant[Calculates the Between-Class Scatter matrix
Parameters:
-----------
X : array-like, shape (m, n) - the samples
y : array-like, shape (m, ) - the class labels
Returns:
--------
between_class_scatter_matrix : array-like, shape (n, n)
]
variable[mean_vectors] assign[=] call[name[calculate_mean_vectors], parameter[name[X], name[y]]]
variable[n_features] assign[=] call[name[X].shape][constant[1]]
variable[Sb] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b209dd80>, <ast.Name object at 0x7da1b209cb50>]]]]
variable[m] assign[=] call[call[name[np].mean, parameter[name[X]]].reshape, parameter[name[n_features], constant[1]]]
for taget[tuple[[<ast.Name object at 0x7da1b209c730>, <ast.Name object at 0x7da1b209d6c0>]]] in starred[call[name[zip], parameter[call[name[np].unique, parameter[name[y]]], name[mean_vectors]]]] begin[:]
variable[v] assign[=] binary_operation[call[name[m_i].reshape, parameter[name[n_features], constant[1]]] - name[m]]
<ast.AugAssign object at 0x7da1b209e170>
return[name[Sb]] | keyword[def] identifier[calculate_between_class_scatter_matrix] ( identifier[X] , identifier[y] ):
literal[string]
identifier[mean_vectors] = identifier[calculate_mean_vectors] ( identifier[X] , identifier[y] )
identifier[n_features] = identifier[X] . identifier[shape] [ literal[int] ]
identifier[Sb] = identifier[np] . identifier[zeros] (( identifier[n_features] , identifier[n_features] ))
identifier[m] = identifier[np] . identifier[mean] ( identifier[X] , identifier[axis] = literal[int] ). identifier[reshape] ( identifier[n_features] , literal[int] )
keyword[for] identifier[cl] , identifier[m_i] keyword[in] identifier[zip] ( identifier[np] . identifier[unique] ( identifier[y] ), identifier[mean_vectors] ):
identifier[v] = identifier[m_i] . identifier[reshape] ( identifier[n_features] , literal[int] )- identifier[m]
identifier[Sb] += identifier[X] [ identifier[y] == identifier[cl] ,:]. identifier[shape] [ literal[int] ]* identifier[v] @ identifier[v] . identifier[T]
keyword[return] identifier[Sb] | def calculate_between_class_scatter_matrix(X, y):
"""Calculates the Between-Class Scatter matrix
Parameters:
-----------
X : array-like, shape (m, n) - the samples
y : array-like, shape (m, ) - the class labels
Returns:
--------
between_class_scatter_matrix : array-like, shape (n, n)
"""
mean_vectors = calculate_mean_vectors(X, y)
n_features = X.shape[1]
Sb = np.zeros((n_features, n_features))
m = np.mean(X, axis=0).reshape(n_features, 1)
for (cl, m_i) in zip(np.unique(y), mean_vectors):
v = m_i.reshape(n_features, 1) - m
Sb += X[y == cl, :].shape[0] * v @ v.T # depends on [control=['for'], data=[]]
return Sb |
def all(cls, state=None, include_deactivated=False):
    """
    Get all organisations
    :param state: State of organisation
    :param include_deactivated: Flag to include deactivated
    :returns: list of Organisation instances
    :raises: SocketError, CouchException
    """
    if state:
        # A state filter was supplied - validate it, then query by key.
        if state not in validators.VALID_STATES:
            raise exceptions.ValidationError('Invalid "state"')
        organisations = yield views.organisations.get(key=state,
                                                      include_docs=True)
    elif include_deactivated:
        organisations = yield views.organisations.get(include_docs=True)
    else:
        organisations = yield views.active_organisations.get(include_docs=True)

    docs = [org['doc'] for org in organisations['rows']]
    raise Return([cls(**doc) for doc in docs])
constant[
Get all organisations
:param state: State of organisation
:param include_deactivated: Flag to include deactivated
:returns: list of Organisation instances
:raises: SocketError, CouchException
]
if <ast.BoolOp object at 0x7da1b15c6f20> begin[:]
<ast.Raise object at 0x7da1b15c6ad0>
<ast.Raise object at 0x7da1b15f2860> | keyword[def] identifier[all] ( identifier[cls] , identifier[state] = keyword[None] , identifier[include_deactivated] = keyword[False] ):
literal[string]
keyword[if] identifier[state] keyword[and] identifier[state] keyword[not] keyword[in] identifier[validators] . identifier[VALID_STATES] :
keyword[raise] identifier[exceptions] . identifier[ValidationError] ( literal[string] )
keyword[elif] identifier[state] :
identifier[organisations] = keyword[yield] identifier[views] . identifier[organisations] . identifier[get] ( identifier[key] = identifier[state] ,
identifier[include_docs] = keyword[True] )
keyword[elif] identifier[include_deactivated] :
identifier[organisations] = keyword[yield] identifier[views] . identifier[organisations] . identifier[get] ( identifier[include_docs] = keyword[True] )
keyword[else] :
identifier[organisations] = keyword[yield] identifier[views] . identifier[active_organisations] . identifier[get] ( identifier[include_docs] = keyword[True] )
keyword[raise] identifier[Return] ([ identifier[cls] (** identifier[org] [ literal[string] ]) keyword[for] identifier[org] keyword[in] identifier[organisations] [ literal[string] ]]) | def all(cls, state=None, include_deactivated=False):
"""
Get all organisations
:param state: State of organisation
:param include_deactivated: Flag to include deactivated
:returns: list of Organisation instances
:raises: SocketError, CouchException
"""
if state and state not in validators.VALID_STATES:
raise exceptions.ValidationError('Invalid "state"') # depends on [control=['if'], data=[]]
elif state:
organisations = (yield views.organisations.get(key=state, include_docs=True)) # depends on [control=['if'], data=[]]
elif include_deactivated:
organisations = (yield views.organisations.get(include_docs=True)) # depends on [control=['if'], data=[]]
else:
organisations = (yield views.active_organisations.get(include_docs=True))
raise Return([cls(**org['doc']) for org in organisations['rows']]) |
def interval_intersection_width(a, b, c, d):
    """returns the width of the intersection of intervals [a,b] and [c,d]
    (thinking of these as intervals on the real number line)"""
    overlap = min(b, d) - max(a, c)
    # A non-positive overlap means the intervals do not intersect.
    return overlap if overlap > 0 else 0
constant[returns the width of the intersection of intervals [a,b] and [c,d]
(thinking of these as intervals on the real number line)]
return[call[name[max], parameter[constant[0], binary_operation[call[name[min], parameter[name[b], name[d]]] - call[name[max], parameter[name[a], name[c]]]]]]] | keyword[def] identifier[interval_intersection_width] ( identifier[a] , identifier[b] , identifier[c] , identifier[d] ):
literal[string]
keyword[return] identifier[max] ( literal[int] , identifier[min] ( identifier[b] , identifier[d] )- identifier[max] ( identifier[a] , identifier[c] )) | def interval_intersection_width(a, b, c, d):
"""returns the width of the intersection of intervals [a,b] and [c,d]
(thinking of these as intervals on the real number line)"""
return max(0, min(b, d) - max(a, c)) |
def getbugfields(self, force_refresh=False):
    """
    Calls getBugFields, which returns a list of fields in each bug
    for this bugzilla instance. This can be used to set the list of attrs
    on the Bug object.
    """
    # Serve the cached copy unless a refresh is forced or nothing is cached.
    if self._cache.bugfields and not force_refresh:
        return self._cache.bugfields
    log.debug("Refreshing bugfields")
    self._cache.bugfields = self._getbugfields()
    self._cache.bugfields.sort()
    log.debug("bugfields = %s", self._cache.bugfields)
    return self._cache.bugfields
constant[
Calls getBugFields, which returns a list of fields in each bug
for this bugzilla instance. This can be used to set the list of attrs
on the Bug object.
]
if <ast.BoolOp object at 0x7da2041db6a0> begin[:]
call[name[log].debug, parameter[constant[Refreshing bugfields]]]
name[self]._cache.bugfields assign[=] call[name[self]._getbugfields, parameter[]]
call[name[self]._cache.bugfields.sort, parameter[]]
call[name[log].debug, parameter[constant[bugfields = %s], name[self]._cache.bugfields]]
return[name[self]._cache.bugfields] | keyword[def] identifier[getbugfields] ( identifier[self] , identifier[force_refresh] = keyword[False] ):
literal[string]
keyword[if] identifier[force_refresh] keyword[or] keyword[not] identifier[self] . identifier[_cache] . identifier[bugfields] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_cache] . identifier[bugfields] = identifier[self] . identifier[_getbugfields] ()
identifier[self] . identifier[_cache] . identifier[bugfields] . identifier[sort] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_cache] . identifier[bugfields] )
keyword[return] identifier[self] . identifier[_cache] . identifier[bugfields] | def getbugfields(self, force_refresh=False):
"""
Calls getBugFields, which returns a list of fields in each bug
for this bugzilla instance. This can be used to set the list of attrs
on the Bug object.
"""
if force_refresh or not self._cache.bugfields:
log.debug('Refreshing bugfields')
self._cache.bugfields = self._getbugfields()
self._cache.bugfields.sort()
log.debug('bugfields = %s', self._cache.bugfields) # depends on [control=['if'], data=[]]
return self._cache.bugfields |
def GetEntries(self, parser_mediator, data=None, **unused_kwargs):
    """Extract data from Transmission's resume folder files.
    This is the main parsing engine for the parser. It determines if
    the selected file is the proper file to parse and extracts current
    running torrents.
    Transmission stores an individual Bencoded file for each active download
    in a folder named resume under the user's application data folder.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      data (Optional[dict[str, object]]): bencode data values.
    """
    seeding_time = data.get('seeding-time-seconds', None)

    event_data = TransmissionEventData()
    event_data.destination = data.get('destination', None)
    # Convert seconds to minutes.
    event_data.seedtime, _ = divmod(seeding_time, 60)

    # The three optional bencode timestamps all produce the same kind of
    # event and differ only in key and timeline description, so handle
    # them with one data-driven loop instead of three copied blocks.
    for value_key, description in (
        ('added-date', definitions.TIME_DESCRIPTION_ADDED),
        ('done-date', definitions.TIME_DESCRIPTION_FILE_DOWNLOADED),
        ('activity-date', definitions.TIME_DESCRIPTION_LAST_ACCESS)):
      timestamp = data.get(value_key, None)
      if timestamp:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, description)
        parser_mediator.ProduceEventWithEventData(event, event_data)
constant[Extract data from Transmission's resume folder files.
This is the main parsing engine for the parser. It determines if
the selected file is the proper file to parse and extracts current
running torrents.
Transmission stores an individual Bencoded file for each active download
in a folder named resume under the user's application data folder.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
data (Optional[dict[str, object]]): bencode data values.
]
variable[seeding_time] assign[=] call[name[data].get, parameter[constant[seeding-time-seconds], constant[None]]]
variable[event_data] assign[=] call[name[TransmissionEventData], parameter[]]
name[event_data].destination assign[=] call[name[data].get, parameter[constant[destination], constant[None]]]
<ast.Tuple object at 0x7da1b26acfd0> assign[=] call[name[divmod], parameter[name[seeding_time], constant[60]]]
variable[timestamp] assign[=] call[name[data].get, parameter[constant[added-date], constant[None]]]
if name[timestamp] begin[:]
variable[date_time] assign[=] call[name[dfdatetime_posix_time].PosixTime, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[definitions].TIME_DESCRIPTION_ADDED]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]]
variable[timestamp] assign[=] call[name[data].get, parameter[constant[done-date], constant[None]]]
if name[timestamp] begin[:]
variable[date_time] assign[=] call[name[dfdatetime_posix_time].PosixTime, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[definitions].TIME_DESCRIPTION_FILE_DOWNLOADED]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]]
variable[timestamp] assign[=] call[name[data].get, parameter[constant[activity-date], constant[None]]]
if name[timestamp] begin[:]
variable[date_time] assign[=] call[name[dfdatetime_posix_time].PosixTime, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[definitions].TIME_DESCRIPTION_LAST_ACCESS]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]] | keyword[def] identifier[GetEntries] ( identifier[self] , identifier[parser_mediator] , identifier[data] = keyword[None] ,** identifier[unused_kwargs] ):
literal[string]
identifier[seeding_time] = identifier[data] . identifier[get] ( literal[string] , keyword[None] )
identifier[event_data] = identifier[TransmissionEventData] ()
identifier[event_data] . identifier[destination] = identifier[data] . identifier[get] ( literal[string] , keyword[None] )
identifier[event_data] . identifier[seedtime] , identifier[_] = identifier[divmod] ( identifier[seeding_time] , literal[int] )
identifier[timestamp] = identifier[data] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[timestamp] :
identifier[date_time] = identifier[dfdatetime_posix_time] . identifier[PosixTime] ( identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[date_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_ADDED] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] )
identifier[timestamp] = identifier[data] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[timestamp] :
identifier[date_time] = identifier[dfdatetime_posix_time] . identifier[PosixTime] ( identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[date_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_FILE_DOWNLOADED] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] )
identifier[timestamp] = identifier[data] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[timestamp] :
identifier[date_time] = identifier[dfdatetime_posix_time] . identifier[PosixTime] ( identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[date_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_LAST_ACCESS] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] ) | def GetEntries(self, parser_mediator, data=None, **unused_kwargs):
"""Extract data from Transmission's resume folder files.
This is the main parsing engine for the parser. It determines if
the selected file is the proper file to parse and extracts current
running torrents.
Transmission stores an individual Bencoded file for each active download
in a folder named resume under the user's application data folder.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
data (Optional[dict[str, object]]): bencode data values.
"""
seeding_time = data.get('seeding-time-seconds', None)
event_data = TransmissionEventData()
event_data.destination = data.get('destination', None)
# Convert seconds to minutes.
(event_data.seedtime, _) = divmod(seeding_time, 60)
# Create timeline events based on extracted values.
timestamp = data.get('added-date', None)
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data) # depends on [control=['if'], data=[]]
timestamp = data.get('done-date', None)
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
parser_mediator.ProduceEventWithEventData(event, event_data) # depends on [control=['if'], data=[]]
timestamp = data.get('activity-date', None)
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data) # depends on [control=['if'], data=[]] |
def nr_genes(self, build=None):
    """Return the number of hgnc genes in collection

    If build is used, return the number of genes of a certain build

    Args:
        build: optional genome build to restrict the count to

    Returns:
        int: number of matching gene documents
    """
    query = {}
    if build:
        LOG.info("Fetching all genes from build %s", build)
        query['build'] = build
    else:
        LOG.info("Fetching all genes")
    # Bug fix: previously the query was always {'build': build}, so with no
    # build given it matched documents whose build equals None instead of
    # counting every gene as the log message and docstring promise.
    return self.hgnc_collection.find(query).count()
constant[Return the number of hgnc genes in collection
If build is used, return the number of genes of a certain build
Returns:
result()
]
if name[build] begin[:]
call[name[LOG].info, parameter[constant[Fetching all genes from build %s], name[build]]]
return[call[call[name[self].hgnc_collection.find, parameter[dictionary[[<ast.Constant object at 0x7da20e9b0fa0>], [<ast.Name object at 0x7da20e9b00d0>]]]].count, parameter[]]] | keyword[def] identifier[nr_genes] ( identifier[self] , identifier[build] = keyword[None] ):
literal[string]
keyword[if] identifier[build] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[build] )
keyword[else] :
identifier[LOG] . identifier[info] ( literal[string] )
keyword[return] identifier[self] . identifier[hgnc_collection] . identifier[find] ({ literal[string] : identifier[build] }). identifier[count] () | def nr_genes(self, build=None):
"""Return the number of hgnc genes in collection
If build is used, return the number of genes of a certain build
Returns:
result()
"""
if build:
LOG.info('Fetching all genes from build %s', build) # depends on [control=['if'], data=[]]
else:
LOG.info('Fetching all genes')
return self.hgnc_collection.find({'build': build}).count() |
def send_and_require(self,
                     send,
                     regexps,
                     not_there=False,
                     echo=None,
                     note=None,
                     loglevel=logging.INFO):
    """Send string and require the item in the output.
    See send_until
    """
    # Resolve the effective echo setting, then delegate to send_until with
    # a single immediate attempt (cadence=0, retries=1).
    resolved_echo = self.shutit.get_echo_override(echo)
    return self.send_until(send,
                           regexps,
                           not_there=not_there,
                           cadence=0,
                           retries=1,
                           echo=resolved_echo,
                           note=note,
                           loglevel=loglevel)
constant[Send string and require the item in the output.
See send_until
]
variable[shutit] assign[=] name[self].shutit
variable[echo] assign[=] call[name[shutit].get_echo_override, parameter[name[echo]]]
return[call[name[self].send_until, parameter[name[send], name[regexps]]]] | keyword[def] identifier[send_and_require] ( identifier[self] ,
identifier[send] ,
identifier[regexps] ,
identifier[not_there] = keyword[False] ,
identifier[echo] = keyword[None] ,
identifier[note] = keyword[None] ,
identifier[loglevel] = identifier[logging] . identifier[INFO] ):
literal[string]
identifier[shutit] = identifier[self] . identifier[shutit]
identifier[echo] = identifier[shutit] . identifier[get_echo_override] ( identifier[echo] )
keyword[return] identifier[self] . identifier[send_until] ( identifier[send] ,
identifier[regexps] ,
identifier[not_there] = identifier[not_there] ,
identifier[cadence] = literal[int] ,
identifier[retries] = literal[int] ,
identifier[echo] = identifier[echo] ,
identifier[note] = identifier[note] ,
identifier[loglevel] = identifier[loglevel] ) | def send_and_require(self, send, regexps, not_there=False, echo=None, note=None, loglevel=logging.INFO):
"""Send string and require the item in the output.
See send_until
"""
shutit = self.shutit
echo = shutit.get_echo_override(echo)
return self.send_until(send, regexps, not_there=not_there, cadence=0, retries=1, echo=echo, note=note, loglevel=loglevel) |
def _hasCredentials(self):
""" Return True, if credentials is given """
cred = self.options.get('credentials')
return (
cred and
'clientId' in cred and
'accessToken' in cred and
cred['clientId'] and
cred['accessToken']
) | def function[_hasCredentials, parameter[self]]:
constant[ Return True, if credentials is given ]
variable[cred] assign[=] call[name[self].options.get, parameter[constant[credentials]]]
return[<ast.BoolOp object at 0x7da18f00c8e0>] | keyword[def] identifier[_hasCredentials] ( identifier[self] ):
literal[string]
identifier[cred] = identifier[self] . identifier[options] . identifier[get] ( literal[string] )
keyword[return] (
identifier[cred] keyword[and]
literal[string] keyword[in] identifier[cred] keyword[and]
literal[string] keyword[in] identifier[cred] keyword[and]
identifier[cred] [ literal[string] ] keyword[and]
identifier[cred] [ literal[string] ]
) | def _hasCredentials(self):
""" Return True, if credentials is given """
cred = self.options.get('credentials')
return cred and 'clientId' in cred and ('accessToken' in cred) and cred['clientId'] and cred['accessToken'] |
def _validate_noneof(self, definitions, field, value):
    """ {'type': 'list', 'logical': 'noneof'} """
    # NOTE(review): the docstring above looks like a machine-read rule
    # schema (Cerberus convention) -- confirm before editing it.
    # Count how many of the candidate definitions the value satisfies.
    valids, _errors = \
        self.__validate_logical('noneof', definitions, field, value)
    # 'noneof' fails as soon as the value matches any definition at all.
    if valids > 0:
        self._error(field, errors.NONEOF, _errors,
                    valids, len(definitions))
constant[ {'type': 'list', 'logical': 'noneof'} ]
<ast.Tuple object at 0x7da18ede4910> assign[=] call[name[self].__validate_logical, parameter[constant[noneof], name[definitions], name[field], name[value]]]
if compare[name[valids] greater[>] constant[0]] begin[:]
call[name[self]._error, parameter[name[field], name[errors].NONEOF, name[_errors], name[valids], call[name[len], parameter[name[definitions]]]]] | keyword[def] identifier[_validate_noneof] ( identifier[self] , identifier[definitions] , identifier[field] , identifier[value] ):
literal[string]
identifier[valids] , identifier[_errors] = identifier[self] . identifier[__validate_logical] ( literal[string] , identifier[definitions] , identifier[field] , identifier[value] )
keyword[if] identifier[valids] > literal[int] :
identifier[self] . identifier[_error] ( identifier[field] , identifier[errors] . identifier[NONEOF] , identifier[_errors] ,
identifier[valids] , identifier[len] ( identifier[definitions] )) | def _validate_noneof(self, definitions, field, value):
""" {'type': 'list', 'logical': 'noneof'} """
(valids, _errors) = self.__validate_logical('noneof', definitions, field, value)
if valids > 0:
self._error(field, errors.NONEOF, _errors, valids, len(definitions)) # depends on [control=['if'], data=['valids']] |
def match_repository_configuration(url, page_size=10, page_index=0, sort=""):
    """
    Search for Repository Configurations whose internal or external url
    is an exact match.

    Returns the formatted JSON list of matches, or ``None`` when the raw
    lookup yields nothing.
    """
    raw = match_repository_configuration_raw(url, page_size, page_index, sort)
    if not raw:
        return None
    return utils.format_json_list(raw)
constant[
Search for Repository Configurations based on internal or external url with exact match
]
variable[content] assign[=] call[name[match_repository_configuration_raw], parameter[name[url], name[page_size], name[page_index], name[sort]]]
if name[content] begin[:]
return[call[name[utils].format_json_list, parameter[name[content]]]] | keyword[def] identifier[match_repository_configuration] ( identifier[url] , identifier[page_size] = literal[int] , identifier[page_index] = literal[int] , identifier[sort] = literal[string] ):
literal[string]
identifier[content] = identifier[match_repository_configuration_raw] ( identifier[url] , identifier[page_size] , identifier[page_index] , identifier[sort] )
keyword[if] identifier[content] :
keyword[return] identifier[utils] . identifier[format_json_list] ( identifier[content] ) | def match_repository_configuration(url, page_size=10, page_index=0, sort=''):
"""
Search for Repository Configurations based on internal or external url with exact match
"""
content = match_repository_configuration_raw(url, page_size, page_index, sort)
if content:
return utils.format_json_list(content) # depends on [control=['if'], data=[]] |
def get_locations_from_coords(self, longitude, latitude, levels=None):
    """
    Returns a list of geographies containing this point.
    """
    # Query MapIt for all areas (EPSG:4326) covering the given point.
    path = '/point/4326/%s,%s?generation=%s' % (
        longitude, latitude, SETTINGS['generation'])
    resp = requests.get(SETTINGS['url'] + path)
    resp.raise_for_status()

    matches = []
    for feature in resp.json().itervalues():  # Python 2 dict iterator
        try:
            geo = self.get_geography(feature['codes']['MDB'],
                                     feature['type_name'].lower())
        except LocationNotFound as e:
            # Best effort: skip areas MapIt knows but we do not.
            log.warn("Couldn't find geo that Mapit gave us: %s" % feature, exc_info=e)
            continue
        # Keep only the requested geo levels (all levels when none given).
        if not levels or geo.geo_level in levels:
            matches.append(geo)
    return matches
constant[
Returns a list of geographies containing this point.
]
variable[resp] assign[=] call[name[requests].get, parameter[binary_operation[call[name[SETTINGS]][constant[url]] + binary_operation[constant[/point/4326/%s,%s?generation=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c993ca0>, <ast.Name object at 0x7da20c993160>, <ast.Subscript object at 0x7da20c993910>]]]]]]
call[name[resp].raise_for_status, parameter[]]
variable[geos] assign[=] list[[]]
for taget[name[feature]] in starred[call[call[name[resp].json, parameter[]].itervalues, parameter[]]] begin[:]
<ast.Try object at 0x7da20cabee00>
return[name[geos]] | keyword[def] identifier[get_locations_from_coords] ( identifier[self] , identifier[longitude] , identifier[latitude] , identifier[levels] = keyword[None] ):
literal[string]
identifier[resp] = identifier[requests] . identifier[get] ( identifier[SETTINGS] [ literal[string] ]+ literal[string] %( identifier[longitude] , identifier[latitude] , identifier[SETTINGS] [ literal[string] ]))
identifier[resp] . identifier[raise_for_status] ()
identifier[geos] =[]
keyword[for] identifier[feature] keyword[in] identifier[resp] . identifier[json] (). identifier[itervalues] ():
keyword[try] :
identifier[geo] = identifier[self] . identifier[get_geography] ( identifier[feature] [ literal[string] ][ literal[string] ],
identifier[feature] [ literal[string] ]. identifier[lower] ())
keyword[if] keyword[not] identifier[levels] keyword[or] identifier[geo] . identifier[geo_level] keyword[in] identifier[levels] :
identifier[geos] . identifier[append] ( identifier[geo] )
keyword[except] identifier[LocationNotFound] keyword[as] identifier[e] :
identifier[log] . identifier[warn] ( literal[string] % identifier[feature] , identifier[exc_info] = identifier[e] )
keyword[return] identifier[geos] | def get_locations_from_coords(self, longitude, latitude, levels=None):
"""
Returns a list of geographies containing this point.
"""
resp = requests.get(SETTINGS['url'] + '/point/4326/%s,%s?generation=%s' % (longitude, latitude, SETTINGS['generation']))
resp.raise_for_status()
geos = []
for feature in resp.json().itervalues():
try:
geo = self.get_geography(feature['codes']['MDB'], feature['type_name'].lower())
if not levels or geo.geo_level in levels:
geos.append(geo) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except LocationNotFound as e:
log.warn("Couldn't find geo that Mapit gave us: %s" % feature, exc_info=e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['feature']]
return geos |
def _decode_stat_data(self, chunk):
    """
    Yield one stats item per timestamped entry found in this chunk.
    """
    for date_str, statistics in chunk.iteritems():  # Python 2 dict iterator
        # Timestamps look like "YYYY-mm-dd HH:MM:SS[.ffffff]"; drop the
        # fractional part before parsing.
        stamp = datetime.datetime.strptime(date_str.split(".")[0],
                                           '%Y-%m-%d %H:%M:%S')
        chunk_date = int(time.mktime(stamp.timetuple()))

        # Sum up active instances across all benchmark_io* benchmarks.
        instances = 0
        for benchmark_name, benchmark in statistics.iteritems():
            if not benchmark_name.startswith("benchmark_io"):
                continue
            for _method, meth_obj in benchmark.iteritems():
                if "mmtasks" in meth_obj:
                    instances += meth_obj["mmtasks"][2]

        # Look up the planned requests-per-second for the previous second.
        offset = chunk_date - 1 - self.start_time
        if 0 <= offset < len(self.phantom_info.steps):
            reqps = self.phantom_info.steps[offset][0]
        else:
            reqps = 0
        yield self.stats_item(chunk_date - 1, instances, reqps)
constant[
Return all items found in this chunk
]
for taget[tuple[[<ast.Name object at 0x7da1b0395ed0>, <ast.Name object at 0x7da1b0394370>]]] in starred[call[name[chunk].iteritems, parameter[]]] begin[:]
variable[date_obj] assign[=] call[name[datetime].datetime.strptime, parameter[call[call[name[date_str].split, parameter[constant[.]]]][constant[0]], constant[%Y-%m-%d %H:%M:%S]]]
variable[chunk_date] assign[=] call[name[int], parameter[call[name[time].mktime, parameter[call[name[date_obj].timetuple, parameter[]]]]]]
variable[instances] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b03967d0>, <ast.Name object at 0x7da1b03942e0>]]] in starred[call[name[statistics].iteritems, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0395360> begin[:]
continue
for taget[tuple[[<ast.Name object at 0x7da1b0394c10>, <ast.Name object at 0x7da1b0395cf0>]]] in starred[call[name[benchmark].iteritems, parameter[]]] begin[:]
if compare[constant[mmtasks] in name[meth_obj]] begin[:]
<ast.AugAssign object at 0x7da1b03953f0>
variable[offset] assign[=] binary_operation[binary_operation[name[chunk_date] - constant[1]] - name[self].start_time]
variable[reqps] assign[=] constant[0]
if compare[constant[0] less_or_equal[<=] name[offset]] begin[:]
variable[reqps] assign[=] call[call[name[self].phantom_info.steps][name[offset]]][constant[0]]
<ast.Yield object at 0x7da1b03a82e0> | keyword[def] identifier[_decode_stat_data] ( identifier[self] , identifier[chunk] ):
literal[string]
keyword[for] identifier[date_str] , identifier[statistics] keyword[in] identifier[chunk] . identifier[iteritems] ():
identifier[date_obj] = identifier[datetime] . identifier[datetime] . identifier[strptime] (
identifier[date_str] . identifier[split] ( literal[string] )[ literal[int] ], literal[string] )
identifier[chunk_date] = identifier[int] ( identifier[time] . identifier[mktime] ( identifier[date_obj] . identifier[timetuple] ()))
identifier[instances] = literal[int]
keyword[for] identifier[benchmark_name] , identifier[benchmark] keyword[in] identifier[statistics] . identifier[iteritems] ():
keyword[if] keyword[not] identifier[benchmark_name] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[for] identifier[method] , identifier[meth_obj] keyword[in] identifier[benchmark] . identifier[iteritems] ():
keyword[if] literal[string] keyword[in] identifier[meth_obj] :
identifier[instances] += identifier[meth_obj] [ literal[string] ][ literal[int] ]
identifier[offset] = identifier[chunk_date] - literal[int] - identifier[self] . identifier[start_time]
identifier[reqps] = literal[int]
keyword[if] literal[int] <= identifier[offset] < identifier[len] ( identifier[self] . identifier[phantom_info] . identifier[steps] ):
identifier[reqps] = identifier[self] . identifier[phantom_info] . identifier[steps] [ identifier[offset] ][ literal[int] ]
keyword[yield] identifier[self] . identifier[stats_item] ( identifier[chunk_date] - literal[int] , identifier[instances] , identifier[reqps] ) | def _decode_stat_data(self, chunk):
"""
Return all items found in this chunk
"""
for (date_str, statistics) in chunk.iteritems():
date_obj = datetime.datetime.strptime(date_str.split('.')[0], '%Y-%m-%d %H:%M:%S')
chunk_date = int(time.mktime(date_obj.timetuple()))
instances = 0
for (benchmark_name, benchmark) in statistics.iteritems():
if not benchmark_name.startswith('benchmark_io'):
continue # depends on [control=['if'], data=[]]
for (method, meth_obj) in benchmark.iteritems():
if 'mmtasks' in meth_obj:
instances += meth_obj['mmtasks'][2] # depends on [control=['if'], data=['meth_obj']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
offset = chunk_date - 1 - self.start_time
reqps = 0
if 0 <= offset < len(self.phantom_info.steps):
reqps = self.phantom_info.steps[offset][0] # depends on [control=['if'], data=['offset']]
yield self.stats_item(chunk_date - 1, instances, reqps) # depends on [control=['for'], data=[]] |
def to_match(self):
    """Return a unicode object with the MATCH representation of this TernaryConditional."""
    self.validate()

    # MATCH wraps the predicate in quotes inside if(eval("...")), so a nested
    # TernaryConditional anywhere within the predicate cannot be emitted
    # safely (nested/escaped quotes are of unclear validity) -- reject it.
    def _reject_nested_ternary(expression):
        """Raise if the visited sub-expression is itself a TernaryConditional."""
        if isinstance(expression, TernaryConditional):
            raise ValueError(u'Cannot emit MATCH code for TernaryConditional that contains '
                             u'in its predicate another TernaryConditional: '
                             u'{} {}'.format(expression, self))
        return expression

    self.predicate.visit_and_update(_reject_nested_ternary)

    predicate_string = self.predicate.to_match()
    # A literal double-quote would terminate the if(eval("...")) early.
    if u'"' in predicate_string:
        raise AssertionError(u'Found a double-quote within the predicate string, this would '
                             u'have terminated the if(eval()) early and should be fixed: '
                             u'{} {}'.format(predicate_string, self))

    return u'if(eval("%(predicate)s"), %(if_true)s, %(if_false)s)' % {
        'predicate': predicate_string,
        'if_true': self.if_true.to_match(),
        'if_false': self.if_false.to_match(),
    }
constant[Return a unicode object with the MATCH representation of this TernaryConditional.]
call[name[self].validate, parameter[]]
def function[visitor_fn, parameter[expression]]:
constant[Visitor function that ensures the predicate does not contain TernaryConditionals.]
if call[name[isinstance], parameter[name[expression], name[TernaryConditional]]] begin[:]
<ast.Raise object at 0x7da1b17240a0>
return[name[expression]]
call[name[self].predicate.visit_and_update, parameter[name[visitor_fn]]]
variable[format_spec] assign[=] constant[if(eval("%(predicate)s"), %(if_true)s, %(if_false)s)]
variable[predicate_string] assign[=] call[name[self].predicate.to_match, parameter[]]
if compare[constant["] in name[predicate_string]] begin[:]
<ast.Raise object at 0x7da1b1726230>
return[binary_operation[name[format_spec] <ast.Mod object at 0x7da2590d6920> call[name[dict], parameter[]]]] | keyword[def] identifier[to_match] ( identifier[self] ):
literal[string]
identifier[self] . identifier[validate] ()
keyword[def] identifier[visitor_fn] ( identifier[expression] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[expression] , identifier[TernaryConditional] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[expression] , identifier[self] ))
keyword[return] identifier[expression]
identifier[self] . identifier[predicate] . identifier[visit_and_update] ( identifier[visitor_fn] )
identifier[format_spec] = literal[string]
identifier[predicate_string] = identifier[self] . identifier[predicate] . identifier[to_match] ()
keyword[if] literal[string] keyword[in] identifier[predicate_string] :
keyword[raise] identifier[AssertionError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[predicate_string] , identifier[self] ))
keyword[return] identifier[format_spec] % identifier[dict] ( identifier[predicate] = identifier[predicate_string] ,
identifier[if_true] = identifier[self] . identifier[if_true] . identifier[to_match] (),
identifier[if_false] = identifier[self] . identifier[if_false] . identifier[to_match] ()) | def to_match(self):
"""Return a unicode object with the MATCH representation of this TernaryConditional."""
self.validate()
# For MATCH, an additional validation step is needed -- we currently do not support
# emitting MATCH code for TernaryConditional that contains another TernaryConditional
# anywhere within the predicate expression. This is because the predicate expression
# must be surrounded in quotes, and it is unclear whether nested/escaped quotes would work.
def visitor_fn(expression):
"""Visitor function that ensures the predicate does not contain TernaryConditionals."""
if isinstance(expression, TernaryConditional):
raise ValueError(u'Cannot emit MATCH code for TernaryConditional that contains in its predicate another TernaryConditional: {} {}'.format(expression, self)) # depends on [control=['if'], data=[]]
return expression
self.predicate.visit_and_update(visitor_fn)
format_spec = u'if(eval("%(predicate)s"), %(if_true)s, %(if_false)s)'
predicate_string = self.predicate.to_match()
if u'"' in predicate_string:
raise AssertionError(u'Found a double-quote within the predicate string, this would have terminated the if(eval()) early and should be fixed: {} {}'.format(predicate_string, self)) # depends on [control=['if'], data=['predicate_string']]
return format_spec % dict(predicate=predicate_string, if_true=self.if_true.to_match(), if_false=self.if_false.to_match()) |
def get_obsolete_acc_to_uniparc(acc):
    '''Try to determine the UniParc ID for obsolete ACCs which are not
    returned using uniprot_map.

    :param acc: The UniProt accession number.
    :return: The corresponding UniParc ID, or None when no identifier is
        found on the page.
    :raises Exception: if more than one distinct UniParc ID is found.

    Warning: This is a fragile function as the underlying website generation
    or URL could change.
    '''
    contents = http_get('www.uniprot.org/uniparc/?query={0}'.format(acc))
    # UniParc identifiers appear quoted in the page markup, e.g. "UPI000..."
    # (re.DOTALL kept from the original although the pattern has no '.').
    mtchs = re.findall(r'"UPI[A-Z0-9]+?"', contents, re.DOTALL)
    # Strip the surrounding quotes and deduplicate.
    uniparc_ids = {m[1:-1] for m in mtchs}
    if len(uniparc_ids) > 1:
        raise Exception('Multiple UPI identifiers found.')
    if uniparc_ids:
        return uniparc_ids.pop()
    return None
constant[ Tries to determine the UniParc ID for obsolete ACCs which are not returned using uniprot_map.
:param acc: The UniProt accession number.
:return: The corresponding UniParc ID.
Warning: This is a fragile function as the underlying website generation or URL could change.
]
variable[contents] assign[=] call[name[http_get], parameter[call[constant[www.uniprot.org/uniparc/?query={0}].format, parameter[name[acc]]]]]
variable[mtchs] assign[=] call[name[re].findall, parameter[constant["UPI[A-Z0-9]+?"], name[contents], name[re].DOTALL]]
variable[uniparc_id] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da20c795030>]]
if compare[call[name[len], parameter[name[uniparc_id]]] equal[==] constant[1]] begin[:]
return[call[name[uniparc_id].pop, parameter[]]]
return[constant[None]] | keyword[def] identifier[get_obsolete_acc_to_uniparc] ( identifier[acc] ):
literal[string]
identifier[contents] = identifier[http_get] ( literal[string] . identifier[format] ( identifier[acc] ))
identifier[mtchs] = identifier[re] . identifier[findall] ( literal[string] , identifier[contents] , identifier[re] . identifier[DOTALL] )
identifier[uniparc_id] = identifier[set] ([ identifier[m] [ literal[int] :- literal[int] ] keyword[for] identifier[m] keyword[in] identifier[mtchs] ])
keyword[if] identifier[len] ( identifier[uniparc_id] )== literal[int] :
keyword[return] identifier[uniparc_id] . identifier[pop] ()
keyword[elif] identifier[len] ( identifier[uniparc_id] )> literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] keyword[None] | def get_obsolete_acc_to_uniparc(acc):
""" Tries to determine the UniParc ID for obsolete ACCs which are not returned using uniprot_map.
:param acc: The UniProt accession number.
:return: The corresponding UniParc ID.
Warning: This is a fragile function as the underlying website generation or URL could change.
"""
contents = http_get('www.uniprot.org/uniparc/?query={0}'.format(acc))
mtchs = re.findall('"UPI[A-Z0-9]+?"', contents, re.DOTALL)
uniparc_id = set([m[1:-1] for m in mtchs])
if len(uniparc_id) == 1:
return uniparc_id.pop() # depends on [control=['if'], data=[]]
elif len(uniparc_id) > 1:
raise Exception('Multiple UPI identifiers found.') # depends on [control=['if'], data=[]]
return None |
def variance(self, param):
    """
    Return the variance in a given parameter as found by the fit.

    :param param: ``Parameter`` Instance.
    :return: Variance of ``param`` (the matching diagonal element of the
        covariance matrix), or ``None`` when no covariance matrix is
        available.
    """
    param_number = self.model.params.index(param)
    # The covariance matrix can be None (e.g. when the fit could not
    # estimate it); check explicitly instead of catching a broad TypeError.
    if self.covariance_matrix is None:
        return None
    return self.covariance_matrix[param_number, param_number]
constant[
Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``.
]
variable[param_number] assign[=] call[name[self].model.params.index, parameter[name[param]]]
<ast.Try object at 0x7da207f03730> | keyword[def] identifier[variance] ( identifier[self] , identifier[param] ):
literal[string]
identifier[param_number] = identifier[self] . identifier[model] . identifier[params] . identifier[index] ( identifier[param] )
keyword[try] :
keyword[return] identifier[self] . identifier[covariance_matrix] [ identifier[param_number] , identifier[param_number] ]
keyword[except] identifier[TypeError] :
keyword[return] keyword[None] | def variance(self, param):
"""
Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``.
"""
param_number = self.model.params.index(param)
try:
return self.covariance_matrix[param_number, param_number] # depends on [control=['try'], data=[]]
except TypeError:
# covariance_matrix can be None
return None # depends on [control=['except'], data=[]] |
def _index_unknown_and_reserved_tokens(self, unknown_token, reserved_tokens):
"""Indexes unknown and reserved tokens."""
self._unknown_token = unknown_token
# Thus, constants.UNKNOWN_IDX must be 0.
self._idx_to_token = [unknown_token]
if reserved_tokens is None:
self._reserved_tokens = None
else:
self._reserved_tokens = reserved_tokens[:]
self._idx_to_token.extend(reserved_tokens)
self._token_to_idx = {token: idx for idx, token in enumerate(self._idx_to_token)} | def function[_index_unknown_and_reserved_tokens, parameter[self, unknown_token, reserved_tokens]]:
constant[Indexes unknown and reserved tokens.]
name[self]._unknown_token assign[=] name[unknown_token]
name[self]._idx_to_token assign[=] list[[<ast.Name object at 0x7da1b20657b0>]]
if compare[name[reserved_tokens] is constant[None]] begin[:]
name[self]._reserved_tokens assign[=] constant[None]
name[self]._token_to_idx assign[=] <ast.DictComp object at 0x7da1b1f62bc0> | keyword[def] identifier[_index_unknown_and_reserved_tokens] ( identifier[self] , identifier[unknown_token] , identifier[reserved_tokens] ):
literal[string]
identifier[self] . identifier[_unknown_token] = identifier[unknown_token]
identifier[self] . identifier[_idx_to_token] =[ identifier[unknown_token] ]
keyword[if] identifier[reserved_tokens] keyword[is] keyword[None] :
identifier[self] . identifier[_reserved_tokens] = keyword[None]
keyword[else] :
identifier[self] . identifier[_reserved_tokens] = identifier[reserved_tokens] [:]
identifier[self] . identifier[_idx_to_token] . identifier[extend] ( identifier[reserved_tokens] )
identifier[self] . identifier[_token_to_idx] ={ identifier[token] : identifier[idx] keyword[for] identifier[idx] , identifier[token] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_idx_to_token] )} | def _index_unknown_and_reserved_tokens(self, unknown_token, reserved_tokens):
"""Indexes unknown and reserved tokens."""
self._unknown_token = unknown_token
# Thus, constants.UNKNOWN_IDX must be 0.
self._idx_to_token = [unknown_token]
if reserved_tokens is None:
self._reserved_tokens = None # depends on [control=['if'], data=[]]
else:
self._reserved_tokens = reserved_tokens[:]
self._idx_to_token.extend(reserved_tokens)
self._token_to_idx = {token: idx for (idx, token) in enumerate(self._idx_to_token)} |
def ConsultarTiposOperacion(self, sep="||"):
    """Query the operation types available per activity (AFIP web service).

    For every activity type returned by the service, fetch the operation
    types linked to that activity and collect them.

    :param sep: column separator for formatted output rows; when falsy,
        raw (activity_code, operation_code, description) tuples are
        returned instead.
    :return: list of formatted strings (or tuples when ``sep`` is falsy).
    """
    ops = []
    # First call: list all activity types for this taxpayer (CUIT).
    ret = self.client.tipoActividadConsultar(
        auth={
            'token': self.Token, 'sign': self.Sign,
            'cuit': self.Cuit, },
        )['tipoActividadReturn']
    self.__analizar_errores(ret)
    for it_act in ret.get('tiposActividad', []):
        # Second call: operation types for this particular activity code.
        ret = self.client.tipoOperacionXActividadConsultar(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            nroActLiquida=it_act['codigoDescripcion']['codigo'],
            )['tipoOperacionReturn']
        self.__analizar_errores(ret)
        array = ret.get('tiposOperacion', [])
        if sep:
            # Two-stage %-format: first insert the separators, then the
            # activity code, operation code and description.
            ops.extend([("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) %
                        (it_act['codigoDescripcion']['codigo'],
                         it['codigoDescripcion']['codigo'],
                         it['codigoDescripcion']['descripcion'])
                        for it in array])
        else:
            # No separator requested: return raw tuples.
            ops.extend([(it_act['codigoDescripcion']['codigo'],
                         it['codigoDescripcion']['codigo'],
                         it['codigoDescripcion']['descripcion'])
                        for it in array])
    return ops
constant[Consulta tipo de Operación por Actividad.]
variable[ops] assign[=] list[[]]
variable[ret] assign[=] call[call[name[self].client.tipoActividadConsultar, parameter[]]][constant[tipoActividadReturn]]
call[name[self].__analizar_errores, parameter[name[ret]]]
for taget[name[it_act]] in starred[call[name[ret].get, parameter[constant[tiposActividad], list[[]]]]] begin[:]
variable[ret] assign[=] call[call[name[self].client.tipoOperacionXActividadConsultar, parameter[]]][constant[tipoOperacionReturn]]
call[name[self].__analizar_errores, parameter[name[ret]]]
variable[array] assign[=] call[name[ret].get, parameter[constant[tiposOperacion], list[[]]]]
if name[sep] begin[:]
call[name[ops].extend, parameter[<ast.ListComp object at 0x7da1b1d0ca60>]]
return[name[ops]] | keyword[def] identifier[ConsultarTiposOperacion] ( identifier[self] , identifier[sep] = literal[string] ):
literal[string]
identifier[ops] =[]
identifier[ret] = identifier[self] . identifier[client] . identifier[tipoActividadConsultar] (
identifier[auth] ={
literal[string] : identifier[self] . identifier[Token] , literal[string] : identifier[self] . identifier[Sign] ,
literal[string] : identifier[self] . identifier[Cuit] ,},
)[ literal[string] ]
identifier[self] . identifier[__analizar_errores] ( identifier[ret] )
keyword[for] identifier[it_act] keyword[in] identifier[ret] . identifier[get] ( literal[string] ,[]):
identifier[ret] = identifier[self] . identifier[client] . identifier[tipoOperacionXActividadConsultar] (
identifier[auth] ={
literal[string] : identifier[self] . identifier[Token] , literal[string] : identifier[self] . identifier[Sign] ,
literal[string] : identifier[self] . identifier[Cuit] ,},
identifier[nroActLiquida] = identifier[it_act] [ literal[string] ][ literal[string] ],
)[ literal[string] ]
identifier[self] . identifier[__analizar_errores] ( identifier[ret] )
identifier[array] = identifier[ret] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[sep] :
identifier[ops] . identifier[extend] ([( literal[string] %( identifier[sep] , identifier[sep] , identifier[sep] , identifier[sep] ))%
( identifier[it_act] [ literal[string] ][ literal[string] ],
identifier[it] [ literal[string] ][ literal[string] ],
identifier[it] [ literal[string] ][ literal[string] ])
keyword[for] identifier[it] keyword[in] identifier[array] ])
keyword[else] :
identifier[ops] . identifier[extend] ([( identifier[it_act] [ literal[string] ][ literal[string] ],
identifier[it] [ literal[string] ][ literal[string] ],
identifier[it] [ literal[string] ][ literal[string] ])
keyword[for] identifier[it] keyword[in] identifier[array] ])
keyword[return] identifier[ops] | def ConsultarTiposOperacion(self, sep='||'):
"""Consulta tipo de Operación por Actividad."""
ops = []
ret = self.client.tipoActividadConsultar(auth={'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit})['tipoActividadReturn']
self.__analizar_errores(ret)
for it_act in ret.get('tiposActividad', []):
ret = self.client.tipoOperacionXActividadConsultar(auth={'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit}, nroActLiquida=it_act['codigoDescripcion']['codigo'])['tipoOperacionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposOperacion', [])
if sep:
ops.extend(['%s %%s %s %%s %s %%s %s' % (sep, sep, sep, sep) % (it_act['codigoDescripcion']['codigo'], it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array]) # depends on [control=['if'], data=[]]
else:
ops.extend([(it_act['codigoDescripcion']['codigo'], it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array]) # depends on [control=['for'], data=['it_act']]
return ops |
def get_wifi_packet(frame, no_rtap=False):
    """Discriminates Wi-Fi packet and creates
    packet object.
    :frame: ctypes.Structure
    :no_rtap: Bool
    :return: obj
        Wi-Fi packet
    """
    _, packet = WiHelper._strip_rtap(frame)
    # First byte of the 802.11 frame-control field carries type/subtype bits.
    fc_byte = struct.unpack('BB', packet[:2])[0]
    cat = (fc_byte >> 2) & 0b0011
    s_type = fc_byte >> 4

    if cat not in _CATEGORIES_:
        logging.warning("unknown category: %d" % (cat))
        return Unknown(frame, no_rtap)
    if s_type not in _SUBTYPES_[cat]:
        logging.warning("unknown subtype %d in %s category" % (s_type, _CATEGORIES_[cat]))
        return Unknown(frame, no_rtap)

    if cat == 0:
        # Management frames: probe request/response, beacon, or generic.
        return {4: ProbeReq, 5: ProbeResp, 8: Beacon}.get(
            s_type, Management)(frame, no_rtap)
    if cat == 1:
        # Control frames: RTS, CTS, block-ack, or generic.
        return {11: RTS, 12: CTS, 9: BACK}.get(
            s_type, Control)(frame, no_rtap)
    if cat == 2:
        # Data frames: QoS data (with A-MSDU parsing) or plain data.
        if s_type == 8:
            return QosData(frame, no_rtap, parse_amsdu=True)
        return Data(frame, no_rtap)
    # Any other known category falls through to None, as in the original.
constant[Discriminates Wi-Fi packet and creates
packet object.
:frame: ctypes.Structure
:no_rtap: Bool
:return: obj
Wi-Fi packet
]
<ast.Tuple object at 0x7da1aff6e170> assign[=] call[name[WiHelper]._strip_rtap, parameter[name[frame]]]
variable[frame_control] assign[=] call[name[struct].unpack, parameter[constant[BB], call[name[packet]][<ast.Slice object at 0x7da1aff6e260>]]]
variable[cat] assign[=] binary_operation[binary_operation[call[name[frame_control]][constant[0]] <ast.RShift object at 0x7da2590d6a40> constant[2]] <ast.BitAnd object at 0x7da2590d6b60> constant[3]]
variable[s_type] assign[=] binary_operation[call[name[frame_control]][constant[0]] <ast.RShift object at 0x7da2590d6a40> constant[4]]
if compare[name[cat] <ast.NotIn object at 0x7da2590d7190> call[name[_CATEGORIES_].keys, parameter[]]] begin[:]
call[name[logging].warning, parameter[binary_operation[constant[unknown category: %d] <ast.Mod object at 0x7da2590d6920> name[cat]]]]
return[call[name[Unknown], parameter[name[frame], name[no_rtap]]]]
if compare[name[s_type] <ast.NotIn object at 0x7da2590d7190> call[call[name[_SUBTYPES_]][name[cat]].keys, parameter[]]] begin[:]
call[name[logging].warning, parameter[binary_operation[constant[unknown subtype %d in %s category] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1affe5d20>, <ast.Subscript object at 0x7da1affe5db0>]]]]]
return[call[name[Unknown], parameter[name[frame], name[no_rtap]]]]
if compare[name[cat] equal[==] constant[0]] begin[:]
if compare[name[s_type] equal[==] constant[4]] begin[:]
return[call[name[ProbeReq], parameter[name[frame], name[no_rtap]]]] | keyword[def] identifier[get_wifi_packet] ( identifier[frame] , identifier[no_rtap] = keyword[False] ):
literal[string]
identifier[_] , identifier[packet] = identifier[WiHelper] . identifier[_strip_rtap] ( identifier[frame] )
identifier[frame_control] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[packet] [: literal[int] ])
identifier[cat] =( identifier[frame_control] [ literal[int] ]>> literal[int] )& literal[int]
identifier[s_type] = identifier[frame_control] [ literal[int] ]>> literal[int]
keyword[if] identifier[cat] keyword[not] keyword[in] identifier[_CATEGORIES_] . identifier[keys] ():
identifier[logging] . identifier[warning] ( literal[string] %( identifier[cat] ))
keyword[return] identifier[Unknown] ( identifier[frame] , identifier[no_rtap] )
keyword[if] identifier[s_type] keyword[not] keyword[in] identifier[_SUBTYPES_] [ identifier[cat] ]. identifier[keys] ():
identifier[logging] . identifier[warning] ( literal[string] %( identifier[s_type] , identifier[_CATEGORIES_] [ identifier[cat] ]))
keyword[return] identifier[Unknown] ( identifier[frame] , identifier[no_rtap] )
keyword[if] identifier[cat] == literal[int] :
keyword[if] identifier[s_type] == literal[int] :
keyword[return] identifier[ProbeReq] ( identifier[frame] , identifier[no_rtap] )
keyword[elif] identifier[s_type] == literal[int] :
keyword[return] identifier[ProbeResp] ( identifier[frame] , identifier[no_rtap] )
keyword[elif] identifier[s_type] == literal[int] :
keyword[return] identifier[Beacon] ( identifier[frame] , identifier[no_rtap] )
keyword[else] :
keyword[return] identifier[Management] ( identifier[frame] , identifier[no_rtap] )
keyword[elif] identifier[cat] == literal[int] :
keyword[if] identifier[s_type] == literal[int] :
keyword[return] identifier[RTS] ( identifier[frame] , identifier[no_rtap] )
keyword[elif] identifier[s_type] == literal[int] :
keyword[return] identifier[CTS] ( identifier[frame] , identifier[no_rtap] )
keyword[elif] identifier[s_type] == literal[int] :
keyword[return] identifier[BACK] ( identifier[frame] , identifier[no_rtap] )
keyword[else] :
keyword[return] identifier[Control] ( identifier[frame] , identifier[no_rtap] )
keyword[elif] identifier[cat] == literal[int] :
keyword[if] identifier[s_type] == literal[int] :
keyword[return] identifier[QosData] ( identifier[frame] , identifier[no_rtap] , identifier[parse_amsdu] = keyword[True] )
keyword[else] :
keyword[return] identifier[Data] ( identifier[frame] , identifier[no_rtap] ) | def get_wifi_packet(frame, no_rtap=False):
"""Discriminates Wi-Fi packet and creates
packet object.
:frame: ctypes.Structure
:no_rtap: Bool
:return: obj
Wi-Fi packet
"""
(_, packet) = WiHelper._strip_rtap(frame)
frame_control = struct.unpack('BB', packet[:2])
cat = frame_control[0] >> 2 & 3
s_type = frame_control[0] >> 4
if cat not in _CATEGORIES_.keys():
logging.warning('unknown category: %d' % cat)
return Unknown(frame, no_rtap) # depends on [control=['if'], data=['cat']]
if s_type not in _SUBTYPES_[cat].keys():
logging.warning('unknown subtype %d in %s category' % (s_type, _CATEGORIES_[cat]))
return Unknown(frame, no_rtap) # depends on [control=['if'], data=['s_type']]
if cat == 0:
if s_type == 4:
return ProbeReq(frame, no_rtap) # depends on [control=['if'], data=[]]
elif s_type == 5:
return ProbeResp(frame, no_rtap) # depends on [control=['if'], data=[]]
elif s_type == 8:
return Beacon(frame, no_rtap) # depends on [control=['if'], data=[]]
else:
return Management(frame, no_rtap) # depends on [control=['if'], data=[]]
elif cat == 1:
if s_type == 11:
return RTS(frame, no_rtap) # depends on [control=['if'], data=[]]
elif s_type == 12:
return CTS(frame, no_rtap) # depends on [control=['if'], data=[]]
elif s_type == 9:
return BACK(frame, no_rtap) # depends on [control=['if'], data=[]]
else:
return Control(frame, no_rtap) # depends on [control=['if'], data=[]]
elif cat == 2:
if s_type == 8:
return QosData(frame, no_rtap, parse_amsdu=True) # depends on [control=['if'], data=[]]
else:
return Data(frame, no_rtap) # depends on [control=['if'], data=[]] |
def choose(is_accepted, accepted, rejected, name=None):
  """Recursively select between `accepted` and `rejected` via tf.where.

  Non-namedtuple values are handled directly by `_choose_base_case`;
  namedtuple-like values are rebuilt field by field, choosing each field
  recursively. Both inputs must share the same namedtuple type.
  """
  if not is_namedtuple_like(accepted):
    return _choose_base_case(is_accepted, accepted, rejected, name=name)
  if not isinstance(accepted, type(rejected)):
    raise TypeError('Type of `accepted` ({}) must be identical to '
                    'type of `rejected` ({})'.format(
                        type(accepted).__name__,
                        type(rejected).__name__))
  # Rebuild the namedtuple with each field chosen independently.
  chosen_fields = {
      field: choose(is_accepted,
                    getattr(accepted, field),
                    getattr(rejected, field),
                    name=name)
      for field in accepted._fields
  }
  return type(accepted)(**chosen_fields)
constant[Helper which expand_dims `is_accepted` then applies tf.where.]
if <ast.UnaryOp object at 0x7da1b0235fc0> begin[:]
return[call[name[_choose_base_case], parameter[name[is_accepted], name[accepted], name[rejected]]]]
if <ast.UnaryOp object at 0x7da1b0237640> begin[:]
<ast.Raise object at 0x7da1b02374c0>
return[call[call[name[type], parameter[name[accepted]]], parameter[]]] | keyword[def] identifier[choose] ( identifier[is_accepted] , identifier[accepted] , identifier[rejected] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[is_namedtuple_like] ( identifier[accepted] ):
keyword[return] identifier[_choose_base_case] ( identifier[is_accepted] , identifier[accepted] , identifier[rejected] , identifier[name] = identifier[name] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[accepted] , identifier[type] ( identifier[rejected] )):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] . identifier[format] (
identifier[type] ( identifier[accepted] ). identifier[__name__] ,
identifier[type] ( identifier[rejected] ). identifier[__name__] ))
keyword[return] identifier[type] ( identifier[accepted] )(** identifier[dict] (
[( identifier[fn] ,
identifier[choose] ( identifier[is_accepted] ,
identifier[getattr] ( identifier[accepted] , identifier[fn] ),
identifier[getattr] ( identifier[rejected] , identifier[fn] ),
identifier[name] = identifier[name] ))
keyword[for] identifier[fn] keyword[in] identifier[accepted] . identifier[_fields] ])) | def choose(is_accepted, accepted, rejected, name=None):
"""Helper which expand_dims `is_accepted` then applies tf.where."""
if not is_namedtuple_like(accepted):
return _choose_base_case(is_accepted, accepted, rejected, name=name) # depends on [control=['if'], data=[]]
if not isinstance(accepted, type(rejected)):
raise TypeError('Type of `accepted` ({}) must be identical to type of `rejected` ({})'.format(type(accepted).__name__, type(rejected).__name__)) # depends on [control=['if'], data=[]]
return type(accepted)(**dict([(fn, choose(is_accepted, getattr(accepted, fn), getattr(rejected, fn), name=name)) for fn in accepted._fields])) |
def resolve_remote(self, uri):
    """Resolve a uri or relative path to a schema.

    Delegates to the parent resolver first; when that raises ValueError
    (e.g. the uri has no usable scheme), retries with a file:// URL built
    from the locally bundled schema path.
    """
    parent = super(LocalRefResolver, self)
    try:
        return parent.resolve_remote(uri)
    except ValueError:
        # Strip a trailing '.json' before mapping to the local schema file.
        local_uri = 'file://' + get_schema_path(uri.rsplit('.json', 1)[0])
        return parent.resolve_remote(local_uri)
constant[Resolve a uri or relative path to a schema.]
<ast.Try object at 0x7da1b242dc60> | keyword[def] identifier[resolve_remote] ( identifier[self] , identifier[uri] ):
literal[string]
keyword[try] :
keyword[return] identifier[super] ( identifier[LocalRefResolver] , identifier[self] ). identifier[resolve_remote] ( identifier[uri] )
keyword[except] identifier[ValueError] :
keyword[return] identifier[super] ( identifier[LocalRefResolver] , identifier[self] ). identifier[resolve_remote] (
literal[string] + identifier[get_schema_path] ( identifier[uri] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ])
) | def resolve_remote(self, uri):
"""Resolve a uri or relative path to a schema."""
try:
return super(LocalRefResolver, self).resolve_remote(uri) # depends on [control=['try'], data=[]]
except ValueError:
return super(LocalRefResolver, self).resolve_remote('file://' + get_schema_path(uri.rsplit('.json', 1)[0])) # depends on [control=['except'], data=[]] |
def filter_record(self, record):
    """
    Filter a record, truncating or dropping at an 'N'
    """
    cut = record.seq.find('N')
    if cut < 0:
        # No ambiguous base: pass the record through unchanged.
        return record
    if self.action == 'truncate':
        return record[:cut]
    if self.action == 'drop':
        raise FailedFilter()
    # Any other configured action is a programming error.
    assert False
constant[
Filter a record, truncating or dropping at an 'N'
]
variable[nloc] assign[=] call[name[record].seq.find, parameter[constant[N]]]
if compare[name[nloc] equal[==] <ast.UnaryOp object at 0x7da1b1b60ca0>] begin[:]
return[name[record]] | keyword[def] identifier[filter_record] ( identifier[self] , identifier[record] ):
literal[string]
identifier[nloc] = identifier[record] . identifier[seq] . identifier[find] ( literal[string] )
keyword[if] identifier[nloc] ==- literal[int] :
keyword[return] identifier[record]
keyword[elif] identifier[self] . identifier[action] == literal[string] :
keyword[return] identifier[record] [: identifier[nloc] ]
keyword[elif] identifier[self] . identifier[action] == literal[string] :
keyword[raise] identifier[FailedFilter] ()
keyword[else] :
keyword[assert] keyword[False] | def filter_record(self, record):
"""
Filter a record, truncating or dropping at an 'N'
"""
nloc = record.seq.find('N')
if nloc == -1:
return record # depends on [control=['if'], data=[]]
elif self.action == 'truncate':
return record[:nloc] # depends on [control=['if'], data=[]]
elif self.action == 'drop':
raise FailedFilter() # depends on [control=['if'], data=[]]
else:
assert False |
def __create_file_name(self, message_no):
    """ Create the filename to save to """
    # Output lands in the current working directory as <prefix>_<no>.xml.
    basename = '{0}_{1}.xml'.format(self.output_prefix, message_no)
    return os.path.join(os.getcwd(), basename)
constant[ Create the filename to save to ]
variable[cwd] assign[=] call[name[os].getcwd, parameter[]]
variable[filename] assign[=] call[constant[{0}_{1}.xml].format, parameter[name[self].output_prefix, name[message_no]]]
return[call[name[os].path.join, parameter[name[cwd], name[filename]]]] | keyword[def] identifier[__create_file_name] ( identifier[self] , identifier[message_no] ):
literal[string]
identifier[cwd] = identifier[os] . identifier[getcwd] ()
identifier[filename] = literal[string] . identifier[format] ( identifier[self] . identifier[output_prefix] , identifier[message_no] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[cwd] , identifier[filename] ) | def __create_file_name(self, message_no):
""" Create the filename to save to """
cwd = os.getcwd()
filename = '{0}_{1}.xml'.format(self.output_prefix, message_no)
return os.path.join(cwd, filename) |
def make_context(self, **kwargs):
    """Validate the schema, then build a fresh Context for reading data."""
    self.check_schema()
    context = Context(self.driver, self.config, **kwargs)
    return context
constant[Create a new context for reading data]
call[name[self].check_schema, parameter[]]
return[call[name[Context], parameter[name[self].driver, name[self].config]]] | keyword[def] identifier[make_context] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[check_schema] ()
keyword[return] identifier[Context] ( identifier[self] . identifier[driver] , identifier[self] . identifier[config] ,** identifier[kwargs] ) | def make_context(self, **kwargs):
"""Create a new context for reading data"""
self.check_schema()
return Context(self.driver, self.config, **kwargs) |
async def resume(self):
    """Resumes playback if paused"""
    self.logger.debug("resume command")
    # Bail out unless the player is fully initialised and has a streamer.
    if not self.state == 'ready':
        return
    if self.streamer is None:
        return
    try:
        if not self.streamer.is_playing():
            # Status text shown to users: live sources read "Streaming".
            play_state = "Streaming" if self.is_live else "Playing"
            self.statuslog.info(play_state)
            self.streamer.resume()
        # Shift the start-time reference forward by the paused duration so
        # elapsed-time bookkeeping stays correct after resuming.
        if self.pause_time is not None:
            self.vclient_starttime += (self.vclient.loop.time() - self.pause_time)
            self.pause_time = None
    except Exception as e:
        # NOTE(review): broad catch logs via the module-level `logger`,
        # unlike `self.logger` above — confirm the asymmetry is intentional.
        logger.error(e)
        pass
literal[string]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[state] == literal[string] :
keyword[return]
keyword[if] identifier[self] . identifier[streamer] keyword[is] keyword[None] :
keyword[return]
keyword[try] :
keyword[if] keyword[not] identifier[self] . identifier[streamer] . identifier[is_playing] ():
identifier[play_state] = literal[string] keyword[if] identifier[self] . identifier[is_live] keyword[else] literal[string]
identifier[self] . identifier[statuslog] . identifier[info] ( identifier[play_state] )
identifier[self] . identifier[streamer] . identifier[resume] ()
keyword[if] identifier[self] . identifier[pause_time] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[vclient_starttime] +=( identifier[self] . identifier[vclient] . identifier[loop] . identifier[time] ()- identifier[self] . identifier[pause_time] )
identifier[self] . identifier[pause_time] = keyword[None]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( identifier[e] )
keyword[pass] | async def resume(self):
"""Resumes playback if paused"""
self.logger.debug('resume command')
if not self.state == 'ready':
return # depends on [control=['if'], data=[]]
if self.streamer is None:
return # depends on [control=['if'], data=[]]
try:
if not self.streamer.is_playing():
play_state = 'Streaming' if self.is_live else 'Playing'
self.statuslog.info(play_state)
self.streamer.resume()
if self.pause_time is not None:
self.vclient_starttime += self.vclient.loop.time() - self.pause_time # depends on [control=['if'], data=[]]
self.pause_time = None # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
logger.error(e)
pass # depends on [control=['except'], data=['e']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.