code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def request(self, target):
    """Delete a configuration datastore.

    *target* specifies the name or URL of configuration datastore to delete

    :seealso: :ref:`srctarget_params`"""
    # Build the <delete-config> RPC payload: the target datastore is
    # validated/wrapped by the shared datastore-or-URL helper.
    rpc = new_ele("delete-config")
    target_ele = util.datastore_or_url("target", target, self._assert)
    rpc.append(target_ele)
    # Dispatch through the common RPC machinery and return its result.
    return self._request(rpc)
constant[Delete a configuration datastore.
*target* specifies the name or URL of configuration datastore to delete
:seealso: :ref:`srctarget_params`]
variable[node] assign[=] call[name[new_ele], parameter[constant[delete-config]]]
call[name[node].append, parameter[call[name[util].datastore_or_url, parameter[constant[target], name[target], name[self]._assert]]]]
return[call[name[self]._request, parameter[name[node]]]] | keyword[def] identifier[request] ( identifier[self] , identifier[target] ):
literal[string]
identifier[node] = identifier[new_ele] ( literal[string] )
identifier[node] . identifier[append] ( identifier[util] . identifier[datastore_or_url] ( literal[string] , identifier[target] , identifier[self] . identifier[_assert] ))
keyword[return] identifier[self] . identifier[_request] ( identifier[node] ) | def request(self, target):
"""Delete a configuration datastore.
*target* specifies the name or URL of configuration datastore to delete
:seealso: :ref:`srctarget_params`"""
node = new_ele('delete-config')
node.append(util.datastore_or_url('target', target, self._assert))
return self._request(node) |
def sort_data(data, cols):
    """Sort `data` rows and order columns"""
    # Sort rows by the key columns, keep only keys + 'value' in that
    # order, and renumber the index from zero.
    ordered = data.sort_values(by=cols)
    wanted = cols + ['value']
    return ordered.loc[:, wanted].reset_index(drop=True)
constant[Sort `data` rows and order columns]
return[call[call[call[name[data].sort_values, parameter[name[cols]]]][binary_operation[name[cols] + list[[<ast.Constant object at 0x7da18dc9a1d0>]]]].reset_index, parameter[]]] | keyword[def] identifier[sort_data] ( identifier[data] , identifier[cols] ):
literal[string]
keyword[return] identifier[data] . identifier[sort_values] ( identifier[cols] )[ identifier[cols] +[ literal[string] ]]. identifier[reset_index] ( identifier[drop] = keyword[True] ) | def sort_data(data, cols):
"""Sort `data` rows and order columns"""
return data.sort_values(cols)[cols + ['value']].reset_index(drop=True) |
def wrap(text, indent='    '):
    """Wrap text to terminal width with default indentation"""
    # Terminal width comes from the COLUMNS environment variable,
    # defaulting to 80 when it is unset.
    width = int(os.environ.get('COLUMNS', 80))
    # textwrap.fill() is exactly '\n'.join(TextWrapper(...).wrap(text)).
    return textwrap.fill(
        text,
        width=width,
        initial_indent=indent,
        subsequent_indent=indent,
    )
constant[Wrap text to terminal width with default indentation]
variable[wrapper] assign[=] call[name[textwrap].TextWrapper, parameter[]]
return[call[constant[
].join, parameter[call[name[wrapper].wrap, parameter[name[text]]]]]] | keyword[def] identifier[wrap] ( identifier[text] , identifier[indent] = literal[string] ):
literal[string]
identifier[wrapper] = identifier[textwrap] . identifier[TextWrapper] (
identifier[width] = identifier[int] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[int] )),
identifier[initial_indent] = identifier[indent] ,
identifier[subsequent_indent] = identifier[indent]
)
keyword[return] literal[string] . identifier[join] ( identifier[wrapper] . identifier[wrap] ( identifier[text] )) | def wrap(text, indent=' '):
"""Wrap text to terminal width with default indentation"""
wrapper = textwrap.TextWrapper(width=int(os.environ.get('COLUMNS', 80)), initial_indent=indent, subsequent_indent=indent)
return '\n'.join(wrapper.wrap(text)) |
def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
                                unit=None, errors=None,
                                infer_datetime_format=None, dayfirst=None,
                                yearfirst=None, exact=None):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates
    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be parsed
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime
    Returns
    -------
    ndarray of parsed dates
    Returns:
        - Index-like if box=True
        - ndarray of Timestamps if box=False
    """
    # Imports are local to avoid circular imports at pandas import time.
    from pandas import DatetimeIndex
    from pandas.core.arrays import DatetimeArray
    from pandas.core.arrays.datetimes import (
        maybe_convert_dtype, objects_to_datetime64ns)
    # Normalize plain Python sequences to an object-dtype ndarray.
    if isinstance(arg, (list, tuple)):
        arg = np.array(arg, dtype='O')
    # these are shortcutable
    if is_datetime64tz_dtype(arg):
        # Already tz-aware datetime64 data: re-box / re-localize only.
        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == 'utc':
            arg = arg.tz_convert(None).tz_localize(tz)
        return arg
    elif is_datetime64_ns_dtype(arg):
        # Already datetime64[ns]: just box if requested.
        if box and not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        return arg
    elif unit is not None:
        # Epoch-style input: values are interpreted according to `unit`;
        # `unit` and `format` are mutually exclusive.
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, 'values', arg)
        result = tslib.array_with_unit_to_datetime(arg, unit,
                                                   errors=errors)
        if box:
            if errors == 'ignore':
                from pandas import Index
                result = Index(result, name=name)
                # GH 23758: We may still need to localize the result with tz
                try:
                    return result.tz_localize(tz)
                except AttributeError:
                    return result
            return DatetimeIndex(result, tz=tz, name=name)
        return result
    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a string, datetime, list, tuple, '
                        '1-d array, or Series')
    # warn if passing timedelta64, raise for PeriodDtype
    # NB: this must come after unit transformation
    orig_arg = arg
    arg, _ = maybe_convert_dtype(arg, copy=False)
    arg = ensure_object(arg)
    require_iso8601 = False
    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None
    tz_parsed = None
    result = None
    if format is not None:
        try:
            # shortcut formatting here
            if format == '%Y%m%d':
                try:
                    # pass orig_arg as float-dtype may have been converted to
                    # datetime64[ns]
                    orig_arg = ensure_object(orig_arg)
                    result = _attempt_YYYYMMDD(orig_arg, errors=errors)
                except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
                    raise ValueError("cannot convert the input to "
                                     "'%Y%m%d' date format")
            # fallback
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors)
                    if '%Z' in format or '%z' in format:
                        return _return_parsed_timezone_results(
                            result, timezones, box, tz, name)
                except tslibs.OutOfBoundsDatetime:
                    if errors == 'raise':
                        raise
                    elif errors == 'coerce':
                        # coerce: all-NaT result of the right shape
                        result = np.empty(arg.shape, dtype='M8[ns]')
                        iresult = result.view('i8')
                        iresult.fill(tslibs.iNaT)
                    else:
                        result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == 'raise':
                            raise
                        elif errors == 'coerce':
                            result = np.empty(arg.shape, dtype='M8[ns]')
                            iresult = result.view('i8')
                            iresult.fill(tslibs.iNaT)
                        else:
                            result = arg
        except ValueError as e:
            # Fallback to try to convert datetime objects if timezone-aware
            # datetime objects are found without passing `utc=True`
            try:
                values, tz = conversion.datetime_to_datetime64(arg)
                return DatetimeIndex._simple_new(values, name=name, tz=tz)
            except (ValueError, TypeError):
                raise e
    if result is None:
        # No (usable) explicit format: fall back to the general parser.
        assert format is None or infer_datetime_format
        utc = tz == 'utc'
        result, tz_parsed = objects_to_datetime64ns(
            arg, dayfirst=dayfirst, yearfirst=yearfirst,
            utc=utc, errors=errors, require_iso8601=require_iso8601,
            allow_object=True)
    if tz_parsed is not None:
        if box:
            # We can take a shortcut since the datetime64 numpy array
            # is in UTC
            return DatetimeIndex._simple_new(result, name=name,
                                             tz=tz_parsed)
        else:
            # Convert the datetime64 numpy array to an numpy array
            # of datetime objects
            result = [Timestamp(ts, tz=tz_parsed).to_pydatetime()
                      for ts in result]
            return np.array(result, dtype=object)
    if box:
        # Ensure we return an Index in all cases where box=True
        if is_datetime64_dtype(result):
            return DatetimeIndex(result, tz=tz, name=name)
        elif is_object_dtype(result):
            # e.g. an Index of datetime objects
            from pandas import Index
            return Index(result, name=name)
    return result | def function[_convert_listlike_datetimes, parameter[arg, box, format, name, tz, unit, errors, infer_datetime_format, dayfirst, yearfirst, exact]]:
constant[
Helper function for to_datetime. Performs the conversions of 1D listlike
of dates
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be parced
box : boolean
True boxes result as an Index-like, False returns an ndarray
name : object
None or string for the Index name
tz : object
None or 'utc'
unit : string
None or string of the frequency of the passed data
errors : string
error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'
infer_datetime_format : boolean
inferring format behavior from to_datetime
dayfirst : boolean
dayfirst parsing behavior from to_datetime
yearfirst : boolean
yearfirst parsing behavior from to_datetime
exact : boolean
exact format matching behavior from to_datetime
Returns
-------
ndarray of parsed dates
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
]
from relative_module[pandas] import module[DatetimeIndex]
from relative_module[pandas.core.arrays] import module[DatetimeArray]
from relative_module[pandas.core.arrays.datetimes] import module[maybe_convert_dtype], module[objects_to_datetime64ns]
if call[name[isinstance], parameter[name[arg], tuple[[<ast.Name object at 0x7da1b26ac070>, <ast.Name object at 0x7da1b26af2b0>]]]] begin[:]
variable[arg] assign[=] call[name[np].array, parameter[name[arg]]]
if call[name[is_datetime64tz_dtype], parameter[name[arg]]] begin[:]
if <ast.UnaryOp object at 0x7da1b26aef50> begin[:]
return[call[name[DatetimeIndex], parameter[name[arg]]]]
if compare[name[tz] equal[==] constant[utc]] begin[:]
variable[arg] assign[=] call[call[name[arg].tz_convert, parameter[constant[None]]].tz_localize, parameter[name[tz]]]
return[name[arg]]
variable[orig_arg] assign[=] name[arg]
<ast.Tuple object at 0x7da1b26aee00> assign[=] call[name[maybe_convert_dtype], parameter[name[arg]]]
variable[arg] assign[=] call[name[ensure_object], parameter[name[arg]]]
variable[require_iso8601] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b26ae950> begin[:]
variable[format] assign[=] call[name[_guess_datetime_format_for_array], parameter[name[arg]]]
if compare[name[format] is_not constant[None]] begin[:]
variable[format_is_iso8601] assign[=] call[name[_format_is_iso], parameter[name[format]]]
if name[format_is_iso8601] begin[:]
variable[require_iso8601] assign[=] <ast.UnaryOp object at 0x7da237eef0d0>
variable[format] assign[=] constant[None]
variable[tz_parsed] assign[=] constant[None]
variable[result] assign[=] constant[None]
if compare[name[format] is_not constant[None]] begin[:]
<ast.Try object at 0x7da20c6c57e0>
if compare[name[result] is constant[None]] begin[:]
assert[<ast.BoolOp object at 0x7da20c6c6e30>]
variable[utc] assign[=] compare[name[tz] equal[==] constant[utc]]
<ast.Tuple object at 0x7da20c6c4430> assign[=] call[name[objects_to_datetime64ns], parameter[name[arg]]]
if compare[name[tz_parsed] is_not constant[None]] begin[:]
if name[box] begin[:]
return[call[name[DatetimeIndex]._simple_new, parameter[name[result]]]]
if name[box] begin[:]
if call[name[is_datetime64_dtype], parameter[name[result]]] begin[:]
return[call[name[DatetimeIndex], parameter[name[result]]]]
return[name[result]] | keyword[def] identifier[_convert_listlike_datetimes] ( identifier[arg] , identifier[box] , identifier[format] , identifier[name] = keyword[None] , identifier[tz] = keyword[None] ,
identifier[unit] = keyword[None] , identifier[errors] = keyword[None] ,
identifier[infer_datetime_format] = keyword[None] , identifier[dayfirst] = keyword[None] ,
identifier[yearfirst] = keyword[None] , identifier[exact] = keyword[None] ):
literal[string]
keyword[from] identifier[pandas] keyword[import] identifier[DatetimeIndex]
keyword[from] identifier[pandas] . identifier[core] . identifier[arrays] keyword[import] identifier[DatetimeArray]
keyword[from] identifier[pandas] . identifier[core] . identifier[arrays] . identifier[datetimes] keyword[import] (
identifier[maybe_convert_dtype] , identifier[objects_to_datetime64ns] )
keyword[if] identifier[isinstance] ( identifier[arg] ,( identifier[list] , identifier[tuple] )):
identifier[arg] = identifier[np] . identifier[array] ( identifier[arg] , identifier[dtype] = literal[string] )
keyword[if] identifier[is_datetime64tz_dtype] ( identifier[arg] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[arg] ,( identifier[DatetimeArray] , identifier[DatetimeIndex] )):
keyword[return] identifier[DatetimeIndex] ( identifier[arg] , identifier[tz] = identifier[tz] , identifier[name] = identifier[name] )
keyword[if] identifier[tz] == literal[string] :
identifier[arg] = identifier[arg] . identifier[tz_convert] ( keyword[None] ). identifier[tz_localize] ( identifier[tz] )
keyword[return] identifier[arg]
keyword[elif] identifier[is_datetime64_ns_dtype] ( identifier[arg] ):
keyword[if] identifier[box] keyword[and] keyword[not] identifier[isinstance] ( identifier[arg] ,( identifier[DatetimeArray] , identifier[DatetimeIndex] )):
keyword[try] :
keyword[return] identifier[DatetimeIndex] ( identifier[arg] , identifier[tz] = identifier[tz] , identifier[name] = identifier[name] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[arg]
keyword[elif] identifier[unit] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[format] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[arg] = identifier[getattr] ( identifier[arg] , literal[string] , identifier[arg] )
identifier[result] = identifier[tslib] . identifier[array_with_unit_to_datetime] ( identifier[arg] , identifier[unit] ,
identifier[errors] = identifier[errors] )
keyword[if] identifier[box] :
keyword[if] identifier[errors] == literal[string] :
keyword[from] identifier[pandas] keyword[import] identifier[Index]
identifier[result] = identifier[Index] ( identifier[result] , identifier[name] = identifier[name] )
keyword[try] :
keyword[return] identifier[result] . identifier[tz_localize] ( identifier[tz] )
keyword[except] identifier[AttributeError] :
keyword[return] identifier[result]
keyword[return] identifier[DatetimeIndex] ( identifier[result] , identifier[tz] = identifier[tz] , identifier[name] = identifier[name] )
keyword[return] identifier[result]
keyword[elif] identifier[getattr] ( identifier[arg] , literal[string] , literal[int] )> literal[int] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
identifier[orig_arg] = identifier[arg]
identifier[arg] , identifier[_] = identifier[maybe_convert_dtype] ( identifier[arg] , identifier[copy] = keyword[False] )
identifier[arg] = identifier[ensure_object] ( identifier[arg] )
identifier[require_iso8601] = keyword[False]
keyword[if] identifier[infer_datetime_format] keyword[and] identifier[format] keyword[is] keyword[None] :
identifier[format] = identifier[_guess_datetime_format_for_array] ( identifier[arg] , identifier[dayfirst] = identifier[dayfirst] )
keyword[if] identifier[format] keyword[is] keyword[not] keyword[None] :
identifier[format_is_iso8601] = identifier[_format_is_iso] ( identifier[format] )
keyword[if] identifier[format_is_iso8601] :
identifier[require_iso8601] = keyword[not] identifier[infer_datetime_format]
identifier[format] = keyword[None]
identifier[tz_parsed] = keyword[None]
identifier[result] = keyword[None]
keyword[if] identifier[format] keyword[is] keyword[not] keyword[None] :
keyword[try] :
keyword[if] identifier[format] == literal[string] :
keyword[try] :
identifier[orig_arg] = identifier[ensure_object] ( identifier[orig_arg] )
identifier[result] = identifier[_attempt_YYYYMMDD] ( identifier[orig_arg] , identifier[errors] = identifier[errors] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] , identifier[tslibs] . identifier[OutOfBoundsDatetime] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[try] :
identifier[result] , identifier[timezones] = identifier[array_strptime] (
identifier[arg] , identifier[format] , identifier[exact] = identifier[exact] , identifier[errors] = identifier[errors] )
keyword[if] literal[string] keyword[in] identifier[format] keyword[or] literal[string] keyword[in] identifier[format] :
keyword[return] identifier[_return_parsed_timezone_results] (
identifier[result] , identifier[timezones] , identifier[box] , identifier[tz] , identifier[name] )
keyword[except] identifier[tslibs] . identifier[OutOfBoundsDatetime] :
keyword[if] identifier[errors] == literal[string] :
keyword[raise]
keyword[elif] identifier[errors] == literal[string] :
identifier[result] = identifier[np] . identifier[empty] ( identifier[arg] . identifier[shape] , identifier[dtype] = literal[string] )
identifier[iresult] = identifier[result] . identifier[view] ( literal[string] )
identifier[iresult] . identifier[fill] ( identifier[tslibs] . identifier[iNaT] )
keyword[else] :
identifier[result] = identifier[arg]
keyword[except] identifier[ValueError] :
keyword[if] keyword[not] identifier[infer_datetime_format] :
keyword[if] identifier[errors] == literal[string] :
keyword[raise]
keyword[elif] identifier[errors] == literal[string] :
identifier[result] = identifier[np] . identifier[empty] ( identifier[arg] . identifier[shape] , identifier[dtype] = literal[string] )
identifier[iresult] = identifier[result] . identifier[view] ( literal[string] )
identifier[iresult] . identifier[fill] ( identifier[tslibs] . identifier[iNaT] )
keyword[else] :
identifier[result] = identifier[arg]
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[try] :
identifier[values] , identifier[tz] = identifier[conversion] . identifier[datetime_to_datetime64] ( identifier[arg] )
keyword[return] identifier[DatetimeIndex] . identifier[_simple_new] ( identifier[values] , identifier[name] = identifier[name] , identifier[tz] = identifier[tz] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[raise] identifier[e]
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[assert] identifier[format] keyword[is] keyword[None] keyword[or] identifier[infer_datetime_format]
identifier[utc] = identifier[tz] == literal[string]
identifier[result] , identifier[tz_parsed] = identifier[objects_to_datetime64ns] (
identifier[arg] , identifier[dayfirst] = identifier[dayfirst] , identifier[yearfirst] = identifier[yearfirst] ,
identifier[utc] = identifier[utc] , identifier[errors] = identifier[errors] , identifier[require_iso8601] = identifier[require_iso8601] ,
identifier[allow_object] = keyword[True] )
keyword[if] identifier[tz_parsed] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[box] :
keyword[return] identifier[DatetimeIndex] . identifier[_simple_new] ( identifier[result] , identifier[name] = identifier[name] ,
identifier[tz] = identifier[tz_parsed] )
keyword[else] :
identifier[result] =[ identifier[Timestamp] ( identifier[ts] , identifier[tz] = identifier[tz_parsed] ). identifier[to_pydatetime] ()
keyword[for] identifier[ts] keyword[in] identifier[result] ]
keyword[return] identifier[np] . identifier[array] ( identifier[result] , identifier[dtype] = identifier[object] )
keyword[if] identifier[box] :
keyword[if] identifier[is_datetime64_dtype] ( identifier[result] ):
keyword[return] identifier[DatetimeIndex] ( identifier[result] , identifier[tz] = identifier[tz] , identifier[name] = identifier[name] )
keyword[elif] identifier[is_object_dtype] ( identifier[result] ):
keyword[from] identifier[pandas] keyword[import] identifier[Index]
keyword[return] identifier[Index] ( identifier[result] , identifier[name] = identifier[name] )
keyword[return] identifier[result] | def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, unit=None, errors=None, infer_datetime_format=None, dayfirst=None, yearfirst=None, exact=None):
"""
Helper function for to_datetime. Performs the conversions of 1D listlike
of dates
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be parced
box : boolean
True boxes result as an Index-like, False returns an ndarray
name : object
None or string for the Index name
tz : object
None or 'utc'
unit : string
None or string of the frequency of the passed data
errors : string
error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'
infer_datetime_format : boolean
inferring format behavior from to_datetime
dayfirst : boolean
dayfirst parsing behavior from to_datetime
yearfirst : boolean
yearfirst parsing behavior from to_datetime
exact : boolean
exact format matching behavior from to_datetime
Returns
-------
ndarray of parsed dates
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
from pandas import DatetimeIndex
from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import maybe_convert_dtype, objects_to_datetime64ns
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O') # depends on [control=['if'], data=[]]
# these are shortcutable
if is_datetime64tz_dtype(arg):
if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name) # depends on [control=['if'], data=[]]
if tz == 'utc':
arg = arg.tz_convert(None).tz_localize(tz) # depends on [control=['if'], data=['tz']]
return arg # depends on [control=['if'], data=[]]
elif is_datetime64_ns_dtype(arg):
if box and (not isinstance(arg, (DatetimeArray, DatetimeIndex))):
try:
return DatetimeIndex(arg, tz=tz, name=name) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return arg # depends on [control=['if'], data=[]]
elif unit is not None:
if format is not None:
raise ValueError('cannot specify both format and unit') # depends on [control=['if'], data=[]]
arg = getattr(arg, 'values', arg)
result = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
if box:
if errors == 'ignore':
from pandas import Index
result = Index(result, name=name)
# GH 23758: We may still need to localize the result with tz
try:
return result.tz_localize(tz) # depends on [control=['try'], data=[]]
except AttributeError:
return result # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return DatetimeIndex(result, tz=tz, name=name) # depends on [control=['if'], data=[]]
return result # depends on [control=['if'], data=['unit']]
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series') # depends on [control=['if'], data=[]]
# warn if passing timedelta64, raise for PeriodDtype
# NB: this must come after unit transformation
orig_arg = arg
(arg, _) = maybe_convert_dtype(arg, copy=False)
arg = ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst) # depends on [control=['if'], data=[]]
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because this path makes process slower in this
# special case
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['format']]
tz_parsed = None
result = None
if format is not None:
try:
# shortcut formatting here
if format == '%Y%m%d':
try:
# pass orig_arg as float-dtype may have been converted to
# datetime64[ns]
orig_arg = ensure_object(orig_arg)
result = _attempt_YYYYMMDD(orig_arg, errors=errors) # depends on [control=['try'], data=[]]
except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
raise ValueError("cannot convert the input to '%Y%m%d' date format") # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# fallback
if result is None:
try:
(result, timezones) = array_strptime(arg, format, exact=exact, errors=errors)
if '%Z' in format or '%z' in format:
return _return_parsed_timezone_results(result, timezones, box, tz, name) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except tslibs.OutOfBoundsDatetime:
if errors == 'raise':
raise # depends on [control=['if'], data=[]]
elif errors == 'coerce':
result = np.empty(arg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult.fill(tslibs.iNaT) # depends on [control=['if'], data=[]]
else:
result = arg # depends on [control=['except'], data=[]]
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == 'raise':
raise # depends on [control=['if'], data=[]]
elif errors == 'coerce':
result = np.empty(arg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult.fill(tslibs.iNaT) # depends on [control=['if'], data=[]]
else:
result = arg # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['result']] # depends on [control=['try'], data=[]]
except ValueError as e:
# Fallback to try to convert datetime objects if timezone-aware
# datetime objects are found without passing `utc=True`
try:
(values, tz) = conversion.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
raise e # depends on [control=['except'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['format']]
if result is None:
assert format is None or infer_datetime_format
utc = tz == 'utc'
(result, tz_parsed) = objects_to_datetime64ns(arg, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, errors=errors, require_iso8601=require_iso8601, allow_object=True) # depends on [control=['if'], data=['result']]
if tz_parsed is not None:
if box:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
return DatetimeIndex._simple_new(result, name=name, tz=tz_parsed) # depends on [control=['if'], data=[]]
else:
# Convert the datetime64 numpy array to an numpy array
# of datetime objects
result = [Timestamp(ts, tz=tz_parsed).to_pydatetime() for ts in result]
return np.array(result, dtype=object) # depends on [control=['if'], data=['tz_parsed']]
if box:
# Ensure we return an Index in all cases where box=True
if is_datetime64_dtype(result):
return DatetimeIndex(result, tz=tz, name=name) # depends on [control=['if'], data=[]]
elif is_object_dtype(result):
# e.g. an Index of datetime objects
from pandas import Index
return Index(result, name=name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return result |
def _process_info_installed_output(out, filter_attrs):
    '''
    Helper function for info_installed()
    Processes stdout output from a single invocation of
    'opkg status'.

    Returns a dict mapping package name -> dict of attributes; only
    attributes contained in ``filter_attrs`` are kept (all attributes
    when ``filter_attrs`` is None).
    '''
    ret = {}
    # Parser state for the package record currently being read.
    name = None
    attrs = {}
    attr = None
    for line in salt.utils.itertools.split(out, '\n'):
        if line and line[0] == ' ':
            # This is a continuation of the last attr
            if filter_attrs is None or attr in filter_attrs:
                line = line.strip()
                if attrs[attr]:
                    # If attr is empty, don't add leading newline
                    attrs[attr] += '\n'
                attrs[attr] += line
            continue
        line = line.strip()
        if not line:
            # Separator between different packages
            if name:
                ret[name] = attrs
            name = None
            attrs = {}
            attr = None
            continue
        # 'Key: value' line starts a new attribute for the current package.
        key, value = line.split(':', 1)
        value = value.lstrip()
        attr = _convert_to_standard_attr(key)
        if attr == 'name':
            name = value
        elif filter_attrs is None or attr in filter_attrs:
            attrs[attr] = value
    # Flush the final package record (no trailing blank-line separator).
    if name:
        ret[name] = attrs
    return ret | def function[_process_info_installed_output, parameter[out, filter_attrs]]:
constant[
Helper function for info_installed()
Processes stdout output from a single invocation of
'opkg status'.
]
variable[ret] assign[=] dictionary[[], []]
variable[name] assign[=] constant[None]
variable[attrs] assign[=] dictionary[[], []]
variable[attr] assign[=] constant[None]
for taget[name[line]] in starred[call[name[salt].utils.itertools.split, parameter[name[out], constant[
]]]] begin[:]
if <ast.BoolOp object at 0x7da20c76dc90> begin[:]
if <ast.BoolOp object at 0x7da20c76fdc0> begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if call[name[attrs]][name[attr]] begin[:]
<ast.AugAssign object at 0x7da20c76c0d0>
<ast.AugAssign object at 0x7da20c76fee0>
continue
variable[line] assign[=] call[name[line].strip, parameter[]]
if <ast.UnaryOp object at 0x7da20c76ffa0> begin[:]
if name[name] begin[:]
call[name[ret]][name[name]] assign[=] name[attrs]
variable[name] assign[=] constant[None]
variable[attrs] assign[=] dictionary[[], []]
variable[attr] assign[=] constant[None]
continue
<ast.Tuple object at 0x7da18eb55300> assign[=] call[name[line].split, parameter[constant[:], constant[1]]]
variable[value] assign[=] call[name[value].lstrip, parameter[]]
variable[attr] assign[=] call[name[_convert_to_standard_attr], parameter[name[key]]]
if compare[name[attr] equal[==] constant[name]] begin[:]
variable[name] assign[=] name[value]
if name[name] begin[:]
call[name[ret]][name[name]] assign[=] name[attrs]
return[name[ret]] | keyword[def] identifier[_process_info_installed_output] ( identifier[out] , identifier[filter_attrs] ):
literal[string]
identifier[ret] ={}
identifier[name] = keyword[None]
identifier[attrs] ={}
identifier[attr] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[salt] . identifier[utils] . identifier[itertools] . identifier[split] ( identifier[out] , literal[string] ):
keyword[if] identifier[line] keyword[and] identifier[line] [ literal[int] ]== literal[string] :
keyword[if] identifier[filter_attrs] keyword[is] keyword[None] keyword[or] identifier[attr] keyword[in] identifier[filter_attrs] :
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[attrs] [ identifier[attr] ]:
identifier[attrs] [ identifier[attr] ]+= literal[string]
identifier[attrs] [ identifier[attr] ]+= identifier[line]
keyword[continue]
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] keyword[not] identifier[line] :
keyword[if] identifier[name] :
identifier[ret] [ identifier[name] ]= identifier[attrs]
identifier[name] = keyword[None]
identifier[attrs] ={}
identifier[attr] = keyword[None]
keyword[continue]
identifier[key] , identifier[value] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
identifier[value] = identifier[value] . identifier[lstrip] ()
identifier[attr] = identifier[_convert_to_standard_attr] ( identifier[key] )
keyword[if] identifier[attr] == literal[string] :
identifier[name] = identifier[value]
keyword[elif] identifier[filter_attrs] keyword[is] keyword[None] keyword[or] identifier[attr] keyword[in] identifier[filter_attrs] :
identifier[attrs] [ identifier[attr] ]= identifier[value]
keyword[if] identifier[name] :
identifier[ret] [ identifier[name] ]= identifier[attrs]
keyword[return] identifier[ret] | def _process_info_installed_output(out, filter_attrs):
"""
Helper function for info_installed()
Processes stdout output from a single invocation of
'opkg status'.
"""
ret = {}
name = None
attrs = {}
attr = None
for line in salt.utils.itertools.split(out, '\n'):
if line and line[0] == ' ':
# This is a continuation of the last attr
if filter_attrs is None or attr in filter_attrs:
line = line.strip()
if attrs[attr]:
# If attr is empty, don't add leading newline
attrs[attr] += '\n' # depends on [control=['if'], data=[]]
attrs[attr] += line # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
line = line.strip()
if not line:
# Separator between different packages
if name:
ret[name] = attrs # depends on [control=['if'], data=[]]
name = None
attrs = {}
attr = None
continue # depends on [control=['if'], data=[]]
(key, value) = line.split(':', 1)
value = value.lstrip()
attr = _convert_to_standard_attr(key)
if attr == 'name':
name = value # depends on [control=['if'], data=[]]
elif filter_attrs is None or attr in filter_attrs:
attrs[attr] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if name:
ret[name] = attrs # depends on [control=['if'], data=[]]
return ret |
def download_and_bootstrap(src, name, prereq=None):
"""
Download and install something if 'prerequisite' fails
"""
if prereq:
prereq_cmd = '{0} -c "{1}"'.format(PY_EXE, prereq)
rv = os.system(prereq_cmd)
if rv == 0:
return
ulp = urllib2.urlopen(src)
fp = open(name, "wb")
fp.write(ulp.read())
fp.close()
cmdline = "{0} {1}".format(PY_EXE, name)
rv = os.system(cmdline)
assert rv == 0 | def function[download_and_bootstrap, parameter[src, name, prereq]]:
constant[
Download and install something if 'prerequisite' fails
]
if name[prereq] begin[:]
variable[prereq_cmd] assign[=] call[constant[{0} -c "{1}"].format, parameter[name[PY_EXE], name[prereq]]]
variable[rv] assign[=] call[name[os].system, parameter[name[prereq_cmd]]]
if compare[name[rv] equal[==] constant[0]] begin[:]
return[None]
variable[ulp] assign[=] call[name[urllib2].urlopen, parameter[name[src]]]
variable[fp] assign[=] call[name[open], parameter[name[name], constant[wb]]]
call[name[fp].write, parameter[call[name[ulp].read, parameter[]]]]
call[name[fp].close, parameter[]]
variable[cmdline] assign[=] call[constant[{0} {1}].format, parameter[name[PY_EXE], name[name]]]
variable[rv] assign[=] call[name[os].system, parameter[name[cmdline]]]
assert[compare[name[rv] equal[==] constant[0]]] | keyword[def] identifier[download_and_bootstrap] ( identifier[src] , identifier[name] , identifier[prereq] = keyword[None] ):
literal[string]
keyword[if] identifier[prereq] :
identifier[prereq_cmd] = literal[string] . identifier[format] ( identifier[PY_EXE] , identifier[prereq] )
identifier[rv] = identifier[os] . identifier[system] ( identifier[prereq_cmd] )
keyword[if] identifier[rv] == literal[int] :
keyword[return]
identifier[ulp] = identifier[urllib2] . identifier[urlopen] ( identifier[src] )
identifier[fp] = identifier[open] ( identifier[name] , literal[string] )
identifier[fp] . identifier[write] ( identifier[ulp] . identifier[read] ())
identifier[fp] . identifier[close] ()
identifier[cmdline] = literal[string] . identifier[format] ( identifier[PY_EXE] , identifier[name] )
identifier[rv] = identifier[os] . identifier[system] ( identifier[cmdline] )
keyword[assert] identifier[rv] == literal[int] | def download_and_bootstrap(src, name, prereq=None):
"""
Download and install something if 'prerequisite' fails
"""
if prereq:
prereq_cmd = '{0} -c "{1}"'.format(PY_EXE, prereq)
rv = os.system(prereq_cmd)
if rv == 0:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
ulp = urllib2.urlopen(src)
fp = open(name, 'wb')
fp.write(ulp.read())
fp.close()
cmdline = '{0} {1}'.format(PY_EXE, name)
rv = os.system(cmdline)
assert rv == 0 |
def seek(self, timestamp):
"""Seek to a given timestamp in the current track, specified in the
format of HH:MM:SS or H:MM:SS.
Raises:
ValueError: if the given timestamp is invalid.
"""
if not re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp):
raise ValueError('invalid timestamp, use HH:MM:SS format')
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'REL_TIME'),
('Target', timestamp)
]) | def function[seek, parameter[self, timestamp]]:
constant[Seek to a given timestamp in the current track, specified in the
format of HH:MM:SS or H:MM:SS.
Raises:
ValueError: if the given timestamp is invalid.
]
if <ast.UnaryOp object at 0x7da18f721b70> begin[:]
<ast.Raise object at 0x7da18f7206a0>
call[name[self].avTransport.Seek, parameter[list[[<ast.Tuple object at 0x7da20c796770>, <ast.Tuple object at 0x7da20c7949d0>, <ast.Tuple object at 0x7da20c7951e0>]]]] | keyword[def] identifier[seek] ( identifier[self] , identifier[timestamp] ):
literal[string]
keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[timestamp] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[avTransport] . identifier[Seek] ([
( literal[string] , literal[int] ),
( literal[string] , literal[string] ),
( literal[string] , identifier[timestamp] )
]) | def seek(self, timestamp):
"""Seek to a given timestamp in the current track, specified in the
format of HH:MM:SS or H:MM:SS.
Raises:
ValueError: if the given timestamp is invalid.
"""
if not re.match('^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp):
raise ValueError('invalid timestamp, use HH:MM:SS format') # depends on [control=['if'], data=[]]
self.avTransport.Seek([('InstanceID', 0), ('Unit', 'REL_TIME'), ('Target', timestamp)]) |
def exists(self):
"""
Returns true if the job is still running or zero-os still knows about this job ID
After a job is finished, a job remains on zero-os for max of 5min where you still can read the job result
after the 5 min is gone, the job result is no more fetchable
:return: bool
"""
r = self._client._redis
flag = '{}:flag'.format(self._queue)
return bool(r.exists(flag)) | def function[exists, parameter[self]]:
constant[
Returns true if the job is still running or zero-os still knows about this job ID
After a job is finished, a job remains on zero-os for max of 5min where you still can read the job result
after the 5 min is gone, the job result is no more fetchable
:return: bool
]
variable[r] assign[=] name[self]._client._redis
variable[flag] assign[=] call[constant[{}:flag].format, parameter[name[self]._queue]]
return[call[name[bool], parameter[call[name[r].exists, parameter[name[flag]]]]]] | keyword[def] identifier[exists] ( identifier[self] ):
literal[string]
identifier[r] = identifier[self] . identifier[_client] . identifier[_redis]
identifier[flag] = literal[string] . identifier[format] ( identifier[self] . identifier[_queue] )
keyword[return] identifier[bool] ( identifier[r] . identifier[exists] ( identifier[flag] )) | def exists(self):
"""
Returns true if the job is still running or zero-os still knows about this job ID
After a job is finished, a job remains on zero-os for max of 5min where you still can read the job result
after the 5 min is gone, the job result is no more fetchable
:return: bool
"""
r = self._client._redis
flag = '{}:flag'.format(self._queue)
return bool(r.exists(flag)) |
def cancel_job(agent, project_name, job_id):
"""
cancel a job.
If the job is pending, it will be removed. If the job is running, it will be terminated.
"""
prevstate = agent.cancel(project_name, job_id)['prevstate']
if prevstate == 'pending':
sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,)) | def function[cancel_job, parameter[agent, project_name, job_id]]:
constant[
cancel a job.
If the job is pending, it will be removed. If the job is running, it will be terminated.
]
variable[prevstate] assign[=] call[call[name[agent].cancel, parameter[name[project_name], name[job_id]]]][constant[prevstate]]
if compare[name[prevstate] equal[==] constant[pending]] begin[:]
call[name[sqllite_agent].execute, parameter[name[ScrapydJobExtInfoSQLSet].DELETE_BY_ID, tuple[[<ast.Name object at 0x7da18ede4cd0>]]]] | keyword[def] identifier[cancel_job] ( identifier[agent] , identifier[project_name] , identifier[job_id] ):
literal[string]
identifier[prevstate] = identifier[agent] . identifier[cancel] ( identifier[project_name] , identifier[job_id] )[ literal[string] ]
keyword[if] identifier[prevstate] == literal[string] :
identifier[sqllite_agent] . identifier[execute] ( identifier[ScrapydJobExtInfoSQLSet] . identifier[DELETE_BY_ID] ,( identifier[job_id] ,)) | def cancel_job(agent, project_name, job_id):
"""
cancel a job.
If the job is pending, it will be removed. If the job is running, it will be terminated.
"""
prevstate = agent.cancel(project_name, job_id)['prevstate']
if prevstate == 'pending':
sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,)) # depends on [control=['if'], data=[]] |
def to_xyz100(self, data, description):
"""Input: J or Q; C, M or s; H or h
"""
# Steps 1-5
rgb_ = compute_to(data, description, self)
# Step 6: Calculate RC, GC and BC
# rgb_c = dot(self.M_cat02, solve(self.M_hpe, rgb_))
#
# Step 7: Calculate R, G and B
# rgb = (rgb_c.T / self.D_RGB).T
#
# Step 8: Calculate X, Y and Z
# xyz = solve(self.M_cat02, rgb)
return dot(self.invM_, rgb_) | def function[to_xyz100, parameter[self, data, description]]:
constant[Input: J or Q; C, M or s; H or h
]
variable[rgb_] assign[=] call[name[compute_to], parameter[name[data], name[description], name[self]]]
return[call[name[dot], parameter[name[self].invM_, name[rgb_]]]] | keyword[def] identifier[to_xyz100] ( identifier[self] , identifier[data] , identifier[description] ):
literal[string]
identifier[rgb_] = identifier[compute_to] ( identifier[data] , identifier[description] , identifier[self] )
keyword[return] identifier[dot] ( identifier[self] . identifier[invM_] , identifier[rgb_] ) | def to_xyz100(self, data, description):
"""Input: J or Q; C, M or s; H or h
"""
# Steps 1-5
rgb_ = compute_to(data, description, self)
# Step 6: Calculate RC, GC and BC
# rgb_c = dot(self.M_cat02, solve(self.M_hpe, rgb_))
#
# Step 7: Calculate R, G and B
# rgb = (rgb_c.T / self.D_RGB).T
#
# Step 8: Calculate X, Y and Z
# xyz = solve(self.M_cat02, rgb)
return dot(self.invM_, rgb_) |
def pointer_gate(num_qubits, U):
"""
Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the
qubit addressed by the pointer qubits interpreted as an unsigned binary
integer.
There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered
N - 1
N - 2
...
N - P
are those reserved to represent the pointer. The first N - P qubits
are the qubits which the one-qubit gate U can act on.
"""
ptr_bits = int(floor(np.log2(num_qubits)))
data_bits = num_qubits - ptr_bits
ptr_state = 0
assert ptr_bits > 0
program = pq.Program()
program.defgate("CU", controlled(ptr_bits, U))
for _, target_qubit, changed in gray(ptr_bits):
if changed is None:
for ptr_qubit in range(num_qubits - ptr_bits, num_qubits):
program.inst(X(ptr_qubit))
ptr_state ^= 1 << (ptr_qubit - data_bits)
else:
program.inst(X(data_bits + changed))
ptr_state ^= 1 << changed
if target_qubit < data_bits:
control_qubits = tuple(data_bits + i for i in range(ptr_bits))
program.inst(("CU",) + control_qubits + (target_qubit,))
fixup(program, data_bits, ptr_bits, ptr_state)
return program | def function[pointer_gate, parameter[num_qubits, U]]:
constant[
Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the
qubit addressed by the pointer qubits interpreted as an unsigned binary
integer.
There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered
N - 1
N - 2
...
N - P
are those reserved to represent the pointer. The first N - P qubits
are the qubits which the one-qubit gate U can act on.
]
variable[ptr_bits] assign[=] call[name[int], parameter[call[name[floor], parameter[call[name[np].log2, parameter[name[num_qubits]]]]]]]
variable[data_bits] assign[=] binary_operation[name[num_qubits] - name[ptr_bits]]
variable[ptr_state] assign[=] constant[0]
assert[compare[name[ptr_bits] greater[>] constant[0]]]
variable[program] assign[=] call[name[pq].Program, parameter[]]
call[name[program].defgate, parameter[constant[CU], call[name[controlled], parameter[name[ptr_bits], name[U]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1c5ab90>, <ast.Name object at 0x7da1b1c5be50>, <ast.Name object at 0x7da1b1c596c0>]]] in starred[call[name[gray], parameter[name[ptr_bits]]]] begin[:]
if compare[name[changed] is constant[None]] begin[:]
for taget[name[ptr_qubit]] in starred[call[name[range], parameter[binary_operation[name[num_qubits] - name[ptr_bits]], name[num_qubits]]]] begin[:]
call[name[program].inst, parameter[call[name[X], parameter[name[ptr_qubit]]]]]
<ast.AugAssign object at 0x7da1b1c59c00>
if compare[name[target_qubit] less[<] name[data_bits]] begin[:]
variable[control_qubits] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1bc9b70>]]
call[name[program].inst, parameter[binary_operation[binary_operation[tuple[[<ast.Constant object at 0x7da1b1bcaa10>]] + name[control_qubits]] + tuple[[<ast.Name object at 0x7da1b1bca8c0>]]]]]
call[name[fixup], parameter[name[program], name[data_bits], name[ptr_bits], name[ptr_state]]]
return[name[program]] | keyword[def] identifier[pointer_gate] ( identifier[num_qubits] , identifier[U] ):
literal[string]
identifier[ptr_bits] = identifier[int] ( identifier[floor] ( identifier[np] . identifier[log2] ( identifier[num_qubits] )))
identifier[data_bits] = identifier[num_qubits] - identifier[ptr_bits]
identifier[ptr_state] = literal[int]
keyword[assert] identifier[ptr_bits] > literal[int]
identifier[program] = identifier[pq] . identifier[Program] ()
identifier[program] . identifier[defgate] ( literal[string] , identifier[controlled] ( identifier[ptr_bits] , identifier[U] ))
keyword[for] identifier[_] , identifier[target_qubit] , identifier[changed] keyword[in] identifier[gray] ( identifier[ptr_bits] ):
keyword[if] identifier[changed] keyword[is] keyword[None] :
keyword[for] identifier[ptr_qubit] keyword[in] identifier[range] ( identifier[num_qubits] - identifier[ptr_bits] , identifier[num_qubits] ):
identifier[program] . identifier[inst] ( identifier[X] ( identifier[ptr_qubit] ))
identifier[ptr_state] ^= literal[int] <<( identifier[ptr_qubit] - identifier[data_bits] )
keyword[else] :
identifier[program] . identifier[inst] ( identifier[X] ( identifier[data_bits] + identifier[changed] ))
identifier[ptr_state] ^= literal[int] << identifier[changed]
keyword[if] identifier[target_qubit] < identifier[data_bits] :
identifier[control_qubits] = identifier[tuple] ( identifier[data_bits] + identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ptr_bits] ))
identifier[program] . identifier[inst] (( literal[string] ,)+ identifier[control_qubits] +( identifier[target_qubit] ,))
identifier[fixup] ( identifier[program] , identifier[data_bits] , identifier[ptr_bits] , identifier[ptr_state] )
keyword[return] identifier[program] | def pointer_gate(num_qubits, U):
"""
Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the
qubit addressed by the pointer qubits interpreted as an unsigned binary
integer.
There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered
N - 1
N - 2
...
N - P
are those reserved to represent the pointer. The first N - P qubits
are the qubits which the one-qubit gate U can act on.
"""
ptr_bits = int(floor(np.log2(num_qubits)))
data_bits = num_qubits - ptr_bits
ptr_state = 0
assert ptr_bits > 0
program = pq.Program()
program.defgate('CU', controlled(ptr_bits, U))
for (_, target_qubit, changed) in gray(ptr_bits):
if changed is None:
for ptr_qubit in range(num_qubits - ptr_bits, num_qubits):
program.inst(X(ptr_qubit))
ptr_state ^= 1 << ptr_qubit - data_bits # depends on [control=['for'], data=['ptr_qubit']] # depends on [control=['if'], data=[]]
else:
program.inst(X(data_bits + changed))
ptr_state ^= 1 << changed
if target_qubit < data_bits:
control_qubits = tuple((data_bits + i for i in range(ptr_bits)))
program.inst(('CU',) + control_qubits + (target_qubit,)) # depends on [control=['if'], data=['target_qubit', 'data_bits']] # depends on [control=['for'], data=[]]
fixup(program, data_bits, ptr_bits, ptr_state)
return program |
def cli(ctx, feature_id, organism="", sequence=""):
"""Flip the strand of a feature
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.flip_strand(feature_id, organism=organism, sequence=sequence) | def function[cli, parameter[ctx, feature_id, organism, sequence]]:
constant[Flip the strand of a feature
Output:
A standard apollo feature dictionary ({"features": [{...}]})
]
return[call[name[ctx].gi.annotations.flip_strand, parameter[name[feature_id]]]] | keyword[def] identifier[cli] ( identifier[ctx] , identifier[feature_id] , identifier[organism] = literal[string] , identifier[sequence] = literal[string] ):
literal[string]
keyword[return] identifier[ctx] . identifier[gi] . identifier[annotations] . identifier[flip_strand] ( identifier[feature_id] , identifier[organism] = identifier[organism] , identifier[sequence] = identifier[sequence] ) | def cli(ctx, feature_id, organism='', sequence=''):
"""Flip the strand of a feature
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.flip_strand(feature_id, organism=organism, sequence=sequence) |
def get_solr_assocs_url(self, use_amigo=False):
"""
Return solr URL to be used for assocation (enhanced triple) queries
A solr assocs URL is used to query triple-patterns in Solr, ie subject-relation-object
There are two possible schemas: Monarch and AmiGO. The AmiGO schema is used for
querying the GO and Planteome Golr instances
"""
url = self.endpoint_url(self.solr_assocs)
if use_amigo:
url = self.endpoint_url(self.amigo_solr_assocs)
return url | def function[get_solr_assocs_url, parameter[self, use_amigo]]:
constant[
Return solr URL to be used for assocation (enhanced triple) queries
A solr assocs URL is used to query triple-patterns in Solr, ie subject-relation-object
There are two possible schemas: Monarch and AmiGO. The AmiGO schema is used for
querying the GO and Planteome Golr instances
]
variable[url] assign[=] call[name[self].endpoint_url, parameter[name[self].solr_assocs]]
if name[use_amigo] begin[:]
variable[url] assign[=] call[name[self].endpoint_url, parameter[name[self].amigo_solr_assocs]]
return[name[url]] | keyword[def] identifier[get_solr_assocs_url] ( identifier[self] , identifier[use_amigo] = keyword[False] ):
literal[string]
identifier[url] = identifier[self] . identifier[endpoint_url] ( identifier[self] . identifier[solr_assocs] )
keyword[if] identifier[use_amigo] :
identifier[url] = identifier[self] . identifier[endpoint_url] ( identifier[self] . identifier[amigo_solr_assocs] )
keyword[return] identifier[url] | def get_solr_assocs_url(self, use_amigo=False):
"""
Return solr URL to be used for assocation (enhanced triple) queries
A solr assocs URL is used to query triple-patterns in Solr, ie subject-relation-object
There are two possible schemas: Monarch and AmiGO. The AmiGO schema is used for
querying the GO and Planteome Golr instances
"""
url = self.endpoint_url(self.solr_assocs)
if use_amigo:
url = self.endpoint_url(self.amigo_solr_assocs) # depends on [control=['if'], data=[]]
return url |
def parameters(self):
""" Get parameters of transform """
libfn = utils.get_lib_fn('getTransformParameters%s'%self._libsuffix)
return np.asarray(libfn(self.pointer), order='F') | def function[parameters, parameter[self]]:
constant[ Get parameters of transform ]
variable[libfn] assign[=] call[name[utils].get_lib_fn, parameter[binary_operation[constant[getTransformParameters%s] <ast.Mod object at 0x7da2590d6920> name[self]._libsuffix]]]
return[call[name[np].asarray, parameter[call[name[libfn], parameter[name[self].pointer]]]]] | keyword[def] identifier[parameters] ( identifier[self] ):
literal[string]
identifier[libfn] = identifier[utils] . identifier[get_lib_fn] ( literal[string] % identifier[self] . identifier[_libsuffix] )
keyword[return] identifier[np] . identifier[asarray] ( identifier[libfn] ( identifier[self] . identifier[pointer] ), identifier[order] = literal[string] ) | def parameters(self):
""" Get parameters of transform """
libfn = utils.get_lib_fn('getTransformParameters%s' % self._libsuffix)
return np.asarray(libfn(self.pointer), order='F') |
def running_objects(self):
"""Return the objects associated with this workflow."""
return [obj for obj in self.database_objects
if obj.status in [obj.known_statuses.RUNNING]] | def function[running_objects, parameter[self]]:
constant[Return the objects associated with this workflow.]
return[<ast.ListComp object at 0x7da18f810df0>] | keyword[def] identifier[running_objects] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[obj] keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[database_objects]
keyword[if] identifier[obj] . identifier[status] keyword[in] [ identifier[obj] . identifier[known_statuses] . identifier[RUNNING] ]] | def running_objects(self):
"""Return the objects associated with this workflow."""
return [obj for obj in self.database_objects if obj.status in [obj.known_statuses.RUNNING]] |
def _set_ip_config(self, v, load=False):
"""
Setter method for ip_config, mapped from YANG variable /interface/gigabitethernet/ip/ip_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'phy-intf-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'phy-intf-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)""",
})
self.__ip_config = t
if hasattr(self, '_set'):
self._set() | def function[_set_ip_config, parameter[self, v, load]]:
constant[
Setter method for ip_config, mapped from YANG variable /interface/gigabitethernet/ip/ip_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_config() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f00e620>
name[self].__ip_config assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_ip_config] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[ip_config] . identifier[ip_config] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__ip_config] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_ip_config(self, v, load=False):
"""
Setter method for ip_config, mapped from YANG variable /interface/gigabitethernet/ip/ip_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_config() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=ip_config.ip_config, is_container='container', presence=False, yang_name='ip-config', rest_name='', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'phy-intf-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'ip_config must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=ip_config.ip_config, is_container=\'container\', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-drop-node-name\': None, u\'callpoint\': u\'phy-intf-ip-cfg-cp\', u\'sort-priority\': u\'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG\'}}, namespace=\'urn:brocade.com:mgmt:brocade-ip-config\', defining_module=\'brocade-ip-config\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__ip_config = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def download_file(url, path=None, clobber=False):
"""
thanks to: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
path : str
local path to download to.
"""
if path is None:
local_filename = os.path.join(directory, url.split('/')[-1])
else:
local_filename = path
if os.path.exists(local_filename) and not clobber:
logging.info('{} exists; not downloading.'.format(local_filename))
return local_filename
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
#f.flush() commented by recommendation from J.F.Sebastian
return local_filename | def function[download_file, parameter[url, path, clobber]]:
constant[
thanks to: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
path : str
local path to download to.
]
if compare[name[path] is constant[None]] begin[:]
variable[local_filename] assign[=] call[name[os].path.join, parameter[name[directory], call[call[name[url].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da2046224a0>]]]
if <ast.BoolOp object at 0x7da204623040> begin[:]
call[name[logging].info, parameter[call[constant[{} exists; not downloading.].format, parameter[name[local_filename]]]]]
return[name[local_filename]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
with call[name[open], parameter[name[local_filename], constant[wb]]] begin[:]
for taget[name[chunk]] in starred[call[name[r].iter_content, parameter[]]] begin[:]
if name[chunk] begin[:]
call[name[f].write, parameter[name[chunk]]]
return[name[local_filename]] | keyword[def] identifier[download_file] ( identifier[url] , identifier[path] = keyword[None] , identifier[clobber] = keyword[False] ):
literal[string]
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[local_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[url] . identifier[split] ( literal[string] )[- literal[int] ])
keyword[else] :
identifier[local_filename] = identifier[path]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[local_filename] ) keyword[and] keyword[not] identifier[clobber] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[local_filename] ))
keyword[return] identifier[local_filename]
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[stream] = keyword[True] )
keyword[with] identifier[open] ( identifier[local_filename] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[chunk] keyword[in] identifier[r] . identifier[iter_content] ( identifier[chunk_size] = literal[int] ):
keyword[if] identifier[chunk] :
identifier[f] . identifier[write] ( identifier[chunk] )
keyword[return] identifier[local_filename] | def download_file(url, path=None, clobber=False):
"""
thanks to: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
path : str
local path to download to.
"""
if path is None:
local_filename = os.path.join(directory, url.split('/')[-1]) # depends on [control=['if'], data=[]]
else:
local_filename = path
if os.path.exists(local_filename) and (not clobber):
logging.info('{} exists; not downloading.'.format(local_filename))
return local_filename # depends on [control=['if'], data=[]]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['f']]
#f.flush() commented by recommendation from J.F.Sebastian
return local_filename |
def get_layout(self, name):
    '''
    Returns the layout with the given name.

    Looks up the initial layout JSON data by name, then follows the
    parent chain recursively, and finally squashes the chain of layout
    files into a single Layout.

    @param name: Name of the layout to retrieve
    @return: Squashed Layout object
    @raises KeyError: If no layout with the given name is known
    @raises UnknownLayoutPathException: If a parent layout file cannot be located
    '''
    layout_chain = []

    # Retrieve initial layout file
    try:
        json_data = self.json_files[self.layout_names[name]]
    except KeyError:
        log.error('Could not find layout: %s', name)
        log.error('Layouts path: %s', self.layout_path)
        raise
    layout_chain.append(Layout(name, json_data))

    # Recursively locate parent layout files
    parent = layout_chain[-1].parent()
    while parent is not None:
        # Find the file path whose normalized form contains the parent
        # reference.  Normalizing the parent once outside the loop avoids
        # recomputing it per candidate path.
        parent_path = None
        norm_parent = os.path.normcase(os.path.normpath(parent))
        for path in self.json_file_paths:
            if norm_parent in os.path.normcase(path):
                parent_path = path

        # Make sure a path was found
        if parent_path is None:
            # BUGFIX: report the parent reference we failed to resolve;
            # the original formatted parent_path, which is always None
            # on this branch.
            raise UnknownLayoutPathException('Could not find: {}'.format(parent))

        # Build layout for parent
        json_data = self.json_files[parent_path]
        layout_chain.append(Layout(parent_path, json_data))

        # Check parent of parent
        parent = layout_chain[-1].parent()

    # Squash layout files into a single effective layout
    layout = self.squash_layouts(layout_chain)
    return layout
constant[
Returns the layout with the given name
]
variable[layout_chain] assign[=] list[[]]
<ast.Try object at 0x7da1b1139fc0>
call[name[layout_chain].append, parameter[call[name[Layout], parameter[name[name], name[json_data]]]]]
variable[parent] assign[=] call[call[name[layout_chain]][<ast.UnaryOp object at 0x7da1b113ad10>].parent, parameter[]]
while compare[name[parent] is_not constant[None]] begin[:]
variable[parent_path] assign[=] constant[None]
for taget[name[path]] in starred[name[self].json_file_paths] begin[:]
if compare[call[name[os].path.normcase, parameter[call[name[os].path.normpath, parameter[name[parent]]]]] in call[name[os].path.normcase, parameter[name[path]]]] begin[:]
variable[parent_path] assign[=] name[path]
if compare[name[parent_path] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b113ab90>
variable[json_data] assign[=] call[name[self].json_files][name[parent_path]]
call[name[layout_chain].append, parameter[call[name[Layout], parameter[name[parent_path], name[json_data]]]]]
variable[parent] assign[=] call[call[name[layout_chain]][<ast.UnaryOp object at 0x7da1b11a1c90>].parent, parameter[]]
variable[layout] assign[=] call[name[self].squash_layouts, parameter[name[layout_chain]]]
return[name[layout]] | keyword[def] identifier[get_layout] ( identifier[self] , identifier[name] ):
literal[string]
identifier[layout_chain] =[]
keyword[try] :
identifier[json_data] = identifier[self] . identifier[json_files] [ identifier[self] . identifier[layout_names] [ identifier[name] ]]
keyword[except] identifier[KeyError] :
identifier[log] . identifier[error] ( literal[string] , identifier[name] )
identifier[log] . identifier[error] ( literal[string] , identifier[self] . identifier[layout_path] )
keyword[raise]
identifier[layout_chain] . identifier[append] ( identifier[Layout] ( identifier[name] , identifier[json_data] ))
identifier[parent] = identifier[layout_chain] [- literal[int] ]. identifier[parent] ()
keyword[while] identifier[parent] keyword[is] keyword[not] keyword[None] :
identifier[parent_path] = keyword[None]
keyword[for] identifier[path] keyword[in] identifier[self] . identifier[json_file_paths] :
keyword[if] identifier[os] . identifier[path] . identifier[normcase] ( identifier[os] . identifier[path] . identifier[normpath] ( identifier[parent] )) keyword[in] identifier[os] . identifier[path] . identifier[normcase] ( identifier[path] ):
identifier[parent_path] = identifier[path]
keyword[if] identifier[parent_path] keyword[is] keyword[None] :
keyword[raise] identifier[UnknownLayoutPathException] ( literal[string] . identifier[format] ( identifier[parent_path] ))
identifier[json_data] = identifier[self] . identifier[json_files] [ identifier[parent_path] ]
identifier[layout_chain] . identifier[append] ( identifier[Layout] ( identifier[parent_path] , identifier[json_data] ))
identifier[parent] = identifier[layout_chain] [- literal[int] ]. identifier[parent] ()
identifier[layout] = identifier[self] . identifier[squash_layouts] ( identifier[layout_chain] )
keyword[return] identifier[layout] | def get_layout(self, name):
"""
Returns the layout with the given name
"""
layout_chain = []
# Retrieve initial layout file
try:
json_data = self.json_files[self.layout_names[name]] # depends on [control=['try'], data=[]]
except KeyError:
log.error('Could not find layout: %s', name)
log.error('Layouts path: %s', self.layout_path)
raise # depends on [control=['except'], data=[]]
layout_chain.append(Layout(name, json_data))
# Recursively locate parent layout files
parent = layout_chain[-1].parent()
while parent is not None:
# Find the parent
parent_path = None
for path in self.json_file_paths:
if os.path.normcase(os.path.normpath(parent)) in os.path.normcase(path):
parent_path = path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
# Make sure a path was found
if parent_path is None:
raise UnknownLayoutPathException('Could not find: {}'.format(parent_path)) # depends on [control=['if'], data=['parent_path']]
# Build layout for parent
json_data = self.json_files[parent_path]
layout_chain.append(Layout(parent_path, json_data))
# Check parent of parent
parent = layout_chain[-1].parent() # depends on [control=['while'], data=['parent']]
# Squash layout files
layout = self.squash_layouts(layout_chain)
return layout |
def task(arg = None):
    """ Task decorator

    Usable both bare (``@task``) and with a description argument
    (``@task('some description')``).  The wrapped function prints an
    indented, colored header before running, and is tagged with the
    metadata attributes (``_task``, ``_spec``, ``_desc``) consumed by
    the task runner.
    """
    # make sure stdout is patched so indent_level bookkeeping works
    if not hasattr(sys.stdout, 'indent_level'):
        sys.stdout = IndentedFile(sys.stdout)

    def decorator(base):
        info = ': ' + arg if type(arg) is str else ''
        header = fore.green('** ' + fore.cyan(base.__name__) + info)

        def func(*args, **kwargs):
            sys.stdout.indent_level += 1
            puts(header)
            base(*args, **kwargs)
            sys.stdout.indent_level -= 1

        # BUGFIX: inspect.getargspec/formatargspec were removed in
        # Python 3.11 (and getargspec never supported keyword-only
        # arguments).  inspect.signature handles all parameter kinds;
        # joining the parameters directly avoids the surrounding
        # parentheses and any return annotation.
        sig = inspect.signature(base)
        params = ', '.join(str(p) for p in sig.parameters.values())
        specformat = fore.cyan('%s') + ' ' + fore.white('%s')
        func._task = True
        func._spec = specformat % (base.__name__, params)
        # raw string: '\s' in a plain literal is an invalid escape
        # sequence (DeprecationWarning, SyntaxWarning in 3.12+)
        func._desc = re.sub(r'\s+', ' ', inspect.getdoc(base) or '')
        return func

    # bare @task: arg is the decorated function itself
    if isinstance(arg, types.FunctionType):
        return decorator(arg)
    else:
        return decorator
constant[ Task decorator ]
if <ast.UnaryOp object at 0x7da20e74beb0> begin[:]
name[sys].stdout assign[=] call[name[IndentedFile], parameter[name[sys].stdout]]
def function[decorator, parameter[base]]:
variable[info] assign[=] <ast.IfExp object at 0x7da20e749720>
variable[header] assign[=] call[name[fore].green, parameter[binary_operation[binary_operation[constant[** ] + call[name[fore].cyan, parameter[name[base].__name__]]] + name[info]]]]
def function[func, parameter[]]:
<ast.AugAssign object at 0x7da20c796890>
call[name[puts], parameter[name[header]]]
call[name[base], parameter[<ast.Starred object at 0x7da20c794520>]]
<ast.AugAssign object at 0x7da20c7951b0>
variable[params] assign[=] call[call[name[inspect].formatargspec, parameter[<ast.Starred object at 0x7da20c794e80>]]][<ast.Slice object at 0x7da20c795180>]
variable[specformat] assign[=] binary_operation[binary_operation[call[name[fore].cyan, parameter[constant[%s]]] + constant[ ]] + call[name[fore].white, parameter[constant[%s]]]]
name[func]._task assign[=] constant[True]
name[func]._spec assign[=] binary_operation[name[specformat] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c7952a0>, <ast.Name object at 0x7da20c796440>]]]
name[func]._desc assign[=] call[name[re].sub, parameter[constant[\s+], constant[ ], <ast.BoolOp object at 0x7da20c794d30>]]
return[name[func]]
if compare[call[name[type], parameter[name[arg]]] equal[==] name[types].FunctionType] begin[:]
return[call[name[decorator], parameter[name[arg]]]] | keyword[def] identifier[task] ( identifier[arg] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[sys] . identifier[stdout] , literal[string] ):
identifier[sys] . identifier[stdout] = identifier[IndentedFile] ( identifier[sys] . identifier[stdout] )
keyword[def] identifier[decorator] ( identifier[base] ):
identifier[info] = literal[string] + identifier[arg] keyword[if] identifier[type] ( identifier[arg] ) keyword[is] identifier[str] keyword[else] literal[string]
identifier[header] = identifier[fore] . identifier[green] ( literal[string] + identifier[fore] . identifier[cyan] ( identifier[base] . identifier[__name__] )+ identifier[info] )
keyword[def] identifier[func] (* identifier[args] ,** identifier[kwargs] ):
identifier[sys] . identifier[stdout] . identifier[indent_level] += literal[int]
identifier[puts] ( identifier[header] )
identifier[base] (* identifier[args] ,** identifier[kwargs] )
identifier[sys] . identifier[stdout] . identifier[indent_level] -= literal[int]
identifier[params] = identifier[inspect] . identifier[formatargspec] (* identifier[inspect] . identifier[getargspec] ( identifier[base] ))[ literal[int] :- literal[int] ]
identifier[specformat] = identifier[fore] . identifier[cyan] ( literal[string] )+ literal[string] + identifier[fore] . identifier[white] ( literal[string] )
identifier[func] . identifier[_task] = keyword[True]
identifier[func] . identifier[_spec] = identifier[specformat] %( identifier[base] . identifier[__name__] , identifier[params] )
identifier[func] . identifier[_desc] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[inspect] . identifier[getdoc] ( identifier[base] ) keyword[or] literal[string] )
keyword[return] identifier[func]
keyword[if] identifier[type] ( identifier[arg] )== identifier[types] . identifier[FunctionType] :
keyword[return] identifier[decorator] ( identifier[arg] )
keyword[else] :
keyword[return] identifier[decorator] | def task(arg=None):
""" Task decorator """ # make sure stdout is patched
if not hasattr(sys.stdout, 'indent_level'):
sys.stdout = IndentedFile(sys.stdout) # depends on [control=['if'], data=[]]
def decorator(base):
info = ': ' + arg if type(arg) is str else ''
header = fore.green('** ' + fore.cyan(base.__name__) + info)
def func(*args, **kwargs):
sys.stdout.indent_level += 1
puts(header)
base(*args, **kwargs)
sys.stdout.indent_level -= 1
params = inspect.formatargspec(*inspect.getargspec(base))[1:-1]
specformat = fore.cyan('%s') + ' ' + fore.white('%s')
func._task = True
func._spec = specformat % (base.__name__, params)
func._desc = re.sub('\\s+', ' ', inspect.getdoc(base) or '')
return func
if type(arg) == types.FunctionType:
return decorator(arg) # depends on [control=['if'], data=[]]
else:
return decorator |
def stop(self):
    """Stop session.

    Sends a TEARDOWN request over the transport (when one is open),
    closes the transport, then stops the RTP handler.
    """
    transport = self.transport
    if transport:
        teardown = self.method.TEARDOWN()
        transport.write(teardown.encode())
        transport.close()
    self.rtp.stop()
constant[Stop session.]
if name[self].transport begin[:]
call[name[self].transport.write, parameter[call[call[name[self].method.TEARDOWN, parameter[]].encode, parameter[]]]]
call[name[self].transport.close, parameter[]]
call[name[self].rtp.stop, parameter[]] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[transport] :
identifier[self] . identifier[transport] . identifier[write] ( identifier[self] . identifier[method] . identifier[TEARDOWN] (). identifier[encode] ())
identifier[self] . identifier[transport] . identifier[close] ()
identifier[self] . identifier[rtp] . identifier[stop] () | def stop(self):
"""Stop session."""
if self.transport:
self.transport.write(self.method.TEARDOWN().encode())
self.transport.close() # depends on [control=['if'], data=[]]
self.rtp.stop() |
def _Plot_HorProj_Ves(V, ax=None, Elt='PI', Nstep=_def.TorNTheta,
                      Pdict=_def.TorPd, Idict=_def.TorITord,
                      Bsdict=_def.TorBsTord, Bvdict=_def.TorBvTord,
                      LegDict=_def.TorLegd, indices=False,
                      draw=True, fs=None, wintit=_wintit, Test=True):
    """ Plotting the toroidal projection of a Ves instance

    Parameters
    ----------
    V           A Ves instance
    Elt         A str of elements to plot: 'P' (vessel polygon),
                'I' (sino reference point), 'Bs' (surface barycenter),
                'Bv' (volume barycenter, 'Tor' type only)
    Nstep       An int (the number of points for evaluation of theta by np.linspace)
    ax          A plt.Axes instance (if given) on which to plot, otherwise ('None') a new figure/axes is created
    Tdict       A dictionnary specifying the style of the polygon plot
    LegDict     A dictionnary specifying the style of the legend box (if None => no legend)
    indices     bool, if True annotate each occurrence with its index
    draw        bool, if True redraw the canvas before returning

    Returns
    -------
    ax          The plt.Axes instance on which the plot was performed
    """
    # Optional input validation (skipped when Test=False for speed)
    if Test:
        assert type(Nstep) is int
        # C0, C1, C2 returned by _check_Lax are not used here
        ax, C0, C1, C2 = _check_Lax(ax,n=1)
        assert type(Pdict) is dict, 'Arg Pdict should be a dictionary !'
        assert type(Idict) is dict, 'Arg Idict should be a dictionary !'
        assert type(LegDict) is dict or LegDict is None, 'Arg LegDict should be a dictionary !'

    # Create default horizontal-projection axes if none provided
    if ax is None:
        ax = _def.Plot_LOSProj_DefAxes('Hor', Type=V.Id.Type,
                                       fs=fs, wintit=wintit)

    # Radial extrema of the vessel cross-section (first coordinate used below)
    P1Min = V.dgeom['P1Min']
    P1Max = V.dgeom['P1Max']

    # --- Vessel polygon ('P') ---
    # NOTE(review): Theta is only bound inside the 'Tor' branches below;
    # the 'I'/'Bs'/'Bv' Tor branches further down reference Theta and
    # would raise NameError if 'P' is not in Elt — confirm callers
    # always request 'P' together with those elements.
    if 'P' in Elt:
        if V._InOut=='in':
            if V.Id.Type=='Tor':
                # Full circles at min/max radius, separated by NaN so
                # matplotlib draws them as two disjoint curves
                Theta = np.linspace(0, 2*np.pi, num=Nstep,
                                    endpoint=True, retstep=False)
                lx = np.concatenate((P1Min[0]*np.cos(Theta),np.array([np.nan]),
                                     P1Max[0]*np.cos(Theta)))
                ly = np.concatenate((P1Min[0]*np.sin(Theta),np.array([np.nan]),
                                     P1Max[0]*np.sin(Theta)))
            elif V.Id.Type=='Lin':
                # Rectangle bounded by the linear limits and radial extrema
                lx = np.array([V.Lim[0,0],V.Lim[0,1],V.Lim[0,1],
                               V.Lim[0,0],V.Lim[0,0]])
                ly = np.array([P1Min[0],P1Min[0],P1Max[0],P1Max[0],P1Min[0]])
            ax.plot(lx,ly,label=V.Id.NameLTX,**Pdict)
        elif V._InOut=='out':
            if V.Id.Type=='Tor':
                Theta = np.linspace(0, 2*np.pi, num=Nstep,
                                    endpoint=True, retstep=False)
                if V.noccur==0:
                    # Single continuous annulus: outer circle traversed
                    # backwards so the patch is a closed ring
                    lx = np.concatenate((P1Min[0]*np.cos(Theta),
                                         P1Max[0]*np.cos(Theta[::-1])))
                    ly = np.concatenate((P1Min[0]*np.sin(Theta),
                                         P1Max[0]*np.sin(Theta[::-1])))
                    Lp = [mPolygon(np.array([lx,ly]).T, closed=True,
                                   label=V.Id.NameLTX, **Pdict)]
                else:
                    # One annular wedge per toroidal occurrence
                    # (limits converted from radians to degrees)
                    Lp = [mWedge((0,0), P1Max[0],
                                 V.Lim[ii][0]*180./np.pi,
                                 V.Lim[ii][1]*180./np.pi,
                                 width=P1Max[0]-P1Min[0],
                                 label=V.Id.NameLTX, **Pdict)
                          for ii in range(0,len(V.Lim))]
            elif V.Id.Type=='Lin':
                # One rectangle per occurrence along the linear axis
                ly = np.array([P1Min[0],P1Min[0],
                               P1Max[0],P1Max[0],P1Min[0]])
                Lp = []
                for ii in range(0,len(V.Lim)):
                    lx = np.array([V.Lim[ii][0],V.Lim[ii][1],
                                   V.Lim[ii][1],V.Lim[ii][0],
                                   V.Lim[ii][0]])
                    Lp.append(mPolygon(np.array([lx,ly]).T,
                                       closed=True, label=V.Id.NameLTX,
                                       **Pdict))
            for pp in Lp:
                ax.add_patch(pp)
        else:
            msg = "Unknown self._InOut !"
            raise Exception(msg)

    # --- Sino reference point circle / line ('I') ---
    if 'I' in Elt:
        if V.Id.Type=='Tor':
            lx = V.dsino['RefPt'][0]*np.cos(Theta)
            ly = V.dsino['RefPt'][0]*np.sin(Theta)
        elif V.Id.Type=='Lin':
            lx = np.array([np.min(V.Lim),np.max(V.Lim)])
            ly = V.dsino['RefPt'][0]*np.ones((2,))
        ax.plot(lx,ly,label=V.Id.NameLTX+" Imp",**Idict)
    # --- Surface barycenter ('Bs') ---
    if 'Bs' in Elt:
        if V.Id.Type=='Tor':
            lx = V.dgeom['BaryS'][0]*np.cos(Theta)
            ly = V.dgeom['BaryS'][0]*np.sin(Theta)
        elif V.Id.Type=='Lin':
            lx = np.array([np.min(V.Lim),np.max(V.Lim)])
            ly = V.dgeom['BaryS'][0]*np.ones((2,))
        ax.plot(lx,ly,label=V.Id.NameLTX+" Bs", **Bsdict)
    # --- Volume barycenter ('Bv'), toroidal geometries only ---
    # NOTE(review): checks V.Type here while every other branch checks
    # V.Id.Type — presumably equivalent aliases; verify.
    if 'Bv' in Elt and V.Type=='Tor':
        lx = V.dgeom['BaryV'][0]*np.cos(Theta)
        ly = V.dgeom['BaryV'][0]*np.sin(Theta)
        ax.plot(lx,ly,label=V.Id.NameLTX+" Bv", **Bvdict)

    # --- Occurrence index annotations (only meaningful when V has
    # several occurrences) ---
    if indices and V.noccur>1:
        if V.Id.Type=='Tor':
            for ii in range(0,V.noccur):
                # Place the label at max radius, mid-angle of the
                # occurrence, nudged slightly outwards
                R, theta = V.dgeom['P1Max'][0], np.mean(V.Lim[ii])
                X, Y = R*np.cos(theta), R*np.sin(theta)
                ax.annotate(r"{0}".format(ii), size=10,
                            xy = (X,Y),
                            xytext = (X+0.02*np.cos(theta),
                                      Y+0.02*np.sin(theta)),
                            horizontalalignment='center',
                            verticalalignment='center')
        elif V.Id.Type=='Lin':
            for ii in range(0,V.noccur):
                X, Y = np.mean(V.Lim[ii]), V.dgeom['P1Max'][0]
                ax.annotate(r"{0}".format(ii), size=10,
                            xy = (X,Y),
                            xytext = (X, Y+0.02),
                            horizontalalignment='center',
                            verticalalignment='center')
    if not LegDict is None:
        ax.legend(**LegDict)
    # Rescale and redraw only when requested (allows batched plotting)
    if draw:
        ax.relim()
        ax.autoscale_view()
        ax.figure.canvas.draw()
    return ax
constant[ Plotting the toroidal projection of a Ves instance
Parameters
----------
V A Ves instance
Nstep An int (the number of points for evaluation of theta by np.linspace)
ax A plt.Axes instance (if given) on which to plot, otherwise ('None') a new figure/axes is created
Tdict A dictionnary specifying the style of the polygon plot
LegDict A dictionnary specifying the style of the legend box (if None => no legend)
Returns
-------
ax The plt.Axes instance on which the plot was performed
]
if name[Test] begin[:]
assert[compare[call[name[type], parameter[name[Nstep]]] is name[int]]]
<ast.Tuple object at 0x7da18f58d4e0> assign[=] call[name[_check_Lax], parameter[name[ax]]]
assert[compare[call[name[type], parameter[name[Pdict]]] is name[dict]]]
assert[compare[call[name[type], parameter[name[Idict]]] is name[dict]]]
assert[<ast.BoolOp object at 0x7da18f58d990>]
if compare[name[ax] is constant[None]] begin[:]
variable[ax] assign[=] call[name[_def].Plot_LOSProj_DefAxes, parameter[constant[Hor]]]
variable[P1Min] assign[=] call[name[V].dgeom][constant[P1Min]]
variable[P1Max] assign[=] call[name[V].dgeom][constant[P1Max]]
if compare[constant[P] in name[Elt]] begin[:]
if compare[name[V]._InOut equal[==] constant[in]] begin[:]
if compare[name[V].Id.Type equal[==] constant[Tor]] begin[:]
variable[Theta] assign[=] call[name[np].linspace, parameter[constant[0], binary_operation[constant[2] * name[np].pi]]]
variable[lx] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.BinOp object at 0x7da18f58fe20>, <ast.Call object at 0x7da18f58e590>, <ast.BinOp object at 0x7da18f58ded0>]]]]
variable[ly] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.BinOp object at 0x7da18f58ca60>, <ast.Call object at 0x7da18f58e620>, <ast.BinOp object at 0x7da18f58cee0>]]]]
call[name[ax].plot, parameter[name[lx], name[ly]]]
if compare[constant[I] in name[Elt]] begin[:]
if compare[name[V].Id.Type equal[==] constant[Tor]] begin[:]
variable[lx] assign[=] binary_operation[call[call[name[V].dsino][constant[RefPt]]][constant[0]] * call[name[np].cos, parameter[name[Theta]]]]
variable[ly] assign[=] binary_operation[call[call[name[V].dsino][constant[RefPt]]][constant[0]] * call[name[np].sin, parameter[name[Theta]]]]
call[name[ax].plot, parameter[name[lx], name[ly]]]
if compare[constant[Bs] in name[Elt]] begin[:]
if compare[name[V].Id.Type equal[==] constant[Tor]] begin[:]
variable[lx] assign[=] binary_operation[call[call[name[V].dgeom][constant[BaryS]]][constant[0]] * call[name[np].cos, parameter[name[Theta]]]]
variable[ly] assign[=] binary_operation[call[call[name[V].dgeom][constant[BaryS]]][constant[0]] * call[name[np].sin, parameter[name[Theta]]]]
call[name[ax].plot, parameter[name[lx], name[ly]]]
if <ast.BoolOp object at 0x7da20c7caf80> begin[:]
variable[lx] assign[=] binary_operation[call[call[name[V].dgeom][constant[BaryV]]][constant[0]] * call[name[np].cos, parameter[name[Theta]]]]
variable[ly] assign[=] binary_operation[call[call[name[V].dgeom][constant[BaryV]]][constant[0]] * call[name[np].sin, parameter[name[Theta]]]]
call[name[ax].plot, parameter[name[lx], name[ly]]]
if <ast.BoolOp object at 0x7da20c7950c0> begin[:]
if compare[name[V].Id.Type equal[==] constant[Tor]] begin[:]
for taget[name[ii]] in starred[call[name[range], parameter[constant[0], name[V].noccur]]] begin[:]
<ast.Tuple object at 0x7da20c7959c0> assign[=] tuple[[<ast.Subscript object at 0x7da20c795480>, <ast.Call object at 0x7da20c796290>]]
<ast.Tuple object at 0x7da20c795e10> assign[=] tuple[[<ast.BinOp object at 0x7da20c7960b0>, <ast.BinOp object at 0x7da20c794130>]]
call[name[ax].annotate, parameter[call[constant[{0}].format, parameter[name[ii]]]]]
if <ast.UnaryOp object at 0x7da20c794940> begin[:]
call[name[ax].legend, parameter[]]
if name[draw] begin[:]
call[name[ax].relim, parameter[]]
call[name[ax].autoscale_view, parameter[]]
call[name[ax].figure.canvas.draw, parameter[]]
return[name[ax]] | keyword[def] identifier[_Plot_HorProj_Ves] ( identifier[V] , identifier[ax] = keyword[None] , identifier[Elt] = literal[string] , identifier[Nstep] = identifier[_def] . identifier[TorNTheta] ,
identifier[Pdict] = identifier[_def] . identifier[TorPd] , identifier[Idict] = identifier[_def] . identifier[TorITord] ,
identifier[Bsdict] = identifier[_def] . identifier[TorBsTord] , identifier[Bvdict] = identifier[_def] . identifier[TorBvTord] ,
identifier[LegDict] = identifier[_def] . identifier[TorLegd] , identifier[indices] = keyword[False] ,
identifier[draw] = keyword[True] , identifier[fs] = keyword[None] , identifier[wintit] = identifier[_wintit] , identifier[Test] = keyword[True] ):
literal[string]
keyword[if] identifier[Test] :
keyword[assert] identifier[type] ( identifier[Nstep] ) keyword[is] identifier[int]
identifier[ax] , identifier[C0] , identifier[C1] , identifier[C2] = identifier[_check_Lax] ( identifier[ax] , identifier[n] = literal[int] )
keyword[assert] identifier[type] ( identifier[Pdict] ) keyword[is] identifier[dict] , literal[string]
keyword[assert] identifier[type] ( identifier[Idict] ) keyword[is] identifier[dict] , literal[string]
keyword[assert] identifier[type] ( identifier[LegDict] ) keyword[is] identifier[dict] keyword[or] identifier[LegDict] keyword[is] keyword[None] , literal[string]
keyword[if] identifier[ax] keyword[is] keyword[None] :
identifier[ax] = identifier[_def] . identifier[Plot_LOSProj_DefAxes] ( literal[string] , identifier[Type] = identifier[V] . identifier[Id] . identifier[Type] ,
identifier[fs] = identifier[fs] , identifier[wintit] = identifier[wintit] )
identifier[P1Min] = identifier[V] . identifier[dgeom] [ literal[string] ]
identifier[P1Max] = identifier[V] . identifier[dgeom] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[Elt] :
keyword[if] identifier[V] . identifier[_InOut] == literal[string] :
keyword[if] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[Theta] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] * identifier[np] . identifier[pi] , identifier[num] = identifier[Nstep] ,
identifier[endpoint] = keyword[True] , identifier[retstep] = keyword[False] )
identifier[lx] = identifier[np] . identifier[concatenate] (( identifier[P1Min] [ literal[int] ]* identifier[np] . identifier[cos] ( identifier[Theta] ), identifier[np] . identifier[array] ([ identifier[np] . identifier[nan] ]),
identifier[P1Max] [ literal[int] ]* identifier[np] . identifier[cos] ( identifier[Theta] )))
identifier[ly] = identifier[np] . identifier[concatenate] (( identifier[P1Min] [ literal[int] ]* identifier[np] . identifier[sin] ( identifier[Theta] ), identifier[np] . identifier[array] ([ identifier[np] . identifier[nan] ]),
identifier[P1Max] [ literal[int] ]* identifier[np] . identifier[sin] ( identifier[Theta] )))
keyword[elif] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[lx] = identifier[np] . identifier[array] ([ identifier[V] . identifier[Lim] [ literal[int] , literal[int] ], identifier[V] . identifier[Lim] [ literal[int] , literal[int] ], identifier[V] . identifier[Lim] [ literal[int] , literal[int] ],
identifier[V] . identifier[Lim] [ literal[int] , literal[int] ], identifier[V] . identifier[Lim] [ literal[int] , literal[int] ]])
identifier[ly] = identifier[np] . identifier[array] ([ identifier[P1Min] [ literal[int] ], identifier[P1Min] [ literal[int] ], identifier[P1Max] [ literal[int] ], identifier[P1Max] [ literal[int] ], identifier[P1Min] [ literal[int] ]])
identifier[ax] . identifier[plot] ( identifier[lx] , identifier[ly] , identifier[label] = identifier[V] . identifier[Id] . identifier[NameLTX] ,** identifier[Pdict] )
keyword[elif] identifier[V] . identifier[_InOut] == literal[string] :
keyword[if] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[Theta] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] * identifier[np] . identifier[pi] , identifier[num] = identifier[Nstep] ,
identifier[endpoint] = keyword[True] , identifier[retstep] = keyword[False] )
keyword[if] identifier[V] . identifier[noccur] == literal[int] :
identifier[lx] = identifier[np] . identifier[concatenate] (( identifier[P1Min] [ literal[int] ]* identifier[np] . identifier[cos] ( identifier[Theta] ),
identifier[P1Max] [ literal[int] ]* identifier[np] . identifier[cos] ( identifier[Theta] [::- literal[int] ])))
identifier[ly] = identifier[np] . identifier[concatenate] (( identifier[P1Min] [ literal[int] ]* identifier[np] . identifier[sin] ( identifier[Theta] ),
identifier[P1Max] [ literal[int] ]* identifier[np] . identifier[sin] ( identifier[Theta] [::- literal[int] ])))
identifier[Lp] =[ identifier[mPolygon] ( identifier[np] . identifier[array] ([ identifier[lx] , identifier[ly] ]). identifier[T] , identifier[closed] = keyword[True] ,
identifier[label] = identifier[V] . identifier[Id] . identifier[NameLTX] ,** identifier[Pdict] )]
keyword[else] :
identifier[Lp] =[ identifier[mWedge] (( literal[int] , literal[int] ), identifier[P1Max] [ literal[int] ],
identifier[V] . identifier[Lim] [ identifier[ii] ][ literal[int] ]* literal[int] / identifier[np] . identifier[pi] ,
identifier[V] . identifier[Lim] [ identifier[ii] ][ literal[int] ]* literal[int] / identifier[np] . identifier[pi] ,
identifier[width] = identifier[P1Max] [ literal[int] ]- identifier[P1Min] [ literal[int] ],
identifier[label] = identifier[V] . identifier[Id] . identifier[NameLTX] ,** identifier[Pdict] )
keyword[for] identifier[ii] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[V] . identifier[Lim] ))]
keyword[elif] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[ly] = identifier[np] . identifier[array] ([ identifier[P1Min] [ literal[int] ], identifier[P1Min] [ literal[int] ],
identifier[P1Max] [ literal[int] ], identifier[P1Max] [ literal[int] ], identifier[P1Min] [ literal[int] ]])
identifier[Lp] =[]
keyword[for] identifier[ii] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[V] . identifier[Lim] )):
identifier[lx] = identifier[np] . identifier[array] ([ identifier[V] . identifier[Lim] [ identifier[ii] ][ literal[int] ], identifier[V] . identifier[Lim] [ identifier[ii] ][ literal[int] ],
identifier[V] . identifier[Lim] [ identifier[ii] ][ literal[int] ], identifier[V] . identifier[Lim] [ identifier[ii] ][ literal[int] ],
identifier[V] . identifier[Lim] [ identifier[ii] ][ literal[int] ]])
identifier[Lp] . identifier[append] ( identifier[mPolygon] ( identifier[np] . identifier[array] ([ identifier[lx] , identifier[ly] ]). identifier[T] ,
identifier[closed] = keyword[True] , identifier[label] = identifier[V] . identifier[Id] . identifier[NameLTX] ,
** identifier[Pdict] ))
keyword[for] identifier[pp] keyword[in] identifier[Lp] :
identifier[ax] . identifier[add_patch] ( identifier[pp] )
keyword[else] :
identifier[msg] = literal[string]
keyword[raise] identifier[Exception] ( identifier[msg] )
keyword[if] literal[string] keyword[in] identifier[Elt] :
keyword[if] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[lx] = identifier[V] . identifier[dsino] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[cos] ( identifier[Theta] )
identifier[ly] = identifier[V] . identifier[dsino] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[sin] ( identifier[Theta] )
keyword[elif] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[lx] = identifier[np] . identifier[array] ([ identifier[np] . identifier[min] ( identifier[V] . identifier[Lim] ), identifier[np] . identifier[max] ( identifier[V] . identifier[Lim] )])
identifier[ly] = identifier[V] . identifier[dsino] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[ones] (( literal[int] ,))
identifier[ax] . identifier[plot] ( identifier[lx] , identifier[ly] , identifier[label] = identifier[V] . identifier[Id] . identifier[NameLTX] + literal[string] ,** identifier[Idict] )
keyword[if] literal[string] keyword[in] identifier[Elt] :
keyword[if] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[lx] = identifier[V] . identifier[dgeom] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[cos] ( identifier[Theta] )
identifier[ly] = identifier[V] . identifier[dgeom] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[sin] ( identifier[Theta] )
keyword[elif] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
identifier[lx] = identifier[np] . identifier[array] ([ identifier[np] . identifier[min] ( identifier[V] . identifier[Lim] ), identifier[np] . identifier[max] ( identifier[V] . identifier[Lim] )])
identifier[ly] = identifier[V] . identifier[dgeom] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[ones] (( literal[int] ,))
identifier[ax] . identifier[plot] ( identifier[lx] , identifier[ly] , identifier[label] = identifier[V] . identifier[Id] . identifier[NameLTX] + literal[string] ,** identifier[Bsdict] )
keyword[if] literal[string] keyword[in] identifier[Elt] keyword[and] identifier[V] . identifier[Type] == literal[string] :
identifier[lx] = identifier[V] . identifier[dgeom] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[cos] ( identifier[Theta] )
identifier[ly] = identifier[V] . identifier[dgeom] [ literal[string] ][ literal[int] ]* identifier[np] . identifier[sin] ( identifier[Theta] )
identifier[ax] . identifier[plot] ( identifier[lx] , identifier[ly] , identifier[label] = identifier[V] . identifier[Id] . identifier[NameLTX] + literal[string] ,** identifier[Bvdict] )
keyword[if] identifier[indices] keyword[and] identifier[V] . identifier[noccur] > literal[int] :
keyword[if] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
keyword[for] identifier[ii] keyword[in] identifier[range] ( literal[int] , identifier[V] . identifier[noccur] ):
identifier[R] , identifier[theta] = identifier[V] . identifier[dgeom] [ literal[string] ][ literal[int] ], identifier[np] . identifier[mean] ( identifier[V] . identifier[Lim] [ identifier[ii] ])
identifier[X] , identifier[Y] = identifier[R] * identifier[np] . identifier[cos] ( identifier[theta] ), identifier[R] * identifier[np] . identifier[sin] ( identifier[theta] )
identifier[ax] . identifier[annotate] ( literal[string] . identifier[format] ( identifier[ii] ), identifier[size] = literal[int] ,
identifier[xy] =( identifier[X] , identifier[Y] ),
identifier[xytext] =( identifier[X] + literal[int] * identifier[np] . identifier[cos] ( identifier[theta] ),
identifier[Y] + literal[int] * identifier[np] . identifier[sin] ( identifier[theta] )),
identifier[horizontalalignment] = literal[string] ,
identifier[verticalalignment] = literal[string] )
keyword[elif] identifier[V] . identifier[Id] . identifier[Type] == literal[string] :
keyword[for] identifier[ii] keyword[in] identifier[range] ( literal[int] , identifier[V] . identifier[noccur] ):
identifier[X] , identifier[Y] = identifier[np] . identifier[mean] ( identifier[V] . identifier[Lim] [ identifier[ii] ]), identifier[V] . identifier[dgeom] [ literal[string] ][ literal[int] ]
identifier[ax] . identifier[annotate] ( literal[string] . identifier[format] ( identifier[ii] ), identifier[size] = literal[int] ,
identifier[xy] =( identifier[X] , identifier[Y] ),
identifier[xytext] =( identifier[X] , identifier[Y] + literal[int] ),
identifier[horizontalalignment] = literal[string] ,
identifier[verticalalignment] = literal[string] )
keyword[if] keyword[not] identifier[LegDict] keyword[is] keyword[None] :
identifier[ax] . identifier[legend] (** identifier[LegDict] )
keyword[if] identifier[draw] :
identifier[ax] . identifier[relim] ()
identifier[ax] . identifier[autoscale_view] ()
identifier[ax] . identifier[figure] . identifier[canvas] . identifier[draw] ()
keyword[return] identifier[ax] | def _Plot_HorProj_Ves(V, ax=None, Elt='PI', Nstep=_def.TorNTheta, Pdict=_def.TorPd, Idict=_def.TorITord, Bsdict=_def.TorBsTord, Bvdict=_def.TorBvTord, LegDict=_def.TorLegd, indices=False, draw=True, fs=None, wintit=_wintit, Test=True):
""" Plotting the toroidal projection of a Ves instance
Parameters
----------
V A Ves instance
Nstep An int (the number of points for evaluation of theta by np.linspace)
ax A plt.Axes instance (if given) on which to plot, otherwise ('None') a new figure/axes is created
Tdict A dictionnary specifying the style of the polygon plot
LegDict A dictionnary specifying the style of the legend box (if None => no legend)
Returns
-------
ax The plt.Axes instance on which the plot was performed
"""
if Test:
assert type(Nstep) is int
(ax, C0, C1, C2) = _check_Lax(ax, n=1)
assert type(Pdict) is dict, 'Arg Pdict should be a dictionary !'
assert type(Idict) is dict, 'Arg Idict should be a dictionary !'
assert type(LegDict) is dict or LegDict is None, 'Arg LegDict should be a dictionary !' # depends on [control=['if'], data=[]]
if ax is None:
ax = _def.Plot_LOSProj_DefAxes('Hor', Type=V.Id.Type, fs=fs, wintit=wintit) # depends on [control=['if'], data=['ax']]
P1Min = V.dgeom['P1Min']
P1Max = V.dgeom['P1Max']
if 'P' in Elt:
if V._InOut == 'in':
if V.Id.Type == 'Tor':
Theta = np.linspace(0, 2 * np.pi, num=Nstep, endpoint=True, retstep=False)
lx = np.concatenate((P1Min[0] * np.cos(Theta), np.array([np.nan]), P1Max[0] * np.cos(Theta)))
ly = np.concatenate((P1Min[0] * np.sin(Theta), np.array([np.nan]), P1Max[0] * np.sin(Theta))) # depends on [control=['if'], data=[]]
elif V.Id.Type == 'Lin':
lx = np.array([V.Lim[0, 0], V.Lim[0, 1], V.Lim[0, 1], V.Lim[0, 0], V.Lim[0, 0]])
ly = np.array([P1Min[0], P1Min[0], P1Max[0], P1Max[0], P1Min[0]]) # depends on [control=['if'], data=[]]
ax.plot(lx, ly, label=V.Id.NameLTX, **Pdict) # depends on [control=['if'], data=[]]
elif V._InOut == 'out':
if V.Id.Type == 'Tor':
Theta = np.linspace(0, 2 * np.pi, num=Nstep, endpoint=True, retstep=False)
if V.noccur == 0:
lx = np.concatenate((P1Min[0] * np.cos(Theta), P1Max[0] * np.cos(Theta[::-1])))
ly = np.concatenate((P1Min[0] * np.sin(Theta), P1Max[0] * np.sin(Theta[::-1])))
Lp = [mPolygon(np.array([lx, ly]).T, closed=True, label=V.Id.NameLTX, **Pdict)] # depends on [control=['if'], data=[]]
else:
Lp = [mWedge((0, 0), P1Max[0], V.Lim[ii][0] * 180.0 / np.pi, V.Lim[ii][1] * 180.0 / np.pi, width=P1Max[0] - P1Min[0], label=V.Id.NameLTX, **Pdict) for ii in range(0, len(V.Lim))] # depends on [control=['if'], data=[]]
elif V.Id.Type == 'Lin':
ly = np.array([P1Min[0], P1Min[0], P1Max[0], P1Max[0], P1Min[0]])
Lp = []
for ii in range(0, len(V.Lim)):
lx = np.array([V.Lim[ii][0], V.Lim[ii][1], V.Lim[ii][1], V.Lim[ii][0], V.Lim[ii][0]])
Lp.append(mPolygon(np.array([lx, ly]).T, closed=True, label=V.Id.NameLTX, **Pdict)) # depends on [control=['for'], data=['ii']] # depends on [control=['if'], data=[]]
for pp in Lp:
ax.add_patch(pp) # depends on [control=['for'], data=['pp']] # depends on [control=['if'], data=[]]
else:
msg = 'Unknown self._InOut !'
raise Exception(msg) # depends on [control=['if'], data=[]]
if 'I' in Elt:
if V.Id.Type == 'Tor':
lx = V.dsino['RefPt'][0] * np.cos(Theta)
ly = V.dsino['RefPt'][0] * np.sin(Theta) # depends on [control=['if'], data=[]]
elif V.Id.Type == 'Lin':
lx = np.array([np.min(V.Lim), np.max(V.Lim)])
ly = V.dsino['RefPt'][0] * np.ones((2,)) # depends on [control=['if'], data=[]]
ax.plot(lx, ly, label=V.Id.NameLTX + ' Imp', **Idict) # depends on [control=['if'], data=[]]
if 'Bs' in Elt:
if V.Id.Type == 'Tor':
lx = V.dgeom['BaryS'][0] * np.cos(Theta)
ly = V.dgeom['BaryS'][0] * np.sin(Theta) # depends on [control=['if'], data=[]]
elif V.Id.Type == 'Lin':
lx = np.array([np.min(V.Lim), np.max(V.Lim)])
ly = V.dgeom['BaryS'][0] * np.ones((2,)) # depends on [control=['if'], data=[]]
ax.plot(lx, ly, label=V.Id.NameLTX + ' Bs', **Bsdict) # depends on [control=['if'], data=[]]
if 'Bv' in Elt and V.Type == 'Tor':
lx = V.dgeom['BaryV'][0] * np.cos(Theta)
ly = V.dgeom['BaryV'][0] * np.sin(Theta)
ax.plot(lx, ly, label=V.Id.NameLTX + ' Bv', **Bvdict) # depends on [control=['if'], data=[]]
if indices and V.noccur > 1:
if V.Id.Type == 'Tor':
for ii in range(0, V.noccur):
(R, theta) = (V.dgeom['P1Max'][0], np.mean(V.Lim[ii]))
(X, Y) = (R * np.cos(theta), R * np.sin(theta))
ax.annotate('{0}'.format(ii), size=10, xy=(X, Y), xytext=(X + 0.02 * np.cos(theta), Y + 0.02 * np.sin(theta)), horizontalalignment='center', verticalalignment='center') # depends on [control=['for'], data=['ii']] # depends on [control=['if'], data=[]]
elif V.Id.Type == 'Lin':
for ii in range(0, V.noccur):
(X, Y) = (np.mean(V.Lim[ii]), V.dgeom['P1Max'][0])
ax.annotate('{0}'.format(ii), size=10, xy=(X, Y), xytext=(X, Y + 0.02), horizontalalignment='center', verticalalignment='center') # depends on [control=['for'], data=['ii']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not LegDict is None:
ax.legend(**LegDict) # depends on [control=['if'], data=[]]
if draw:
ax.relim()
ax.autoscale_view()
ax.figure.canvas.draw() # depends on [control=['if'], data=[]]
return ax |
def autoencoder_autoregressive():
"""Autoregressive autoencoder model."""
hparams = autoencoder_basic()
hparams.add_hparam("autoregressive_forget_base", False)
hparams.add_hparam("autoregressive_mode", "none")
hparams.add_hparam("autoregressive_decode_steps", 0)
hparams.add_hparam("autoregressive_eval_pure_autoencoder", False)
hparams.add_hparam("autoregressive_gumbel_sample", False)
return hparams | def function[autoencoder_autoregressive, parameter[]]:
constant[Autoregressive autoencoder model.]
variable[hparams] assign[=] call[name[autoencoder_basic], parameter[]]
call[name[hparams].add_hparam, parameter[constant[autoregressive_forget_base], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[autoregressive_mode], constant[none]]]
call[name[hparams].add_hparam, parameter[constant[autoregressive_decode_steps], constant[0]]]
call[name[hparams].add_hparam, parameter[constant[autoregressive_eval_pure_autoencoder], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[autoregressive_gumbel_sample], constant[False]]]
return[name[hparams]] | keyword[def] identifier[autoencoder_autoregressive] ():
literal[string]
identifier[hparams] = identifier[autoencoder_basic] ()
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
keyword[return] identifier[hparams] | def autoencoder_autoregressive():
"""Autoregressive autoencoder model."""
hparams = autoencoder_basic()
hparams.add_hparam('autoregressive_forget_base', False)
hparams.add_hparam('autoregressive_mode', 'none')
hparams.add_hparam('autoregressive_decode_steps', 0)
hparams.add_hparam('autoregressive_eval_pure_autoencoder', False)
hparams.add_hparam('autoregressive_gumbel_sample', False)
return hparams |
def newton_refine2(s_vals, curve1, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256)
ax.lines[-1].zorder = 1
curve2.plot(256, ax=ax)
ax.lines[-1].zorder = 1
points = curve1.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 5)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
ax.axis("scaled")
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
save_image(ax.figure, "newton_refine2.png") | def function[newton_refine2, parameter[s_vals, curve1, curve2]]:
constant[Image for :func:`.newton_refine` docstring.]
if name[NO_IMAGES] begin[:]
return[None]
variable[ax] assign[=] call[name[curve1].plot, parameter[constant[256]]]
call[name[ax].lines][<ast.UnaryOp object at 0x7da2054a6710>].zorder assign[=] constant[1]
call[name[curve2].plot, parameter[constant[256]]]
call[name[ax].lines][<ast.UnaryOp object at 0x7da2054a7970>].zorder assign[=] constant[1]
variable[points] assign[=] call[name[curve1].evaluate_multi, parameter[call[name[np].asfortranarray, parameter[name[s_vals]]]]]
variable[colors] assign[=] call[name[seaborn].dark_palette, parameter[constant[blue], constant[5]]]
call[name[ax].scatter, parameter[call[name[points]][tuple[[<ast.Constant object at 0x7da2054a70a0>, <ast.Slice object at 0x7da2054a69b0>]]], call[name[points]][tuple[[<ast.Constant object at 0x7da2054a7520>, <ast.Slice object at 0x7da2054a5720>]]]]]
call[name[ax].axis, parameter[constant[scaled]]]
call[name[ax].set_xlim, parameter[constant[0.0], constant[1.0]]]
call[name[ax].set_ylim, parameter[constant[0.0], constant[1.0]]]
call[name[save_image], parameter[name[ax].figure, constant[newton_refine2.png]]] | keyword[def] identifier[newton_refine2] ( identifier[s_vals] , identifier[curve1] , identifier[curve2] ):
literal[string]
keyword[if] identifier[NO_IMAGES] :
keyword[return]
identifier[ax] = identifier[curve1] . identifier[plot] ( literal[int] )
identifier[ax] . identifier[lines] [- literal[int] ]. identifier[zorder] = literal[int]
identifier[curve2] . identifier[plot] ( literal[int] , identifier[ax] = identifier[ax] )
identifier[ax] . identifier[lines] [- literal[int] ]. identifier[zorder] = literal[int]
identifier[points] = identifier[curve1] . identifier[evaluate_multi] ( identifier[np] . identifier[asfortranarray] ( identifier[s_vals] ))
identifier[colors] = identifier[seaborn] . identifier[dark_palette] ( literal[string] , literal[int] )
identifier[ax] . identifier[scatter] (
identifier[points] [ literal[int] ,:], identifier[points] [ literal[int] ,:], identifier[c] = identifier[colors] , identifier[s] = literal[int] , identifier[alpha] = literal[int] , identifier[zorder] = literal[int]
)
identifier[ax] . identifier[axis] ( literal[string] )
identifier[ax] . identifier[set_xlim] ( literal[int] , literal[int] )
identifier[ax] . identifier[set_ylim] ( literal[int] , literal[int] )
identifier[save_image] ( identifier[ax] . identifier[figure] , literal[string] ) | def newton_refine2(s_vals, curve1, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return # depends on [control=['if'], data=[]]
ax = curve1.plot(256)
ax.lines[-1].zorder = 1
curve2.plot(256, ax=ax)
ax.lines[-1].zorder = 1
points = curve1.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette('blue', 5)
ax.scatter(points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2)
ax.axis('scaled')
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
save_image(ax.figure, 'newton_refine2.png') |
def read_stats(self, *stats):
""" Read stream statistics from chassis.
:param stats: list of requested statistics to read, if empty - read all statistics.
"""
from ixexplorer.ixe_stream import IxePacketGroupStream
sleep_time = 0.1 # in cases we only want few counters but very fast we need a smaller sleep time
if not stats:
stats = [m.attrname for m in IxePgStats.__tcl_members__ if m.flags & FLAG_RDONLY]
sleep_time = 1
# Read twice to refresh rate statistics.
for port in self.tx_ports_streams:
port.api.call_rc('streamTransmitStats get {} 1 4096'.format(port.uri))
for rx_port in self.rx_ports:
rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri))
time.sleep(sleep_time)
self.statistics = OrderedDict()
for tx_port, streams in self.tx_ports_streams.items():
for stream in streams:
stream_stats = OrderedDict()
tx_port.api.call_rc('streamTransmitStats get {} 1 4096'.format(tx_port.uri))
stream_tx_stats = IxeStreamTxStats(tx_port, stream.index)
stream_stats_tx = {c: v for c, v in stream_tx_stats.get_attributes(FLAG_RDONLY).items()}
stream_stats['tx'] = stream_stats_tx
stream_stat_pgid = IxePacketGroupStream(stream).groupId
stream_stats_pg = pg_stats_dict()
for port in self.session.ports.values():
stream_stats_pg[str(port)] = OrderedDict(zip(stats, [-1] * len(stats)))
for rx_port in self.rx_ports:
if not stream.rx_ports or rx_port in stream.rx_ports:
rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri))
pg_stats = IxePgStats(rx_port, stream_stat_pgid)
stream_stats_pg[str(rx_port)] = pg_stats.read_stats(*stats)
stream_stats['rx'] = stream_stats_pg
self.statistics[str(stream)] = stream_stats
return self.statistics | def function[read_stats, parameter[self]]:
constant[ Read stream statistics from chassis.
:param stats: list of requested statistics to read, if empty - read all statistics.
]
from relative_module[ixexplorer.ixe_stream] import module[IxePacketGroupStream]
variable[sleep_time] assign[=] constant[0.1]
if <ast.UnaryOp object at 0x7da1b0ca7e20> begin[:]
variable[stats] assign[=] <ast.ListComp object at 0x7da1b0ca7340>
variable[sleep_time] assign[=] constant[1]
for taget[name[port]] in starred[name[self].tx_ports_streams] begin[:]
call[name[port].api.call_rc, parameter[call[constant[streamTransmitStats get {} 1 4096].format, parameter[name[port].uri]]]]
for taget[name[rx_port]] in starred[name[self].rx_ports] begin[:]
call[name[rx_port].api.call_rc, parameter[call[constant[packetGroupStats get {} 0 65536].format, parameter[name[rx_port].uri]]]]
call[name[time].sleep, parameter[name[sleep_time]]]
name[self].statistics assign[=] call[name[OrderedDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0ca5510>, <ast.Name object at 0x7da1b0ca6bf0>]]] in starred[call[name[self].tx_ports_streams.items, parameter[]]] begin[:]
for taget[name[stream]] in starred[name[streams]] begin[:]
variable[stream_stats] assign[=] call[name[OrderedDict], parameter[]]
call[name[tx_port].api.call_rc, parameter[call[constant[streamTransmitStats get {} 1 4096].format, parameter[name[tx_port].uri]]]]
variable[stream_tx_stats] assign[=] call[name[IxeStreamTxStats], parameter[name[tx_port], name[stream].index]]
variable[stream_stats_tx] assign[=] <ast.DictComp object at 0x7da1b0ca68f0>
call[name[stream_stats]][constant[tx]] assign[=] name[stream_stats_tx]
variable[stream_stat_pgid] assign[=] call[name[IxePacketGroupStream], parameter[name[stream]]].groupId
variable[stream_stats_pg] assign[=] call[name[pg_stats_dict], parameter[]]
for taget[name[port]] in starred[call[name[self].session.ports.values, parameter[]]] begin[:]
call[name[stream_stats_pg]][call[name[str], parameter[name[port]]]] assign[=] call[name[OrderedDict], parameter[call[name[zip], parameter[name[stats], binary_operation[list[[<ast.UnaryOp object at 0x7da1b0ca5ab0>]] * call[name[len], parameter[name[stats]]]]]]]]
for taget[name[rx_port]] in starred[name[self].rx_ports] begin[:]
if <ast.BoolOp object at 0x7da1b0ca5cf0> begin[:]
call[name[rx_port].api.call_rc, parameter[call[constant[packetGroupStats get {} 0 65536].format, parameter[name[rx_port].uri]]]]
variable[pg_stats] assign[=] call[name[IxePgStats], parameter[name[rx_port], name[stream_stat_pgid]]]
call[name[stream_stats_pg]][call[name[str], parameter[name[rx_port]]]] assign[=] call[name[pg_stats].read_stats, parameter[<ast.Starred object at 0x7da1b0ca7bb0>]]
call[name[stream_stats]][constant[rx]] assign[=] name[stream_stats_pg]
call[name[self].statistics][call[name[str], parameter[name[stream]]]] assign[=] name[stream_stats]
return[name[self].statistics] | keyword[def] identifier[read_stats] ( identifier[self] ,* identifier[stats] ):
literal[string]
keyword[from] identifier[ixexplorer] . identifier[ixe_stream] keyword[import] identifier[IxePacketGroupStream]
identifier[sleep_time] = literal[int]
keyword[if] keyword[not] identifier[stats] :
identifier[stats] =[ identifier[m] . identifier[attrname] keyword[for] identifier[m] keyword[in] identifier[IxePgStats] . identifier[__tcl_members__] keyword[if] identifier[m] . identifier[flags] & identifier[FLAG_RDONLY] ]
identifier[sleep_time] = literal[int]
keyword[for] identifier[port] keyword[in] identifier[self] . identifier[tx_ports_streams] :
identifier[port] . identifier[api] . identifier[call_rc] ( literal[string] . identifier[format] ( identifier[port] . identifier[uri] ))
keyword[for] identifier[rx_port] keyword[in] identifier[self] . identifier[rx_ports] :
identifier[rx_port] . identifier[api] . identifier[call_rc] ( literal[string] . identifier[format] ( identifier[rx_port] . identifier[uri] ))
identifier[time] . identifier[sleep] ( identifier[sleep_time] )
identifier[self] . identifier[statistics] = identifier[OrderedDict] ()
keyword[for] identifier[tx_port] , identifier[streams] keyword[in] identifier[self] . identifier[tx_ports_streams] . identifier[items] ():
keyword[for] identifier[stream] keyword[in] identifier[streams] :
identifier[stream_stats] = identifier[OrderedDict] ()
identifier[tx_port] . identifier[api] . identifier[call_rc] ( literal[string] . identifier[format] ( identifier[tx_port] . identifier[uri] ))
identifier[stream_tx_stats] = identifier[IxeStreamTxStats] ( identifier[tx_port] , identifier[stream] . identifier[index] )
identifier[stream_stats_tx] ={ identifier[c] : identifier[v] keyword[for] identifier[c] , identifier[v] keyword[in] identifier[stream_tx_stats] . identifier[get_attributes] ( identifier[FLAG_RDONLY] ). identifier[items] ()}
identifier[stream_stats] [ literal[string] ]= identifier[stream_stats_tx]
identifier[stream_stat_pgid] = identifier[IxePacketGroupStream] ( identifier[stream] ). identifier[groupId]
identifier[stream_stats_pg] = identifier[pg_stats_dict] ()
keyword[for] identifier[port] keyword[in] identifier[self] . identifier[session] . identifier[ports] . identifier[values] ():
identifier[stream_stats_pg] [ identifier[str] ( identifier[port] )]= identifier[OrderedDict] ( identifier[zip] ( identifier[stats] ,[- literal[int] ]* identifier[len] ( identifier[stats] )))
keyword[for] identifier[rx_port] keyword[in] identifier[self] . identifier[rx_ports] :
keyword[if] keyword[not] identifier[stream] . identifier[rx_ports] keyword[or] identifier[rx_port] keyword[in] identifier[stream] . identifier[rx_ports] :
identifier[rx_port] . identifier[api] . identifier[call_rc] ( literal[string] . identifier[format] ( identifier[rx_port] . identifier[uri] ))
identifier[pg_stats] = identifier[IxePgStats] ( identifier[rx_port] , identifier[stream_stat_pgid] )
identifier[stream_stats_pg] [ identifier[str] ( identifier[rx_port] )]= identifier[pg_stats] . identifier[read_stats] (* identifier[stats] )
identifier[stream_stats] [ literal[string] ]= identifier[stream_stats_pg]
identifier[self] . identifier[statistics] [ identifier[str] ( identifier[stream] )]= identifier[stream_stats]
keyword[return] identifier[self] . identifier[statistics] | def read_stats(self, *stats):
""" Read stream statistics from chassis.
:param stats: list of requested statistics to read, if empty - read all statistics.
"""
from ixexplorer.ixe_stream import IxePacketGroupStream
sleep_time = 0.1 # in cases we only want few counters but very fast we need a smaller sleep time
if not stats:
stats = [m.attrname for m in IxePgStats.__tcl_members__ if m.flags & FLAG_RDONLY]
sleep_time = 1 # depends on [control=['if'], data=[]]
# Read twice to refresh rate statistics.
for port in self.tx_ports_streams:
port.api.call_rc('streamTransmitStats get {} 1 4096'.format(port.uri)) # depends on [control=['for'], data=['port']]
for rx_port in self.rx_ports:
rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri)) # depends on [control=['for'], data=['rx_port']]
time.sleep(sleep_time)
self.statistics = OrderedDict()
for (tx_port, streams) in self.tx_ports_streams.items():
for stream in streams:
stream_stats = OrderedDict()
tx_port.api.call_rc('streamTransmitStats get {} 1 4096'.format(tx_port.uri))
stream_tx_stats = IxeStreamTxStats(tx_port, stream.index)
stream_stats_tx = {c: v for (c, v) in stream_tx_stats.get_attributes(FLAG_RDONLY).items()}
stream_stats['tx'] = stream_stats_tx
stream_stat_pgid = IxePacketGroupStream(stream).groupId
stream_stats_pg = pg_stats_dict()
for port in self.session.ports.values():
stream_stats_pg[str(port)] = OrderedDict(zip(stats, [-1] * len(stats))) # depends on [control=['for'], data=['port']]
for rx_port in self.rx_ports:
if not stream.rx_ports or rx_port in stream.rx_ports:
rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri))
pg_stats = IxePgStats(rx_port, stream_stat_pgid)
stream_stats_pg[str(rx_port)] = pg_stats.read_stats(*stats) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rx_port']]
stream_stats['rx'] = stream_stats_pg
self.statistics[str(stream)] = stream_stats # depends on [control=['for'], data=['stream']] # depends on [control=['for'], data=[]]
return self.statistics |
def edit(self, data_src, value):
"""
Edit data layer.
:param data_src: Name of :class:`DataSource` to edit.
:type data_src: str
:param value: Values to edit.
:type value: dict
"""
# check if opening file
if 'filename' in value:
items = [k for k, v in self.reg.data_source.iteritems() if
v == data_src]
self.reg.unregister(items) # remove items from Registry
# open file and register new data
self.open(data_src, value['filename'], value.get('path'))
self.layer[data_src].update(value) | def function[edit, parameter[self, data_src, value]]:
constant[
Edit data layer.
:param data_src: Name of :class:`DataSource` to edit.
:type data_src: str
:param value: Values to edit.
:type value: dict
]
if compare[constant[filename] in name[value]] begin[:]
variable[items] assign[=] <ast.ListComp object at 0x7da18f722560>
call[name[self].reg.unregister, parameter[name[items]]]
call[name[self].open, parameter[name[data_src], call[name[value]][constant[filename]], call[name[value].get, parameter[constant[path]]]]]
call[call[name[self].layer][name[data_src]].update, parameter[name[value]]] | keyword[def] identifier[edit] ( identifier[self] , identifier[data_src] , identifier[value] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[value] :
identifier[items] =[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[reg] . identifier[data_source] . identifier[iteritems] () keyword[if]
identifier[v] == identifier[data_src] ]
identifier[self] . identifier[reg] . identifier[unregister] ( identifier[items] )
identifier[self] . identifier[open] ( identifier[data_src] , identifier[value] [ literal[string] ], identifier[value] . identifier[get] ( literal[string] ))
identifier[self] . identifier[layer] [ identifier[data_src] ]. identifier[update] ( identifier[value] ) | def edit(self, data_src, value):
"""
Edit data layer.
:param data_src: Name of :class:`DataSource` to edit.
:type data_src: str
:param value: Values to edit.
:type value: dict
"""
# check if opening file
if 'filename' in value:
items = [k for (k, v) in self.reg.data_source.iteritems() if v == data_src]
self.reg.unregister(items) # remove items from Registry
# open file and register new data
self.open(data_src, value['filename'], value.get('path'))
self.layer[data_src].update(value) # depends on [control=['if'], data=['value']] |
def print_object_size_tree(obj, lbl='obj', maxdepth=None):
""" Needs work """
from utool import util_str
from utool import util_type
byte_str2 = util_str.byte_str2
def _get_object_size_tree(obj, depth=0, lbl='obj', seen=None):
indent = ' ' * (depth * 4)
if maxdepth is not None and depth >= maxdepth:
size_list = [get_object_nbytes(obj)]
print(indent + str(size_list[0]))
return size_list
if (obj is None or isinstance(obj, (int, bool, float))):
return [sys.getsizeof(obj)]
elif isinstance(obj, six.string_types):
return [sys.getsizeof(obj)]
object_id = id(obj)
if object_id in seen:
print(indent + '%s ' % ('(seen) ' + lbl,))
return []
seen.add(object_id)
size_list = [(lbl, sys.getsizeof(obj))]
if isinstance(obj, np.ndarray):
size_list.append(obj.nbytes)
print('%s%s = %s ' % (indent, '(ndarray) %s' % (lbl,), byte_str2(obj.nbytes)))
elif (isinstance(obj, (tuple, list, set, frozenset))):
typestr = util_type.type_str(type(obj))
print('%s(%s) %s = %s ' % (indent, typestr, lbl, byte_str2(sys.getsizeof(obj))))
for item in obj:
size_list += _get_object_size_tree(item, depth + 1, 'item', seen)
elif isinstance(obj, dict):
print('%s(dict) %s = %s ' % (indent, lbl, byte_str2(sys.getsizeof(obj))))
try:
for key, val in six.iteritems(obj):
size_list += _get_object_size_tree(key, depth + 1, key, seen)
size_list += _get_object_size_tree(val, depth + 1, key, seen)
except RuntimeError as dictex:
ut.printex(dictex, 'RuntimeError in parsing dict nbytes',
keys=['key', (type, 'obj')], iswarning=True)
raise
elif isinstance(obj, object) and hasattr(obj, '__dict__'):
if hasattr(obj, 'used_memory'):
size_ = obj.used_memory()
print('(%sflann?) %s = %s ' % (indent, lbl, byte_str2(size_)))
size_list += [size_]
else:
print('%s(object) %s = %s ' % (indent, lbl, byte_str2(sys.getsizeof(obj))))
size_list += _get_object_size_tree(obj.__dict__,
depth=depth + 1,
lbl='__dict__', seen=seen)
return size_list
seen = set([])
_get_object_size_tree(obj, depth=0, lbl=lbl, seen=seen)
del seen | def function[print_object_size_tree, parameter[obj, lbl, maxdepth]]:
constant[ Needs work ]
from relative_module[utool] import module[util_str]
from relative_module[utool] import module[util_type]
variable[byte_str2] assign[=] name[util_str].byte_str2
def function[_get_object_size_tree, parameter[obj, depth, lbl, seen]]:
variable[indent] assign[=] binary_operation[constant[ ] * binary_operation[name[depth] * constant[4]]]
if <ast.BoolOp object at 0x7da1b2381600> begin[:]
variable[size_list] assign[=] list[[<ast.Call object at 0x7da1b2381120>]]
call[name[print], parameter[binary_operation[name[indent] + call[name[str], parameter[call[name[size_list]][constant[0]]]]]]]
return[name[size_list]]
if <ast.BoolOp object at 0x7da1b24b1390> begin[:]
return[list[[<ast.Call object at 0x7da1b24b2110>]]]
variable[object_id] assign[=] call[name[id], parameter[name[obj]]]
if compare[name[object_id] in name[seen]] begin[:]
call[name[print], parameter[binary_operation[name[indent] + binary_operation[constant[%s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b24b0250>]]]]]]
return[list[[]]]
call[name[seen].add, parameter[name[object_id]]]
variable[size_list] assign[=] list[[<ast.Tuple object at 0x7da1b24b2080>]]
if call[name[isinstance], parameter[name[obj], name[np].ndarray]] begin[:]
call[name[size_list].append, parameter[name[obj].nbytes]]
call[name[print], parameter[binary_operation[constant[%s%s = %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24b0a30>, <ast.BinOp object at 0x7da1b24b1510>, <ast.Call object at 0x7da1b24b0850>]]]]]
return[name[size_list]]
variable[seen] assign[=] call[name[set], parameter[list[[]]]]
call[name[_get_object_size_tree], parameter[name[obj]]]
<ast.Delete object at 0x7da1b24b16c0> | keyword[def] identifier[print_object_size_tree] ( identifier[obj] , identifier[lbl] = literal[string] , identifier[maxdepth] = keyword[None] ):
literal[string]
keyword[from] identifier[utool] keyword[import] identifier[util_str]
keyword[from] identifier[utool] keyword[import] identifier[util_type]
identifier[byte_str2] = identifier[util_str] . identifier[byte_str2]
keyword[def] identifier[_get_object_size_tree] ( identifier[obj] , identifier[depth] = literal[int] , identifier[lbl] = literal[string] , identifier[seen] = keyword[None] ):
identifier[indent] = literal[string] *( identifier[depth] * literal[int] )
keyword[if] identifier[maxdepth] keyword[is] keyword[not] keyword[None] keyword[and] identifier[depth] >= identifier[maxdepth] :
identifier[size_list] =[ identifier[get_object_nbytes] ( identifier[obj] )]
identifier[print] ( identifier[indent] + identifier[str] ( identifier[size_list] [ literal[int] ]))
keyword[return] identifier[size_list]
keyword[if] ( identifier[obj] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[obj] ,( identifier[int] , identifier[bool] , identifier[float] ))):
keyword[return] [ identifier[sys] . identifier[getsizeof] ( identifier[obj] )]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[six] . identifier[string_types] ):
keyword[return] [ identifier[sys] . identifier[getsizeof] ( identifier[obj] )]
identifier[object_id] = identifier[id] ( identifier[obj] )
keyword[if] identifier[object_id] keyword[in] identifier[seen] :
identifier[print] ( identifier[indent] + literal[string] %( literal[string] + identifier[lbl] ,))
keyword[return] []
identifier[seen] . identifier[add] ( identifier[object_id] )
identifier[size_list] =[( identifier[lbl] , identifier[sys] . identifier[getsizeof] ( identifier[obj] ))]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[np] . identifier[ndarray] ):
identifier[size_list] . identifier[append] ( identifier[obj] . identifier[nbytes] )
identifier[print] ( literal[string] %( identifier[indent] , literal[string] %( identifier[lbl] ,), identifier[byte_str2] ( identifier[obj] . identifier[nbytes] )))
keyword[elif] ( identifier[isinstance] ( identifier[obj] ,( identifier[tuple] , identifier[list] , identifier[set] , identifier[frozenset] ))):
identifier[typestr] = identifier[util_type] . identifier[type_str] ( identifier[type] ( identifier[obj] ))
identifier[print] ( literal[string] %( identifier[indent] , identifier[typestr] , identifier[lbl] , identifier[byte_str2] ( identifier[sys] . identifier[getsizeof] ( identifier[obj] ))))
keyword[for] identifier[item] keyword[in] identifier[obj] :
identifier[size_list] += identifier[_get_object_size_tree] ( identifier[item] , identifier[depth] + literal[int] , literal[string] , identifier[seen] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[dict] ):
identifier[print] ( literal[string] %( identifier[indent] , identifier[lbl] , identifier[byte_str2] ( identifier[sys] . identifier[getsizeof] ( identifier[obj] ))))
keyword[try] :
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[obj] ):
identifier[size_list] += identifier[_get_object_size_tree] ( identifier[key] , identifier[depth] + literal[int] , identifier[key] , identifier[seen] )
identifier[size_list] += identifier[_get_object_size_tree] ( identifier[val] , identifier[depth] + literal[int] , identifier[key] , identifier[seen] )
keyword[except] identifier[RuntimeError] keyword[as] identifier[dictex] :
identifier[ut] . identifier[printex] ( identifier[dictex] , literal[string] ,
identifier[keys] =[ literal[string] ,( identifier[type] , literal[string] )], identifier[iswarning] = keyword[True] )
keyword[raise]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[object] ) keyword[and] identifier[hasattr] ( identifier[obj] , literal[string] ):
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[size_] = identifier[obj] . identifier[used_memory] ()
identifier[print] ( literal[string] %( identifier[indent] , identifier[lbl] , identifier[byte_str2] ( identifier[size_] )))
identifier[size_list] +=[ identifier[size_] ]
keyword[else] :
identifier[print] ( literal[string] %( identifier[indent] , identifier[lbl] , identifier[byte_str2] ( identifier[sys] . identifier[getsizeof] ( identifier[obj] ))))
identifier[size_list] += identifier[_get_object_size_tree] ( identifier[obj] . identifier[__dict__] ,
identifier[depth] = identifier[depth] + literal[int] ,
identifier[lbl] = literal[string] , identifier[seen] = identifier[seen] )
keyword[return] identifier[size_list]
identifier[seen] = identifier[set] ([])
identifier[_get_object_size_tree] ( identifier[obj] , identifier[depth] = literal[int] , identifier[lbl] = identifier[lbl] , identifier[seen] = identifier[seen] )
keyword[del] identifier[seen] | def print_object_size_tree(obj, lbl='obj', maxdepth=None):
""" Needs work """
from utool import util_str
from utool import util_type
byte_str2 = util_str.byte_str2
def _get_object_size_tree(obj, depth=0, lbl='obj', seen=None):
indent = ' ' * (depth * 4)
if maxdepth is not None and depth >= maxdepth:
size_list = [get_object_nbytes(obj)]
print(indent + str(size_list[0]))
return size_list # depends on [control=['if'], data=[]]
if obj is None or isinstance(obj, (int, bool, float)):
return [sys.getsizeof(obj)] # depends on [control=['if'], data=[]]
elif isinstance(obj, six.string_types):
return [sys.getsizeof(obj)] # depends on [control=['if'], data=[]]
object_id = id(obj)
if object_id in seen:
print(indent + '%s ' % ('(seen) ' + lbl,))
return [] # depends on [control=['if'], data=[]]
seen.add(object_id)
size_list = [(lbl, sys.getsizeof(obj))]
if isinstance(obj, np.ndarray):
size_list.append(obj.nbytes)
print('%s%s = %s ' % (indent, '(ndarray) %s' % (lbl,), byte_str2(obj.nbytes))) # depends on [control=['if'], data=[]]
elif isinstance(obj, (tuple, list, set, frozenset)):
typestr = util_type.type_str(type(obj))
print('%s(%s) %s = %s ' % (indent, typestr, lbl, byte_str2(sys.getsizeof(obj))))
for item in obj:
size_list += _get_object_size_tree(item, depth + 1, 'item', seen) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
elif isinstance(obj, dict):
print('%s(dict) %s = %s ' % (indent, lbl, byte_str2(sys.getsizeof(obj))))
try:
for (key, val) in six.iteritems(obj):
size_list += _get_object_size_tree(key, depth + 1, key, seen)
size_list += _get_object_size_tree(val, depth + 1, key, seen) # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except RuntimeError as dictex:
ut.printex(dictex, 'RuntimeError in parsing dict nbytes', keys=['key', (type, 'obj')], iswarning=True)
raise # depends on [control=['except'], data=['dictex']] # depends on [control=['if'], data=[]]
elif isinstance(obj, object) and hasattr(obj, '__dict__'):
if hasattr(obj, 'used_memory'):
size_ = obj.used_memory()
print('(%sflann?) %s = %s ' % (indent, lbl, byte_str2(size_)))
size_list += [size_] # depends on [control=['if'], data=[]]
else:
print('%s(object) %s = %s ' % (indent, lbl, byte_str2(sys.getsizeof(obj))))
size_list += _get_object_size_tree(obj.__dict__, depth=depth + 1, lbl='__dict__', seen=seen) # depends on [control=['if'], data=[]]
return size_list
seen = set([])
_get_object_size_tree(obj, depth=0, lbl=lbl, seen=seen)
del seen |
async def prodAllOnce(self):
    """
    Call `prod` once for each Prodable in this Looper.

    :return: the sum of the number of events executed successfully
    """
    # TODO: looks like limit is always None???
    limit = None
    # Await each prodable sequentially, in self.prodables order.
    counts = [await prodable.prod(limit) for prodable in self.prodables]
    return sum(counts)
return s | <ast.AsyncFunctionDef object at 0x7da1b170f490> | keyword[async] keyword[def] identifier[prodAllOnce] ( identifier[self] ):
literal[string]
identifier[limit] = keyword[None]
identifier[s] = literal[int]
keyword[for] identifier[n] keyword[in] identifier[self] . identifier[prodables] :
identifier[s] += keyword[await] identifier[n] . identifier[prod] ( identifier[limit] )
keyword[return] identifier[s] | async def prodAllOnce(self):
"""
Call `prod` once for each Prodable in this Looper
:return: the sum of the number of events executed successfully
"""
# TODO: looks like limit is always None???
limit = None
s = 0
for n in self.prodables:
s += await n.prod(limit) # depends on [control=['for'], data=['n']]
return s |
def unescape_sql(inp):
    """
    :param inp: an input string to be unescaped
    :return: return the unescaped version of the string.
    """
    # Strip one pair of surrounding double quotes, when both are present.
    quoted = inp[:1] == '"' and inp[-1:] == '"'
    body = inp[1:-1] if quoted else inp
    # Collapse SQL-style doubled quotes and doubled backslashes.
    return body.replace('""', '"').replace('\\\\', '\\')
return inp.replace('""','"').replace('\\\\','\\') | def function[unescape_sql, parameter[inp]]:
constant[
:param inp: an input string to be unescaped
:return: return the unescaped version of the string.
]
if <ast.BoolOp object at 0x7da1b1019780> begin[:]
variable[inp] assign[=] call[name[inp]][<ast.Slice object at 0x7da1b1019870>]
return[call[call[name[inp].replace, parameter[constant[""], constant["]]].replace, parameter[constant[\\], constant[\]]]] | keyword[def] identifier[unescape_sql] ( identifier[inp] ):
literal[string]
keyword[if] identifier[inp] . identifier[startswith] ( literal[string] ) keyword[and] identifier[inp] . identifier[endswith] ( literal[string] ):
identifier[inp] = identifier[inp] [ literal[int] :- literal[int] ]
keyword[return] identifier[inp] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) | def unescape_sql(inp):
"""
:param inp: an input string to be unescaped
:return: return the unescaped version of the string.
"""
if inp.startswith('"') and inp.endswith('"'):
inp = inp[1:-1] # depends on [control=['if'], data=[]]
return inp.replace('""', '"').replace('\\\\', '\\') |
def ordered_dict_to_dict(d):
    """
    Converts inner OrderedDict to bare dict
    """
    result = {}
    # Deep-copy first so returned values never alias the caller's objects.
    for key, value in deepcopy(d).items():
        if isinstance(value, OrderedDict):
            value = dict(value)
        # Recurse into any mapping so nested OrderedDicts are converted too.
        result[key] = ordered_dict_to_dict(value) if isinstance(value, dict) else value
    return result
return ret | def function[ordered_dict_to_dict, parameter[d]]:
constant[
Converts inner OrderedDict to bare dict
]
variable[ret] assign[=] dictionary[[], []]
variable[new_d] assign[=] call[name[deepcopy], parameter[name[d]]]
for taget[tuple[[<ast.Name object at 0x7da1b1bc2a70>, <ast.Name object at 0x7da1b1bc2a10>]]] in starred[call[name[new_d].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[OrderedDict]]] begin[:]
variable[v] assign[=] call[name[dict], parameter[name[v]]]
if call[name[isinstance], parameter[name[v], name[dict]]] begin[:]
variable[v] assign[=] call[name[ordered_dict_to_dict], parameter[name[v]]]
call[name[ret]][name[k]] assign[=] name[v]
return[name[ret]] | keyword[def] identifier[ordered_dict_to_dict] ( identifier[d] ):
literal[string]
identifier[ret] ={}
identifier[new_d] = identifier[deepcopy] ( identifier[d] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[new_d] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[OrderedDict] ):
identifier[v] = identifier[dict] ( identifier[v] )
keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ):
identifier[v] = identifier[ordered_dict_to_dict] ( identifier[v] )
identifier[ret] [ identifier[k] ]= identifier[v]
keyword[return] identifier[ret] | def ordered_dict_to_dict(d):
"""
Converts inner OrderedDict to bare dict
"""
ret = {}
new_d = deepcopy(d)
for (k, v) in new_d.items():
if isinstance(v, OrderedDict):
v = dict(v) # depends on [control=['if'], data=[]]
if isinstance(v, dict):
v = ordered_dict_to_dict(v) # depends on [control=['if'], data=[]]
ret[k] = v # depends on [control=['for'], data=[]]
return ret |
def MakePmfFromCdf(cdf, name=None):
    """Makes a normalized Pmf from a Cdf object.

    Args:
      cdf: Cdf object
      name: string name for the new Pmf

    Returns:
      Pmf object
    """
    # Default the new Pmf's name to the source Cdf's name.
    pmf_name = cdf.name if name is None else name
    pmf = Pmf(name=pmf_name)
    # Successive differences of the cumulative values recover the masses.
    prev_prob = 0.0
    for value, cum_prob in cdf.Items():
        pmf.Incr(value, cum_prob - prev_prob)
        prev_prob = cum_prob
    return pmf
return pmf | def function[MakePmfFromCdf, parameter[cdf, name]]:
constant[Makes a normalized Pmf from a Cdf object.
Args:
cdf: Cdf object
name: string name for the new Pmf
Returns:
Pmf object
]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] name[cdf].name
variable[pmf] assign[=] call[name[Pmf], parameter[]]
variable[prev] assign[=] constant[0.0]
for taget[tuple[[<ast.Name object at 0x7da1b033b970>, <ast.Name object at 0x7da1b0338a30>]]] in starred[call[name[cdf].Items, parameter[]]] begin[:]
call[name[pmf].Incr, parameter[name[val], binary_operation[name[prob] - name[prev]]]]
variable[prev] assign[=] name[prob]
return[name[pmf]] | keyword[def] identifier[MakePmfFromCdf] ( identifier[cdf] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[cdf] . identifier[name]
identifier[pmf] = identifier[Pmf] ( identifier[name] = identifier[name] )
identifier[prev] = literal[int]
keyword[for] identifier[val] , identifier[prob] keyword[in] identifier[cdf] . identifier[Items] ():
identifier[pmf] . identifier[Incr] ( identifier[val] , identifier[prob] - identifier[prev] )
identifier[prev] = identifier[prob]
keyword[return] identifier[pmf] | def MakePmfFromCdf(cdf, name=None):
"""Makes a normalized Pmf from a Cdf object.
Args:
cdf: Cdf object
name: string name for the new Pmf
Returns:
Pmf object
"""
if name is None:
name = cdf.name # depends on [control=['if'], data=['name']]
pmf = Pmf(name=name)
prev = 0.0
for (val, prob) in cdf.Items():
pmf.Incr(val, prob - prev)
prev = prob # depends on [control=['for'], data=[]]
return pmf |
def install_update_deps(self):
    """Install or update this package's dependencies.

    Marks this package as installed in the context, reads the
    ``_upkg/depends`` file (one dependency URL per line) if present,
    registers each entry with the context, and then installs or updates
    every dependency the context reports as needed.

    :return: None
    """
    logger.debug("")
    self._ctx.installed(self.name)
    # are there any dependencies?
    depfile = os.path.join(self.repo_dir, '_upkg', 'depends')
    logger.debug("depfile? %s", depfile)
    if os.path.exists(depfile):
        logger.debug("Found depends file at %s", depfile)
        # Context manager guarantees the file is closed even if add_dep
        # raises (the original leaked the handle on error).
        with open(depfile, 'r') as deps:
            for dep in deps:
                dep = dep.strip()
                if not dep:
                    # Skip blank lines instead of registering empty deps.
                    continue
                logger.debug("depends: %s", dep)
                self._ctx.add_dep(nice_pkg_name(os.path.basename(dep)), dep)
    for rep in self._ctx.deps_needed:
        repo = Repo(url=rep)
        if repo.installed:
            repo.update()
        else:
            repo.install()
repo.install() | def function[install_update_deps, parameter[self]]:
constant[todo: Docstring for install_update_deps
:return:
:rtype:
]
call[name[logger].debug, parameter[constant[]]]
call[name[self]._ctx.installed, parameter[name[self].name]]
variable[depfile] assign[=] call[name[os].path.join, parameter[name[self].repo_dir, constant[_upkg], constant[depends]]]
call[name[logger].debug, parameter[constant[depfile? %s], name[depfile]]]
if call[name[os].path.exists, parameter[name[depfile]]] begin[:]
call[name[logger].debug, parameter[constant[Found depends file at %s], name[depfile]]]
variable[deps] assign[=] call[name[open], parameter[name[depfile], constant[r]]]
variable[dep] assign[=] call[name[deps].readline, parameter[]]
while name[dep] begin[:]
variable[dep] assign[=] call[name[dep].strip, parameter[]]
call[name[logger].debug, parameter[constant[depends: %s], name[dep]]]
call[name[self]._ctx.add_dep, parameter[call[name[nice_pkg_name], parameter[call[name[os].path.basename, parameter[name[dep]]]]], name[dep]]]
variable[dep] assign[=] call[name[deps].readline, parameter[]]
call[name[deps].close, parameter[]]
for taget[name[rep]] in starred[name[self]._ctx.deps_needed] begin[:]
variable[repo] assign[=] call[name[Repo], parameter[]]
if name[repo].installed begin[:]
call[name[repo].update, parameter[]] | keyword[def] identifier[install_update_deps] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_ctx] . identifier[installed] ( identifier[self] . identifier[name] )
identifier[depfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[repo_dir] , literal[string] , literal[string] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[depfile] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[depfile] ):
identifier[logger] . identifier[debug] ( literal[string] , identifier[depfile] )
identifier[deps] = identifier[open] ( identifier[depfile] , literal[string] )
identifier[dep] = identifier[deps] . identifier[readline] ()
keyword[while] identifier[dep] :
identifier[dep] = identifier[dep] . identifier[strip] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[dep] )
identifier[self] . identifier[_ctx] . identifier[add_dep] ( identifier[nice_pkg_name] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[dep] )), identifier[dep] )
identifier[dep] = identifier[deps] . identifier[readline] ()
identifier[deps] . identifier[close] ()
keyword[for] identifier[rep] keyword[in] identifier[self] . identifier[_ctx] . identifier[deps_needed] :
identifier[repo] = identifier[Repo] ( identifier[url] = identifier[rep] )
keyword[if] identifier[repo] . identifier[installed] :
identifier[repo] . identifier[update] ()
keyword[else] :
identifier[repo] . identifier[install] () | def install_update_deps(self):
"""todo: Docstring for install_update_deps
:return:
:rtype:
"""
logger.debug('')
self._ctx.installed(self.name)
# are there any dependencies?
depfile = os.path.join(self.repo_dir, '_upkg', 'depends')
logger.debug('depfile? %s', depfile)
if os.path.exists(depfile):
logger.debug('Found depends file at %s', depfile)
deps = open(depfile, 'r')
dep = deps.readline()
while dep:
dep = dep.strip()
logger.debug('depends: %s', dep)
self._ctx.add_dep(nice_pkg_name(os.path.basename(dep)), dep)
dep = deps.readline() # depends on [control=['while'], data=[]]
deps.close() # depends on [control=['if'], data=[]]
for rep in self._ctx.deps_needed:
repo = Repo(url=rep)
if repo.installed:
repo.update() # depends on [control=['if'], data=[]]
else:
repo.install() # depends on [control=['for'], data=['rep']] |
def add_filter(func, language=None):
    """
    Register filters for specific language.
    If language == None the filter applies for all languages.
    Filter will not apply for stop words in cache.
    :param func: callable
    :param language: string|None
    :return:
    """
    # Create the per-language bucket on first use, then record the filter.
    _filters.setdefault(language, []).append(func)
_filters[language].append(func) | def function[add_filter, parameter[func, language]]:
constant[
Register filters for specific language.
If language == None the filter applies for all languages.
Filter will not apply for stop words in cache.
:param func: callable
:param language: string|None
:return:
]
if compare[name[language] <ast.NotIn object at 0x7da2590d7190> name[_filters]] begin[:]
call[name[_filters]][name[language]] assign[=] list[[]]
call[call[name[_filters]][name[language]].append, parameter[name[func]]] | keyword[def] identifier[add_filter] ( identifier[func] , identifier[language] = keyword[None] ):
literal[string]
keyword[if] identifier[language] keyword[not] keyword[in] identifier[_filters] :
identifier[_filters] [ identifier[language] ]=[]
identifier[_filters] [ identifier[language] ]. identifier[append] ( identifier[func] ) | def add_filter(func, language=None):
"""
Register filters for specific language.
If language == None the filter applies for all languages.
Filter will not apply for stop words in cache.
:param func: callable
:param language: string|None
:return:
"""
if language not in _filters:
_filters[language] = [] # depends on [control=['if'], data=['language', '_filters']]
_filters[language].append(func) |
def extend_reservation(request, user_id, days=7):
    """Allows staff to extend the reservation on a given user's cart."""
    target = User.objects.get(id=int(user_id))
    extension = datetime.timedelta(days=days)
    CartController.for_user(target).extend_reservation(extension)
    # Send the staff member back to the page they came from.
    return redirect(request.META["HTTP_REFERER"])
return redirect(request.META["HTTP_REFERER"]) | def function[extend_reservation, parameter[request, user_id, days]]:
constant[ Allows staff to extend the reservation on a given user's cart.
]
variable[user] assign[=] call[name[User].objects.get, parameter[]]
variable[cart] assign[=] call[name[CartController].for_user, parameter[name[user]]]
call[name[cart].extend_reservation, parameter[call[name[datetime].timedelta, parameter[]]]]
return[call[name[redirect], parameter[call[name[request].META][constant[HTTP_REFERER]]]]] | keyword[def] identifier[extend_reservation] ( identifier[request] , identifier[user_id] , identifier[days] = literal[int] ):
literal[string]
identifier[user] = identifier[User] . identifier[objects] . identifier[get] ( identifier[id] = identifier[int] ( identifier[user_id] ))
identifier[cart] = identifier[CartController] . identifier[for_user] ( identifier[user] )
identifier[cart] . identifier[extend_reservation] ( identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[days] ))
keyword[return] identifier[redirect] ( identifier[request] . identifier[META] [ literal[string] ]) | def extend_reservation(request, user_id, days=7):
""" Allows staff to extend the reservation on a given user's cart.
"""
user = User.objects.get(id=int(user_id))
cart = CartController.for_user(user)
cart.extend_reservation(datetime.timedelta(days=days))
return redirect(request.META['HTTP_REFERER']) |
def is_client_method_whitelisted(request: AxesHttpRequest) -> bool:
    """
    Check if the given request uses a whitelisted method.
    """
    # Only GET requests are exempt, and only when the setting enables it.
    return bool(settings.AXES_NEVER_LOCKOUT_GET and request.method == 'GET')
return False | def function[is_client_method_whitelisted, parameter[request]]:
constant[
Check if the given request uses a whitelisted method.
]
if <ast.BoolOp object at 0x7da1b1d50910> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_client_method_whitelisted] ( identifier[request] : identifier[AxesHttpRequest] )-> identifier[bool] :
literal[string]
keyword[if] identifier[settings] . identifier[AXES_NEVER_LOCKOUT_GET] keyword[and] identifier[request] . identifier[method] == literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_client_method_whitelisted(request: AxesHttpRequest) -> bool:
"""
Check if the given request uses a whitelisted method.
"""
if settings.AXES_NEVER_LOCKOUT_GET and request.method == 'GET':
return True # depends on [control=['if'], data=[]]
return False |
def _make_individual(self, paramlist):
    """Makes an individual particle."""
    particle = creator.Individual(paramlist)
    # New particles start without an identifier.
    particle.ident = None
    return particle
return part | def function[_make_individual, parameter[self, paramlist]]:
constant[Makes an individual particle.]
variable[part] assign[=] call[name[creator].Individual, parameter[name[paramlist]]]
name[part].ident assign[=] constant[None]
return[name[part]] | keyword[def] identifier[_make_individual] ( identifier[self] , identifier[paramlist] ):
literal[string]
identifier[part] = identifier[creator] . identifier[Individual] ( identifier[paramlist] )
identifier[part] . identifier[ident] = keyword[None]
keyword[return] identifier[part] | def _make_individual(self, paramlist):
"""Makes an individual particle."""
part = creator.Individual(paramlist)
part.ident = None
return part |
def stop(self, now=False):
    """Stop and remove the service
    Consider using stop/start when Docker adds support
    """
    short_id = self.service_id[:7]
    self.log.info(
        "Stopping and removing Docker service %s (id: %s)",
        self.service_name, short_id)
    yield self.docker('remove_service', short_id)
    self.log.info(
        "Docker service %s (id: %s) removed",
        self.service_name, short_id)
    self.clear_state()
self.clear_state() | def function[stop, parameter[self, now]]:
constant[Stop and remove the service
Consider using stop/start when Docker adds support
]
call[name[self].log.info, parameter[constant[Stopping and removing Docker service %s (id: %s)], name[self].service_name, call[name[self].service_id][<ast.Slice object at 0x7da1b023c430>]]]
<ast.Yield object at 0x7da1b023fe80>
call[name[self].log.info, parameter[constant[Docker service %s (id: %s) removed], name[self].service_name, call[name[self].service_id][<ast.Slice object at 0x7da1b023c4c0>]]]
call[name[self].clear_state, parameter[]] | keyword[def] identifier[stop] ( identifier[self] , identifier[now] = keyword[False] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] (
literal[string] ,
identifier[self] . identifier[service_name] , identifier[self] . identifier[service_id] [: literal[int] ])
keyword[yield] identifier[self] . identifier[docker] ( literal[string] , identifier[self] . identifier[service_id] [: literal[int] ])
identifier[self] . identifier[log] . identifier[info] (
literal[string] ,
identifier[self] . identifier[service_name] , identifier[self] . identifier[service_id] [: literal[int] ])
identifier[self] . identifier[clear_state] () | def stop(self, now=False):
"""Stop and remove the service
Consider using stop/start when Docker adds support
"""
self.log.info('Stopping and removing Docker service %s (id: %s)', self.service_name, self.service_id[:7])
yield self.docker('remove_service', self.service_id[:7])
self.log.info('Docker service %s (id: %s) removed', self.service_name, self.service_id[:7])
self.clear_state() |
def nameservers(self):
    """
    :rtype: list
    :returns: A list of nameserver strings for this hosted zone.
    """
    # ListHostedZones responses omit nameservers, so lazily fetch a full
    # copy via GetHostedZone on first access and cache the result; the
    # nameserver values for a zone do not change, so caching is safe.
    if self._nameservers:
        return self._nameservers
    fresh_copy = self.connection.get_hosted_zone_by_id(self.id)
    self._nameservers = fresh_copy._nameservers
    return self._nameservers
return self._nameservers | def function[nameservers, parameter[self]]:
constant[
:rtype: list
:returns: A list of nameserver strings for this hosted zone.
]
if <ast.UnaryOp object at 0x7da2054a7340> begin[:]
variable[hosted_zone] assign[=] call[name[self].connection.get_hosted_zone_by_id, parameter[name[self].id]]
name[self]._nameservers assign[=] name[hosted_zone]._nameservers
return[name[self]._nameservers] | keyword[def] identifier[nameservers] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_nameservers] :
identifier[hosted_zone] = identifier[self] . identifier[connection] . identifier[get_hosted_zone_by_id] ( identifier[self] . identifier[id] )
identifier[self] . identifier[_nameservers] = identifier[hosted_zone] . identifier[_nameservers]
keyword[return] identifier[self] . identifier[_nameservers] | def nameservers(self):
"""
:rtype: list
:returns: A list of nameserver strings for this hosted zone.
"""
# If this HostedZone was instantiated by ListHostedZones, the nameservers
# attribute didn't get populated. If the user requests it, we'll
# lazy load by querying it in after the fact. It's safe to cache like
# this since these nameserver values won't change.
if not self._nameservers:
# We'll just snatch the nameserver values from a fresh copy
# via GetHostedZone.
hosted_zone = self.connection.get_hosted_zone_by_id(self.id)
self._nameservers = hosted_zone._nameservers # depends on [control=['if'], data=[]]
return self._nameservers |
def get_write_cache(self):
    """Returns the write cache for this setup, creating it if necessary.
    Returns None if no write cache is configured.
    """
    # Build lazily: only when a write target is configured and nothing has
    # been created yet.  The cached instance is safe to reuse afterwards.
    if not self._write_cache and self._options.write_to:
        spec = self._resolve(self._sanitize_cache_spec(self._options.write_to))
        if spec:
            with self._cache_setup_lock:
                self._write_cache = self._do_create_artifact_cache(spec, 'will write to')
    return self._write_cache
return self._write_cache | def function[get_write_cache, parameter[self]]:
constant[Returns the write cache for this setup, creating it if necessary.
Returns None if no write cache is configured.
]
if <ast.BoolOp object at 0x7da1b2248d90> begin[:]
variable[cache_spec] assign[=] call[name[self]._resolve, parameter[call[name[self]._sanitize_cache_spec, parameter[name[self]._options.write_to]]]]
if name[cache_spec] begin[:]
with name[self]._cache_setup_lock begin[:]
name[self]._write_cache assign[=] call[name[self]._do_create_artifact_cache, parameter[name[cache_spec], constant[will write to]]]
return[name[self]._write_cache] | keyword[def] identifier[get_write_cache] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_options] . identifier[write_to] keyword[and] keyword[not] identifier[self] . identifier[_write_cache] :
identifier[cache_spec] = identifier[self] . identifier[_resolve] ( identifier[self] . identifier[_sanitize_cache_spec] ( identifier[self] . identifier[_options] . identifier[write_to] ))
keyword[if] identifier[cache_spec] :
keyword[with] identifier[self] . identifier[_cache_setup_lock] :
identifier[self] . identifier[_write_cache] = identifier[self] . identifier[_do_create_artifact_cache] ( identifier[cache_spec] , literal[string] )
keyword[return] identifier[self] . identifier[_write_cache] | def get_write_cache(self):
"""Returns the write cache for this setup, creating it if necessary.
Returns None if no write cache is configured.
"""
if self._options.write_to and (not self._write_cache):
cache_spec = self._resolve(self._sanitize_cache_spec(self._options.write_to))
if cache_spec:
with self._cache_setup_lock:
self._write_cache = self._do_create_artifact_cache(cache_spec, 'will write to') # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._write_cache |
def on_menu_clear_interpretation(self, event):
    '''
    clear all current interpretations.
    '''
    # Rebuild each specimen's 'pars' dict, keeping only the identifying
    # fields and the lab DC field value.
    for specimen in list(self.Data.keys()):
        record = self.Data[specimen]
        del record['pars']
        record['pars'] = {
            'lab_dc_field': record['lab_dc_field'],
            'er_specimen_name': record['er_specimen_name'],
            'er_sample_name': record['er_sample_name'],
        }
    # Drop cached per-sample / per-site results and reset the GUI widgets.
    self.Data_samples = {}
    self.Data_sites = {}
    self.tmin_box.SetValue("")
    self.tmax_box.SetValue("")
    self.clear_boxes()
    self.draw_figure(self.s)
self.draw_figure(self.s) | def function[on_menu_clear_interpretation, parameter[self, event]]:
constant[
clear all current interpretations.
]
for taget[name[sp]] in starred[call[name[list], parameter[call[name[self].Data.keys, parameter[]]]]] begin[:]
<ast.Delete object at 0x7da1b057be80>
call[call[name[self].Data][name[sp]]][constant[pars]] assign[=] dictionary[[], []]
call[call[call[name[self].Data][name[sp]]][constant[pars]]][constant[lab_dc_field]] assign[=] call[call[name[self].Data][name[sp]]][constant[lab_dc_field]]
call[call[call[name[self].Data][name[sp]]][constant[pars]]][constant[er_specimen_name]] assign[=] call[call[name[self].Data][name[sp]]][constant[er_specimen_name]]
call[call[call[name[self].Data][name[sp]]][constant[pars]]][constant[er_sample_name]] assign[=] call[call[name[self].Data][name[sp]]][constant[er_sample_name]]
name[self].Data_samples assign[=] dictionary[[], []]
name[self].Data_sites assign[=] dictionary[[], []]
call[name[self].tmin_box.SetValue, parameter[constant[]]]
call[name[self].tmax_box.SetValue, parameter[constant[]]]
call[name[self].clear_boxes, parameter[]]
call[name[self].draw_figure, parameter[name[self].s]] | keyword[def] identifier[on_menu_clear_interpretation] ( identifier[self] , identifier[event] ):
literal[string]
keyword[for] identifier[sp] keyword[in] identifier[list] ( identifier[self] . identifier[Data] . identifier[keys] ()):
keyword[del] identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ]
identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ]={}
identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ][ literal[string] ]= identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ]
identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ][ literal[string] ]= identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ]
identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ][ literal[string] ]= identifier[self] . identifier[Data] [ identifier[sp] ][ literal[string] ]
identifier[self] . identifier[Data_samples] ={}
identifier[self] . identifier[Data_sites] ={}
identifier[self] . identifier[tmin_box] . identifier[SetValue] ( literal[string] )
identifier[self] . identifier[tmax_box] . identifier[SetValue] ( literal[string] )
identifier[self] . identifier[clear_boxes] ()
identifier[self] . identifier[draw_figure] ( identifier[self] . identifier[s] ) | def on_menu_clear_interpretation(self, event):
"""
clear all current interpretations.
"""
# delete all previous interpretation
for sp in list(self.Data.keys()):
del self.Data[sp]['pars']
self.Data[sp]['pars'] = {}
self.Data[sp]['pars']['lab_dc_field'] = self.Data[sp]['lab_dc_field']
self.Data[sp]['pars']['er_specimen_name'] = self.Data[sp]['er_specimen_name']
self.Data[sp]['pars']['er_sample_name'] = self.Data[sp]['er_sample_name'] # depends on [control=['for'], data=['sp']]
self.Data_samples = {}
self.Data_sites = {}
self.tmin_box.SetValue('')
self.tmax_box.SetValue('')
self.clear_boxes()
self.draw_figure(self.s) |
def normalize_encoding(sample, encoding):
    """Normalize encoding including 'utf-8-sig', 'utf-16-be', utf-16-le tweaks.
    """
    canonical = codecs.lookup(encoding).name
    # Map BOM-prefixed samples onto the BOM-aware codec name.  The utf-8
    # entry works around 'Incorrect detection of utf-8-sig encoding'
    # <https://github.com/PyYoshi/cChardet/issues/28>; the utf-16 entries
    # use the BOM-stripping name without an explicit byte order.
    bom_aliases = {
        'utf-8': (codecs.BOM_UTF8, 'utf-8-sig'),
        'utf-16-be': (codecs.BOM_UTF16_BE, 'utf-16'),
        'utf-16-le': (codecs.BOM_UTF16_LE, 'utf-16'),
    }
    if canonical in bom_aliases:
        bom, alias = bom_aliases[canonical]
        if sample.startswith(bom):
            canonical = alias
    return canonical
return encoding | def function[normalize_encoding, parameter[sample, encoding]]:
constant[Normalize encoding including 'utf-8-sig', 'utf-16-be', utf-16-le tweaks.
]
variable[encoding] assign[=] call[name[codecs].lookup, parameter[name[encoding]]].name
if compare[name[encoding] equal[==] constant[utf-8]] begin[:]
if call[name[sample].startswith, parameter[name[codecs].BOM_UTF8]] begin[:]
variable[encoding] assign[=] constant[utf-8-sig]
return[name[encoding]] | keyword[def] identifier[normalize_encoding] ( identifier[sample] , identifier[encoding] ):
literal[string]
identifier[encoding] = identifier[codecs] . identifier[lookup] ( identifier[encoding] ). identifier[name]
keyword[if] identifier[encoding] == literal[string] :
keyword[if] identifier[sample] . identifier[startswith] ( identifier[codecs] . identifier[BOM_UTF8] ):
identifier[encoding] = literal[string]
keyword[elif] identifier[encoding] == literal[string] :
keyword[if] identifier[sample] . identifier[startswith] ( identifier[codecs] . identifier[BOM_UTF16_BE] ):
identifier[encoding] = literal[string]
keyword[elif] identifier[encoding] == literal[string] :
keyword[if] identifier[sample] . identifier[startswith] ( identifier[codecs] . identifier[BOM_UTF16_LE] ):
identifier[encoding] = literal[string]
keyword[return] identifier[encoding] | def normalize_encoding(sample, encoding):
"""Normalize encoding including 'utf-8-sig', 'utf-16-be', utf-16-le tweaks.
"""
encoding = codecs.lookup(encoding).name
# Work around 'Incorrect detection of utf-8-sig encoding'
# <https://github.com/PyYoshi/cChardet/issues/28>
if encoding == 'utf-8':
if sample.startswith(codecs.BOM_UTF8):
encoding = 'utf-8-sig' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['encoding']]
# Use the BOM stripping name (without byte-order) for UTF-16 encodings
elif encoding == 'utf-16-be':
if sample.startswith(codecs.BOM_UTF16_BE):
encoding = 'utf-16' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['encoding']]
elif encoding == 'utf-16-le':
if sample.startswith(codecs.BOM_UTF16_LE):
encoding = 'utf-16' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['encoding']]
return encoding |
def to_pb(self):
    """Converts the garbage collection rule to a protobuf.

    :rtype: :class:`.table_v2_pb2.GcRule`
    :returns: The converted current object.
    """
    duration_pb = _helpers._timedelta_to_duration_pb(self.max_age)
    return table_v2_pb2.GcRule(max_age=duration_pb)
return table_v2_pb2.GcRule(max_age=max_age) | def function[to_pb, parameter[self]]:
constant[Converts the garbage collection rule to a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
]
variable[max_age] assign[=] call[name[_helpers]._timedelta_to_duration_pb, parameter[name[self].max_age]]
return[call[name[table_v2_pb2].GcRule, parameter[]]] | keyword[def] identifier[to_pb] ( identifier[self] ):
literal[string]
identifier[max_age] = identifier[_helpers] . identifier[_timedelta_to_duration_pb] ( identifier[self] . identifier[max_age] )
keyword[return] identifier[table_v2_pb2] . identifier[GcRule] ( identifier[max_age] = identifier[max_age] ) | def to_pb(self):
"""Converts the garbage collection rule to a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
max_age = _helpers._timedelta_to_duration_pb(self.max_age)
return table_v2_pb2.GcRule(max_age=max_age) |
def append(self, *args, **kw):
    """Add a new row to a table.

    The column names and their corresponding values must either be
    supplied as a single dictionary of {field: value}, or as a series
    of ``field=value`` keyword arguments.  Supplying both forms at
    once is ambiguous, so the call is ignored in that case; calling
    with no usable arguments is a no-op (the old code raised a
    ``NameError`` here).
    """
    if args and kw:
        # Ambiguous: both a positional dict and keyword values given.
        return
    if args and isinstance(args[0], dict):
        mapping = args[0]
    elif kw:
        mapping = kw
    else:
        # Nothing to insert.
        return
    fields = list(mapping)
    values = [mapping[k] for k in fields]
    placeholders = ", ".join("?" for _ in fields)
    # NOTE(review): field names are interpolated directly into the SQL
    # text and must come from trusted callers; the values themselves
    # are bound safely through qmark placeholders.
    sql = ("insert into " + self._name +
           " (" + ", ".join(fields) + ") values (" + placeholders + ")")
    self._db._cur.execute(sql, values)
    self._db._i += 1
    # Commit in batches once the configured threshold is reached.
    if self._db._i >= self._db._commit:
        self._db._i = 0
        self._db._con.commit()
constant[Adds a new row to a table.
Adds a row to the given table.
The column names and their corresponding values
must either be supplied as a dictionary of {fields:values},
or a series of keyword arguments of field=value style.
]
if <ast.BoolOp object at 0x7da204346350> begin[:]
return[None]
if <ast.BoolOp object at 0x7da204347ca0> begin[:]
variable[fields] assign[=] <ast.ListComp object at 0x7da2043464d0>
variable[v] assign[=] <ast.ListComp object at 0x7da204346fe0>
if name[kw] begin[:]
variable[fields] assign[=] <ast.ListComp object at 0x7da204344430>
variable[v] assign[=] <ast.ListComp object at 0x7da204344ac0>
variable[q] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da18bcc8b50>]]
variable[sql] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[insert into ] + name[self]._name] + constant[ (]] + call[constant[, ].join, parameter[name[fields]]]] + constant[) ]]
<ast.AugAssign object at 0x7da18bcc9330>
call[name[self]._db._cur.execute, parameter[name[sql], name[v]]]
<ast.AugAssign object at 0x7da18bccafb0>
if compare[name[self]._db._i greater_or_equal[>=] name[self]._db._commit] begin[:]
name[self]._db._i assign[=] constant[0]
call[name[self]._db._con.commit, parameter[]] | keyword[def] identifier[append] ( identifier[self] ,* identifier[args] ,** identifier[kw] ):
literal[string]
keyword[if] identifier[args] keyword[and] identifier[kw] :
keyword[return]
keyword[if] identifier[args] keyword[and] identifier[type] ( identifier[args] [ literal[int] ])== identifier[dict] :
identifier[fields] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[args] [ literal[int] ]]
identifier[v] =[ identifier[args] [ literal[int] ][ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[args] [ literal[int] ]]
keyword[if] identifier[kw] :
identifier[fields] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[kw] ]
identifier[v] =[ identifier[kw] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[kw] ]
identifier[q] = literal[string] . identifier[join] ([ literal[string] keyword[for] identifier[x] keyword[in] identifier[fields] ])
identifier[sql] = literal[string] + identifier[self] . identifier[_name] + literal[string] + literal[string] . identifier[join] ( identifier[fields] )+ literal[string]
identifier[sql] += literal[string] + identifier[q] + literal[string]
identifier[self] . identifier[_db] . identifier[_cur] . identifier[execute] ( identifier[sql] , identifier[v] )
identifier[self] . identifier[_db] . identifier[_i] += literal[int]
keyword[if] identifier[self] . identifier[_db] . identifier[_i] >= identifier[self] . identifier[_db] . identifier[_commit] :
identifier[self] . identifier[_db] . identifier[_i] = literal[int]
identifier[self] . identifier[_db] . identifier[_con] . identifier[commit] () | def append(self, *args, **kw):
"""Adds a new row to a table.
Adds a row to the given table.
The column names and their corresponding values
must either be supplied as a dictionary of {fields:values},
or a series of keyword arguments of field=value style.
"""
if args and kw:
return # depends on [control=['if'], data=[]]
if args and type(args[0]) == dict:
fields = [k for k in args[0]]
v = [args[0][k] for k in args[0]] # depends on [control=['if'], data=[]]
if kw:
fields = [k for k in kw]
v = [kw[k] for k in kw] # depends on [control=['if'], data=[]]
q = ', '.join(['?' for x in fields])
sql = 'insert into ' + self._name + ' (' + ', '.join(fields) + ') '
sql += 'values (' + q + ')'
self._db._cur.execute(sql, v)
self._db._i += 1
if self._db._i >= self._db._commit:
self._db._i = 0
self._db._con.commit() # depends on [control=['if'], data=[]] |
def get_fields(model, include=None):
    """
    Returns ordered dict in format 'field': 'verbose_name'
    """
    opts = model._meta
    if include:
        # Explicit selection: resolve each requested name, keeping order.
        chosen = [opts.get_field(name) for name in include]
    else:
        # Default selection: every editable field on the model.
        chosen = [f for f in opts.fields if f.editable]
    return OrderedDict((f.name, f.verbose_name) for f in chosen)
constant[
Returns ordered dict in format 'field': 'verbose_name'
]
variable[fields] assign[=] call[name[OrderedDict], parameter[]]
variable[info] assign[=] name[model]._meta
if name[include] begin[:]
variable[selected] assign[=] <ast.ListComp object at 0x7da18f58f4c0>
for taget[name[field]] in starred[name[selected]] begin[:]
call[name[fields]][name[field].name] assign[=] name[field].verbose_name
return[name[fields]] | keyword[def] identifier[get_fields] ( identifier[model] , identifier[include] = keyword[None] ):
literal[string]
identifier[fields] = identifier[OrderedDict] ()
identifier[info] = identifier[model] . identifier[_meta]
keyword[if] identifier[include] :
identifier[selected] =[ identifier[info] . identifier[get_field] ( identifier[name] ) keyword[for] identifier[name] keyword[in] identifier[include] ]
keyword[else] :
identifier[selected] =[ identifier[field] keyword[for] identifier[field] keyword[in] identifier[info] . identifier[fields] keyword[if] identifier[field] . identifier[editable] ]
keyword[for] identifier[field] keyword[in] identifier[selected] :
identifier[fields] [ identifier[field] . identifier[name] ]= identifier[field] . identifier[verbose_name]
keyword[return] identifier[fields] | def get_fields(model, include=None):
"""
Returns ordered dict in format 'field': 'verbose_name'
"""
fields = OrderedDict()
info = model._meta
if include:
selected = [info.get_field(name) for name in include] # depends on [control=['if'], data=[]]
else:
selected = [field for field in info.fields if field.editable]
for field in selected:
fields[field.name] = field.verbose_name # depends on [control=['for'], data=['field']]
return fields |
def count_rows_with_nans(X):
    """Count the number of rows in a 2D array that contain any NaN values.

    Parameters
    ----------
    X : np.ndarray
        Array to inspect; only 2-D input is counted.

    Returns
    -------
    Number of rows containing at least one NaN, or ``None`` when ``X``
    is not 2-D (explicit here; the old code fell off the end).
    """
    if X.ndim == 2:
        # A row counts once no matter how many NaNs it holds; any(axis=1)
        # is the direct form of the old where(sum != 0, 1, 0) dance.
        return np.isnan(X).any(axis=1).sum()
    return None
constant[Count the number of rows in 2D arrays that contain any nan values.]
if compare[name[X].ndim equal[==] constant[2]] begin[:]
return[call[call[name[np].where, parameter[compare[call[call[name[np].isnan, parameter[name[X]]].sum, parameter[]] not_equal[!=] constant[0]], constant[1], constant[0]]].sum, parameter[]]] | keyword[def] identifier[count_rows_with_nans] ( identifier[X] ):
literal[string]
keyword[if] identifier[X] . identifier[ndim] == literal[int] :
keyword[return] identifier[np] . identifier[where] ( identifier[np] . identifier[isnan] ( identifier[X] ). identifier[sum] ( identifier[axis] = literal[int] )!= literal[int] , literal[int] , literal[int] ). identifier[sum] () | def count_rows_with_nans(X):
"""Count the number of rows in 2D arrays that contain any nan values."""
if X.ndim == 2:
return np.where(np.isnan(X).sum(axis=1) != 0, 1, 0).sum() # depends on [control=['if'], data=[]] |
def set_e(self):
    """Load the decoding selected in the combobox into ``self.e`` and
    rebuild the decoder-chain list widget (and its per-function
    options) from that decoding's chain.
    """
    if self.ui.combobox_decodings.count() < 1: # Empty list
        return
    # Deep copy so edits made through the UI never mutate the stored decoding.
    self.e = copy.deepcopy(self.decodings[self.ui.combobox_decodings.currentIndex()])
    """:type: encoding """
    chain = self.e.get_chain()
    self.ui.decoderchain.clear()
    self.chainoptions.clear()
    last_i = ""
    for i in chain:
        # Chain entries are either known decoding-function names...
        if i in [constants.DECODING_INVERT, constants.DECODING_ENOCEAN, constants.DECODING_DIFFERENTIAL,
                 constants.DECODING_REDUNDANCY, constants.DECODING_CARRIER, constants.DECODING_BITORDER,
                 constants.DECODING_EDGE, constants.DECODING_DATAWHITENING, constants.DECODING_SUBSTITUTION,
                 constants.DECODING_EXTERNAL, constants.DECODING_CUT, constants.DECODING_MORSE,
                 constants.DECODING_DISABLED_PREFIX]:
            self.ui.decoderchain.addItem(i)
            self.decoderchainUpdate()
            # Key options by the list item's current text (decoderchainUpdate
            # may rewrite it after insertion — TODO confirm).
            last_i = self.ui.decoderchain.item(self.ui.decoderchain.count() - 1).text()
        else:
            # ...or an option payload that belongs to the preceding
            # function, stored only for the functions that take options.
            if any(x in last_i for x in [constants.DECODING_REDUNDANCY, constants.DECODING_CARRIER,
                                         constants.DECODING_SUBSTITUTION, constants.DECODING_EXTERNAL,
                                         constants.DECODING_DATAWHITENING, constants.DECODING_CUT,
                                         constants.DECODING_MORSE]):
                self.chainoptions[last_i] = i
    self.decoderchainUpdate()
    self.decoder_update()
    # Freshly loaded decoding has no unsaved changes yet.
    self.ui.saveas.setVisible(False)
if compare[call[name[self].ui.combobox_decodings.count, parameter[]] less[<] constant[1]] begin[:]
return[None]
name[self].e assign[=] call[name[copy].deepcopy, parameter[call[name[self].decodings][call[name[self].ui.combobox_decodings.currentIndex, parameter[]]]]]
constant[:type: encoding ]
variable[chain] assign[=] call[name[self].e.get_chain, parameter[]]
call[name[self].ui.decoderchain.clear, parameter[]]
call[name[self].chainoptions.clear, parameter[]]
variable[last_i] assign[=] constant[]
for taget[name[i]] in starred[name[chain]] begin[:]
if compare[name[i] in list[[<ast.Attribute object at 0x7da1b1fca080>, <ast.Attribute object at 0x7da1b1fcbbe0>, <ast.Attribute object at 0x7da1b1fc8e80>, <ast.Attribute object at 0x7da1b1fc9c00>, <ast.Attribute object at 0x7da1b1fc8f40>, <ast.Attribute object at 0x7da1b1fcafb0>, <ast.Attribute object at 0x7da1b1fcbca0>, <ast.Attribute object at 0x7da1b1fcbbb0>, <ast.Attribute object at 0x7da1b1fcb9d0>, <ast.Attribute object at 0x7da1b1fc89a0>, <ast.Attribute object at 0x7da1b1fcb4c0>, <ast.Attribute object at 0x7da1b1fc8910>, <ast.Attribute object at 0x7da1b1fc9810>]]] begin[:]
call[name[self].ui.decoderchain.addItem, parameter[name[i]]]
call[name[self].decoderchainUpdate, parameter[]]
variable[last_i] assign[=] call[call[name[self].ui.decoderchain.item, parameter[binary_operation[call[name[self].ui.decoderchain.count, parameter[]] - constant[1]]]].text, parameter[]]
call[name[self].decoderchainUpdate, parameter[]]
call[name[self].decoder_update, parameter[]]
call[name[self].ui.saveas.setVisible, parameter[constant[False]]] | keyword[def] identifier[set_e] ( identifier[self] ):
keyword[if] identifier[self] . identifier[ui] . identifier[combobox_decodings] . identifier[count] ()< literal[int] :
keyword[return]
identifier[self] . identifier[e] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[decodings] [ identifier[self] . identifier[ui] . identifier[combobox_decodings] . identifier[currentIndex] ()])
literal[string]
identifier[chain] = identifier[self] . identifier[e] . identifier[get_chain] ()
identifier[self] . identifier[ui] . identifier[decoderchain] . identifier[clear] ()
identifier[self] . identifier[chainoptions] . identifier[clear] ()
identifier[last_i] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[chain] :
keyword[if] identifier[i] keyword[in] [ identifier[constants] . identifier[DECODING_INVERT] , identifier[constants] . identifier[DECODING_ENOCEAN] , identifier[constants] . identifier[DECODING_DIFFERENTIAL] ,
identifier[constants] . identifier[DECODING_REDUNDANCY] , identifier[constants] . identifier[DECODING_CARRIER] , identifier[constants] . identifier[DECODING_BITORDER] ,
identifier[constants] . identifier[DECODING_EDGE] , identifier[constants] . identifier[DECODING_DATAWHITENING] , identifier[constants] . identifier[DECODING_SUBSTITUTION] ,
identifier[constants] . identifier[DECODING_EXTERNAL] , identifier[constants] . identifier[DECODING_CUT] , identifier[constants] . identifier[DECODING_MORSE] ,
identifier[constants] . identifier[DECODING_DISABLED_PREFIX] ]:
identifier[self] . identifier[ui] . identifier[decoderchain] . identifier[addItem] ( identifier[i] )
identifier[self] . identifier[decoderchainUpdate] ()
identifier[last_i] = identifier[self] . identifier[ui] . identifier[decoderchain] . identifier[item] ( identifier[self] . identifier[ui] . identifier[decoderchain] . identifier[count] ()- literal[int] ). identifier[text] ()
keyword[else] :
keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[last_i] keyword[for] identifier[x] keyword[in] [ identifier[constants] . identifier[DECODING_REDUNDANCY] , identifier[constants] . identifier[DECODING_CARRIER] ,
identifier[constants] . identifier[DECODING_SUBSTITUTION] , identifier[constants] . identifier[DECODING_EXTERNAL] ,
identifier[constants] . identifier[DECODING_DATAWHITENING] , identifier[constants] . identifier[DECODING_CUT] ,
identifier[constants] . identifier[DECODING_MORSE] ]):
identifier[self] . identifier[chainoptions] [ identifier[last_i] ]= identifier[i]
identifier[self] . identifier[decoderchainUpdate] ()
identifier[self] . identifier[decoder_update] ()
identifier[self] . identifier[ui] . identifier[saveas] . identifier[setVisible] ( keyword[False] ) | def set_e(self):
if self.ui.combobox_decodings.count() < 1: # Empty list
return # depends on [control=['if'], data=[]]
self.e = copy.deepcopy(self.decodings[self.ui.combobox_decodings.currentIndex()])
':type: encoding '
chain = self.e.get_chain()
self.ui.decoderchain.clear()
self.chainoptions.clear()
last_i = ''
for i in chain:
if i in [constants.DECODING_INVERT, constants.DECODING_ENOCEAN, constants.DECODING_DIFFERENTIAL, constants.DECODING_REDUNDANCY, constants.DECODING_CARRIER, constants.DECODING_BITORDER, constants.DECODING_EDGE, constants.DECODING_DATAWHITENING, constants.DECODING_SUBSTITUTION, constants.DECODING_EXTERNAL, constants.DECODING_CUT, constants.DECODING_MORSE, constants.DECODING_DISABLED_PREFIX]:
self.ui.decoderchain.addItem(i)
self.decoderchainUpdate()
last_i = self.ui.decoderchain.item(self.ui.decoderchain.count() - 1).text() # depends on [control=['if'], data=['i']]
elif any((x in last_i for x in [constants.DECODING_REDUNDANCY, constants.DECODING_CARRIER, constants.DECODING_SUBSTITUTION, constants.DECODING_EXTERNAL, constants.DECODING_DATAWHITENING, constants.DECODING_CUT, constants.DECODING_MORSE])):
self.chainoptions[last_i] = i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
self.decoderchainUpdate()
self.decoder_update()
self.ui.saveas.setVisible(False) |
def populated_column_map(self):
    """Return the _column_map without unused optional fields"""
    cls = self.model
    result = []
    for csv_name, field_pattern in cls._column_map:
        # The local field name is the part before any foreign-key lookup.
        field_name = field_pattern.split('__', 1)[0] if '__' in field_pattern else field_pattern
        # Point fields have no single backing model field to inspect.
        field = None if re_point.match(field_name) else cls._meta.get_field(field_name)
        if field and field.blank and not field.has_default():
            # Optional column: keep it only when at least one record
            # actually carries a non-blank value.
            if self.exclude(**{field_name: get_blank_value(field)}).exists():
                result.append((csv_name, field_pattern))
        else:
            result.append((csv_name, field_pattern))
    return result
constant[Return the _column_map without unused optional fields]
variable[column_map] assign[=] list[[]]
variable[cls] assign[=] name[self].model
for taget[tuple[[<ast.Name object at 0x7da18f58efb0>, <ast.Name object at 0x7da18f58fd30>]]] in starred[name[cls]._column_map] begin[:]
if compare[constant[__] in name[field_pattern]] begin[:]
variable[field_name] assign[=] call[call[name[field_pattern].split, parameter[constant[__], constant[1]]]][constant[0]]
variable[point_match] assign[=] call[name[re_point].match, parameter[name[field_name]]]
if name[point_match] begin[:]
variable[field] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f58c310> begin[:]
variable[kwargs] assign[=] dictionary[[<ast.Name object at 0x7da18f58f040>], [<ast.Call object at 0x7da18f58c220>]]
if call[call[name[self].exclude, parameter[]].exists, parameter[]] begin[:]
call[name[column_map].append, parameter[tuple[[<ast.Name object at 0x7da18f58c760>, <ast.Name object at 0x7da18f58dae0>]]]]
return[name[column_map]] | keyword[def] identifier[populated_column_map] ( identifier[self] ):
literal[string]
identifier[column_map] =[]
identifier[cls] = identifier[self] . identifier[model]
keyword[for] identifier[csv_name] , identifier[field_pattern] keyword[in] identifier[cls] . identifier[_column_map] :
keyword[if] literal[string] keyword[in] identifier[field_pattern] :
identifier[field_name] = identifier[field_pattern] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]
keyword[else] :
identifier[field_name] = identifier[field_pattern]
identifier[point_match] = identifier[re_point] . identifier[match] ( identifier[field_name] )
keyword[if] identifier[point_match] :
identifier[field] = keyword[None]
keyword[else] :
identifier[field] = identifier[cls] . identifier[_meta] . identifier[get_field] ( identifier[field_name] )
keyword[if] identifier[field] keyword[and] identifier[field] . identifier[blank] keyword[and] keyword[not] identifier[field] . identifier[has_default] ():
identifier[kwargs] ={ identifier[field_name] : identifier[get_blank_value] ( identifier[field] )}
keyword[if] identifier[self] . identifier[exclude] (** identifier[kwargs] ). identifier[exists] ():
identifier[column_map] . identifier[append] (( identifier[csv_name] , identifier[field_pattern] ))
keyword[else] :
identifier[column_map] . identifier[append] (( identifier[csv_name] , identifier[field_pattern] ))
keyword[return] identifier[column_map] | def populated_column_map(self):
"""Return the _column_map without unused optional fields"""
column_map = []
cls = self.model
for (csv_name, field_pattern) in cls._column_map:
# Separate the local field name from foreign columns
if '__' in field_pattern:
field_name = field_pattern.split('__', 1)[0] # depends on [control=['if'], data=['field_pattern']]
else:
field_name = field_pattern
# Handle point fields
point_match = re_point.match(field_name)
if point_match:
field = None # depends on [control=['if'], data=[]]
else:
field = cls._meta.get_field(field_name)
# Only add optional columns if they are used in the records
if field and field.blank and (not field.has_default()):
kwargs = {field_name: get_blank_value(field)}
if self.exclude(**kwargs).exists():
column_map.append((csv_name, field_pattern)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
column_map.append((csv_name, field_pattern)) # depends on [control=['for'], data=[]]
return column_map |
def committors(sources, sinks, msm):
    """Compute the forward committors of the reaction sources -> sinks.

    Parameters
    ----------
    sources : array_like, int
        The set of unfolded/reactant states.
    sinks : array_like, int
        The set of folded/product states.
    msm : msmbuilder.MarkovStateModel
        MSM fit to the data.

    Returns
    -------
    forward_committors : np.ndarray
        The forward committors for the reaction sources -> sinks

    References
    ----------
    .. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
        transition paths. J. Stat. Phys. 123, 503-523 (2006).
    .. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
        Transition path theory for Markov jump processes.
        Multiscale Model. Simul. 7, 1192-1219 (2009).
    .. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
        flux and folding pathways in network models of
        coarse-grained protein dynamics. J. Chem. Phys.
        130, 205102 (2009).
    .. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
        pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
        19011-19016.
    """
    if not hasattr(msm, 'all_transmats_'):
        # Single transition matrix: one committor vector.
        return _committors(sources, sinks, msm.transmat_)
    # Ensemble of transition matrices: compute a committor vector for
    # each matrix and return the elementwise median across the ensemble.
    per_matrix = np.empty(msm.all_transmats_.shape[:2])
    for row, tmat in enumerate(msm.all_transmats_):
        per_matrix[row] = _committors(sources, sinks, tmat)
    return np.median(per_matrix, axis=0)
constant[
Get the forward committors of the reaction sources -> sinks.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to the data.
Returns
-------
forward_committors : np.ndarray
The forward committors for the reaction sources -> sinks
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
]
if call[name[hasattr], parameter[name[msm], constant[all_transmats_]]] begin[:]
variable[commits] assign[=] call[name[np].zeros, parameter[call[name[msm].all_transmats_.shape][<ast.Slice object at 0x7da1b07f4a30>]]]
for taget[tuple[[<ast.Name object at 0x7da1b07f57e0>, <ast.Name object at 0x7da1b07f5600>]]] in starred[call[name[enumerate], parameter[name[msm].all_transmats_]]] begin[:]
call[name[commits]][tuple[[<ast.Name object at 0x7da1b07f6500>, <ast.Slice object at 0x7da1b07f5bd0>]]] assign[=] call[name[_committors], parameter[name[sources], name[sinks], name[tprob]]]
return[call[name[np].median, parameter[name[commits]]]]
return[call[name[_committors], parameter[name[sources], name[sinks], name[msm].transmat_]]] | keyword[def] identifier[committors] ( identifier[sources] , identifier[sinks] , identifier[msm] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[msm] , literal[string] ):
identifier[commits] = identifier[np] . identifier[zeros] ( identifier[msm] . identifier[all_transmats_] . identifier[shape] [: literal[int] ])
keyword[for] identifier[i] , identifier[tprob] keyword[in] identifier[enumerate] ( identifier[msm] . identifier[all_transmats_] ):
identifier[commits] [ identifier[i] ,:]= identifier[_committors] ( identifier[sources] , identifier[sinks] , identifier[tprob] )
keyword[return] identifier[np] . identifier[median] ( identifier[commits] , identifier[axis] = literal[int] )
keyword[return] identifier[_committors] ( identifier[sources] , identifier[sinks] , identifier[msm] . identifier[transmat_] ) | def committors(sources, sinks, msm):
"""
Get the forward committors of the reaction sources -> sinks.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to the data.
Returns
-------
forward_committors : np.ndarray
The forward committors for the reaction sources -> sinks
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
if hasattr(msm, 'all_transmats_'):
commits = np.zeros(msm.all_transmats_.shape[:2])
for (i, tprob) in enumerate(msm.all_transmats_):
commits[i, :] = _committors(sources, sinks, tprob) # depends on [control=['for'], data=[]]
return np.median(commits, axis=0) # depends on [control=['if'], data=[]]
return _committors(sources, sinks, msm.transmat_) |
def parse(self, scope, error=False, depth=0):
    """ Parse function. We search for mixins
    first within current scope then fallback
    to global scope. The special scope.deferred
    is used when local scope mixins are called
    within parent mixins.
    If nothing is found we fallback to block-mixin
    as lessc.js allows calls to blocks and mixins to
    be inter-changable.
    clx: This method is a HACK that stems from
    poor design elsewhere. I will fix it
    when I have more time.
    args:
        scope (Scope): Current scope
        error (bool): Raise SyntaxError when nothing resolves
        depth (int): Recursion depth, guards against runaway deferral
    returns:
        mixed
    """
    res = False
    ident, args = self.tokens
    ident.parse(scope)
    mixins = scope.mixins(ident.raw())
    if not mixins:
        # Retry the identifier without scope (global lookup).
        ident.parse(None)
        mixins = scope.mixins(ident.raw())
    if depth > 64:
        raise SyntaxError('NameError `%s`' % ident.raw(True))
    if not mixins:
        if scope.deferred:
            # Search inside the deferring scope (closure lookup): pop
            # parse frames one at a time until the mixin resolves,
            # then restore the saved frames afterwards.
            store = [t for t in scope.deferred.parsed[-1]]
            i = 0
            while scope.deferred.parsed[-1]:
                scope.current = scope.deferred
                ident.parse(scope)
                mixins = scope.mixins(ident.raw())
                scope.current = None
                if mixins or i > 64:
                    break
                scope.deferred.parsed[-1].pop()
                i += 1
            scope.deferred.parsed[-1] = store
    if not mixins:
        # Fallback to blocks
        block = scope.blocks(ident.raw())
        if not block:
            ident.parse(None)
            block = scope.blocks(ident.raw())
        if block:
            scope.current = scope.real[-1] if scope.real else None
            res = block.copy_inner(scope)
            scope.current = None
    if mixins:
        # Try each candidate mixin until one accepts the arguments.
        for mixin in mixins:
            scope.current = scope.real[-1] if scope.real else None
            res = mixin.call(scope, args)
            if res:
                # Add variables to scope to support
                # closures
                [scope.add_variable(v) for v in mixin.vars]
                scope.deferred = ident
                break
    if res:
        # Snapshot the deferred frames; parsing the results below may
        # mutate them and they are restored at the end.
        store = [t for t in scope.deferred.parsed[-1]
                 ] if scope.deferred else False
        tmp_res = []
        for p in res:
            if p:
                if isinstance(p, Deferred):
                    tmp_res.append(p.parse(scope, depth=depth + 1))
                else:
                    tmp_res.append(p.parse(scope))
        res = tmp_res
        #res = [p.parse(scope, depth=depth+1) for p in res if p]
        # Keep re-parsing until no Deferred nodes remain in the result.
        while (any(t for t in res if isinstance(t, Deferred))):
            res = [p.parse(scope) for p in res if p]
        if store:
            scope.deferred.parsed[-1] = store
    if error and not res:
        raise SyntaxError('NameError `%s`' % ident.raw(True))
    return res
constant[ Parse function. We search for mixins
first within current scope then fallback
to global scope. The special scope.deferred
is used when local scope mixins are called
within parent mixins.
If nothing is found we fallback to block-mixin
as lessc.js allows calls to blocks and mixins to
be inter-changable.
clx: This method is a HACK that stems from
poor design elsewhere. I will fix it
when I have more time.
args:
scope (Scope): Current scope
returns:
mixed
]
variable[res] assign[=] constant[False]
<ast.Tuple object at 0x7da1afff9030> assign[=] name[self].tokens
call[name[ident].parse, parameter[name[scope]]]
variable[mixins] assign[=] call[name[scope].mixins, parameter[call[name[ident].raw, parameter[]]]]
if <ast.UnaryOp object at 0x7da1afffafe0> begin[:]
call[name[ident].parse, parameter[constant[None]]]
variable[mixins] assign[=] call[name[scope].mixins, parameter[call[name[ident].raw, parameter[]]]]
if compare[name[depth] greater[>] constant[64]] begin[:]
<ast.Raise object at 0x7da1afff9b70>
if <ast.UnaryOp object at 0x7da1afff9e70> begin[:]
if name[scope].deferred begin[:]
variable[store] assign[=] <ast.ListComp object at 0x7da1afffa080>
variable[i] assign[=] constant[0]
while call[name[scope].deferred.parsed][<ast.UnaryOp object at 0x7da1afffa140>] begin[:]
name[scope].current assign[=] name[scope].deferred
call[name[ident].parse, parameter[name[scope]]]
variable[mixins] assign[=] call[name[scope].mixins, parameter[call[name[ident].raw, parameter[]]]]
name[scope].current assign[=] constant[None]
if <ast.BoolOp object at 0x7da1afffa710> begin[:]
break
call[call[name[scope].deferred.parsed][<ast.UnaryOp object at 0x7da1afffab90>].pop, parameter[]]
<ast.AugAssign object at 0x7da1afffac20>
call[name[scope].deferred.parsed][<ast.UnaryOp object at 0x7da1afffad70>] assign[=] name[store]
if <ast.UnaryOp object at 0x7da1afffae60> begin[:]
variable[block] assign[=] call[name[scope].blocks, parameter[call[name[ident].raw, parameter[]]]]
if <ast.UnaryOp object at 0x7da1afff9630> begin[:]
call[name[ident].parse, parameter[constant[None]]]
variable[block] assign[=] call[name[scope].blocks, parameter[call[name[ident].raw, parameter[]]]]
if name[block] begin[:]
name[scope].current assign[=] <ast.IfExp object at 0x7da1afff9990>
variable[res] assign[=] call[name[block].copy_inner, parameter[name[scope]]]
name[scope].current assign[=] constant[None]
if name[mixins] begin[:]
for taget[name[mixin]] in starred[name[mixins]] begin[:]
name[scope].current assign[=] <ast.IfExp object at 0x7da1afffb220>
variable[res] assign[=] call[name[mixin].call, parameter[name[scope], name[args]]]
if name[res] begin[:]
<ast.ListComp object at 0x7da1afffb760>
name[scope].deferred assign[=] name[ident]
break
if name[res] begin[:]
variable[store] assign[=] <ast.IfExp object at 0x7da1afffbb50>
variable[tmp_res] assign[=] list[[]]
for taget[name[p]] in starred[name[res]] begin[:]
if name[p] begin[:]
if call[name[isinstance], parameter[name[p], name[Deferred]]] begin[:]
call[name[tmp_res].append, parameter[call[name[p].parse, parameter[name[scope]]]]]
variable[res] assign[=] name[tmp_res]
while call[name[any], parameter[<ast.GeneratorExp object at 0x7da1afff8a60>]] begin[:]
variable[res] assign[=] <ast.ListComp object at 0x7da1aff6b250>
if name[store] begin[:]
call[name[scope].deferred.parsed][<ast.UnaryOp object at 0x7da1aff6af20>] assign[=] name[store]
if <ast.BoolOp object at 0x7da1aff6ae60> begin[:]
<ast.Raise object at 0x7da1aff6ada0>
return[name[res]] | keyword[def] identifier[parse] ( identifier[self] , identifier[scope] , identifier[error] = keyword[False] , identifier[depth] = literal[int] ):
literal[string]
identifier[res] = keyword[False]
identifier[ident] , identifier[args] = identifier[self] . identifier[tokens]
identifier[ident] . identifier[parse] ( identifier[scope] )
identifier[mixins] = identifier[scope] . identifier[mixins] ( identifier[ident] . identifier[raw] ())
keyword[if] keyword[not] identifier[mixins] :
identifier[ident] . identifier[parse] ( keyword[None] )
identifier[mixins] = identifier[scope] . identifier[mixins] ( identifier[ident] . identifier[raw] ())
keyword[if] identifier[depth] > literal[int] :
keyword[raise] identifier[SyntaxError] ( literal[string] % identifier[ident] . identifier[raw] ( keyword[True] ))
keyword[if] keyword[not] identifier[mixins] :
keyword[if] identifier[scope] . identifier[deferred] :
identifier[store] =[ identifier[t] keyword[for] identifier[t] keyword[in] identifier[scope] . identifier[deferred] . identifier[parsed] [- literal[int] ]]
identifier[i] = literal[int]
keyword[while] identifier[scope] . identifier[deferred] . identifier[parsed] [- literal[int] ]:
identifier[scope] . identifier[current] = identifier[scope] . identifier[deferred]
identifier[ident] . identifier[parse] ( identifier[scope] )
identifier[mixins] = identifier[scope] . identifier[mixins] ( identifier[ident] . identifier[raw] ())
identifier[scope] . identifier[current] = keyword[None]
keyword[if] identifier[mixins] keyword[or] identifier[i] > literal[int] :
keyword[break]
identifier[scope] . identifier[deferred] . identifier[parsed] [- literal[int] ]. identifier[pop] ()
identifier[i] += literal[int]
identifier[scope] . identifier[deferred] . identifier[parsed] [- literal[int] ]= identifier[store]
keyword[if] keyword[not] identifier[mixins] :
identifier[block] = identifier[scope] . identifier[blocks] ( identifier[ident] . identifier[raw] ())
keyword[if] keyword[not] identifier[block] :
identifier[ident] . identifier[parse] ( keyword[None] )
identifier[block] = identifier[scope] . identifier[blocks] ( identifier[ident] . identifier[raw] ())
keyword[if] identifier[block] :
identifier[scope] . identifier[current] = identifier[scope] . identifier[real] [- literal[int] ] keyword[if] identifier[scope] . identifier[real] keyword[else] keyword[None]
identifier[res] = identifier[block] . identifier[copy_inner] ( identifier[scope] )
identifier[scope] . identifier[current] = keyword[None]
keyword[if] identifier[mixins] :
keyword[for] identifier[mixin] keyword[in] identifier[mixins] :
identifier[scope] . identifier[current] = identifier[scope] . identifier[real] [- literal[int] ] keyword[if] identifier[scope] . identifier[real] keyword[else] keyword[None]
identifier[res] = identifier[mixin] . identifier[call] ( identifier[scope] , identifier[args] )
keyword[if] identifier[res] :
[ identifier[scope] . identifier[add_variable] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[mixin] . identifier[vars] ]
identifier[scope] . identifier[deferred] = identifier[ident]
keyword[break]
keyword[if] identifier[res] :
identifier[store] =[ identifier[t] keyword[for] identifier[t] keyword[in] identifier[scope] . identifier[deferred] . identifier[parsed] [- literal[int] ]
] keyword[if] identifier[scope] . identifier[deferred] keyword[else] keyword[False]
identifier[tmp_res] =[]
keyword[for] identifier[p] keyword[in] identifier[res] :
keyword[if] identifier[p] :
keyword[if] identifier[isinstance] ( identifier[p] , identifier[Deferred] ):
identifier[tmp_res] . identifier[append] ( identifier[p] . identifier[parse] ( identifier[scope] , identifier[depth] = identifier[depth] + literal[int] ))
keyword[else] :
identifier[tmp_res] . identifier[append] ( identifier[p] . identifier[parse] ( identifier[scope] ))
identifier[res] = identifier[tmp_res]
keyword[while] ( identifier[any] ( identifier[t] keyword[for] identifier[t] keyword[in] identifier[res] keyword[if] identifier[isinstance] ( identifier[t] , identifier[Deferred] ))):
identifier[res] =[ identifier[p] . identifier[parse] ( identifier[scope] ) keyword[for] identifier[p] keyword[in] identifier[res] keyword[if] identifier[p] ]
keyword[if] identifier[store] :
identifier[scope] . identifier[deferred] . identifier[parsed] [- literal[int] ]= identifier[store]
keyword[if] identifier[error] keyword[and] keyword[not] identifier[res] :
keyword[raise] identifier[SyntaxError] ( literal[string] % identifier[ident] . identifier[raw] ( keyword[True] ))
keyword[return] identifier[res] | def parse(self, scope, error=False, depth=0):
""" Parse function. We search for mixins
first within current scope then fallback
to global scope. The special scope.deferred
is used when local scope mixins are called
within parent mixins.
If nothing is found we fallback to block-mixin
as lessc.js allows calls to blocks and mixins to
be inter-changable.
clx: This method is a HACK that stems from
poor design elsewhere. I will fix it
when I have more time.
args:
scope (Scope): Current scope
returns:
mixed
"""
res = False
(ident, args) = self.tokens
ident.parse(scope)
mixins = scope.mixins(ident.raw())
if not mixins:
ident.parse(None)
mixins = scope.mixins(ident.raw()) # depends on [control=['if'], data=[]]
if depth > 64:
raise SyntaxError('NameError `%s`' % ident.raw(True)) # depends on [control=['if'], data=[]]
if not mixins:
if scope.deferred:
store = [t for t in scope.deferred.parsed[-1]]
i = 0
while scope.deferred.parsed[-1]:
scope.current = scope.deferred
ident.parse(scope)
mixins = scope.mixins(ident.raw())
scope.current = None
if mixins or i > 64:
break # depends on [control=['if'], data=[]]
scope.deferred.parsed[-1].pop()
i += 1 # depends on [control=['while'], data=[]]
scope.deferred.parsed[-1] = store # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not mixins:
# Fallback to blocks
block = scope.blocks(ident.raw())
if not block:
ident.parse(None)
block = scope.blocks(ident.raw()) # depends on [control=['if'], data=[]]
if block:
scope.current = scope.real[-1] if scope.real else None
res = block.copy_inner(scope)
scope.current = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if mixins:
for mixin in mixins:
scope.current = scope.real[-1] if scope.real else None
res = mixin.call(scope, args)
if res:
# Add variables to scope to support
# closures
[scope.add_variable(v) for v in mixin.vars]
scope.deferred = ident
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mixin']] # depends on [control=['if'], data=[]]
if res:
store = [t for t in scope.deferred.parsed[-1]] if scope.deferred else False
tmp_res = []
for p in res:
if p:
if isinstance(p, Deferred):
tmp_res.append(p.parse(scope, depth=depth + 1)) # depends on [control=['if'], data=[]]
else:
tmp_res.append(p.parse(scope)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
res = tmp_res
#res = [p.parse(scope, depth=depth+1) for p in res if p]
while any((t for t in res if isinstance(t, Deferred))):
res = [p.parse(scope) for p in res if p] # depends on [control=['while'], data=[]]
if store:
scope.deferred.parsed[-1] = store # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if error and (not res):
raise SyntaxError('NameError `%s`' % ident.raw(True)) # depends on [control=['if'], data=[]]
return res |
def walk_up(self, depth):
"""
Returns a generator yielding all nodes in the tree at a specific depth,
or above. Yields nodes starting with leaves and traversing up to the
root.
:param depth:
An integer >= 0 of the depth of nodes to walk up from
:return:
A generator yielding PolicyTreeNode objects
"""
for child in list(self.children):
if depth != 0:
for grandchild in child.walk_up(depth - 1):
yield grandchild
yield child | def function[walk_up, parameter[self, depth]]:
constant[
Returns a generator yielding all nodes in the tree at a specific depth,
or above. Yields nodes starting with leaves and traversing up to the
root.
:param depth:
An integer >= 0 of the depth of nodes to walk up from
:return:
A generator yielding PolicyTreeNode objects
]
for taget[name[child]] in starred[call[name[list], parameter[name[self].children]]] begin[:]
if compare[name[depth] not_equal[!=] constant[0]] begin[:]
for taget[name[grandchild]] in starred[call[name[child].walk_up, parameter[binary_operation[name[depth] - constant[1]]]]] begin[:]
<ast.Yield object at 0x7da1b0d8d120>
<ast.Yield object at 0x7da1b0d8d750> | keyword[def] identifier[walk_up] ( identifier[self] , identifier[depth] ):
literal[string]
keyword[for] identifier[child] keyword[in] identifier[list] ( identifier[self] . identifier[children] ):
keyword[if] identifier[depth] != literal[int] :
keyword[for] identifier[grandchild] keyword[in] identifier[child] . identifier[walk_up] ( identifier[depth] - literal[int] ):
keyword[yield] identifier[grandchild]
keyword[yield] identifier[child] | def walk_up(self, depth):
"""
Returns a generator yielding all nodes in the tree at a specific depth,
or above. Yields nodes starting with leaves and traversing up to the
root.
:param depth:
An integer >= 0 of the depth of nodes to walk up from
:return:
A generator yielding PolicyTreeNode objects
"""
for child in list(self.children):
if depth != 0:
for grandchild in child.walk_up(depth - 1):
yield grandchild # depends on [control=['for'], data=['grandchild']] # depends on [control=['if'], data=['depth']]
yield child # depends on [control=['for'], data=['child']] |
def state(self) -> SessionState:
"""The state of the managed Spark session."""
if self.session_id is None:
raise ValueError("session not yet started")
session = self.client.get_session(self.session_id)
if session is None:
raise ValueError("session not found - it may have been shut down")
return session.state | def function[state, parameter[self]]:
constant[The state of the managed Spark session.]
if compare[name[self].session_id is constant[None]] begin[:]
<ast.Raise object at 0x7da1b26adc90>
variable[session] assign[=] call[name[self].client.get_session, parameter[name[self].session_id]]
if compare[name[session] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b26acb50>
return[name[session].state] | keyword[def] identifier[state] ( identifier[self] )-> identifier[SessionState] :
literal[string]
keyword[if] identifier[self] . identifier[session_id] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[session] = identifier[self] . identifier[client] . identifier[get_session] ( identifier[self] . identifier[session_id] )
keyword[if] identifier[session] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[session] . identifier[state] | def state(self) -> SessionState:
"""The state of the managed Spark session."""
if self.session_id is None:
raise ValueError('session not yet started') # depends on [control=['if'], data=[]]
session = self.client.get_session(self.session_id)
if session is None:
raise ValueError('session not found - it may have been shut down') # depends on [control=['if'], data=[]]
return session.state |
def release(self):
"""
Release the lock.
:returns: Returns ``True`` if the lock was released.
"""
unlocked = self.database.run_script(
'lock_release',
keys=[self.key, self.event],
args=[self._lock_id])
return unlocked != 0 | def function[release, parameter[self]]:
constant[
Release the lock.
:returns: Returns ``True`` if the lock was released.
]
variable[unlocked] assign[=] call[name[self].database.run_script, parameter[constant[lock_release]]]
return[compare[name[unlocked] not_equal[!=] constant[0]]] | keyword[def] identifier[release] ( identifier[self] ):
literal[string]
identifier[unlocked] = identifier[self] . identifier[database] . identifier[run_script] (
literal[string] ,
identifier[keys] =[ identifier[self] . identifier[key] , identifier[self] . identifier[event] ],
identifier[args] =[ identifier[self] . identifier[_lock_id] ])
keyword[return] identifier[unlocked] != literal[int] | def release(self):
"""
Release the lock.
:returns: Returns ``True`` if the lock was released.
"""
unlocked = self.database.run_script('lock_release', keys=[self.key, self.event], args=[self._lock_id])
return unlocked != 0 |
def dump_orm_tree_as_insert_sql(engine: Engine,
baseobj: object,
fileobj: TextIO) -> None:
"""
Sends an object, and all its relations (discovered via "relationship"
links) as ``INSERT`` commands in SQL, to ``fileobj``.
Args:
engine: SQLAlchemy :class:`Engine`
baseobj: starting SQLAlchemy ORM object
fileobj: file-like object to write to
Problem: foreign key constraints.
- MySQL/InnoDB doesn't wait to the end of a transaction to check FK
integrity (which it should):
http://stackoverflow.com/questions/5014700/in-mysql-can-i-defer-referential-integrity-checks-until-commit # noqa
- PostgreSQL can.
- Anyway, slightly ugly hacks...
https://dev.mysql.com/doc/refman/5.5/en/optimizing-innodb-bulk-data-loading.html
- Not so obvious how we can iterate through the list of ORM objects and
guarantee correct insertion order with respect to all FKs.
""" # noqa
writeline_nl(
fileobj,
sql_comment("Data for all objects related to the first below:"))
bulk_insert_extras(engine.dialect.name, fileobj, start=True)
for part in walk_orm_tree(baseobj):
dump_orm_object_as_insert_sql(engine, part, fileobj)
bulk_insert_extras(engine.dialect.name, fileobj, start=False) | def function[dump_orm_tree_as_insert_sql, parameter[engine, baseobj, fileobj]]:
constant[
Sends an object, and all its relations (discovered via "relationship"
links) as ``INSERT`` commands in SQL, to ``fileobj``.
Args:
engine: SQLAlchemy :class:`Engine`
baseobj: starting SQLAlchemy ORM object
fileobj: file-like object to write to
Problem: foreign key constraints.
- MySQL/InnoDB doesn't wait to the end of a transaction to check FK
integrity (which it should):
http://stackoverflow.com/questions/5014700/in-mysql-can-i-defer-referential-integrity-checks-until-commit # noqa
- PostgreSQL can.
- Anyway, slightly ugly hacks...
https://dev.mysql.com/doc/refman/5.5/en/optimizing-innodb-bulk-data-loading.html
- Not so obvious how we can iterate through the list of ORM objects and
guarantee correct insertion order with respect to all FKs.
]
call[name[writeline_nl], parameter[name[fileobj], call[name[sql_comment], parameter[constant[Data for all objects related to the first below:]]]]]
call[name[bulk_insert_extras], parameter[name[engine].dialect.name, name[fileobj]]]
for taget[name[part]] in starred[call[name[walk_orm_tree], parameter[name[baseobj]]]] begin[:]
call[name[dump_orm_object_as_insert_sql], parameter[name[engine], name[part], name[fileobj]]]
call[name[bulk_insert_extras], parameter[name[engine].dialect.name, name[fileobj]]] | keyword[def] identifier[dump_orm_tree_as_insert_sql] ( identifier[engine] : identifier[Engine] ,
identifier[baseobj] : identifier[object] ,
identifier[fileobj] : identifier[TextIO] )-> keyword[None] :
literal[string]
identifier[writeline_nl] (
identifier[fileobj] ,
identifier[sql_comment] ( literal[string] ))
identifier[bulk_insert_extras] ( identifier[engine] . identifier[dialect] . identifier[name] , identifier[fileobj] , identifier[start] = keyword[True] )
keyword[for] identifier[part] keyword[in] identifier[walk_orm_tree] ( identifier[baseobj] ):
identifier[dump_orm_object_as_insert_sql] ( identifier[engine] , identifier[part] , identifier[fileobj] )
identifier[bulk_insert_extras] ( identifier[engine] . identifier[dialect] . identifier[name] , identifier[fileobj] , identifier[start] = keyword[False] ) | def dump_orm_tree_as_insert_sql(engine: Engine, baseobj: object, fileobj: TextIO) -> None:
"""
Sends an object, and all its relations (discovered via "relationship"
links) as ``INSERT`` commands in SQL, to ``fileobj``.
Args:
engine: SQLAlchemy :class:`Engine`
baseobj: starting SQLAlchemy ORM object
fileobj: file-like object to write to
Problem: foreign key constraints.
- MySQL/InnoDB doesn't wait to the end of a transaction to check FK
integrity (which it should):
http://stackoverflow.com/questions/5014700/in-mysql-can-i-defer-referential-integrity-checks-until-commit # noqa
- PostgreSQL can.
- Anyway, slightly ugly hacks...
https://dev.mysql.com/doc/refman/5.5/en/optimizing-innodb-bulk-data-loading.html
- Not so obvious how we can iterate through the list of ORM objects and
guarantee correct insertion order with respect to all FKs.
""" # noqa
writeline_nl(fileobj, sql_comment('Data for all objects related to the first below:'))
bulk_insert_extras(engine.dialect.name, fileobj, start=True)
for part in walk_orm_tree(baseobj):
dump_orm_object_as_insert_sql(engine, part, fileobj) # depends on [control=['for'], data=['part']]
bulk_insert_extras(engine.dialect.name, fileobj, start=False) |
def update_value(self, name, value):
"""
Update value in the array of values.
:param name: Key of the value, str
:param value: New value, str
:return: None
"""
if name not in {'id', self.SERVICE_ENDPOINT, self.CONSUME_ENDPOINT, 'type'}:
self._values[name] = value | def function[update_value, parameter[self, name, value]]:
constant[
Update value in the array of values.
:param name: Key of the value, str
:param value: New value, str
:return: None
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> <ast.Set object at 0x7da2043461a0>] begin[:]
call[name[self]._values][name[name]] assign[=] name[value] | keyword[def] identifier[update_value] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] { literal[string] , identifier[self] . identifier[SERVICE_ENDPOINT] , identifier[self] . identifier[CONSUME_ENDPOINT] , literal[string] }:
identifier[self] . identifier[_values] [ identifier[name] ]= identifier[value] | def update_value(self, name, value):
"""
Update value in the array of values.
:param name: Key of the value, str
:param value: New value, str
:return: None
"""
if name not in {'id', self.SERVICE_ENDPOINT, self.CONSUME_ENDPOINT, 'type'}:
self._values[name] = value # depends on [control=['if'], data=['name']] |
def iter_and_close(file_like, block_size):
"""Yield file contents by block then close the file."""
while 1:
try:
block = file_like.read(block_size)
if block:
yield block
else:
raise StopIteration
except StopIteration:
file_like.close()
return | def function[iter_and_close, parameter[file_like, block_size]]:
constant[Yield file contents by block then close the file.]
while constant[1] begin[:]
<ast.Try object at 0x7da1b24bb7c0> | keyword[def] identifier[iter_and_close] ( identifier[file_like] , identifier[block_size] ):
literal[string]
keyword[while] literal[int] :
keyword[try] :
identifier[block] = identifier[file_like] . identifier[read] ( identifier[block_size] )
keyword[if] identifier[block] :
keyword[yield] identifier[block]
keyword[else] :
keyword[raise] identifier[StopIteration]
keyword[except] identifier[StopIteration] :
identifier[file_like] . identifier[close] ()
keyword[return] | def iter_and_close(file_like, block_size):
"""Yield file contents by block then close the file."""
while 1:
try:
block = file_like.read(block_size)
if block:
yield block # depends on [control=['if'], data=[]]
else:
raise StopIteration # depends on [control=['try'], data=[]]
except StopIteration:
file_like.close()
return # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def is_wildcard_nm(nm):
"""Return true if the netmask is in wildcard bits notatation."""
try:
dec = 0xFFFFFFFF - _dot_to_dec(nm, check=True)
except ValueError:
return False
if dec in _NETMASKS_VALUES:
return True
return False | def function[is_wildcard_nm, parameter[nm]]:
constant[Return true if the netmask is in wildcard bits notatation.]
<ast.Try object at 0x7da1afe71ab0>
if compare[name[dec] in name[_NETMASKS_VALUES]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_wildcard_nm] ( identifier[nm] ):
literal[string]
keyword[try] :
identifier[dec] = literal[int] - identifier[_dot_to_dec] ( identifier[nm] , identifier[check] = keyword[True] )
keyword[except] identifier[ValueError] :
keyword[return] keyword[False]
keyword[if] identifier[dec] keyword[in] identifier[_NETMASKS_VALUES] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_wildcard_nm(nm):
"""Return true if the netmask is in wildcard bits notatation."""
try:
dec = 4294967295 - _dot_to_dec(nm, check=True) # depends on [control=['try'], data=[]]
except ValueError:
return False # depends on [control=['except'], data=[]]
if dec in _NETMASKS_VALUES:
return True # depends on [control=['if'], data=[]]
return False |
def uniprot_ec(uniprot_id):
"""Retrieve the EC number annotation for a UniProt ID.
Args:
uniprot_id: Valid UniProt ID
Returns:
"""
r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id)
ec = r.content.decode('utf-8').splitlines()[1]
if len(ec) == 0:
ec = None
return ec | def function[uniprot_ec, parameter[uniprot_id]]:
constant[Retrieve the EC number annotation for a UniProt ID.
Args:
uniprot_id: Valid UniProt ID
Returns:
]
variable[r] assign[=] call[name[requests].post, parameter[binary_operation[constant[http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab] <ast.Mod object at 0x7da2590d6920> name[uniprot_id]]]]
variable[ec] assign[=] call[call[call[name[r].content.decode, parameter[constant[utf-8]]].splitlines, parameter[]]][constant[1]]
if compare[call[name[len], parameter[name[ec]]] equal[==] constant[0]] begin[:]
variable[ec] assign[=] constant[None]
return[name[ec]] | keyword[def] identifier[uniprot_ec] ( identifier[uniprot_id] ):
literal[string]
identifier[r] = identifier[requests] . identifier[post] ( literal[string] % identifier[uniprot_id] )
identifier[ec] = identifier[r] . identifier[content] . identifier[decode] ( literal[string] ). identifier[splitlines] ()[ literal[int] ]
keyword[if] identifier[len] ( identifier[ec] )== literal[int] :
identifier[ec] = keyword[None]
keyword[return] identifier[ec] | def uniprot_ec(uniprot_id):
"""Retrieve the EC number annotation for a UniProt ID.
Args:
uniprot_id: Valid UniProt ID
Returns:
"""
r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id)
ec = r.content.decode('utf-8').splitlines()[1]
if len(ec) == 0:
ec = None # depends on [control=['if'], data=[]]
return ec |
def xformers(sig):
"""
Get the list of xformer functions for the given signature.
:param str sig: a signature
:returns: a list of xformer functions for the given signature.
:rtype: list of tuple of a function * str
Each function catches all TypeErrors it encounters and raises
corresponding IntoDPValueError exceptions.
"""
return \
[(_wrapper(f), l) for (f, l) in \
_XFORMER.PARSER.parseString(sig, parseAll=True)] | def function[xformers, parameter[sig]]:
constant[
Get the list of xformer functions for the given signature.
:param str sig: a signature
:returns: a list of xformer functions for the given signature.
:rtype: list of tuple of a function * str
Each function catches all TypeErrors it encounters and raises
corresponding IntoDPValueError exceptions.
]
return[<ast.ListComp object at 0x7da18dc054e0>] | keyword[def] identifier[xformers] ( identifier[sig] ):
literal[string]
keyword[return] [( identifier[_wrapper] ( identifier[f] ), identifier[l] ) keyword[for] ( identifier[f] , identifier[l] ) keyword[in] identifier[_XFORMER] . identifier[PARSER] . identifier[parseString] ( identifier[sig] , identifier[parseAll] = keyword[True] )] | def xformers(sig):
"""
Get the list of xformer functions for the given signature.
:param str sig: a signature
:returns: a list of xformer functions for the given signature.
:rtype: list of tuple of a function * str
Each function catches all TypeErrors it encounters and raises
corresponding IntoDPValueError exceptions.
"""
return [(_wrapper(f), l) for (f, l) in _XFORMER.PARSER.parseString(sig, parseAll=True)] |
def select_by_visible_text(self, text):
"""
Performs search of selected item from Web List
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text)) | def function[select_by_visible_text, parameter[self, text]]:
constant[
Performs search of selected item from Web List
@params text - string visible text
]
variable[xpath] assign[=] call[constant[.//option[normalize-space(.) = {0}]].format, parameter[call[name[self]._escape_string, parameter[name[text]]]]]
variable[opts] assign[=] call[name[self].find_elements_by_xpath, parameter[name[xpath]]]
variable[matched] assign[=] constant[False]
for taget[name[opt]] in starred[name[opts]] begin[:]
call[name[self]._set_selected, parameter[name[opt]]]
if <ast.UnaryOp object at 0x7da18eb57eb0> begin[:]
return[None]
variable[matched] assign[=] constant[True]
if <ast.BoolOp object at 0x7da18eb55810> begin[:]
variable[sub_string_without_space] assign[=] call[name[self]._get_longest_token, parameter[name[text]]]
if compare[name[sub_string_without_space] equal[==] constant[]] begin[:]
variable[candidates] assign[=] call[name[self].get_options, parameter[]]
for taget[name[candidate]] in starred[name[candidates]] begin[:]
if compare[name[text] equal[==] name[candidate].text] begin[:]
call[name[self]._set_selected, parameter[name[candidate]]]
if <ast.UnaryOp object at 0x7da18eb55570> begin[:]
return[None]
variable[matched] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da18eb549d0> begin[:]
<ast.Raise object at 0x7da18eb55210> | keyword[def] identifier[select_by_visible_text] ( identifier[self] , identifier[text] ):
literal[string]
identifier[xpath] = literal[string] . identifier[format] ( identifier[self] . identifier[_escape_string] ( identifier[text] ))
identifier[opts] = identifier[self] . identifier[find_elements_by_xpath] ( identifier[xpath] )
identifier[matched] = keyword[False]
keyword[for] identifier[opt] keyword[in] identifier[opts] :
identifier[self] . identifier[_set_selected] ( identifier[opt] )
keyword[if] keyword[not] identifier[self] . identifier[is_multiple] :
keyword[return]
identifier[matched] = keyword[True]
keyword[if] identifier[len] ( identifier[opts] )== literal[int] keyword[and] literal[string] keyword[in] identifier[text] :
identifier[sub_string_without_space] = identifier[self] . identifier[_get_longest_token] ( identifier[text] )
keyword[if] identifier[sub_string_without_space] == literal[string] :
identifier[candidates] = identifier[self] . identifier[get_options] ()
keyword[else] :
identifier[xpath] = literal[string] . identifier[format] ( identifier[self] . identifier[_escape_string] ( identifier[sub_string_without_space] ))
identifier[candidates] = identifier[self] . identifier[find_elements_by_xpath] ( identifier[xpath] )
keyword[for] identifier[candidate] keyword[in] identifier[candidates] :
keyword[if] identifier[text] == identifier[candidate] . identifier[text] :
identifier[self] . identifier[_set_selected] ( identifier[candidate] )
keyword[if] keyword[not] identifier[self] . identifier[is_multiple] :
keyword[return]
identifier[matched] = keyword[True]
keyword[if] keyword[not] identifier[matched] :
keyword[raise] identifier[NoSuchElementException] ( literal[string] + identifier[str] ( identifier[text] )) | def select_by_visible_text(self, text):
"""
Performs search of selected item from Web List
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return # depends on [control=['if'], data=[]]
matched = True # depends on [control=['for'], data=['opt']]
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == '':
candidates = self.get_options() # depends on [control=['if'], data=[]]
else:
xpath = './/option[contains(.,{0})]'.format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return # depends on [control=['if'], data=[]]
matched = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['candidate']] # depends on [control=['if'], data=[]]
if not matched:
raise NoSuchElementException('Could not locate element with visible text: ' + str(text)) # depends on [control=['if'], data=[]] |
def xrange(self, name, min='-', max='+', count=None):
"""
Read stream values within an interval.
name: name of the stream.
start: first stream ID. defaults to '-',
meaning the earliest available.
finish: last stream ID. defaults to '+',
meaning the latest available.
count: if set, only return this many items, beginning with the
earliest available.
"""
pieces = [min, max]
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError('XRANGE count must be a positive integer')
pieces.append(Token.get_token('COUNT'))
pieces.append(str(count))
return self.execute_command('XRANGE', name, *pieces) | def function[xrange, parameter[self, name, min, max, count]]:
constant[
Read stream values within an interval.
name: name of the stream.
start: first stream ID. defaults to '-',
meaning the earliest available.
finish: last stream ID. defaults to '+',
meaning the latest available.
count: if set, only return this many items, beginning with the
earliest available.
]
variable[pieces] assign[=] list[[<ast.Name object at 0x7da1b1c7a770>, <ast.Name object at 0x7da1b1c7a050>]]
if compare[name[count] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b1c78f10> begin[:]
<ast.Raise object at 0x7da1b1c7a8f0>
call[name[pieces].append, parameter[call[name[Token].get_token, parameter[constant[COUNT]]]]]
call[name[pieces].append, parameter[call[name[str], parameter[name[count]]]]]
return[call[name[self].execute_command, parameter[constant[XRANGE], name[name], <ast.Starred object at 0x7da1b26ad270>]]] | keyword[def] identifier[xrange] ( identifier[self] , identifier[name] , identifier[min] = literal[string] , identifier[max] = literal[string] , identifier[count] = keyword[None] ):
literal[string]
identifier[pieces] =[ identifier[min] , identifier[max] ]
keyword[if] identifier[count] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[count] ,( identifier[int] , identifier[long] )) keyword[or] identifier[count] < literal[int] :
keyword[raise] identifier[DataError] ( literal[string] )
identifier[pieces] . identifier[append] ( identifier[Token] . identifier[get_token] ( literal[string] ))
identifier[pieces] . identifier[append] ( identifier[str] ( identifier[count] ))
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] ,* identifier[pieces] ) | def xrange(self, name, min='-', max='+', count=None):
"""
Read stream values within an interval.
name: name of the stream.
start: first stream ID. defaults to '-',
meaning the earliest available.
finish: last stream ID. defaults to '+',
meaning the latest available.
count: if set, only return this many items, beginning with the
earliest available.
"""
pieces = [min, max]
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError('XRANGE count must be a positive integer') # depends on [control=['if'], data=[]]
pieces.append(Token.get_token('COUNT'))
pieces.append(str(count)) # depends on [control=['if'], data=['count']]
return self.execute_command('XRANGE', name, *pieces) |
def popup(self):
""" Show the notification from code. This will initialize and activate
if needed.
Notes
------
This does NOT block. Callbacks should be used to handle click events
or the `show` state should be observed to know when it is closed.
"""
if not self.is_initialized:
self.initialize()
if not self.proxy_is_active:
self.activate_proxy()
self.show = True | def function[popup, parameter[self]]:
constant[ Show the notification from code. This will initialize and activate
if needed.
Notes
------
This does NOT block. Callbacks should be used to handle click events
or the `show` state should be observed to know when it is closed.
]
if <ast.UnaryOp object at 0x7da1b1b9d090> begin[:]
call[name[self].initialize, parameter[]]
if <ast.UnaryOp object at 0x7da1b1b9dd80> begin[:]
call[name[self].activate_proxy, parameter[]]
name[self].show assign[=] constant[True] | keyword[def] identifier[popup] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_initialized] :
identifier[self] . identifier[initialize] ()
keyword[if] keyword[not] identifier[self] . identifier[proxy_is_active] :
identifier[self] . identifier[activate_proxy] ()
identifier[self] . identifier[show] = keyword[True] | def popup(self):
""" Show the notification from code. This will initialize and activate
if needed.
Notes
------
This does NOT block. Callbacks should be used to handle click events
or the `show` state should be observed to know when it is closed.
"""
if not self.is_initialized:
self.initialize() # depends on [control=['if'], data=[]]
if not self.proxy_is_active:
self.activate_proxy() # depends on [control=['if'], data=[]]
self.show = True |
def sendline(command, method='cli_show_ascii', **kwargs):
    '''
    Send arbitray commands to the NX-OS device.
    command
        The command to be sent.
    method:
        ``cli_show_ascii``: Return raw test or unstructured output.
        ``cli_show``: Return structured output.
        ``cli_conf``: Send configuration commands to the device.
        Defaults to ``cli_show_ascii``.
    NOTE: method is ignored for SSH proxy minion. All data is returned
    unstructured.
    .. code-block: bash
        salt '*' nxos.cmd sendline 'show run | include "^username admin password"'
    '''
    valid_methods = ['cli_show_ascii', 'cli_show', 'cli_conf']
    if method in valid_methods:
        # Proxy minions delegate to the proxy module; otherwise hit the
        # NX-API endpoint directly.
        if salt.utils.platform.is_proxy():
            return __proxy__['nxos.sendline'](command, method, **kwargs)
        return _nxapi_request(command, method, **kwargs)
    # Unknown method: hand back a usage message instead of raising.
    return """
    INPUT ERROR: Second argument 'method' must be one of {0}
    Value passed: {1}
    Hint: White space separated commands should be wrapped by double quotes
    """.format(valid_methods, method)
constant[
Send arbitray commands to the NX-OS device.
command
The command to be sent.
method:
``cli_show_ascii``: Return raw test or unstructured output.
``cli_show``: Return structured output.
``cli_conf``: Send configuration commands to the device.
Defaults to ``cli_show_ascii``.
NOTE: method is ignored for SSH proxy minion. All data is returned
unstructured.
.. code-block: bash
salt '*' nxos.cmd sendline 'show run | include "^username admin password"'
]
variable[smethods] assign[=] list[[<ast.Constant object at 0x7da1b2035690>, <ast.Constant object at 0x7da1b20347f0>, <ast.Constant object at 0x7da1b2037d30>]]
if compare[name[method] <ast.NotIn object at 0x7da2590d7190> name[smethods]] begin[:]
variable[msg] assign[=] call[constant[
INPUT ERROR: Second argument 'method' must be one of {0}
Value passed: {1}
Hint: White space separated commands should be wrapped by double quotes
].format, parameter[name[smethods], name[method]]]
return[name[msg]]
if call[name[salt].utils.platform.is_proxy, parameter[]] begin[:]
return[call[call[name[__proxy__]][constant[nxos.sendline]], parameter[name[command], name[method]]]] | keyword[def] identifier[sendline] ( identifier[command] , identifier[method] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[smethods] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[method] keyword[not] keyword[in] identifier[smethods] :
identifier[msg] = literal[string] . identifier[format] ( identifier[smethods] , identifier[method] )
keyword[return] identifier[msg]
keyword[if] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_proxy] ():
keyword[return] identifier[__proxy__] [ literal[string] ]( identifier[command] , identifier[method] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[_nxapi_request] ( identifier[command] , identifier[method] ,** identifier[kwargs] ) | def sendline(command, method='cli_show_ascii', **kwargs):
"""
Send arbitray commands to the NX-OS device.
command
The command to be sent.
method:
``cli_show_ascii``: Return raw test or unstructured output.
``cli_show``: Return structured output.
``cli_conf``: Send configuration commands to the device.
Defaults to ``cli_show_ascii``.
NOTE: method is ignored for SSH proxy minion. All data is returned
unstructured.
.. code-block: bash
salt '*' nxos.cmd sendline 'show run | include "^username admin password"'
"""
smethods = ['cli_show_ascii', 'cli_show', 'cli_conf']
if method not in smethods:
msg = "\n INPUT ERROR: Second argument 'method' must be one of {0}\n Value passed: {1}\n Hint: White space separated commands should be wrapped by double quotes\n ".format(smethods, method)
return msg # depends on [control=['if'], data=['method', 'smethods']]
if salt.utils.platform.is_proxy():
return __proxy__['nxos.sendline'](command, method, **kwargs) # depends on [control=['if'], data=[]]
else:
return _nxapi_request(command, method, **kwargs) |
def _2g_bin(dir_path=".", mag_file="", meas_file='measurements.txt',
            spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt",
            loc_file="locations.txt", or_con='3', specnum=0, samp_con='2', corr='1',
            gmeths="FS-FD:SO-POM", location="unknown", inst="", user="", noave=False, input_dir="",
            lat="", lon=""):
    """
    Convert 2G binary format file to MagIC file(s)

    Parameters
    ----------
    dir_path : str
        output directory, default "."
    mag_file : str
        input file name
    meas_file : str
        output measurement file name, default "measurements.txt"
    spec_file : str
        output specimen file name, default "specimens.txt"
    samp_file: str
        output sample file name, default "samples.txt"
    site_file : str
        output site file name, default "sites.txt"
    loc_file : str
        output location file name, default "locations.txt"
    or_con : number
        orientation convention, default '3', see info below
    specnum : int
        number of characters to designate a specimen, default 0
    samp_con : str
        sample/site naming convention, default '2', see info below
    corr: str
        default '1'
    gmeths : str
        sampling method codes, default "FS-FD:SO-POM", see info below
    location : str
        location name, default "unknown"
    inst : str
        instrument, default ""
    user : str
        user name, default ""
    noave : bool
        do not average duplicate measurements, default False (so by default, DO average)
    input_dir : str
        input file directory IF different from dir_path, default ""
    lat : float
        latitude, default ""
    lon : float
        longitude, default ""

    Returns
    ---------
    Tuple : (True or False indicating if conversion was sucessful, meas_file name written)

    Info
    ----------
    Orientation convention:
        [1] Lab arrow azimuth= mag_azimuth; Lab arrow dip=-field_dip
            i.e., field_dip is degrees from vertical down - the hade [default]
        [2] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = -field_dip
            i.e., mag_azimuth is strike and field_dip is hade
        [3] Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
            i.e., lab arrow same as field arrow, but field_dip was a hade.
        [4] lab azimuth and dip are same as mag_azimuth, field_dip
        [5] lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
        [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
        [7] all others you will have to either customize your
            self or e-mail ltauxe@ucsd.edu for help.

    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name = sample name
        [6] site name entered in site_name column in the orient.txt format input file  -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXX]YYY:  XXX is site designation with Z characters from samples  XXXYYY

    Sampling method codes:
        FS-FD field sampling done with a drill
        FS-H field sampling done with hand samples
        FS-LOC-GPS  field location done with GPS
        FS-LOC-MAP  field location done with map
        SO-POM   a Pomeroy orientation device was used
        SO-ASC   an ASC orientation device was used
        SO-MAG   orientation with magnetic compass
        SO-SUN   orientation with sun compass
    """
    # Helper: step the token index `ind` forward through the '\x00'-split
    # record L.  Each of the N iterations advances one position and then
    # consumes any run of empty tokens; a final extra advance is done after
    # the loop, so the index lands on the (N+1)th non-empty token.
    # NOTE(review): the "+1 landing step" convention must match the 2G
    # record layout assumed by the callers below -- confirm against the
    # 2G binary file format description.
    def skip(N, ind, L):
        for b in range(N):
            ind += 1
            while L[ind] == "":
                ind += 1
        ind += 1
        while L[ind] == "":
            ind += 1
        return ind
    #
    # initialize variables
    #
    bed_dip, bed_dip_dir = "", ""
    sclass, lithology, _type = "", "", ""
    DecCorr = 0.
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # format and fix variables
    specnum = int(specnum)
    # negative so specname[:specnum] strips trailing characters to get the sample name
    specnum = -specnum
    input_dir_path, dir_path = pmag.fix_directories(input_dir, dir_path)
    # decode the sample/site naming convention; options 4-Z and 7-Z carry an
    # integer suffix Z, option 6 is unsupported
    if samp_con:
        Z = 1
        if "4" in samp_con:
            if "-" not in samp_con:
                print("option [4] must be in form 4-Z where Z is an integer")
                return False, "option [4] must be in form 4-Z where Z is an integer"
            else:
                Z = samp_con.split("-")[1]
                samp_con = "4"
        if "7" in samp_con:
            if "-" not in samp_con:
                print("option [7] must be in form 7-Z where Z is an integer")
                return False, "option [7] must be in form 7-Z where Z is an integer"
            else:
                Z = samp_con.split("-")[1]
                samp_con = "7"
        if "6" in samp_con:
            print('Naming convention option [6] not currently supported')
            return False, 'Naming convention option [6] not currently supported'
            # Z=1
            # try:
            #     SampRecs,file_type=pmag.magic_read(os.path.join(input_dir_path, 'er_samples.txt'))
            # except:
            #     print("there is no er_samples.txt file in your input directory - you can't use naming convention #6")
            #     return False, "there is no er_samples.txt file in your input directory - you can't use naming convention #6"
            # if file_type == 'bad_file':
            #     print("there is no er_samples.txt file in your input directory - you can't use naming convention #6")
            #     return False, "there is no er_samples.txt file in your input directory - you can't use naming convention #6"
    # else: Z=1
    if not mag_file:
        print("mag file is required input")
        return False, "mag file is required input"
    output_dir_path = dir_path
    mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
    samplist = []
    # NOTE(review): bare except silently discards any failure reading an
    # existing samples file (including typos in samp_file); consider
    # narrowing to the specific exception pmag.magic_read can raise.
    try:
        SampRecs, file_type = pmag.magic_read(samp_file)
    except:
        SampRecs = []
    MeasRecs, SpecRecs, SiteRecs, LocRecs = [], [], [], []
    # Read the whole binary file and coerce it to text via str(); the bytes
    # repr turns control bytes into literal escape text (backslash-x-..),
    # which is why the code below splits on the 4-character strings '\\xcd'
    # and '\\x00' rather than on real control characters.
    # NOTE(review): `input` shadows the builtin of the same name for the
    # remainder of this function.
    try:
        f = open(mag_file, 'br')
        input = str(f.read()).strip("b '")
        f.close()
    except Exception as ex:
        print('ex', ex)
        print("bad mag file")
        return False, "bad mag file"
    firstline, date = 1, ""
    # '\xcd' (as escaped text) delimits the records of the 2G dump
    d = input.split('\\xcd')
    for line in d:
        rec = line.split('\\x00')
        # skip nearly empty lines
        rec_not_null = [i for i in rec if i]
        if len(rec_not_null) < 5:
            continue
        if firstline == 1:
            # --- header record: specimen name, volume, orientation data ---
            firstline = 0
            spec, vol = "", 1
            el = 51
            #while line[el:el+1] != "\\":
            #    spec = spec+line[el]
            #    el += 1
            spec = rec[7]
            # check for bad sample name
            test = spec.split('.')
            date = ""
            if len(test) > 1:
                spec = test[0]
                kk = 24
                while line[kk] != '\\x01' and line[kk] != '\\x00':
                    kk += 1
                vcc = line[24:kk]
                el = 10
                while rec[el].strip() != '':
                    el += 1
                date, comments = rec[el+7], []
            else:
                el = 9
                while rec[el] != '\\x01':
                    el += 1
                vcc, date, comments = rec[el-3], rec[el+7], []
            specname = spec.lower()
            print('importing ', specname)
            el += 8
            # free-text comment tokens precede the first numeric field
            while rec[el].isdigit() == False:
                comments.append(rec[el])
                el += 1
            while rec[el] == "":
                el += 1
            az = float(rec[el])
            el += 1
            while rec[el] == "":
                el += 1
            pl = float(rec[el])
            el += 1
            while rec[el] == "":
                el += 1
            bed_dip_dir = float(rec[el])
            el += 1
            while rec[el] == "":
                el += 1
            bed_dip = float(rec[el])
            el += 1
            while rec[el] == "":
                el += 1
            # a '\x01' flag token here marks an overturned bed: flip the dip
            # and consume the flag before reading fold azimuth
            if rec[el] == '\\x01':
                bed_dip = 180.-bed_dip
                el += 1
                while rec[el] == "":
                    el += 1
            fold_az = float(rec[el])
            el += 1
            while rec[el] == "":
                el += 1
            fold_pl = rec[el]
            el += 1
            while rec[el] == "":
                el += 1
            # optional declination correction: apply it to all azimuths and
            # wrap back into [0, 360)
            if rec[el] != "" and rec[el] != '\\x02' and rec[el] != '\\x01':
                deccorr = float(rec[el])
                az += deccorr
                bed_dip_dir += deccorr
                fold_az += deccorr
                if bed_dip_dir >= 360:
                    bed_dip_dir = bed_dip_dir-360.
                if az >= 360.:
                    az = az-360.
                if fold_az >= 360.:
                    fold_az = fold_az-360.
            else:
                deccorr = 0
            if specnum != 0:
                sample = specname[:specnum]
            else:
                sample = specname
            methods = gmeths.split(':')
            # NOTE(review): deccorr is numeric (float or int 0) but is
            # compared against the *string* "0", so this condition is always
            # True and SO-CMD-NORTH is always appended -- confirm whether
            # `deccorr != 0` was intended.
            if deccorr != "0":
                if 'SO-MAG' in methods:
                    del methods[methods.index('SO-MAG')]
                methods.append('SO-CMD-NORTH')
            # join method codes with ':' (reduce is functools.reduce on
            # Python 3 -- presumably imported at module level; TODO confirm)
            meths = reduce(lambda x, y: x+':'+y, methods)
            method_codes = meths
            # parse out the site name
            site = pmag.parse_site(sample, samp_con, Z)
            SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}
            SpecRec["specimen"] = specname
            SpecRec["sample"] = sample
            if vcc.strip() != "":
                vol = float(vcc)*1e-6  # convert to m^3 from cc
            SpecRec["volume"] = '%10.3e' % (vol)
            SpecRec["geologic_classes"] = sclass
            SpecRec["lithologies"] = lithology
            SpecRec["geologic_types"] = _type
            SpecRecs.append(SpecRec)
            # only add sample/site/location records not already present
            if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
                SampRec["sample"] = sample
                SampRec["site"] = site
                # convert to labaz, labpl
                labaz, labdip = pmag.orient(az, pl, or_con)
                SampRec["bed_dip"] = '%7.1f' % (bed_dip)
                SampRec["bed_dip_direction"] = '%7.1f' % (bed_dip_dir)
                SampRec["dip"] = '%7.1f' % (labdip)
                SampRec["azimuth"] = '%7.1f' % (labaz)
                SampRec["azimuth_dec_correction"] = '%7.1f' % (deccorr)
                SampRec["geologic_classes"] = sclass
                SampRec["lithologies"] = lithology
                SampRec["geologic_types"] = _type
                SampRec["method_codes"] = method_codes
                SampRecs.append(SampRec)
            if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
                SiteRec['site'] = site
                SiteRec['location'] = location
                SiteRec['lat'] = lat
                SiteRec['lon'] = lon
                SiteRec["geologic_classes"] = sclass
                SiteRec["lithologies"] = lithology
                SiteRec["geologic_types"] = _type
                SiteRecs.append(SiteRec)
            if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
                LocRec['location'] = location
                LocRec['lat_n'] = lat
                LocRec['lon_e'] = lon
                LocRec['lat_s'] = lat
                LocRec['lon_w'] = lon
                # LocRec["geologic_classes"]=sclass
                # LocRec["lithologies"]=lithology
                # LocRec["geologic_types"]=_type
                LocRecs.append(LocRec)
        else:
            # --- measurement record: one demag step for the current specimen ---
            MeasRec = {}
            MeasRec["treat_temp"] = '%8.3e' % (273)  # room temp in kelvin
            MeasRec["meas_temp"] = '%8.3e' % (273)  # room temp in kelvin
            MeasRec["treat_ac_field"] = '0'
            MeasRec["treat_dc_field"] = '0'
            MeasRec["treat_dc_field_phi"] = '0'
            MeasRec["treat_dc_field_theta"] = '0'
            meas_type = "LT-NO"
            MeasRec["quality"] = 'g'
            MeasRec["standard"] = 'u'
            MeasRec["treat_step_num"] = 0
            MeasRec["specimen"] = specname
            el, demag = 1, ''
            # treatment token: e.g. "NRM", AF steps, or thermal steps ending in 'C'
            treat = rec[el]
            if treat[-1] == 'C':
                demag = 'T'
            elif treat != 'NRM':
                demag = 'AF'
            el += 1
            while rec[el] == "":
                el += 1
            MeasRec["dir_dec"] = rec[el]
            cdec = float(rec[el])
            el += 1
            while rec[el] == "":
                el += 1
            MeasRec["dir_inc"] = rec[el]
            cinc = float(rec[el])
            el += 1
            while rec[el] == "":
                el += 1
            gdec = rec[el]
            el += 1
            while rec[el] == "":
                el += 1
            ginc = rec[el]
            el = skip(2, el, rec)  # skip bdec,binc
            # el=skip(4,el,rec) # skip gdec,ginc,bdec,binc
            # print 'moment emu: ',rec[el]
            MeasRec["magn_moment"] = '%10.3e' % (
                float(rec[el])*1e-3)  # moment in Am^2 (from emu)
            MeasRec["magn_volume"] = '%10.3e' % (
                float(rec[el])*1e-3/vol)  # magnetization in A/m
            el = skip(2, el, rec)  # skip to xsig
            MeasRec["magn_x_sigma"] = '%10.3e' % (
                float(rec[el])*1e-3)  # convert from emu
            el = skip(3, el, rec)  # skip to ysig
            MeasRec["magn_y_sigma"] = '%10.3e' % (
                float(rec[el])*1e-3)  # convert from emu
            el = skip(3, el, rec)  # skip to zsig
            MeasRec["magn_z_sigma"] = '%10.3e' % (
                float(rec[el])*1e-3)  # convert from emu
            el += 1  # skip to positions
            MeasRec["meas_n_orient"] = rec[el]
            # el=skip(5,el,rec) # skip to date
            # mm=str(months.index(date[0]))
            # if len(mm)==1:
            #     mm='0'+str(mm)
            # else:
            #     mm=str(mm)
            # dstring=date[2]+':'+mm+':'+date[1]+":"+date[3]
            # MeasRec['measurement_date']=dstring
            MeasRec["instrument_codes"] = inst
            MeasRec["analysts"] = user
            MeasRec["citations"] = "This study"
            MeasRec["method_codes"] = meas_type
            if demag == "AF":
                MeasRec["treat_ac_field"] = '%8.3e' % (
                    float(treat[:-2])*1e-3)  # peak field in tesla
                meas_type = "LT-AF-Z"
                MeasRec["treat_dc_field"] = '0'
            elif demag == "T":
                MeasRec["treat_temp"] = '%8.3e' % (
                    float(treat[:-1])+273.)  # temp in kelvin
                meas_type = "LT-T-Z"
            MeasRec['method_codes'] = meas_type
            MeasRecs.append(MeasRec)
    # assemble the MagIC contribution and write all five tables
    con = cb.Contribution(output_dir_path, read_tables=[])
    con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
    con.add_magic_table_from_data(dtype='samples', data=SampRecs)
    con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
    con.add_magic_table_from_data(dtype='locations', data=LocRecs)
    MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
    con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
    con.write_table_to_file('specimens', custom_name=spec_file)
    con.write_table_to_file('samples', custom_name=samp_file)
    con.write_table_to_file('sites', custom_name=site_file)
    con.write_table_to_file('locations', custom_name=loc_file)
    con.write_table_to_file('measurements', custom_name=meas_file)
    return True, meas_file
constant[
Convert 2G binary format file to MagIC file(s)
Parameters
----------
dir_path : str
output directory, default "."
mag_file : str
input file name
meas_file : str
output measurement file name, default "measurements.txt"
spec_file : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
site_file : str
output site file name, default "sites.txt"
loc_file : str
output location file name, default "locations.txt"
or_con : number
orientation convention, default '3', see info below
specnum : int
number of characters to designate a specimen, default 0
samp_con : str
sample/site naming convention, default '2', see info below
corr: str
default '1'
gmeths : str
sampling method codes, default "FS-FD:SO-POM", see info below
location : str
location name, default "unknown"
inst : str
instrument, default ""
user : str
user name, default ""
noave : bool
do not average duplicate measurements, default False (so by default, DO average)
input_dir : str
input file directory IF different from dir_path, default ""
lat : float
latitude, default ""
lon : float
longitude, default ""
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
Info
----------
Orientation convention:
[1] Lab arrow azimuth= mag_azimuth; Lab arrow dip=-field_dip
i.e., field_dip is degrees from vertical down - the hade [default]
[2] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = -field_dip
i.e., mag_azimuth is strike and field_dip is hade
[3] Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
i.e., lab arrow same as field arrow, but field_dip was a hade.
[4] lab azimuth and dip are same as mag_azimuth, field_dip
[5] lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail ltauxe@ucsd.edu for help.
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
Sampling method codes:
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
SO-SUN orientation with sun compass
]
def function[skip, parameter[N, ind, L]]:
for taget[name[b]] in starred[call[name[range], parameter[name[N]]]] begin[:]
<ast.AugAssign object at 0x7da1b027ae60>
while compare[call[name[L]][name[ind]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b027acb0>
<ast.AugAssign object at 0x7da1b027ac20>
while compare[call[name[L]][name[ind]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b027aa70>
return[name[ind]]
<ast.Tuple object at 0x7da1b027a920> assign[=] tuple[[<ast.Constant object at 0x7da1b027a860>, <ast.Constant object at 0x7da1b027a830>]]
<ast.Tuple object at 0x7da1b027a7d0> assign[=] tuple[[<ast.Constant object at 0x7da1b027a6e0>, <ast.Constant object at 0x7da1b027a6b0>, <ast.Constant object at 0x7da1b027a680>]]
variable[DecCorr] assign[=] constant[0.0]
variable[months] assign[=] list[[<ast.Constant object at 0x7da1b027a530>, <ast.Constant object at 0x7da1b027a500>, <ast.Constant object at 0x7da1b027a4d0>, <ast.Constant object at 0x7da1b027a4a0>, <ast.Constant object at 0x7da1b027a470>, <ast.Constant object at 0x7da1b027a440>, <ast.Constant object at 0x7da1b027a410>, <ast.Constant object at 0x7da1b027a3e0>, <ast.Constant object at 0x7da1b027a3b0>, <ast.Constant object at 0x7da1b027a380>, <ast.Constant object at 0x7da1b027a350>, <ast.Constant object at 0x7da1b027a320>]]
variable[specnum] assign[=] call[name[int], parameter[name[specnum]]]
variable[specnum] assign[=] <ast.UnaryOp object at 0x7da1b027a1a0>
<ast.Tuple object at 0x7da1b027a110> assign[=] call[name[pmag].fix_directories, parameter[name[input_dir], name[dir_path]]]
if name[samp_con] begin[:]
variable[Z] assign[=] constant[1]
if compare[constant[4] in name[samp_con]] begin[:]
if compare[constant[-] <ast.NotIn object at 0x7da2590d7190> name[samp_con]] begin[:]
call[name[print], parameter[constant[option [4] must be in form 4-Z where Z is an integer]]]
return[tuple[[<ast.Constant object at 0x7da1b0279bd0>, <ast.Constant object at 0x7da1b0279ba0>]]]
if compare[constant[7] in name[samp_con]] begin[:]
if compare[constant[-] <ast.NotIn object at 0x7da2590d7190> name[samp_con]] begin[:]
call[name[print], parameter[constant[option [7] must be in form 7-Z where Z is an integer]]]
return[tuple[[<ast.Constant object at 0x7da1b0279660>, <ast.Constant object at 0x7da1b0279630>]]]
if compare[constant[6] in name[samp_con]] begin[:]
call[name[print], parameter[constant[Naming convention option [6] not currently supported]]]
return[tuple[[<ast.Constant object at 0x7da1b02791b0>, <ast.Constant object at 0x7da1b0279180>]]]
if <ast.UnaryOp object at 0x7da1b02790f0> begin[:]
call[name[print], parameter[constant[mag file is required input]]]
return[tuple[[<ast.Constant object at 0x7da1b0278f40>, <ast.Constant object at 0x7da1b0278f10>]]]
variable[output_dir_path] assign[=] name[dir_path]
variable[mag_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[mag_file], name[input_dir_path]]]
variable[samplist] assign[=] list[[]]
<ast.Try object at 0x7da1b0278c40>
<ast.Tuple object at 0x7da1b02789a0> assign[=] tuple[[<ast.List object at 0x7da1b0278880>, <ast.List object at 0x7da1b0278850>, <ast.List object at 0x7da1b0278820>, <ast.List object at 0x7da1b02787f0>]]
<ast.Try object at 0x7da1b02787c0>
<ast.Tuple object at 0x7da1b029bfd0> assign[=] tuple[[<ast.Constant object at 0x7da1b029bf10>, <ast.Constant object at 0x7da1b029bee0>]]
variable[d] assign[=] call[name[input].split, parameter[constant[\xcd]]]
for taget[name[line]] in starred[name[d]] begin[:]
variable[rec] assign[=] call[name[line].split, parameter[constant[\x00]]]
variable[rec_not_null] assign[=] <ast.ListComp object at 0x7da1b029bb80>
if compare[call[name[len], parameter[name[rec_not_null]]] less[<] constant[5]] begin[:]
continue
if compare[name[firstline] equal[==] constant[1]] begin[:]
variable[firstline] assign[=] constant[0]
<ast.Tuple object at 0x7da1b029b730> assign[=] tuple[[<ast.Constant object at 0x7da1b029e740>, <ast.Constant object at 0x7da1b029e710>]]
variable[el] assign[=] constant[51]
variable[spec] assign[=] call[name[rec]][constant[7]]
variable[test] assign[=] call[name[spec].split, parameter[constant[.]]]
variable[date] assign[=] constant[]
if compare[call[name[len], parameter[name[test]]] greater[>] constant[1]] begin[:]
variable[spec] assign[=] call[name[test]][constant[0]]
variable[kk] assign[=] constant[24]
while <ast.BoolOp object at 0x7da1b029e0e0> begin[:]
<ast.AugAssign object at 0x7da1b029ded0>
variable[vcc] assign[=] call[name[line]][<ast.Slice object at 0x7da1b029dd80>]
variable[el] assign[=] constant[10]
while compare[call[call[name[rec]][name[el]].strip, parameter[]] not_equal[!=] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b029dae0>
<ast.Tuple object at 0x7da1b029da20> assign[=] tuple[[<ast.Subscript object at 0x7da1b029d960>, <ast.List object at 0x7da1b029d870>]]
variable[specname] assign[=] call[name[spec].lower, parameter[]]
call[name[print], parameter[constant[importing ], name[specname]]]
<ast.AugAssign object at 0x7da1b029d0c0>
while compare[call[call[name[rec]][name[el]].isdigit, parameter[]] equal[==] constant[False]] begin[:]
call[name[comments].append, parameter[call[name[rec]][name[el]]]]
<ast.AugAssign object at 0x7da1b029cd30>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b029cb80>
variable[az] assign[=] call[name[float], parameter[call[name[rec]][name[el]]]]
<ast.AugAssign object at 0x7da1b029c9a0>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b029c7f0>
variable[pl] assign[=] call[name[float], parameter[call[name[rec]][name[el]]]]
<ast.AugAssign object at 0x7da1b029c610>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b029c460>
variable[bed_dip_dir] assign[=] call[name[float], parameter[call[name[rec]][name[el]]]]
<ast.AugAssign object at 0x7da1b029c280>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b029c0d0>
variable[bed_dip] assign[=] call[name[float], parameter[call[name[rec]][name[el]]]]
<ast.AugAssign object at 0x7da1b020e470>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b020e290>
if compare[call[name[rec]][name[el]] equal[==] constant[\x01]] begin[:]
variable[bed_dip] assign[=] binary_operation[constant[180.0] - name[bed_dip]]
<ast.AugAssign object at 0x7da1b020e680>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b020e830>
variable[fold_az] assign[=] call[name[float], parameter[call[name[rec]][name[el]]]]
<ast.AugAssign object at 0x7da1b020ea10>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b020ebc0>
variable[fold_pl] assign[=] call[name[rec]][name[el]]
<ast.AugAssign object at 0x7da1b020ed40>
while compare[call[name[rec]][name[el]] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b020eef0>
if <ast.BoolOp object at 0x7da1b020efb0> begin[:]
variable[deccorr] assign[=] call[name[float], parameter[call[name[rec]][name[el]]]]
<ast.AugAssign object at 0x7da1b020f400>
<ast.AugAssign object at 0x7da1b020f490>
<ast.AugAssign object at 0x7da1b020f520>
if compare[name[bed_dip_dir] greater_or_equal[>=] constant[360]] begin[:]
variable[bed_dip_dir] assign[=] binary_operation[name[bed_dip_dir] - constant[360.0]]
if compare[name[az] greater_or_equal[>=] constant[360.0]] begin[:]
variable[az] assign[=] binary_operation[name[az] - constant[360.0]]
if compare[name[fold_az] greater_or_equal[>=] constant[360.0]] begin[:]
variable[fold_az] assign[=] binary_operation[name[fold_az] - constant[360.0]]
if compare[name[specnum] not_equal[!=] constant[0]] begin[:]
variable[sample] assign[=] call[name[specname]][<ast.Slice object at 0x7da1b023fa60>]
variable[methods] assign[=] call[name[gmeths].split, parameter[constant[:]]]
if compare[name[deccorr] not_equal[!=] constant[0]] begin[:]
if compare[constant[SO-MAG] in name[methods]] begin[:]
<ast.Delete object at 0x7da1b023f6d0>
call[name[methods].append, parameter[constant[SO-CMD-NORTH]]]
variable[meths] assign[=] call[name[reduce], parameter[<ast.Lambda object at 0x7da1b023f370>, name[methods]]]
variable[method_codes] assign[=] name[meths]
variable[site] assign[=] call[name[pmag].parse_site, parameter[name[sample], name[samp_con], name[Z]]]
<ast.Tuple object at 0x7da1b023ef50> assign[=] tuple[[<ast.Dict object at 0x7da1b023ee30>, <ast.Dict object at 0x7da1b023ee00>, <ast.Dict object at 0x7da1b023edd0>, <ast.Dict object at 0x7da1b023eda0>]]
call[name[SpecRec]][constant[specimen]] assign[=] name[specname]
call[name[SpecRec]][constant[sample]] assign[=] name[sample]
if compare[call[name[vcc].strip, parameter[]] not_equal[!=] constant[]] begin[:]
variable[vol] assign[=] binary_operation[call[name[float], parameter[name[vcc]]] * constant[1e-06]]
call[name[SpecRec]][constant[volume]] assign[=] binary_operation[constant[%10.3e] <ast.Mod object at 0x7da2590d6920> name[vol]]
call[name[SpecRec]][constant[geologic_classes]] assign[=] name[sclass]
call[name[SpecRec]][constant[lithologies]] assign[=] name[lithology]
call[name[SpecRec]][constant[geologic_types]] assign[=] name[_type]
call[name[SpecRecs].append, parameter[name[SpecRec]]]
if <ast.BoolOp object at 0x7da1b023e3b0> begin[:]
call[name[SampRec]][constant[sample]] assign[=] name[sample]
call[name[SampRec]][constant[site]] assign[=] name[site]
<ast.Tuple object at 0x7da1b023dd50> assign[=] call[name[pmag].orient, parameter[name[az], name[pl], name[or_con]]]
call[name[SampRec]][constant[bed_dip]] assign[=] binary_operation[constant[%7.1f] <ast.Mod object at 0x7da2590d6920> name[bed_dip]]
call[name[SampRec]][constant[bed_dip_direction]] assign[=] binary_operation[constant[%7.1f] <ast.Mod object at 0x7da2590d6920> name[bed_dip_dir]]
call[name[SampRec]][constant[dip]] assign[=] binary_operation[constant[%7.1f] <ast.Mod object at 0x7da2590d6920> name[labdip]]
call[name[SampRec]][constant[azimuth]] assign[=] binary_operation[constant[%7.1f] <ast.Mod object at 0x7da2590d6920> name[labaz]]
call[name[SampRec]][constant[azimuth_dec_correction]] assign[=] binary_operation[constant[%7.1f] <ast.Mod object at 0x7da2590d6920> name[deccorr]]
call[name[SampRec]][constant[geologic_classes]] assign[=] name[sclass]
call[name[SampRec]][constant[lithologies]] assign[=] name[lithology]
call[name[SampRec]][constant[geologic_types]] assign[=] name[_type]
call[name[SampRec]][constant[method_codes]] assign[=] name[method_codes]
call[name[SampRecs].append, parameter[name[SampRec]]]
if <ast.BoolOp object at 0x7da1b04560e0> begin[:]
call[name[SiteRec]][constant[site]] assign[=] name[site]
call[name[SiteRec]][constant[location]] assign[=] name[location]
call[name[SiteRec]][constant[lat]] assign[=] name[lat]
call[name[SiteRec]][constant[lon]] assign[=] name[lon]
call[name[SiteRec]][constant[geologic_classes]] assign[=] name[sclass]
call[name[SiteRec]][constant[lithologies]] assign[=] name[lithology]
call[name[SiteRec]][constant[geologic_types]] assign[=] name[_type]
call[name[SiteRecs].append, parameter[name[SiteRec]]]
if <ast.BoolOp object at 0x7da1b023d000> begin[:]
call[name[LocRec]][constant[location]] assign[=] name[location]
call[name[LocRec]][constant[lat_n]] assign[=] name[lat]
call[name[LocRec]][constant[lon_e]] assign[=] name[lon]
call[name[LocRec]][constant[lat_s]] assign[=] name[lat]
call[name[LocRec]][constant[lon_w]] assign[=] name[lon]
call[name[LocRecs].append, parameter[name[LocRec]]]
variable[con] assign[=] call[name[cb].Contribution, parameter[name[output_dir_path]]]
call[name[con].add_magic_table_from_data, parameter[]]
call[name[con].add_magic_table_from_data, parameter[]]
call[name[con].add_magic_table_from_data, parameter[]]
call[name[con].add_magic_table_from_data, parameter[]]
variable[MeasOuts] assign[=] call[name[pmag].measurements_methods3, parameter[name[MeasRecs], name[noave]]]
call[name[con].add_magic_table_from_data, parameter[]]
call[name[con].write_table_to_file, parameter[constant[specimens]]]
call[name[con].write_table_to_file, parameter[constant[samples]]]
call[name[con].write_table_to_file, parameter[constant[sites]]]
call[name[con].write_table_to_file, parameter[constant[locations]]]
call[name[con].write_table_to_file, parameter[constant[measurements]]]
return[tuple[[<ast.Constant object at 0x7da1b04c83d0>, <ast.Name object at 0x7da1b04c84c0>]]] | keyword[def] identifier[_2g_bin] ( identifier[dir_path] = literal[string] , identifier[mag_file] = literal[string] , identifier[meas_file] = literal[string] ,
identifier[spec_file] = literal[string] , identifier[samp_file] = literal[string] , identifier[site_file] = literal[string] ,
identifier[loc_file] = literal[string] , identifier[or_con] = literal[string] , identifier[specnum] = literal[int] , identifier[samp_con] = literal[string] , identifier[corr] = literal[string] ,
identifier[gmeths] = literal[string] , identifier[location] = literal[string] , identifier[inst] = literal[string] , identifier[user] = literal[string] , identifier[noave] = keyword[False] , identifier[input_dir] = literal[string] ,
identifier[lat] = literal[string] , identifier[lon] = literal[string] ):
literal[string]
keyword[def] identifier[skip] ( identifier[N] , identifier[ind] , identifier[L] ):
keyword[for] identifier[b] keyword[in] identifier[range] ( identifier[N] ):
identifier[ind] += literal[int]
keyword[while] identifier[L] [ identifier[ind] ]== literal[string] :
identifier[ind] += literal[int]
identifier[ind] += literal[int]
keyword[while] identifier[L] [ identifier[ind] ]== literal[string] :
identifier[ind] += literal[int]
keyword[return] identifier[ind]
identifier[bed_dip] , identifier[bed_dip_dir] = literal[string] , literal[string]
identifier[sclass] , identifier[lithology] , identifier[_type] = literal[string] , literal[string] , literal[string]
identifier[DecCorr] = literal[int]
identifier[months] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[specnum] = identifier[int] ( identifier[specnum] )
identifier[specnum] =- identifier[specnum]
identifier[input_dir_path] , identifier[dir_path] = identifier[pmag] . identifier[fix_directories] ( identifier[input_dir] , identifier[dir_path] )
keyword[if] identifier[samp_con] :
identifier[Z] = literal[int]
keyword[if] literal[string] keyword[in] identifier[samp_con] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[samp_con] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[else] :
identifier[Z] = identifier[samp_con] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[samp_con] = literal[string]
keyword[if] literal[string] keyword[in] identifier[samp_con] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[samp_con] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[else] :
identifier[Z] = identifier[samp_con] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[samp_con] = literal[string]
keyword[if] literal[string] keyword[in] identifier[samp_con] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[if] keyword[not] identifier[mag_file] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
identifier[output_dir_path] = identifier[dir_path]
identifier[mag_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[mag_file] , identifier[input_dir_path] )
identifier[samplist] =[]
keyword[try] :
identifier[SampRecs] , identifier[file_type] = identifier[pmag] . identifier[magic_read] ( identifier[samp_file] )
keyword[except] :
identifier[SampRecs] =[]
identifier[MeasRecs] , identifier[SpecRecs] , identifier[SiteRecs] , identifier[LocRecs] =[],[],[],[]
keyword[try] :
identifier[f] = identifier[open] ( identifier[mag_file] , literal[string] )
identifier[input] = identifier[str] ( identifier[f] . identifier[read] ()). identifier[strip] ( literal[string] )
identifier[f] . identifier[close] ()
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[print] ( literal[string] , identifier[ex] )
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
identifier[firstline] , identifier[date] = literal[int] , literal[string]
identifier[d] = identifier[input] . identifier[split] ( literal[string] )
keyword[for] identifier[line] keyword[in] identifier[d] :
identifier[rec] = identifier[line] . identifier[split] ( literal[string] )
identifier[rec_not_null] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[rec] keyword[if] identifier[i] ]
keyword[if] identifier[len] ( identifier[rec_not_null] )< literal[int] :
keyword[continue]
keyword[if] identifier[firstline] == literal[int] :
identifier[firstline] = literal[int]
identifier[spec] , identifier[vol] = literal[string] , literal[int]
identifier[el] = literal[int]
identifier[spec] = identifier[rec] [ literal[int] ]
identifier[test] = identifier[spec] . identifier[split] ( literal[string] )
identifier[date] = literal[string]
keyword[if] identifier[len] ( identifier[test] )> literal[int] :
identifier[spec] = identifier[test] [ literal[int] ]
identifier[kk] = literal[int]
keyword[while] identifier[line] [ identifier[kk] ]!= literal[string] keyword[and] identifier[line] [ identifier[kk] ]!= literal[string] :
identifier[kk] += literal[int]
identifier[vcc] = identifier[line] [ literal[int] : identifier[kk] ]
identifier[el] = literal[int]
keyword[while] identifier[rec] [ identifier[el] ]. identifier[strip] ()!= literal[string] :
identifier[el] += literal[int]
identifier[date] , identifier[comments] = identifier[rec] [ identifier[el] + literal[int] ],[]
keyword[else] :
identifier[el] = literal[int]
keyword[while] identifier[rec] [ identifier[el] ]!= literal[string] :
identifier[el] += literal[int]
identifier[vcc] , identifier[date] , identifier[comments] = identifier[rec] [ identifier[el] - literal[int] ], identifier[rec] [ identifier[el] + literal[int] ],[]
identifier[specname] = identifier[spec] . identifier[lower] ()
identifier[print] ( literal[string] , identifier[specname] )
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]. identifier[isdigit] ()== keyword[False] :
identifier[comments] . identifier[append] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[az] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[pl] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[bed_dip_dir] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[bed_dip] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
keyword[if] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[bed_dip] = literal[int] - identifier[bed_dip]
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[fold_az] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[fold_pl] = identifier[rec] [ identifier[el] ]
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
keyword[if] identifier[rec] [ identifier[el] ]!= literal[string] keyword[and] identifier[rec] [ identifier[el] ]!= literal[string] keyword[and] identifier[rec] [ identifier[el] ]!= literal[string] :
identifier[deccorr] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[az] += identifier[deccorr]
identifier[bed_dip_dir] += identifier[deccorr]
identifier[fold_az] += identifier[deccorr]
keyword[if] identifier[bed_dip_dir] >= literal[int] :
identifier[bed_dip_dir] = identifier[bed_dip_dir] - literal[int]
keyword[if] identifier[az] >= literal[int] :
identifier[az] = identifier[az] - literal[int]
keyword[if] identifier[fold_az] >= literal[int] :
identifier[fold_az] = identifier[fold_az] - literal[int]
keyword[else] :
identifier[deccorr] = literal[int]
keyword[if] identifier[specnum] != literal[int] :
identifier[sample] = identifier[specname] [: identifier[specnum] ]
keyword[else] :
identifier[sample] = identifier[specname]
identifier[methods] = identifier[gmeths] . identifier[split] ( literal[string] )
keyword[if] identifier[deccorr] != literal[string] :
keyword[if] literal[string] keyword[in] identifier[methods] :
keyword[del] identifier[methods] [ identifier[methods] . identifier[index] ( literal[string] )]
identifier[methods] . identifier[append] ( literal[string] )
identifier[meths] = identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] + literal[string] + identifier[y] , identifier[methods] )
identifier[method_codes] = identifier[meths]
identifier[site] = identifier[pmag] . identifier[parse_site] ( identifier[sample] , identifier[samp_con] , identifier[Z] )
identifier[SpecRec] , identifier[SampRec] , identifier[SiteRec] , identifier[LocRec] ={},{},{},{}
identifier[SpecRec] [ literal[string] ]= identifier[specname]
identifier[SpecRec] [ literal[string] ]= identifier[sample]
keyword[if] identifier[vcc] . identifier[strip] ()!= literal[string] :
identifier[vol] = identifier[float] ( identifier[vcc] )* literal[int]
identifier[SpecRec] [ literal[string] ]= literal[string] %( identifier[vol] )
identifier[SpecRec] [ literal[string] ]= identifier[sclass]
identifier[SpecRec] [ literal[string] ]= identifier[lithology]
identifier[SpecRec] [ literal[string] ]= identifier[_type]
identifier[SpecRecs] . identifier[append] ( identifier[SpecRec] )
keyword[if] identifier[sample] != literal[string] keyword[and] identifier[sample] keyword[not] keyword[in] [ identifier[x] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[SampRecs] ]:
identifier[SampRec] [ literal[string] ]= identifier[sample]
identifier[SampRec] [ literal[string] ]= identifier[site]
identifier[labaz] , identifier[labdip] = identifier[pmag] . identifier[orient] ( identifier[az] , identifier[pl] , identifier[or_con] )
identifier[SampRec] [ literal[string] ]= literal[string] %( identifier[bed_dip] )
identifier[SampRec] [ literal[string] ]= literal[string] %( identifier[bed_dip_dir] )
identifier[SampRec] [ literal[string] ]= literal[string] %( identifier[labdip] )
identifier[SampRec] [ literal[string] ]= literal[string] %( identifier[labaz] )
identifier[SampRec] [ literal[string] ]= literal[string] %( identifier[deccorr] )
identifier[SampRec] [ literal[string] ]= identifier[sclass]
identifier[SampRec] [ literal[string] ]= identifier[lithology]
identifier[SampRec] [ literal[string] ]= identifier[_type]
identifier[SampRec] [ literal[string] ]= identifier[method_codes]
identifier[SampRecs] . identifier[append] ( identifier[SampRec] )
keyword[if] identifier[site] != literal[string] keyword[and] identifier[site] keyword[not] keyword[in] [ identifier[x] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[SiteRecs] ]:
identifier[SiteRec] [ literal[string] ]= identifier[site]
identifier[SiteRec] [ literal[string] ]= identifier[location]
identifier[SiteRec] [ literal[string] ]= identifier[lat]
identifier[SiteRec] [ literal[string] ]= identifier[lon]
identifier[SiteRec] [ literal[string] ]= identifier[sclass]
identifier[SiteRec] [ literal[string] ]= identifier[lithology]
identifier[SiteRec] [ literal[string] ]= identifier[_type]
identifier[SiteRecs] . identifier[append] ( identifier[SiteRec] )
keyword[if] identifier[location] != literal[string] keyword[and] identifier[location] keyword[not] keyword[in] [ identifier[x] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[LocRecs] ]:
identifier[LocRec] [ literal[string] ]= identifier[location]
identifier[LocRec] [ literal[string] ]= identifier[lat]
identifier[LocRec] [ literal[string] ]= identifier[lon]
identifier[LocRec] [ literal[string] ]= identifier[lat]
identifier[LocRec] [ literal[string] ]= identifier[lon]
identifier[LocRecs] . identifier[append] ( identifier[LocRec] )
keyword[else] :
identifier[MeasRec] ={}
identifier[MeasRec] [ literal[string] ]= literal[string] %( literal[int] )
identifier[MeasRec] [ literal[string] ]= literal[string] %( literal[int] )
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[meas_type] = literal[string]
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= literal[int]
identifier[MeasRec] [ literal[string] ]= identifier[specname]
identifier[el] , identifier[demag] = literal[int] , literal[string]
identifier[treat] = identifier[rec] [ identifier[el] ]
keyword[if] identifier[treat] [- literal[int] ]== literal[string] :
identifier[demag] = literal[string]
keyword[elif] identifier[treat] != literal[string] :
identifier[demag] = literal[string]
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[MeasRec] [ literal[string] ]= identifier[rec] [ identifier[el] ]
identifier[cdec] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[MeasRec] [ literal[string] ]= identifier[rec] [ identifier[el] ]
identifier[cinc] = identifier[float] ( identifier[rec] [ identifier[el] ])
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[gdec] = identifier[rec] [ identifier[el] ]
identifier[el] += literal[int]
keyword[while] identifier[rec] [ identifier[el] ]== literal[string] :
identifier[el] += literal[int]
identifier[ginc] = identifier[rec] [ identifier[el] ]
identifier[el] = identifier[skip] ( literal[int] , identifier[el] , identifier[rec] )
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[float] ( identifier[rec] [ identifier[el] ])* literal[int] )
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[float] ( identifier[rec] [ identifier[el] ])* literal[int] / identifier[vol] )
identifier[el] = identifier[skip] ( literal[int] , identifier[el] , identifier[rec] )
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[float] ( identifier[rec] [ identifier[el] ])* literal[int] )
identifier[el] = identifier[skip] ( literal[int] , identifier[el] , identifier[rec] )
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[float] ( identifier[rec] [ identifier[el] ])* literal[int] )
identifier[el] = identifier[skip] ( literal[int] , identifier[el] , identifier[rec] )
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[float] ( identifier[rec] [ identifier[el] ])* literal[int] )
identifier[el] += literal[int]
identifier[MeasRec] [ literal[string] ]= identifier[rec] [ identifier[el] ]
identifier[MeasRec] [ literal[string] ]= identifier[inst]
identifier[MeasRec] [ literal[string] ]= identifier[user]
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= identifier[meas_type]
keyword[if] identifier[demag] == literal[string] :
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[float] ( identifier[treat] [:- literal[int] ])* literal[int] )
identifier[meas_type] = literal[string]
identifier[MeasRec] [ literal[string] ]= literal[string]
keyword[elif] identifier[demag] == literal[string] :
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[float] ( identifier[treat] [:- literal[int] ])+ literal[int] )
identifier[meas_type] = literal[string]
identifier[MeasRec] [ literal[string] ]= identifier[meas_type]
identifier[MeasRecs] . identifier[append] ( identifier[MeasRec] )
identifier[con] = identifier[cb] . identifier[Contribution] ( identifier[output_dir_path] , identifier[read_tables] =[])
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[SpecRecs] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[SampRecs] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[SiteRecs] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[LocRecs] )
identifier[MeasOuts] = identifier[pmag] . identifier[measurements_methods3] ( identifier[MeasRecs] , identifier[noave] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[MeasOuts] )
identifier[con] . identifier[write_table_to_file] ( literal[string] , identifier[custom_name] = identifier[spec_file] )
identifier[con] . identifier[write_table_to_file] ( literal[string] , identifier[custom_name] = identifier[samp_file] )
identifier[con] . identifier[write_table_to_file] ( literal[string] , identifier[custom_name] = identifier[site_file] )
identifier[con] . identifier[write_table_to_file] ( literal[string] , identifier[custom_name] = identifier[loc_file] )
identifier[con] . identifier[write_table_to_file] ( literal[string] , identifier[custom_name] = identifier[meas_file] )
keyword[return] keyword[True] , identifier[meas_file] | def _2g_bin(dir_path='.', mag_file='', meas_file='measurements.txt', spec_file='specimens.txt', samp_file='samples.txt', site_file='sites.txt', loc_file='locations.txt', or_con='3', specnum=0, samp_con='2', corr='1', gmeths='FS-FD:SO-POM', location='unknown', inst='', user='', noave=False, input_dir='', lat='', lon=''):
"""
Convert 2G binary format file to MagIC file(s)
Parameters
----------
dir_path : str
output directory, default "."
mag_file : str
input file name
meas_file : str
output measurement file name, default "measurements.txt"
spec_file : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
site_file : str
output site file name, default "sites.txt"
loc_file : str
output location file name, default "locations.txt"
or_con : number
orientation convention, default '3', see info below
specnum : int
number of characters to designate a specimen, default 0
samp_con : str
sample/site naming convention, default '2', see info below
corr: str
default '1'
gmeths : str
sampling method codes, default "FS-FD:SO-POM", see info below
location : str
location name, default "unknown"
inst : str
instrument, default ""
user : str
user name, default ""
noave : bool
do not average duplicate measurements, default False (so by default, DO average)
input_dir : str
input file directory IF different from dir_path, default ""
lat : float
latitude, default ""
lon : float
longitude, default ""
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
Info
----------
Orientation convention:
[1] Lab arrow azimuth= mag_azimuth; Lab arrow dip=-field_dip
i.e., field_dip is degrees from vertical down - the hade [default]
[2] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = -field_dip
i.e., mag_azimuth is strike and field_dip is hade
[3] Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
i.e., lab arrow same as field arrow, but field_dip was a hade.
[4] lab azimuth and dip are same as mag_azimuth, field_dip
[5] lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail ltauxe@ucsd.edu for help.
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
Sampling method codes:
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
SO-SUN orientation with sun compass
"""
def skip(N, ind, L):
for b in range(N):
ind += 1
while L[ind] == '':
ind += 1 # depends on [control=['while'], data=[]] # depends on [control=['for'], data=[]]
ind += 1
while L[ind] == '':
ind += 1 # depends on [control=['while'], data=[]]
return ind
#
# initialize variables
#
(bed_dip, bed_dip_dir) = ('', '')
(sclass, lithology, _type) = ('', '', '')
DecCorr = 0.0
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# format and fix variables
specnum = int(specnum)
specnum = -specnum
(input_dir_path, dir_path) = pmag.fix_directories(input_dir, dir_path)
if samp_con:
Z = 1
if '4' in samp_con:
if '-' not in samp_con:
print('option [4] must be in form 4-Z where Z is an integer')
return (False, 'option [4] must be in form 4-Z where Z is an integer') # depends on [control=['if'], data=[]]
else:
Z = samp_con.split('-')[1]
samp_con = '4' # depends on [control=['if'], data=['samp_con']]
if '7' in samp_con:
if '-' not in samp_con:
print('option [7] must be in form 7-Z where Z is an integer')
return (False, 'option [7] must be in form 7-Z where Z is an integer') # depends on [control=['if'], data=[]]
else:
Z = samp_con.split('-')[1]
samp_con = '7' # depends on [control=['if'], data=['samp_con']]
if '6' in samp_con:
print('Naming convention option [6] not currently supported')
return (False, 'Naming convention option [6] not currently supported') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Z=1
# try:
# SampRecs,file_type=pmag.magic_read(os.path.join(input_dir_path, 'er_samples.txt'))
# except:
# print("there is no er_samples.txt file in your input directory - you can't use naming convention #6")
# return False, "there is no er_samples.txt file in your input directory - you can't use naming convention #6"
# if file_type == 'bad_file':
# print("there is no er_samples.txt file in your input directory - you can't use naming convention #6")
# return False, "there is no er_samples.txt file in your input directory - you can't use naming convention #6"
# else: Z=1
if not mag_file:
print('mag file is required input')
return (False, 'mag file is required input') # depends on [control=['if'], data=[]]
output_dir_path = dir_path
mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
samplist = []
try:
(SampRecs, file_type) = pmag.magic_read(samp_file) # depends on [control=['try'], data=[]]
except:
SampRecs = [] # depends on [control=['except'], data=[]]
(MeasRecs, SpecRecs, SiteRecs, LocRecs) = ([], [], [], [])
try:
f = open(mag_file, 'br')
input = str(f.read()).strip("b '")
f.close() # depends on [control=['try'], data=[]]
except Exception as ex:
print('ex', ex)
print('bad mag file')
return (False, 'bad mag file') # depends on [control=['except'], data=['ex']]
(firstline, date) = (1, '')
d = input.split('\\xcd')
for line in d:
rec = line.split('\\x00')
# skip nearly empty lines
rec_not_null = [i for i in rec if i]
if len(rec_not_null) < 5:
continue # depends on [control=['if'], data=[]]
if firstline == 1:
firstline = 0
(spec, vol) = ('', 1)
el = 51
#while line[el:el+1] != "\\":
# spec = spec+line[el]
# el += 1
spec = rec[7]
# check for bad sample name
test = spec.split('.')
date = ''
if len(test) > 1:
spec = test[0]
kk = 24
while line[kk] != '\\x01' and line[kk] != '\\x00':
kk += 1 # depends on [control=['while'], data=[]]
vcc = line[24:kk]
el = 10
while rec[el].strip() != '':
el += 1 # depends on [control=['while'], data=[]]
(date, comments) = (rec[el + 7], []) # depends on [control=['if'], data=[]]
else:
el = 9
while rec[el] != '\\x01':
el += 1 # depends on [control=['while'], data=[]]
(vcc, date, comments) = (rec[el - 3], rec[el + 7], [])
specname = spec.lower()
print('importing ', specname)
el += 8
while rec[el].isdigit() == False:
comments.append(rec[el])
el += 1 # depends on [control=['while'], data=[]]
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
az = float(rec[el])
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
pl = float(rec[el])
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
bed_dip_dir = float(rec[el])
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
bed_dip = float(rec[el])
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
if rec[el] == '\\x01':
bed_dip = 180.0 - bed_dip
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
fold_az = float(rec[el])
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
fold_pl = rec[el]
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
if rec[el] != '' and rec[el] != '\\x02' and (rec[el] != '\\x01'):
deccorr = float(rec[el])
az += deccorr
bed_dip_dir += deccorr
fold_az += deccorr
if bed_dip_dir >= 360:
bed_dip_dir = bed_dip_dir - 360.0 # depends on [control=['if'], data=['bed_dip_dir']]
if az >= 360.0:
az = az - 360.0 # depends on [control=['if'], data=['az']]
if fold_az >= 360.0:
fold_az = fold_az - 360.0 # depends on [control=['if'], data=['fold_az']] # depends on [control=['if'], data=[]]
else:
deccorr = 0
if specnum != 0:
sample = specname[:specnum] # depends on [control=['if'], data=['specnum']]
else:
sample = specname
methods = gmeths.split(':')
if deccorr != '0':
if 'SO-MAG' in methods:
del methods[methods.index('SO-MAG')] # depends on [control=['if'], data=['methods']]
methods.append('SO-CMD-NORTH') # depends on [control=['if'], data=[]]
meths = reduce(lambda x, y: x + ':' + y, methods)
method_codes = meths
# parse out the site name
site = pmag.parse_site(sample, samp_con, Z)
(SpecRec, SampRec, SiteRec, LocRec) = ({}, {}, {}, {})
SpecRec['specimen'] = specname
SpecRec['sample'] = sample
if vcc.strip() != '':
vol = float(vcc) * 1e-06 # convert to m^3 from cc # depends on [control=['if'], data=[]]
SpecRec['volume'] = '%10.3e' % vol
SpecRec['geologic_classes'] = sclass
SpecRec['lithologies'] = lithology
SpecRec['geologic_types'] = _type
SpecRecs.append(SpecRec)
if sample != '' and sample not in [x['sample'] if 'sample' in list(x.keys()) else '' for x in SampRecs]:
SampRec['sample'] = sample
SampRec['site'] = site
# convert to labaz, labpl
(labaz, labdip) = pmag.orient(az, pl, or_con)
SampRec['bed_dip'] = '%7.1f' % bed_dip
SampRec['bed_dip_direction'] = '%7.1f' % bed_dip_dir
SampRec['dip'] = '%7.1f' % labdip
SampRec['azimuth'] = '%7.1f' % labaz
SampRec['azimuth_dec_correction'] = '%7.1f' % deccorr
SampRec['geologic_classes'] = sclass
SampRec['lithologies'] = lithology
SampRec['geologic_types'] = _type
SampRec['method_codes'] = method_codes
SampRecs.append(SampRec) # depends on [control=['if'], data=[]]
if site != '' and site not in [x['site'] if 'site' in list(x.keys()) else '' for x in SiteRecs]:
SiteRec['site'] = site
SiteRec['location'] = location
SiteRec['lat'] = lat
SiteRec['lon'] = lon
SiteRec['geologic_classes'] = sclass
SiteRec['lithologies'] = lithology
SiteRec['geologic_types'] = _type
SiteRecs.append(SiteRec) # depends on [control=['if'], data=[]]
if location != '' and location not in [x['location'] if 'location' in list(x.keys()) else '' for x in LocRecs]:
LocRec['location'] = location
LocRec['lat_n'] = lat
LocRec['lon_e'] = lon
LocRec['lat_s'] = lat
LocRec['lon_w'] = lon
# LocRec["geologic_classes"]=sclass
# LocRec["lithologies"]=lithology
# LocRec["geologic_types"]=_type
LocRecs.append(LocRec) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['firstline']]
else:
MeasRec = {}
MeasRec['treat_temp'] = '%8.3e' % 273 # room temp in kelvin
MeasRec['meas_temp'] = '%8.3e' % 273 # room temp in kelvin
MeasRec['treat_ac_field'] = '0'
MeasRec['treat_dc_field'] = '0'
MeasRec['treat_dc_field_phi'] = '0'
MeasRec['treat_dc_field_theta'] = '0'
meas_type = 'LT-NO'
MeasRec['quality'] = 'g'
MeasRec['standard'] = 'u'
MeasRec['treat_step_num'] = 0
MeasRec['specimen'] = specname
(el, demag) = (1, '')
treat = rec[el]
if treat[-1] == 'C':
demag = 'T' # depends on [control=['if'], data=[]]
elif treat != 'NRM':
demag = 'AF' # depends on [control=['if'], data=[]]
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
MeasRec['dir_dec'] = rec[el]
cdec = float(rec[el])
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
MeasRec['dir_inc'] = rec[el]
cinc = float(rec[el])
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
gdec = rec[el]
el += 1
while rec[el] == '':
el += 1 # depends on [control=['while'], data=[]]
ginc = rec[el]
el = skip(2, el, rec) # skip bdec,binc
# el=skip(4,el,rec) # skip gdec,ginc,bdec,binc
# print 'moment emu: ',rec[el]
MeasRec['magn_moment'] = '%10.3e' % (float(rec[el]) * 0.001) # moment in Am^2 (from emu)
MeasRec['magn_volume'] = '%10.3e' % (float(rec[el]) * 0.001 / vol) # magnetization in A/m
el = skip(2, el, rec) # skip to xsig
MeasRec['magn_x_sigma'] = '%10.3e' % (float(rec[el]) * 0.001) # convert from emu
el = skip(3, el, rec) # skip to ysig
MeasRec['magn_y_sigma'] = '%10.3e' % (float(rec[el]) * 0.001) # convert from emu
el = skip(3, el, rec) # skip to zsig
MeasRec['magn_z_sigma'] = '%10.3e' % (float(rec[el]) * 0.001) # convert from emu
el += 1 # skip to positions
MeasRec['meas_n_orient'] = rec[el]
# el=skip(5,el,rec) # skip to date
# mm=str(months.index(date[0]))
# if len(mm)==1:
# mm='0'+str(mm)
# else:
# mm=str(mm)
# dstring=date[2]+':'+mm+':'+date[1]+":"+date[3]
# MeasRec['measurement_date']=dstring
MeasRec['instrument_codes'] = inst
MeasRec['analysts'] = user
MeasRec['citations'] = 'This study'
MeasRec['method_codes'] = meas_type
if demag == 'AF':
MeasRec['treat_ac_field'] = '%8.3e' % (float(treat[:-2]) * 0.001) # peak field in tesla
meas_type = 'LT-AF-Z'
MeasRec['treat_dc_field'] = '0' # depends on [control=['if'], data=[]]
elif demag == 'T':
MeasRec['treat_temp'] = '%8.3e' % (float(treat[:-1]) + 273.0) # temp in kelvin
meas_type = 'LT-T-Z' # depends on [control=['if'], data=[]]
MeasRec['method_codes'] = meas_type
MeasRecs.append(MeasRec) # depends on [control=['for'], data=['line']]
con = cb.Contribution(output_dir_path, read_tables=[])
con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
con.add_magic_table_from_data(dtype='samples', data=SampRecs)
con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
con.add_magic_table_from_data(dtype='locations', data=LocRecs)
MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
con.write_table_to_file('specimens', custom_name=spec_file)
con.write_table_to_file('samples', custom_name=samp_file)
con.write_table_to_file('sites', custom_name=site_file)
con.write_table_to_file('locations', custom_name=loc_file)
con.write_table_to_file('measurements', custom_name=meas_file)
return (True, meas_file) |
def to_b58check(self, testnet=False):
    """ Generates a Base58Check encoding of this key.
    Args:
        testnet (bool): True if the key is to be used with
           testnet, False otherwise.
    Returns:
        str: A Base58Check encoded string representing the key.
    """
    # Testnet keys carry a different version prefix, so the raw byte
    # serialization differs between the two networks.
    if testnet:
        raw = self.testnet_bytes
    else:
        raw = bytes(self)
    return base58.b58encode_check(raw)
constant[ Generates a Base58Check encoding of this key.
Args:
testnet (bool): True if the key is to be used with
testnet, False otherwise.
Returns:
str: A Base58Check encoded string representing the key.
]
variable[b] assign[=] <ast.IfExp object at 0x7da1b22e9f60>
return[call[name[base58].b58encode_check, parameter[name[b]]]] | keyword[def] identifier[to_b58check] ( identifier[self] , identifier[testnet] = keyword[False] ):
literal[string]
identifier[b] = identifier[self] . identifier[testnet_bytes] keyword[if] identifier[testnet] keyword[else] identifier[bytes] ( identifier[self] )
keyword[return] identifier[base58] . identifier[b58encode_check] ( identifier[b] ) | def to_b58check(self, testnet=False):
""" Generates a Base58Check encoding of this key.
Args:
testnet (bool): True if the key is to be used with
testnet, False otherwise.
Returns:
str: A Base58Check encoded string representing the key.
"""
b = self.testnet_bytes if testnet else bytes(self)
return base58.b58encode_check(b) |
def flush_to_index(self):
    """Flush changes in our configuration file to the index"""
    assert self._smref is not None
    # should always have a file here
    assert not isinstance(self._file_or_files, BytesIO)

    submodule = self._smref()
    if submodule is None:
        # our submodule went away - nothing to flush
        return
    # Prefer the index we were handed; fall back to the repo's own.
    index = self._index
    if index is None:
        index = submodule.repo.index
    # END handle index
    index.add([submodule.k_modules_file], write=self._auto_write)
    submodule._clear_cache()
constant[Flush changes in our configuration file to the index]
assert[compare[name[self]._smref is_not constant[None]]]
assert[<ast.UnaryOp object at 0x7da204346a40>]
variable[sm] assign[=] call[name[self]._smref, parameter[]]
if compare[name[sm] is_not constant[None]] begin[:]
variable[index] assign[=] name[self]._index
if compare[name[index] is constant[None]] begin[:]
variable[index] assign[=] name[sm].repo.index
call[name[index].add, parameter[list[[<ast.Attribute object at 0x7da2043442e0>]]]]
call[name[sm]._clear_cache, parameter[]] | keyword[def] identifier[flush_to_index] ( identifier[self] ):
literal[string]
keyword[assert] identifier[self] . identifier[_smref] keyword[is] keyword[not] keyword[None]
keyword[assert] keyword[not] identifier[isinstance] ( identifier[self] . identifier[_file_or_files] , identifier[BytesIO] )
identifier[sm] = identifier[self] . identifier[_smref] ()
keyword[if] identifier[sm] keyword[is] keyword[not] keyword[None] :
identifier[index] = identifier[self] . identifier[_index]
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[index] = identifier[sm] . identifier[repo] . identifier[index]
identifier[index] . identifier[add] ([ identifier[sm] . identifier[k_modules_file] ], identifier[write] = identifier[self] . identifier[_auto_write] )
identifier[sm] . identifier[_clear_cache] () | def flush_to_index(self):
"""Flush changes in our configuration file to the index"""
assert self._smref is not None
# should always have a file here
assert not isinstance(self._file_or_files, BytesIO)
sm = self._smref()
if sm is not None:
index = self._index
if index is None:
index = sm.repo.index # depends on [control=['if'], data=['index']]
# END handle index
index.add([sm.k_modules_file], write=self._auto_write)
sm._clear_cache() # depends on [control=['if'], data=['sm']] |
def read_google(self, url, gid=0, **kwargs):
    """
    Reads a google sheet exported as CSV.

    Args:
        url: Base URL of the published Google spreadsheet.
        gid: Sheet (tab) id to export; defaults to 0 (the first sheet),
            which preserves the previous hard-coded behavior.
        **kwargs: Extra keyword arguments forwarded to ``read_csv``.

    Returns:
        Whatever ``self.read_csv`` returns for the exported CSV data.
    """
    # `endswith` instead of `url[-1]` so an empty URL does not raise
    # IndexError before the request is even attempted.
    if not url.endswith('/'):
        url += '/'
    return self.read_csv(url + 'export?gid=%s&format=csv' % gid, **kwargs)
constant[
Reads a google sheet
]
if compare[call[name[url]][<ast.UnaryOp object at 0x7da1b216d870>] not_equal[!=] constant[/]] begin[:]
<ast.AugAssign object at 0x7da1b216ceb0>
return[call[name[self].read_csv, parameter[binary_operation[name[url] + constant[export?gid=0&format=csv]]]]] | keyword[def] identifier[read_google] ( identifier[self] , identifier[url] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[url] [- literal[int] ]!= literal[string] :
identifier[url] += literal[string]
keyword[return] identifier[self] . identifier[read_csv] ( identifier[url] + literal[string] ,** identifier[kwargs] ) | def read_google(self, url, **kwargs):
"""
Reads a google sheet
"""
if url[-1] != '/':
url += '/' # depends on [control=['if'], data=[]]
return self.read_csv(url + 'export?gid=0&format=csv', **kwargs) |
def acquire_reader(self):
    """
    Acquire a read lock, several threads can hold this type of lock.
    """
    with self.mutex:
        def _must_wait():
            # Wait while a writer holds the lock (negative count), the
            # reader limit is reached, or writers are queued — queued
            # writers get preference over new readers.
            return (self.rwlock < 0
                    or self.rwlock == self.max_reader_concurrency
                    or bool(self.writers_waiting))

        while _must_wait():
            self.readers_ok.wait()
        self.rwlock += 1
constant[
Acquire a read lock, several threads can hold this type of lock.
]
with name[self].mutex begin[:]
while <ast.BoolOp object at 0x7da18ede62c0> begin[:]
call[name[self].readers_ok.wait, parameter[]]
<ast.AugAssign object at 0x7da20c6ab9a0> | keyword[def] identifier[acquire_reader] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[mutex] :
keyword[while] identifier[self] . identifier[rwlock] < literal[int] keyword[or] identifier[self] . identifier[rwlock] == identifier[self] . identifier[max_reader_concurrency] keyword[or] identifier[self] . identifier[writers_waiting] :
identifier[self] . identifier[readers_ok] . identifier[wait] ()
identifier[self] . identifier[rwlock] += literal[int] | def acquire_reader(self):
"""
Acquire a read lock, several threads can hold this type of lock.
"""
with self.mutex:
while self.rwlock < 0 or self.rwlock == self.max_reader_concurrency or self.writers_waiting:
self.readers_ok.wait() # depends on [control=['while'], data=[]]
self.rwlock += 1 # depends on [control=['with'], data=[]] |
def create(input, template, field, outdir, prefix, otype, command, index,
           dpi, verbose, unicode_support):
    """Use docstamp to create documents from the content of a CSV file or
    a Google Spreadsheet.
    Examples: \n
    docstamp create -i badge.csv -t badge_template.svg -o badges
    docstamp create -i badge.csv -t badge_template.svg -o ./badges -d pdf
    """
    logging.basicConfig(level=LOGGING_LVL)
    log = logging.getLogger(__name__)

    # setup verbose mode
    verbose_switch(verbose)

    input_file = input
    fields = field

    # read all rows from the input CSV
    log.debug('Reading CSV elements from {}.'.format(input_file))
    items, fieldnames = get_items_from_csv(input_file)

    # bail out early when there is nothing to stamp
    if len(items) == 0:
        click.echo('Quiting because found 0 items.')
        exit(-1)

    if not fields:
        # set the number of zeros that the files will have
        n_zeros = int(math.floor(math.log10(len(items))) + 1)
    else:
        # check that fields has all valid fields
        for field_name in fields:
            if field_name not in fieldnames:
                raise ValueError('Field name {} not found in input file '
                                 ' header.'.format(field_name))

    # filter the items if index
    if index:
        items = {int(idx): items[int(idx)] for idx in index}
        log.debug('Using the elements with index {} of the input '
                  'file.'.format(index))

    # make output folder
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    # create template document model
    log.debug('Creating the template object using the file {}.'.format(template))
    template_doc = TextDocument.from_template_file(template, command)
    log.debug('Created an object of type {}.'.format(type(template_doc)))

    # the template path never changes inside the loop: compute the
    # extension and basename once instead of per item
    file_extension = get_extension(template)
    basename = os.path.basename(template).replace(file_extension, '')

    # let's stamp them!
    for idx in items:
        item = items[idx]

        if not fields:
            file_name = str(idx).zfill(n_zeros)
        else:
            field_values = []
            try:
                for field_name in fields:
                    field_values.append(item[field_name].replace(' ', ''))
            # narrow from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed
            except Exception:
                log.exception('Could not get field {} value from'
                              ' {}'.format(field_name, item))
                exit(-1)
            else:
                file_name = '_'.join(field_values)

        log.debug('Filling template {} with values of item {}.'.format(file_name, idx))
        try:
            template_doc.fill(item)
        except Exception:
            log.exception('Error filling document for {}th item'.format(idx))
            continue

        # set output file path
        if prefix is None:
            file_name = basename + '_' + file_name
        file_path = os.path.join(outdir, file_name + '.' + otype)

        kwargs = {'file_type': otype,
                  'dpi': dpi,
                  'support_unicode': unicode_support}

        log.debug('Rendering file {}.'.format(file_path))
        try:
            template_doc.render(file_path, **kwargs)
        except Exception:
            log.exception('Error creating {} for {}.'.format(file_path, item))
            exit(-1)
        else:
            log.debug('Successfully rendered {}.'.format(file_path))
constant[Use docstamp to create documents from the content of a CSV file or
a Google Spreadsheet.
Examples:
docstamp create -i badge.csv -t badge_template.svg -o badges
docstamp create -i badge.csv -t badge_template.svg -o ./badges -d pdf
]
call[name[logging].basicConfig, parameter[]]
variable[log] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[verbose_switch], parameter[name[verbose]]]
variable[input_file] assign[=] name[input]
variable[fields] assign[=] name[field]
call[name[log].debug, parameter[call[constant[Reading CSV elements from {}.].format, parameter[name[input_file]]]]]
<ast.Tuple object at 0x7da1b01a6f20> assign[=] call[name[get_items_from_csv], parameter[name[input_file]]]
if compare[call[name[len], parameter[name[items]]] equal[==] constant[0]] begin[:]
call[name[click].echo, parameter[constant[Quiting because found 0 items.]]]
call[name[exit], parameter[<ast.UnaryOp object at 0x7da1b01a6620>]]
if <ast.UnaryOp object at 0x7da1b01a7f40> begin[:]
variable[n_zeros] assign[=] call[name[int], parameter[binary_operation[call[name[math].floor, parameter[call[name[math].log10, parameter[call[name[len], parameter[name[items]]]]]]] + constant[1]]]]
if name[index] begin[:]
variable[myitems] assign[=] <ast.DictComp object at 0x7da1b01a7190>
variable[items] assign[=] name[myitems]
call[name[log].debug, parameter[call[constant[Using the elements with index {} of the input file.].format, parameter[name[index]]]]]
if <ast.UnaryOp object at 0x7da204962500> begin[:]
call[name[os].mkdir, parameter[name[outdir]]]
call[name[log].debug, parameter[call[constant[Creating the template object using the file {}.].format, parameter[name[template]]]]]
variable[template_doc] assign[=] call[name[TextDocument].from_template_file, parameter[name[template], name[command]]]
call[name[log].debug, parameter[call[constant[Created an object of type {}.].format, parameter[call[name[type], parameter[name[template_doc]]]]]]]
for taget[name[idx]] in starred[name[items]] begin[:]
variable[item] assign[=] call[name[items]][name[idx]]
if <ast.UnaryOp object at 0x7da1b01a4580> begin[:]
variable[file_name] assign[=] call[call[name[str], parameter[name[idx]]].zfill, parameter[name[n_zeros]]]
call[name[log].debug, parameter[call[constant[Filling template {} with values of item {}.].format, parameter[name[file_name], name[idx]]]]]
<ast.Try object at 0x7da1b01a59f0>
variable[file_extension] assign[=] call[name[get_extension], parameter[name[template]]]
if compare[name[prefix] is constant[None]] begin[:]
variable[basename] assign[=] call[call[name[os].path.basename, parameter[name[template]]].replace, parameter[name[file_extension], constant[]]]
variable[file_name] assign[=] binary_operation[binary_operation[name[basename] + constant[_]] + name[file_name]]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[outdir], binary_operation[binary_operation[name[file_name] + constant[.]] + name[otype]]]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b01a6200>, <ast.Constant object at 0x7da1b01a5f60>, <ast.Constant object at 0x7da1b01a5db0>], [<ast.Name object at 0x7da18f00fcd0>, <ast.Name object at 0x7da18f00d270>, <ast.Name object at 0x7da18f00e620>]]
call[name[log].debug, parameter[call[constant[Rendering file {}.].format, parameter[name[file_path]]]]]
<ast.Try object at 0x7da18f00e110> | keyword[def] identifier[create] ( identifier[input] , identifier[template] , identifier[field] , identifier[outdir] , identifier[prefix] , identifier[otype] , identifier[command] , identifier[index] ,
identifier[dpi] , identifier[verbose] , identifier[unicode_support] ):
literal[string]
identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[LOGGING_LVL] )
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[verbose_switch] ( identifier[verbose] )
identifier[input_file] = identifier[input]
identifier[fields] = identifier[field]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[input_file] ))
identifier[items] , identifier[fieldnames] = identifier[get_items_from_csv] ( identifier[input_file] )
keyword[if] identifier[len] ( identifier[items] )== literal[int] :
identifier[click] . identifier[echo] ( literal[string] )
identifier[exit] (- literal[int] )
keyword[if] keyword[not] identifier[fields] :
identifier[n_zeros] = identifier[int] ( identifier[math] . identifier[floor] ( identifier[math] . identifier[log10] ( identifier[len] ( identifier[items] )))+ literal[int] )
keyword[else] :
keyword[for] identifier[field_name] keyword[in] identifier[fields] :
keyword[if] identifier[field_name] keyword[not] keyword[in] identifier[fieldnames] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[field_name] ))
keyword[if] identifier[index] :
identifier[myitems] ={ identifier[int] ( identifier[idx] ): identifier[items] [ identifier[int] ( identifier[idx] )] keyword[for] identifier[idx] keyword[in] identifier[index] }
identifier[items] = identifier[myitems]
identifier[log] . identifier[debug] ( literal[string]
literal[string] . identifier[format] ( identifier[index] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[outdir] ):
identifier[os] . identifier[mkdir] ( identifier[outdir] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[template] ))
identifier[template_doc] = identifier[TextDocument] . identifier[from_template_file] ( identifier[template] , identifier[command] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[type] ( identifier[template_doc] )))
keyword[for] identifier[idx] keyword[in] identifier[items] :
identifier[item] = identifier[items] [ identifier[idx] ]
keyword[if] keyword[not] identifier[len] ( identifier[fields] ):
identifier[file_name] = identifier[str] ( identifier[idx] ). identifier[zfill] ( identifier[n_zeros] )
keyword[else] :
identifier[field_values] =[]
keyword[try] :
keyword[for] identifier[field_name] keyword[in] identifier[fields] :
identifier[field_values] . identifier[append] ( identifier[item] [ identifier[field_name] ]. identifier[replace] ( literal[string] , literal[string] ))
keyword[except] :
identifier[log] . identifier[exception] ( literal[string]
literal[string] . identifier[format] ( identifier[field_name] , identifier[item] ))
identifier[exit] (- literal[int] )
keyword[else] :
identifier[file_name] = literal[string] . identifier[join] ( identifier[field_values] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[file_name] , identifier[idx] ))
keyword[try] :
identifier[template_doc] . identifier[fill] ( identifier[item] )
keyword[except] :
identifier[log] . identifier[exception] ( literal[string] . identifier[format] ( identifier[idx] ))
keyword[continue]
identifier[file_extension] = identifier[get_extension] ( identifier[template] )
keyword[if] identifier[prefix] keyword[is] keyword[None] :
identifier[basename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[template] ). identifier[replace] ( identifier[file_extension] , literal[string] )
identifier[file_name] = identifier[basename] + literal[string] + identifier[file_name]
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[outdir] , identifier[file_name] + literal[string] + identifier[otype] )
identifier[kwargs] ={ literal[string] : identifier[otype] ,
literal[string] : identifier[dpi] ,
literal[string] : identifier[unicode_support] }
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[file_path] ))
keyword[try] :
identifier[template_doc] . identifier[render] ( identifier[file_path] ,** identifier[kwargs] )
keyword[except] :
identifier[log] . identifier[exception] ( literal[string] . identifier[format] ( identifier[file_path] , identifier[item] ))
identifier[exit] (- literal[int] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[file_path] )) | def create(input, template, field, outdir, prefix, otype, command, index, dpi, verbose, unicode_support):
"""Use docstamp to create documents from the content of a CSV file or
a Google Spreadsheet.
Examples:
docstamp create -i badge.csv -t badge_template.svg -o badges
docstamp create -i badge.csv -t badge_template.svg -o ./badges -d pdf
"""
logging.basicConfig(level=LOGGING_LVL)
log = logging.getLogger(__name__)
# setup verbose mode
verbose_switch(verbose)
input_file = input
fields = field
# init set of template contents
log.debug('Reading CSV elements from {}.'.format(input_file))
(items, fieldnames) = get_items_from_csv(input_file)
# check if got any item
if len(items) == 0:
click.echo('Quiting because found 0 items.')
exit(-1) # depends on [control=['if'], data=[]]
if not fields:
# set the number of zeros that the files will have
n_zeros = int(math.floor(math.log10(len(items))) + 1) # depends on [control=['if'], data=[]]
else:
# check that fields has all valid fields
for field_name in fields:
if field_name not in fieldnames:
raise ValueError('Field name {} not found in input file header.'.format(field_name)) # depends on [control=['if'], data=['field_name']] # depends on [control=['for'], data=['field_name']]
# filter the items if index
if index:
myitems = {int(idx): items[int(idx)] for idx in index}
items = myitems
log.debug('Using the elements with index {} of the input file.'.format(index)) # depends on [control=['if'], data=[]]
# make output folder
if not os.path.exists(outdir):
os.mkdir(outdir) # depends on [control=['if'], data=[]]
# create template document model
log.debug('Creating the template object using the file {}.'.format(template))
template_doc = TextDocument.from_template_file(template, command)
log.debug('Created an object of type {}.'.format(type(template_doc)))
# let's stamp them!
for idx in items:
item = items[idx]
if not len(fields):
file_name = str(idx).zfill(n_zeros) # depends on [control=['if'], data=[]]
else:
field_values = []
try:
for field_name in fields:
field_values.append(item[field_name].replace(' ', '')) # depends on [control=['for'], data=['field_name']] # depends on [control=['try'], data=[]]
except:
log.exception('Could not get field {} value from {}'.format(field_name, item))
exit(-1) # depends on [control=['except'], data=[]]
else:
file_name = '_'.join(field_values)
log.debug('Filling template {} with values of item {}.'.format(file_name, idx))
try:
template_doc.fill(item) # depends on [control=['try'], data=[]]
except:
log.exception('Error filling document for {}th item'.format(idx))
continue # depends on [control=['except'], data=[]]
# set output file path
file_extension = get_extension(template)
if prefix is None:
basename = os.path.basename(template).replace(file_extension, '') # depends on [control=['if'], data=[]]
file_name = basename + '_' + file_name
file_path = os.path.join(outdir, file_name + '.' + otype)
kwargs = {'file_type': otype, 'dpi': dpi, 'support_unicode': unicode_support}
log.debug('Rendering file {}.'.format(file_path))
try:
template_doc.render(file_path, **kwargs) # depends on [control=['try'], data=[]]
except:
log.exception('Error creating {} for {}.'.format(file_path, item))
exit(-1) # depends on [control=['except'], data=[]]
else:
log.debug('Successfully rendered {}.'.format(file_path)) # depends on [control=['for'], data=['idx']] |
def get_hours_data(self, entries, date_headers):
    """Sum billable and non-billable hours across all users."""
    # No entries means no totals to aggregate at all.
    if not entries:
        return {}
    totals_by_day = {}
    project_totals = get_project_totals(entries, date_headers,
                                        total_column=False)
    for rows, _totals in project_totals:
        for _user, _user_id, periods in rows:
            for period in periods:
                # One accumulator dict per calendar day.
                day_totals = totals_by_day.setdefault(
                    period['day'], {'billable': 0, 'nonbillable': 0})
                day_totals['billable'] += period['billable']
                day_totals['nonbillable'] += period['nonbillable']
    return totals_by_day
constant[Sum billable and non-billable hours across all users.]
variable[project_totals] assign[=] <ast.IfExp object at 0x7da1b1039d20>
variable[data_map] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b103b1f0>, <ast.Name object at 0x7da1b103a680>]]] in starred[name[project_totals]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1039e70>, <ast.Name object at 0x7da1b1038760>, <ast.Name object at 0x7da1b103b580>]]] in starred[name[rows]] begin[:]
for taget[name[period]] in starred[name[periods]] begin[:]
variable[day] assign[=] call[name[period]][constant[day]]
if compare[name[day] <ast.NotIn object at 0x7da2590d7190> name[data_map]] begin[:]
call[name[data_map]][name[day]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1038eb0>, <ast.Constant object at 0x7da1b10385b0>], [<ast.Constant object at 0x7da1b1038430>, <ast.Constant object at 0x7da1b1038f40>]]
<ast.AugAssign object at 0x7da1b103ac50>
<ast.AugAssign object at 0x7da1b103b3d0>
return[name[data_map]] | keyword[def] identifier[get_hours_data] ( identifier[self] , identifier[entries] , identifier[date_headers] ):
literal[string]
identifier[project_totals] = identifier[get_project_totals] (
identifier[entries] , identifier[date_headers] , identifier[total_column] = keyword[False] ) keyword[if] identifier[entries] keyword[else] []
identifier[data_map] ={}
keyword[for] identifier[rows] , identifier[totals] keyword[in] identifier[project_totals] :
keyword[for] identifier[user] , identifier[user_id] , identifier[periods] keyword[in] identifier[rows] :
keyword[for] identifier[period] keyword[in] identifier[periods] :
identifier[day] = identifier[period] [ literal[string] ]
keyword[if] identifier[day] keyword[not] keyword[in] identifier[data_map] :
identifier[data_map] [ identifier[day] ]={ literal[string] : literal[int] , literal[string] : literal[int] }
identifier[data_map] [ identifier[day] ][ literal[string] ]+= identifier[period] [ literal[string] ]
identifier[data_map] [ identifier[day] ][ literal[string] ]+= identifier[period] [ literal[string] ]
keyword[return] identifier[data_map] | def get_hours_data(self, entries, date_headers):
"""Sum billable and non-billable hours across all users."""
project_totals = get_project_totals(entries, date_headers, total_column=False) if entries else []
data_map = {}
for (rows, totals) in project_totals:
for (user, user_id, periods) in rows:
for period in periods:
day = period['day']
if day not in data_map:
data_map[day] = {'billable': 0, 'nonbillable': 0} # depends on [control=['if'], data=['day', 'data_map']]
data_map[day]['billable'] += period['billable']
data_map[day]['nonbillable'] += period['nonbillable'] # depends on [control=['for'], data=['period']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return data_map |
def _generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a dataframe, generate a Google BigQuery schema.
    This is a private method, but was used in external code to work around
    issues in the default schema generation. Now that individual columns can
    be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
    method can be removed after there is time to migrate away from this
    method. """
    # Import lazily so pandas_gbq is only required when this shim is used.
    from pandas_gbq import schema as pandas_gbq_schema

    return pandas_gbq_schema.generate_bq_schema(df, default_type=default_type)
constant[DEPRECATED: Given a dataframe, generate a Google BigQuery schema.
This is a private method, but was used in external code to work around
issues in the default schema generation. Now that individual columns can
be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
method can be removed after there is time to migrate away from this
method. ]
from relative_module[pandas_gbq] import module[schema]
return[call[name[schema].generate_bq_schema, parameter[name[df]]]] | keyword[def] identifier[_generate_bq_schema] ( identifier[df] , identifier[default_type] = literal[string] ):
literal[string]
keyword[from] identifier[pandas_gbq] keyword[import] identifier[schema]
keyword[return] identifier[schema] . identifier[generate_bq_schema] ( identifier[df] , identifier[default_type] = identifier[default_type] ) | def _generate_bq_schema(df, default_type='STRING'):
"""DEPRECATED: Given a dataframe, generate a Google BigQuery schema.
This is a private method, but was used in external code to work around
issues in the default schema generation. Now that individual columns can
be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
method can be removed after there is time to migrate away from this
method. """
from pandas_gbq import schema
return schema.generate_bq_schema(df, default_type=default_type) |
def _fill_dates_evo(self, query_json, catalog_name, periodicity):
    """Returns an array of dictionaries, where each dictionary contains the
    amount of items created at a given date and grouped by review_state,
    based on the passed in periodicity.
    This is an expensive function that will not be called more than once
    every 2 hours (note cache decorator with `time() // (60 * 60 * 2)
    """
    outevoidx = {}
    outevo = []
    # Segment length in days for each supported periodicity; anything
    # else (e.g. daily) falls back to 1.
    period_days = {
        PERIODICITY_YEARLY: 336,
        PERIODICITY_BIANNUAL: 168,
        PERIODICITY_QUARTERLY: 84,
        PERIODICITY_MONTHLY: 28,
        PERIODICITY_WEEKLY: 7,
        PERIODICITY_ALL: 336,
    }
    days = period_days.get(periodicity, 1)

    # Get the date range
    date_from, date_to = self.get_date_range(periodicity)

    query = json.loads(query_json)
    if 'review_state' in query:
        del query['review_state']
    query['sort_on'] = 'created'
    query['created'] = {'query': (date_from, date_to),
                        'range': 'min:max'}

    otherstate = _('Other status')
    statesmap = self.get_states_map(query['portal_type'])
    # `sorted` returns a list on both py2 and py3; the original
    # `statesmap.values(); stats.sort()` breaks on py3 dict views.
    stats = sorted(statesmap.values())
    stats.append(otherstate)
    statscount = {s: 0 for s in stats}

    # Add first all periods, cause we want all segments to be displayed
    curr = date_from.asdatetime()
    end = date_to.asdatetime()
    while curr < end:
        currstr = self._getDateStr(periodicity, DateTime(curr))
        if currstr not in outevoidx:
            outdict = {'date': currstr}
            for k in stats:
                outdict[k] = 0
            outevo.append(outdict)
            outevoidx[currstr] = len(outevo) - 1
        curr = curr + datetime.timedelta(days=days)

    brains = search(query, catalog_name)
    for brain in brains:
        created = brain.created
        state = brain.review_state
        if state not in statesmap:
            logger.warn("'%s' State for '%s' not available" %
                        (state, query['portal_type']))
        state = statesmap.get(state, otherstate)
        created = self._getDateStr(periodicity, created)
        statscount[state] += 1
        if created in outevoidx:
            oidx = outevoidx[created]
            if state in outevo[oidx]:
                outevo[oidx][state] += 1
            else:
                outevo[oidx][state] = 1
        else:
            # Date outside the pre-built range: create a new row AND
            # register it in the index, so later brains on the same date
            # accumulate here instead of appending duplicate rows.
            outevo.append({'date': created, state: 1})
            outevoidx[created] = len(outevo) - 1

    # Remove all those states for which there is no data
    rstates = [k for k, v in statscount.items() if v == 0]
    for o in outevo:
        for r in rstates:
            if r in o:
                del o[r]

    # Sort available status by number of occurences descending.
    # A list comprehension replaces py2-only `map(...).reverse()`;
    # sort ascending then reverse to keep the original tie ordering.
    sorted_states = [item[0] for item in
                     sorted(statscount.items(), key=itemgetter(1))]
    sorted_states.reverse()
    return {'data': outevo, 'states': sorted_states}
constant[Returns an array of dictionaries, where each dictionary contains the
amount of items created at a given date and grouped by review_state,
based on the passed in periodicity.
This is an expensive function that will not be called more than once
every 2 hours (note cache decorator with `time() // (60 * 60 * 2)
]
variable[outevoidx] assign[=] dictionary[[], []]
variable[outevo] assign[=] list[[]]
variable[days] assign[=] constant[1]
if compare[name[periodicity] equal[==] name[PERIODICITY_YEARLY]] begin[:]
variable[days] assign[=] constant[336]
<ast.Tuple object at 0x7da207f9a410> assign[=] call[name[self].get_date_range, parameter[name[periodicity]]]
variable[query] assign[=] call[name[json].loads, parameter[name[query_json]]]
if compare[constant[review_state] in name[query]] begin[:]
<ast.Delete object at 0x7da207f99900>
call[name[query]][constant[sort_on]] assign[=] constant[created]
call[name[query]][constant[created]] assign[=] dictionary[[<ast.Constant object at 0x7da207f99120>, <ast.Constant object at 0x7da207f9af50>], [<ast.Tuple object at 0x7da207f9aad0>, <ast.Constant object at 0x7da207f9ace0>]]
variable[otherstate] assign[=] call[name[_], parameter[constant[Other status]]]
variable[statesmap] assign[=] call[name[self].get_states_map, parameter[call[name[query]][constant[portal_type]]]]
variable[stats] assign[=] call[name[statesmap].values, parameter[]]
call[name[stats].sort, parameter[]]
call[name[stats].append, parameter[name[otherstate]]]
variable[statscount] assign[=] <ast.DictComp object at 0x7da207f98e20>
variable[curr] assign[=] call[name[date_from].asdatetime, parameter[]]
variable[end] assign[=] call[name[date_to].asdatetime, parameter[]]
while compare[name[curr] less[<] name[end]] begin[:]
variable[currstr] assign[=] call[name[self]._getDateStr, parameter[name[periodicity], call[name[DateTime], parameter[name[curr]]]]]
if compare[name[currstr] <ast.NotIn object at 0x7da2590d7190> name[outevoidx]] begin[:]
variable[outdict] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc9030>], [<ast.Name object at 0x7da18bcc9960>]]
for taget[name[k]] in starred[name[stats]] begin[:]
call[name[outdict]][name[k]] assign[=] constant[0]
call[name[outevo].append, parameter[name[outdict]]]
call[name[outevoidx]][name[currstr]] assign[=] binary_operation[call[name[len], parameter[name[outevo]]] - constant[1]]
variable[curr] assign[=] binary_operation[name[curr] + call[name[datetime].timedelta, parameter[]]]
variable[brains] assign[=] call[name[search], parameter[name[query], name[catalog_name]]]
for taget[name[brain]] in starred[name[brains]] begin[:]
variable[created] assign[=] name[brain].created
variable[state] assign[=] name[brain].review_state
if compare[name[state] <ast.NotIn object at 0x7da2590d7190> name[statesmap]] begin[:]
call[name[logger].warn, parameter[binary_operation[constant['%s' State for '%s' not available] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bccbb50>, <ast.Subscript object at 0x7da18bcc9330>]]]]]
variable[state] assign[=] <ast.IfExp object at 0x7da18bcca2f0>
variable[created] assign[=] call[name[self]._getDateStr, parameter[name[periodicity], name[created]]]
<ast.AugAssign object at 0x7da18bcc9570>
if compare[name[created] in name[outevoidx]] begin[:]
variable[oidx] assign[=] call[name[outevoidx]][name[created]]
if compare[name[state] in call[name[outevo]][name[oidx]]] begin[:]
<ast.AugAssign object at 0x7da18bcc82e0>
variable[rstates] assign[=] <ast.ListComp object at 0x7da18bcc9ae0>
for taget[name[o]] in starred[name[outevo]] begin[:]
for taget[name[r]] in starred[name[rstates]] begin[:]
if compare[name[r] in name[o]] begin[:]
<ast.Delete object at 0x7da18bccaa70>
variable[sorted_states] assign[=] call[name[sorted], parameter[call[name[statscount].items, parameter[]]]]
variable[sorted_states] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da18bcca560>, name[sorted_states]]]
call[name[sorted_states].reverse, parameter[]]
return[dictionary[[<ast.Constant object at 0x7da18bccb8e0>, <ast.Constant object at 0x7da18bccbd30>], [<ast.Name object at 0x7da18bcc97e0>, <ast.Name object at 0x7da18bcc9e40>]]] | keyword[def] identifier[_fill_dates_evo] ( identifier[self] , identifier[query_json] , identifier[catalog_name] , identifier[periodicity] ):
literal[string]
identifier[outevoidx] ={}
identifier[outevo] =[]
identifier[days] = literal[int]
keyword[if] identifier[periodicity] == identifier[PERIODICITY_YEARLY] :
identifier[days] = literal[int]
keyword[elif] identifier[periodicity] == identifier[PERIODICITY_BIANNUAL] :
identifier[days] = literal[int]
keyword[elif] identifier[periodicity] == identifier[PERIODICITY_QUARTERLY] :
identifier[days] = literal[int]
keyword[elif] identifier[periodicity] == identifier[PERIODICITY_MONTHLY] :
identifier[days] = literal[int]
keyword[elif] identifier[periodicity] == identifier[PERIODICITY_WEEKLY] :
identifier[days] = literal[int]
keyword[elif] identifier[periodicity] == identifier[PERIODICITY_ALL] :
identifier[days] = literal[int]
identifier[date_from] , identifier[date_to] = identifier[self] . identifier[get_date_range] ( identifier[periodicity] )
identifier[query] = identifier[json] . identifier[loads] ( identifier[query_json] )
keyword[if] literal[string] keyword[in] identifier[query] :
keyword[del] identifier[query] [ literal[string] ]
identifier[query] [ literal[string] ]= literal[string]
identifier[query] [ literal[string] ]={ literal[string] :( identifier[date_from] , identifier[date_to] ),
literal[string] : literal[string] }
identifier[otherstate] = identifier[_] ( literal[string] )
identifier[statesmap] = identifier[self] . identifier[get_states_map] ( identifier[query] [ literal[string] ])
identifier[stats] = identifier[statesmap] . identifier[values] ()
identifier[stats] . identifier[sort] ()
identifier[stats] . identifier[append] ( identifier[otherstate] )
identifier[statscount] ={ identifier[s] : literal[int] keyword[for] identifier[s] keyword[in] identifier[stats] }
identifier[curr] = identifier[date_from] . identifier[asdatetime] ()
identifier[end] = identifier[date_to] . identifier[asdatetime] ()
keyword[while] identifier[curr] < identifier[end] :
identifier[currstr] = identifier[self] . identifier[_getDateStr] ( identifier[periodicity] , identifier[DateTime] ( identifier[curr] ))
keyword[if] identifier[currstr] keyword[not] keyword[in] identifier[outevoidx] :
identifier[outdict] ={ literal[string] : identifier[currstr] }
keyword[for] identifier[k] keyword[in] identifier[stats] :
identifier[outdict] [ identifier[k] ]= literal[int]
identifier[outevo] . identifier[append] ( identifier[outdict] )
identifier[outevoidx] [ identifier[currstr] ]= identifier[len] ( identifier[outevo] )- literal[int]
identifier[curr] = identifier[curr] + identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[days] )
identifier[brains] = identifier[search] ( identifier[query] , identifier[catalog_name] )
keyword[for] identifier[brain] keyword[in] identifier[brains] :
identifier[created] = identifier[brain] . identifier[created]
identifier[state] = identifier[brain] . identifier[review_state]
keyword[if] identifier[state] keyword[not] keyword[in] identifier[statesmap] :
identifier[logger] . identifier[warn] ( literal[string] %( identifier[state] , identifier[query] [ literal[string] ]))
identifier[state] = identifier[statesmap] [ identifier[state] ] keyword[if] identifier[state] keyword[in] identifier[statesmap] keyword[else] identifier[otherstate]
identifier[created] = identifier[self] . identifier[_getDateStr] ( identifier[periodicity] , identifier[created] )
identifier[statscount] [ identifier[state] ]+= literal[int]
keyword[if] identifier[created] keyword[in] identifier[outevoidx] :
identifier[oidx] = identifier[outevoidx] [ identifier[created] ]
keyword[if] identifier[state] keyword[in] identifier[outevo] [ identifier[oidx] ]:
identifier[outevo] [ identifier[oidx] ][ identifier[state] ]+= literal[int]
keyword[else] :
identifier[outevo] [ identifier[oidx] ][ identifier[state] ]= literal[int]
keyword[else] :
identifier[currow] ={ literal[string] : identifier[created] ,
identifier[state] : literal[int] }
identifier[outevo] . identifier[append] ( identifier[currow] )
identifier[rstates] =[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[statscount] . identifier[items] () keyword[if] identifier[v] == literal[int] ]
keyword[for] identifier[o] keyword[in] identifier[outevo] :
keyword[for] identifier[r] keyword[in] identifier[rstates] :
keyword[if] identifier[r] keyword[in] identifier[o] :
keyword[del] identifier[o] [ identifier[r] ]
identifier[sorted_states] = identifier[sorted] ( identifier[statscount] . identifier[items] (), identifier[key] = identifier[itemgetter] ( literal[int] ))
identifier[sorted_states] = identifier[map] ( keyword[lambda] identifier[item] : identifier[item] [ literal[int] ], identifier[sorted_states] )
identifier[sorted_states] . identifier[reverse] ()
keyword[return] { literal[string] : identifier[outevo] , literal[string] : identifier[sorted_states] } | def _fill_dates_evo(self, query_json, catalog_name, periodicity):
"""Returns an array of dictionaries, where each dictionary contains the
amount of items created at a given date and grouped by review_state,
based on the passed in periodicity.
This is an expensive function that will not be called more than once
every 2 hours (note cache decorator with `time() // (60 * 60 * 2)
"""
outevoidx = {}
outevo = []
days = 1
if periodicity == PERIODICITY_YEARLY:
days = 336 # depends on [control=['if'], data=[]]
elif periodicity == PERIODICITY_BIANNUAL:
days = 168 # depends on [control=['if'], data=[]]
elif periodicity == PERIODICITY_QUARTERLY:
days = 84 # depends on [control=['if'], data=[]]
elif periodicity == PERIODICITY_MONTHLY:
days = 28 # depends on [control=['if'], data=[]]
elif periodicity == PERIODICITY_WEEKLY:
days = 7 # depends on [control=['if'], data=[]]
elif periodicity == PERIODICITY_ALL:
days = 336 # depends on [control=['if'], data=[]]
# Get the date range
(date_from, date_to) = self.get_date_range(periodicity)
query = json.loads(query_json)
if 'review_state' in query:
del query['review_state'] # depends on [control=['if'], data=['query']]
query['sort_on'] = 'created'
query['created'] = {'query': (date_from, date_to), 'range': 'min:max'}
otherstate = _('Other status')
statesmap = self.get_states_map(query['portal_type'])
stats = statesmap.values()
stats.sort()
stats.append(otherstate)
statscount = {s: 0 for s in stats}
# Add first all periods, cause we want all segments to be displayed
curr = date_from.asdatetime()
end = date_to.asdatetime()
while curr < end:
currstr = self._getDateStr(periodicity, DateTime(curr))
if currstr not in outevoidx:
outdict = {'date': currstr}
for k in stats:
outdict[k] = 0 # depends on [control=['for'], data=['k']]
outevo.append(outdict)
outevoidx[currstr] = len(outevo) - 1 # depends on [control=['if'], data=['currstr', 'outevoidx']]
curr = curr + datetime.timedelta(days=days) # depends on [control=['while'], data=['curr']]
brains = search(query, catalog_name)
for brain in brains:
created = brain.created
state = brain.review_state
if state not in statesmap:
logger.warn("'%s' State for '%s' not available" % (state, query['portal_type'])) # depends on [control=['if'], data=['state']]
state = statesmap[state] if state in statesmap else otherstate
created = self._getDateStr(periodicity, created)
statscount[state] += 1
if created in outevoidx:
oidx = outevoidx[created]
if state in outevo[oidx]:
outevo[oidx][state] += 1 # depends on [control=['if'], data=['state']]
else:
outevo[oidx][state] = 1 # depends on [control=['if'], data=['created', 'outevoidx']]
else:
# Create new row
currow = {'date': created, state: 1}
outevo.append(currow) # depends on [control=['for'], data=['brain']]
# Remove all those states for which there is no data
rstates = [k for (k, v) in statscount.items() if v == 0]
for o in outevo:
for r in rstates:
if r in o:
del o[r] # depends on [control=['if'], data=['r', 'o']] # depends on [control=['for'], data=['r']] # depends on [control=['for'], data=['o']]
# Sort available status by number of occurences descending
sorted_states = sorted(statscount.items(), key=itemgetter(1))
sorted_states = map(lambda item: item[0], sorted_states)
sorted_states.reverse()
return {'data': outevo, 'states': sorted_states} |
def lkruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group. This function
    calculates the Kruskal-Wallis H-test for 3 or more independent samples
    and returns the result.

    Usage:   lkruskalwallish(*args)        each arg is one sample (a list)
    Returns: H-statistic (corrected for ties), associated p-value

    Raises:  ValueError if every pooled value is identical (the tie
             correction would otherwise divide by zero).
    """
    args = list(args)
    # Per-group sample sizes.  (The original code first built a throwaway
    # [0]*len(args) list that was immediately overwritten — removed.)
    n = [len(group) for group in args]
    # Pool all samples; ranks are assigned over the pooled data.
    # extend() avoids the quadratic `pooled = pooled + group` pattern and
    # no longer shadows the builtin `all`.
    pooled = []
    for group in args:
        pooled.extend(group)
    ranked = rankdata(pooled)
    T = tiecorrect(ranked)
    # Split the pooled ranks back into per-group rank lists, consuming
    # `ranked` from the front in the original group order.
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    # Sum over groups of (rank-sum squared / group size).
    rsums = [sum(group) ** 2 / float(size) for group, size in zip(args, n)]
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = len(args) - 1
    if T == 0:
        raise ValueError('All numbers are identical in lkruskalwallish')
    # Apply the tie correction.
    h = h / float(T)
    return h, chisqprob(h, df)
constant[
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
]
variable[args] assign[=] call[name[list], parameter[name[args]]]
variable[n] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0d521a0>]] * call[name[len], parameter[name[args]]]]
variable[all] assign[=] list[[]]
variable[n] assign[=] <ast.ListComp object at 0x7da1b0d50af0>
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[args]]]]]] begin[:]
variable[all] assign[=] binary_operation[name[all] + call[name[args]][name[i]]]
variable[ranked] assign[=] call[name[rankdata], parameter[name[all]]]
variable[T] assign[=] call[name[tiecorrect], parameter[name[ranked]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[args]]]]]] begin[:]
call[name[args]][name[i]] assign[=] call[name[ranked]][<ast.Slice object at 0x7da1b0d514e0>]
<ast.Delete object at 0x7da1b0d52290>
variable[rsums] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[args]]]]]] begin[:]
call[name[rsums].append, parameter[binary_operation[call[name[sum], parameter[call[name[args]][name[i]]]] ** constant[2]]]]
call[name[rsums]][name[i]] assign[=] binary_operation[call[name[rsums]][name[i]] / call[name[float], parameter[call[name[n]][name[i]]]]]
variable[ssbn] assign[=] call[name[sum], parameter[name[rsums]]]
variable[totaln] assign[=] call[name[sum], parameter[name[n]]]
variable[h] assign[=] binary_operation[binary_operation[binary_operation[constant[12.0] / binary_operation[name[totaln] * binary_operation[name[totaln] + constant[1]]]] * name[ssbn]] - binary_operation[constant[3] * binary_operation[name[totaln] + constant[1]]]]
variable[df] assign[=] binary_operation[call[name[len], parameter[name[args]]] - constant[1]]
if compare[name[T] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0d503a0>
variable[h] assign[=] binary_operation[name[h] / call[name[float], parameter[name[T]]]]
return[tuple[[<ast.Name object at 0x7da1b0d53a30>, <ast.Call object at 0x7da1b0d50280>]]] | keyword[def] identifier[lkruskalwallish] (* identifier[args] ):
literal[string]
identifier[args] = identifier[list] ( identifier[args] )
identifier[n] =[ literal[int] ]* identifier[len] ( identifier[args] )
identifier[all] =[]
identifier[n] =[ identifier[len] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[args] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[args] )):
identifier[all] = identifier[all] + identifier[args] [ identifier[i] ]
identifier[ranked] = identifier[rankdata] ( identifier[all] )
identifier[T] = identifier[tiecorrect] ( identifier[ranked] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[args] )):
identifier[args] [ identifier[i] ]= identifier[ranked] [ literal[int] : identifier[n] [ identifier[i] ]]
keyword[del] identifier[ranked] [ literal[int] : identifier[n] [ identifier[i] ]]
identifier[rsums] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[args] )):
identifier[rsums] . identifier[append] ( identifier[sum] ( identifier[args] [ identifier[i] ])** literal[int] )
identifier[rsums] [ identifier[i] ]= identifier[rsums] [ identifier[i] ]/ identifier[float] ( identifier[n] [ identifier[i] ])
identifier[ssbn] = identifier[sum] ( identifier[rsums] )
identifier[totaln] = identifier[sum] ( identifier[n] )
identifier[h] = literal[int] /( identifier[totaln] *( identifier[totaln] + literal[int] ))* identifier[ssbn] - literal[int] *( identifier[totaln] + literal[int] )
identifier[df] = identifier[len] ( identifier[args] )- literal[int]
keyword[if] identifier[T] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[h] = identifier[h] / identifier[float] ( identifier[T] )
keyword[return] identifier[h] , identifier[chisqprob] ( identifier[h] , identifier[df] ) | def lkruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
args = list(args)
n = [0] * len(args)
all = []
n = [len(_) for _ in args]
for i in range(len(args)):
all = all + args[i] # depends on [control=['for'], data=['i']]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]] # depends on [control=['for'], data=['i']]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i]) ** 2)
rsums[i] = rsums[i] / float(n[i]) # depends on [control=['for'], data=['i']]
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in lkruskalwallish') # depends on [control=['if'], data=[]]
h = h / float(T)
return (h, chisqprob(h, df)) |
def _deconv_rl_gpu_conv(data_g, h_g, Niter=10):
    """
    Iterative deconvolution of ``data_g`` with kernel ``h_g`` on the GPU,
    using spatial-domain convolution (the name indicates Richardson-Lucy;
    the divide/convolve-with-flipped-kernel/multiply loop matches that
    scheme).

    Parameters:
        data_g: OCLArray, float32 — observed image on the device.
                NOTE(review): assumed 2D, since the kernel flip below
                reverses exactly two axes — confirm.
        h_g:    OCLArray, float32 — convolution kernel / PSF on the device.
        Niter:  int — number of update iterations.

    Returns:
        OCLArray — the running estimate ``u_g`` (same shape as ``data_g``).
    """
    # set up some gpu buffers
    u_g = OCLArray.empty(data_g.shape, np.float32)
    u_g.copy_buffer(data_g)  # initial estimate u0 = observed data
    tmp_g = OCLArray.empty(data_g.shape, np.float32)
    tmp2_g = OCLArray.empty(data_g.shape, np.float32)
    # Build the axis-reversed ("flipped") kernel.  This round-trips through
    # the host (h_g.get() then re-upload) — the original marked this
    # "# fix this"; a device-side flip would avoid the transfer.
    hflip_g = OCLArray.from_array((h_g.get()[::-1, ::-1]).copy())
    for i in range(Niter):
        # tmp_g <- u_g convolved with h_g (estimate blurred by the kernel)
        convolve(u_g, h_g,
                 res_g=tmp_g)
        # tmp_g <- data_g / tmp_g — presumably the second argument is the
        # one overwritten, since data_g must survive across iterations and
        # tmp_g feeds the next convolve; TODO confirm _divide_inplace.
        _divide_inplace(data_g, tmp_g)
        # tmp2_g <- ratio convolved with the flipped kernel
        convolve(tmp_g, hflip_g,
                 res_g=tmp2_g)
        # u_g <- u_g * tmp2_g — multiplicative update of the estimate
        _multiply_inplace(u_g, tmp2_g)
    return u_g
constant[
using convolve
]
variable[u_g] assign[=] call[name[OCLArray].empty, parameter[name[data_g].shape, name[np].float32]]
call[name[u_g].copy_buffer, parameter[name[data_g]]]
variable[tmp_g] assign[=] call[name[OCLArray].empty, parameter[name[data_g].shape, name[np].float32]]
variable[tmp2_g] assign[=] call[name[OCLArray].empty, parameter[name[data_g].shape, name[np].float32]]
variable[hflip_g] assign[=] call[name[OCLArray].from_array, parameter[call[call[call[name[h_g].get, parameter[]]][tuple[[<ast.Slice object at 0x7da18f00d720>, <ast.Slice object at 0x7da18f00f760>]]].copy, parameter[]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[Niter]]]] begin[:]
call[name[convolve], parameter[name[u_g], name[h_g]]]
call[name[_divide_inplace], parameter[name[data_g], name[tmp_g]]]
call[name[convolve], parameter[name[tmp_g], name[hflip_g]]]
call[name[_multiply_inplace], parameter[name[u_g], name[tmp2_g]]]
return[name[u_g]] | keyword[def] identifier[_deconv_rl_gpu_conv] ( identifier[data_g] , identifier[h_g] , identifier[Niter] = literal[int] ):
literal[string]
identifier[u_g] = identifier[OCLArray] . identifier[empty] ( identifier[data_g] . identifier[shape] , identifier[np] . identifier[float32] )
identifier[u_g] . identifier[copy_buffer] ( identifier[data_g] )
identifier[tmp_g] = identifier[OCLArray] . identifier[empty] ( identifier[data_g] . identifier[shape] , identifier[np] . identifier[float32] )
identifier[tmp2_g] = identifier[OCLArray] . identifier[empty] ( identifier[data_g] . identifier[shape] , identifier[np] . identifier[float32] )
identifier[hflip_g] = identifier[OCLArray] . identifier[from_array] (( identifier[h_g] . identifier[get] ()[::- literal[int] ,::- literal[int] ]). identifier[copy] ())
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[Niter] ):
identifier[convolve] ( identifier[u_g] , identifier[h_g] ,
identifier[res_g] = identifier[tmp_g] )
identifier[_divide_inplace] ( identifier[data_g] , identifier[tmp_g] )
identifier[convolve] ( identifier[tmp_g] , identifier[hflip_g] ,
identifier[res_g] = identifier[tmp2_g] )
identifier[_multiply_inplace] ( identifier[u_g] , identifier[tmp2_g] )
keyword[return] identifier[u_g] | def _deconv_rl_gpu_conv(data_g, h_g, Niter=10):
"""
using convolve
"""
# set up some gpu buffers
u_g = OCLArray.empty(data_g.shape, np.float32)
u_g.copy_buffer(data_g)
tmp_g = OCLArray.empty(data_g.shape, np.float32)
tmp2_g = OCLArray.empty(data_g.shape, np.float32)
# fix this
hflip_g = OCLArray.from_array(h_g.get()[::-1, ::-1].copy())
for i in range(Niter):
convolve(u_g, h_g, res_g=tmp_g)
_divide_inplace(data_g, tmp_g)
# return data_g, tmp_g
convolve(tmp_g, hflip_g, res_g=tmp2_g)
_multiply_inplace(u_g, tmp2_g) # depends on [control=['for'], data=[]]
return u_g |
def find_message(current):
    """
    Search in messages. If "channel_key" given, search will be limited to that channel,
    otherwise search will be performed on all of user's subscribed channels.

    .. code-block:: python

        #  request:
            {
            'view':'_zops_search_unit,
            'channel_key': key,
            'query': string,
            'page': int,
            }

        #  response:
            {
            'results': [MSG_DICT, ],
            'pagination': {
                'page': int, # current page
                'total_pages': int,
                'total_objects': int,
                'per_page': int, # object per page
                },
            'status': 'OK',
            'code': 201
            }

    NOTE(review): the implementation sets ``code`` to 201 while the original
    docstring example said 200; the example above was aligned to the code —
    confirm 201 is the intended status for a search response.
    """
    current.output = {
        'results': [],
        'status': 'OK',
        'code': 201
    }
    # Substring search over the message's title, body and url fields.
    query_set = Message(current).objects.search_on(['msg_title', 'body', 'url'],
                                                   contains=current.input['query'])
    if current.input['channel_key']:
        # Restrict the search to the explicitly requested channel.
        query_set = query_set.filter(channel_id=current.input['channel_key'])
    else:
        # No channel given: search across every channel this user subscribes to.
        subscribed_channels = Subscriber.objects.filter(user_id=current.user_id).values_list(
            "channel_id", flatten=True)
        query_set = query_set.filter(channel_id__in=subscribed_channels)

    # Page the result set and expose the pagination metadata to the client.
    query_set, pagination_data = _paginate(current_page=current.input['page'], query_set=query_set)
    current.output['pagination'] = pagination_data
    for msg in query_set:
        # Serialize each hit for the requesting user.
        current.output['results'].append(msg.serialize(current.user))
constant[
Search in messages. If "channel_key" given, search will be limited to that channel,
otherwise search will be performed on all of user's subscribed channels.
.. code-block:: python
# request:
{
'view':'_zops_search_unit,
'channel_key': key,
'query': string,
'page': int,
}
# response:
{
'results': [MSG_DICT, ],
'pagination': {
'page': int, # current page
'total_pages': int,
'total_objects': int,
'per_page': int, # object per page
},
'status': 'OK',
'code': 200
}
]
name[current].output assign[=] dictionary[[<ast.Constant object at 0x7da20c9933a0>, <ast.Constant object at 0x7da20c992e00>, <ast.Constant object at 0x7da20c9925f0>], [<ast.List object at 0x7da20c9938e0>, <ast.Constant object at 0x7da20c9926e0>, <ast.Constant object at 0x7da20c991390>]]
variable[query_set] assign[=] call[call[name[Message], parameter[name[current]]].objects.search_on, parameter[list[[<ast.Constant object at 0x7da20c991bd0>, <ast.Constant object at 0x7da20c9928c0>, <ast.Constant object at 0x7da20c9911b0>]]]]
if call[name[current].input][constant[channel_key]] begin[:]
variable[query_set] assign[=] call[name[query_set].filter, parameter[]]
<ast.Tuple object at 0x7da20c993070> assign[=] call[name[_paginate], parameter[]]
call[name[current].output][constant[pagination]] assign[=] name[pagination_data]
for taget[name[msg]] in starred[name[query_set]] begin[:]
call[call[name[current].output][constant[results]].append, parameter[call[name[msg].serialize, parameter[name[current].user]]]] | keyword[def] identifier[find_message] ( identifier[current] ):
literal[string]
identifier[current] . identifier[output] ={
literal[string] :[],
literal[string] : literal[string] ,
literal[string] : literal[int]
}
identifier[query_set] = identifier[Message] ( identifier[current] ). identifier[objects] . identifier[search_on] ([ literal[string] , literal[string] , literal[string] ],
identifier[contains] = identifier[current] . identifier[input] [ literal[string] ])
keyword[if] identifier[current] . identifier[input] [ literal[string] ]:
identifier[query_set] = identifier[query_set] . identifier[filter] ( identifier[channel_id] = identifier[current] . identifier[input] [ literal[string] ])
keyword[else] :
identifier[subscribed_channels] = identifier[Subscriber] . identifier[objects] . identifier[filter] ( identifier[user_id] = identifier[current] . identifier[user_id] ). identifier[values_list] (
literal[string] , identifier[flatten] = keyword[True] )
identifier[query_set] = identifier[query_set] . identifier[filter] ( identifier[channel_id__in] = identifier[subscribed_channels] )
identifier[query_set] , identifier[pagination_data] = identifier[_paginate] ( identifier[current_page] = identifier[current] . identifier[input] [ literal[string] ], identifier[query_set] = identifier[query_set] )
identifier[current] . identifier[output] [ literal[string] ]= identifier[pagination_data]
keyword[for] identifier[msg] keyword[in] identifier[query_set] :
identifier[current] . identifier[output] [ literal[string] ]. identifier[append] ( identifier[msg] . identifier[serialize] ( identifier[current] . identifier[user] )) | def find_message(current):
"""
Search in messages. If "channel_key" given, search will be limited to that channel,
otherwise search will be performed on all of user's subscribed channels.
.. code-block:: python
# request:
{
'view':'_zops_search_unit,
'channel_key': key,
'query': string,
'page': int,
}
# response:
{
'results': [MSG_DICT, ],
'pagination': {
'page': int, # current page
'total_pages': int,
'total_objects': int,
'per_page': int, # object per page
},
'status': 'OK',
'code': 200
}
"""
current.output = {'results': [], 'status': 'OK', 'code': 201}
query_set = Message(current).objects.search_on(['msg_title', 'body', 'url'], contains=current.input['query'])
if current.input['channel_key']:
query_set = query_set.filter(channel_id=current.input['channel_key']) # depends on [control=['if'], data=[]]
else:
subscribed_channels = Subscriber.objects.filter(user_id=current.user_id).values_list('channel_id', flatten=True)
query_set = query_set.filter(channel_id__in=subscribed_channels)
(query_set, pagination_data) = _paginate(current_page=current.input['page'], query_set=query_set)
current.output['pagination'] = pagination_data
for msg in query_set:
current.output['results'].append(msg.serialize(current.user)) # depends on [control=['for'], data=['msg']] |
def map_package_to_dataset(package, portal_url):
    """Map a CKAN 'package' metadata dict onto a 'dataset' metadata dict
    following the data.json standard.

    Args:
        package (dict): Package metadata as returned by CKAN's
            'package_show' endpoint; must contain "resources", "groups",
            "tags" and "name".
        portal_url (str): Base URL of the portal, forwarded to
            map_resources_to_distributions().

    Returns:
        dict: data.json-style dataset metadata.
    """

    def _copy_fields(mapping, target, error_template):
        """Copy package[src] into target[dst] for each (src, dst) pair.

        A missing source key is logged via *error_template* (which takes the
        source key, the package name and the destination key) instead of
        aborting the whole mapping.
        """
        for package_key, target_key in iteritems(mapping):
            try:
                target[target_key] = package[package_key]
            # Narrowed from the original `except BaseException` (which also
            # swallowed KeyboardInterrupt/SystemExit): only the dict lookups
            # inside the try can fail, and they raise KeyError.
            except KeyError:
                logger.exception(error_template,
                                 package_key, package['name'], target_key)

    dataset = dict()
    resources = package["resources"]
    groups = package["groups"]
    tags = package["tags"]

    # Top-level dataset fields copied straight from the package.
    dataset_mapping = {
        'title': 'title',
        'notes': 'description',
        'metadata_created': 'issued',
        'metadata_modified': 'modified',
        'license_title': 'license',
        'id': 'identifier',
        'url': 'landingPage'
    }
    _copy_fields(dataset_mapping, dataset, """
            La clave '%s' no está en el endpoint 'package_show' para el
            package '%s'. No se puede completar dataset['%s'].""")

    # Publisher sub-object, only when at least one source field is present.
    publisher_mapping = {
        'author': 'name',
        'author_email': 'mbox'
    }
    if any(k in package for k in publisher_mapping):
        dataset["publisher"] = dict()
        _copy_fields(publisher_mapping, dataset["publisher"], """
                La clave '%s' no está en el endpoint 'package_show' para el
                package '%s'. No se puede completar
                dataset['publisher']['%s'].""")

    # Contact-point sub-object, only when at least one source field is present.
    contact_point_mapping = {
        'maintainer': 'fn',
        'maintainer_email': 'hasEmail'
    }
    if any(k in package for k in contact_point_mapping):
        dataset["contactPoint"] = dict()
        _copy_fields(contact_point_mapping, dataset["contactPoint"], """
                La clave '%s' no está en el endpoint 'package_show' para el
                package '%s'. No se puede completar
                dataset['contactPoint']['%s'].""")

    # If the package carries "extras", use them to fill the dataset's
    # "accrualPeriodicity", "superTheme" and "temporal" fields
    # (e.g. the "Frecuencia de actualización" / "Temática global" extras).
    if "extras" in package:
        add_accrualPeriodicity(dataset, package)
        add_superTheme(dataset, package)
        add_temporal(dataset, package)

    dataset["distribution"] = map_resources_to_distributions(resources,
                                                             portal_url)
    dataset["theme"] = [grp['name'] for grp in groups]
    dataset['keyword'] = [tag['name'] for tag in tags]

    return dataset
constant[Mapea un diccionario con metadatos de cierto 'package' de CKAN a un
diccionario con metadatos de un 'dataset' según el estándar data.json.]
variable[dataset] assign[=] call[name[dict], parameter[]]
variable[resources] assign[=] call[name[package]][constant[resources]]
variable[groups] assign[=] call[name[package]][constant[groups]]
variable[tags] assign[=] call[name[package]][constant[tags]]
variable[dataset_mapping] assign[=] dictionary[[<ast.Constant object at 0x7da1b04d4700>, <ast.Constant object at 0x7da1b04d7130>, <ast.Constant object at 0x7da1b04d7f10>, <ast.Constant object at 0x7da1b04d4850>, <ast.Constant object at 0x7da1b04d7460>, <ast.Constant object at 0x7da1b04d6ad0>, <ast.Constant object at 0x7da1b04d5c00>], [<ast.Constant object at 0x7da1b04d5e40>, <ast.Constant object at 0x7da1b04d5420>, <ast.Constant object at 0x7da1b04d6080>, <ast.Constant object at 0x7da1b04d4610>, <ast.Constant object at 0x7da1b04d7fa0>, <ast.Constant object at 0x7da1b04d4370>, <ast.Constant object at 0x7da1b04d6020>]]
for taget[tuple[[<ast.Name object at 0x7da1b04d6cb0>, <ast.Name object at 0x7da1b04d6920>]]] in starred[call[name[iteritems], parameter[name[dataset_mapping]]]] begin[:]
<ast.Try object at 0x7da1b26acf40>
variable[publisher_mapping] assign[=] dictionary[[<ast.Constant object at 0x7da1b04d5630>, <ast.Constant object at 0x7da1b04d7580>], [<ast.Constant object at 0x7da1b04d5bd0>, <ast.Constant object at 0x7da1b04d6950>]]
if call[name[any], parameter[<ast.ListComp object at 0x7da1b04d46d0>]] begin[:]
call[name[dataset]][constant[publisher]] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b04d4910>, <ast.Name object at 0x7da1b04d6aa0>]]] in starred[call[name[iteritems], parameter[name[publisher_mapping]]]] begin[:]
<ast.Try object at 0x7da1b04d7610>
variable[contact_point_mapping] assign[=] dictionary[[<ast.Constant object at 0x7da18f58f160>, <ast.Constant object at 0x7da18f58fb20>], [<ast.Constant object at 0x7da18f58e2c0>, <ast.Constant object at 0x7da18f58ee90>]]
if call[name[any], parameter[<ast.ListComp object at 0x7da18f58f8e0>]] begin[:]
call[name[dataset]][constant[contactPoint]] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18f58f610>, <ast.Name object at 0x7da18f58ea40>]]] in starred[call[name[iteritems], parameter[name[contact_point_mapping]]]] begin[:]
<ast.Try object at 0x7da18f58cee0>
if compare[constant[extras] in name[package]] begin[:]
call[name[add_accrualPeriodicity], parameter[name[dataset], name[package]]]
call[name[add_superTheme], parameter[name[dataset], name[package]]]
call[name[add_temporal], parameter[name[dataset], name[package]]]
call[name[dataset]][constant[distribution]] assign[=] call[name[map_resources_to_distributions], parameter[name[resources], name[portal_url]]]
call[name[dataset]][constant[theme]] assign[=] <ast.ListComp object at 0x7da20c6ab0a0>
call[name[dataset]][constant[keyword]] assign[=] <ast.ListComp object at 0x7da20c6a8a60>
return[name[dataset]] | keyword[def] identifier[map_package_to_dataset] ( identifier[package] , identifier[portal_url] ):
literal[string]
identifier[dataset] = identifier[dict] ()
identifier[resources] = identifier[package] [ literal[string] ]
identifier[groups] = identifier[package] [ literal[string] ]
identifier[tags] = identifier[package] [ literal[string] ]
identifier[dataset_mapping] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[for] identifier[package_key] , identifier[dataset_key] keyword[in] identifier[iteritems] ( identifier[dataset_mapping] ):
keyword[try] :
identifier[dataset] [ identifier[dataset_key] ]= identifier[package] [ identifier[package_key] ]
keyword[except] identifier[BaseException] :
identifier[logger] . identifier[exception] ( literal[string] ,
identifier[package_key] , identifier[package] [ literal[string] ], identifier[dataset_key] )
identifier[publisher_mapping] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[if] identifier[any] ([ identifier[k] keyword[in] identifier[package] keyword[for] identifier[k] keyword[in] identifier[publisher_mapping] . identifier[keys] ()]):
identifier[dataset] [ literal[string] ]= identifier[dict] ()
keyword[for] identifier[package_key] , identifier[publisher_key] keyword[in] identifier[iteritems] ( identifier[publisher_mapping] ):
keyword[try] :
identifier[dataset] [ literal[string] ][ identifier[publisher_key] ]= identifier[package] [ identifier[package_key] ]
keyword[except] identifier[BaseException] :
identifier[logger] . identifier[exception] ( literal[string] ,
identifier[package_key] , identifier[package] [ literal[string] ], identifier[publisher_key] )
identifier[contact_point_mapping] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[if] identifier[any] ([ identifier[k] keyword[in] identifier[package] keyword[for] identifier[k] keyword[in] identifier[contact_point_mapping] . identifier[keys] ()]):
identifier[dataset] [ literal[string] ]= identifier[dict] ()
keyword[for] identifier[package_key] , identifier[contact_key] keyword[in] identifier[iteritems] ( identifier[contact_point_mapping] ):
keyword[try] :
identifier[dataset] [ literal[string] ][ identifier[contact_key] ]= identifier[package] [ identifier[package_key] ]
keyword[except] identifier[BaseException] :
identifier[logger] . identifier[exception] ( literal[string] ,
identifier[package_key] , identifier[package] [ literal[string] ], identifier[contact_key] )
keyword[if] literal[string] keyword[in] identifier[package] :
identifier[add_accrualPeriodicity] ( identifier[dataset] , identifier[package] )
identifier[add_superTheme] ( identifier[dataset] , identifier[package] )
identifier[add_temporal] ( identifier[dataset] , identifier[package] )
identifier[dataset] [ literal[string] ]= identifier[map_resources_to_distributions] ( identifier[resources] ,
identifier[portal_url] )
identifier[dataset] [ literal[string] ]=[ identifier[grp] [ literal[string] ] keyword[for] identifier[grp] keyword[in] identifier[groups] ]
identifier[dataset] [ literal[string] ]=[ identifier[tag] [ literal[string] ] keyword[for] identifier[tag] keyword[in] identifier[tags] ]
keyword[return] identifier[dataset] | def map_package_to_dataset(package, portal_url):
"""Mapea un diccionario con metadatos de cierto 'package' de CKAN a un
diccionario con metadatos de un 'dataset' según el estándar data.json."""
dataset = dict()
resources = package['resources']
groups = package['groups']
tags = package['tags']
dataset_mapping = {'title': 'title', 'notes': 'description', 'metadata_created': 'issued', 'metadata_modified': 'modified', 'license_title': 'license', 'id': 'identifier', 'url': 'landingPage'}
for (package_key, dataset_key) in iteritems(dataset_mapping):
try:
dataset[dataset_key] = package[package_key] # depends on [control=['try'], data=[]]
except BaseException:
logger.exception("\n La clave '%s' no está en el endpoint 'package_show' para el\n package '%s'. No se puede completar dataset['%s'].", package_key, package['name'], dataset_key) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
publisher_mapping = {'author': 'name', 'author_email': 'mbox'}
if any([k in package for k in publisher_mapping.keys()]):
dataset['publisher'] = dict()
for (package_key, publisher_key) in iteritems(publisher_mapping):
try:
dataset['publisher'][publisher_key] = package[package_key] # depends on [control=['try'], data=[]]
except BaseException:
logger.exception("\n La clave '%s' no está en el endpoint 'package_show' para el\n package '%s'. No se puede completar\n dataset['publisher']['%s'].", package_key, package['name'], publisher_key) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
contact_point_mapping = {'maintainer': 'fn', 'maintainer_email': 'hasEmail'}
if any([k in package for k in contact_point_mapping.keys()]):
dataset['contactPoint'] = dict()
for (package_key, contact_key) in iteritems(contact_point_mapping):
try:
dataset['contactPoint'][contact_key] = package[package_key] # depends on [control=['try'], data=[]]
except BaseException:
logger.exception("\n La clave '%s' no está en el endpoint 'package_show' para el\n package '%s'. No se puede completar\n dataset['contactPoint']['%s'].", package_key, package['name'], contact_key) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Si existen campos extras en la información del package, busco las claves
# "Frecuencia de actualización" y "Temática global" para completar los
# campos "accrualPeriodicity" y "superTheme" del dataset, respectivamente.
if 'extras' in package:
add_accrualPeriodicity(dataset, package)
add_superTheme(dataset, package)
add_temporal(dataset, package) # depends on [control=['if'], data=['package']]
dataset['distribution'] = map_resources_to_distributions(resources, portal_url)
dataset['theme'] = [grp['name'] for grp in groups]
dataset['keyword'] = [tag['name'] for tag in tags]
return dataset |
def get_storage_account_keys(access_token, subscription_id, rgname, account_name):
    '''Get the access keys for the specified storage account.
    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        account_name (str): Name of the new storage account.
    Returns:
        HTTP response. JSON body of storage account keys.
    '''
    # Assemble the ARM listKeys endpoint for this storage account.
    path_template = ('{root}/subscriptions/{sub}/resourcegroups/{rg}'
                     '/providers/Microsoft.Storage/storageAccounts/{acct}'
                     '/listKeys?api-version={api}')
    endpoint = path_template.format(root=get_rm_endpoint(),
                                    sub=subscription_id,
                                    rg=rgname,
                                    acct=account_name,
                                    api=STORAGE_API)
    # listKeys is a POST with an empty body.
    return do_post(endpoint, '', access_token)
constant[Get the access keys for the specified storage account.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
account_name (str): Name of the new storage account.
Returns:
HTTP response. JSON body of storage account keys.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b04ca680>, <ast.Constant object at 0x7da1b04c83a0>, <ast.Name object at 0x7da1b04c9e10>, <ast.Constant object at 0x7da1b04c9bd0>, <ast.Name object at 0x7da1b04c9b40>, <ast.Constant object at 0x7da1b04c9b10>, <ast.Name object at 0x7da1b04c82b0>, <ast.Constant object at 0x7da1b04ca860>, <ast.Constant object at 0x7da1b04ca800>, <ast.Name object at 0x7da1b04c81c0>]]]]
return[call[name[do_post], parameter[name[endpoint], constant[], name[access_token]]]] | keyword[def] identifier[get_storage_account_keys] ( identifier[access_token] , identifier[subscription_id] , identifier[rgname] , identifier[account_name] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] , identifier[rgname] ,
literal[string] , identifier[account_name] ,
literal[string] ,
literal[string] , identifier[STORAGE_API] ])
keyword[return] identifier[do_post] ( identifier[endpoint] , literal[string] , identifier[access_token] ) | def get_storage_account_keys(access_token, subscription_id, rgname, account_name):
"""Get the access keys for the specified storage account.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
account_name (str): Name of the new storage account.
Returns:
HTTP response. JSON body of storage account keys.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts/', account_name, '/listKeys', '?api-version=', STORAGE_API])
return do_post(endpoint, '', access_token) |
def sjuncChunk(key, chunk):
    """
    Parse Super Junction (SJUNC) Chunk Method
    """
    # Tokenize the first line of the chunk; token 0 is the card name and
    # tokens 1-9 are the junction's field values, in fixed order.
    tokens = chunk[0].strip().split()
    field_names = ('sjuncNumber',
                   'groundSurfaceElev',
                   'invertElev',
                   'manholeSA',
                   'inletCode',
                   'linkOrCellI',
                   'nodeOrCellJ',
                   'weirSideLength',
                   'orificeDiameter')
    # Explicit indexing (rather than zip) preserves the original behavior of
    # raising IndexError on a malformed, too-short card line.
    return {name: tokens[idx] for idx, name in enumerate(field_names, start=1)}
constant[
Parse Super Junction (SJUNC) Chunk Method
]
variable[schunk] assign[=] call[call[call[name[chunk]][constant[0]].strip, parameter[]].split, parameter[]]
variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da18f721990>, <ast.Constant object at 0x7da18f720a60>, <ast.Constant object at 0x7da18f722740>, <ast.Constant object at 0x7da18f723cd0>, <ast.Constant object at 0x7da18f7229b0>, <ast.Constant object at 0x7da18f7234c0>, <ast.Constant object at 0x7da18f7220b0>, <ast.Constant object at 0x7da18f7224a0>, <ast.Constant object at 0x7da18f722ec0>], [<ast.Subscript object at 0x7da18f721090>, <ast.Subscript object at 0x7da18f722860>, <ast.Subscript object at 0x7da18f7227d0>, <ast.Subscript object at 0x7da18f722f20>, <ast.Subscript object at 0x7da18f720910>, <ast.Subscript object at 0x7da18f723f40>, <ast.Subscript object at 0x7da18f723b20>, <ast.Subscript object at 0x7da18f723220>, <ast.Subscript object at 0x7da18f720b80>]]
return[name[result]] | keyword[def] identifier[sjuncChunk] ( identifier[key] , identifier[chunk] ):
literal[string]
identifier[schunk] = identifier[chunk] [ literal[int] ]. identifier[strip] (). identifier[split] ()
identifier[result] ={ literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ]}
keyword[return] identifier[result] | def sjuncChunk(key, chunk):
"""
Parse Super Junction (SJUNC) Chunk Method
"""
schunk = chunk[0].strip().split()
result = {'sjuncNumber': schunk[1], 'groundSurfaceElev': schunk[2], 'invertElev': schunk[3], 'manholeSA': schunk[4], 'inletCode': schunk[5], 'linkOrCellI': schunk[6], 'nodeOrCellJ': schunk[7], 'weirSideLength': schunk[8], 'orificeDiameter': schunk[9]}
return result |
def _in_tag(self, tagname, attributes=None):
"""
Determine if we are already in a certain tag.
If we give attributes, make sure they match.
"""
node = self.cur_node
while not node is None:
if node.tag == tagname:
if attributes and node.attrib == attributes:
return True
elif attributes:
return False
return True
node = node.getparent()
return False | def function[_in_tag, parameter[self, tagname, attributes]]:
constant[
Determine if we are already in a certain tag.
If we give attributes, make sure they match.
]
variable[node] assign[=] name[self].cur_node
while <ast.UnaryOp object at 0x7da207f014b0> begin[:]
if compare[name[node].tag equal[==] name[tagname]] begin[:]
if <ast.BoolOp object at 0x7da207f02e00> begin[:]
return[constant[True]]
return[constant[True]]
variable[node] assign[=] call[name[node].getparent, parameter[]]
return[constant[False]] | keyword[def] identifier[_in_tag] ( identifier[self] , identifier[tagname] , identifier[attributes] = keyword[None] ):
literal[string]
identifier[node] = identifier[self] . identifier[cur_node]
keyword[while] keyword[not] identifier[node] keyword[is] keyword[None] :
keyword[if] identifier[node] . identifier[tag] == identifier[tagname] :
keyword[if] identifier[attributes] keyword[and] identifier[node] . identifier[attrib] == identifier[attributes] :
keyword[return] keyword[True]
keyword[elif] identifier[attributes] :
keyword[return] keyword[False]
keyword[return] keyword[True]
identifier[node] = identifier[node] . identifier[getparent] ()
keyword[return] keyword[False] | def _in_tag(self, tagname, attributes=None):
"""
Determine if we are already in a certain tag.
If we give attributes, make sure they match.
"""
node = self.cur_node
while not node is None:
if node.tag == tagname:
if attributes and node.attrib == attributes:
return True # depends on [control=['if'], data=[]]
elif attributes:
return False # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=[]]
node = node.getparent() # depends on [control=['while'], data=[]]
return False |
def _pkl_periodogram(lspinfo,
                     plotdpi=100,
                     override_pfmethod=None):
    '''This returns the periodogram plot PNG as base64, plus info as a dict.
    Parameters
    ----------
    lspinfo : dict
        This is an lspinfo dict containing results from a period-finding
        function. If it's from an astrobase period-finding function in
        periodbase, this will already be in the correct format. To use external
        period-finder results with this function, the `lspinfo` dict must be of
        the following form, with at least the keys listed below::
            {'periods': np.array of all periods searched by the period-finder,
             'lspvals': np.array of periodogram power value for each period,
             'bestperiod': a float value that is the period with the highest
                           peak in the periodogram, i.e. the most-likely actual
                           period,
             'method': a three-letter code naming the period-finder used; must
                       be one of the keys in the
                       `astrobase.periodbase.METHODLABELS` dict,
             'nbestperiods': a list of the periods corresponding to periodogram
                             peaks (`nbestlspvals` below) to annotate on the
                             periodogram plot so they can be called out
                             visually,
             'nbestlspvals': a list of the power values associated with
                             periodogram peaks to annotate on the periodogram
                             plot so they can be called out visually; should be
                             the same length as `nbestperiods` above}
        `nbestperiods` and `nbestlspvals` must have at least 5 elements each,
        e.g. describing the five 'best' (highest power) peaks in the
        periodogram.
    plotdpi : int
        The resolution in DPI of the output periodogram plot to make.
    override_pfmethod : str or None
        This is used to set a custom label for this periodogram
        method. Normally, this is taken from the 'method' key in the input
        `lspinfo` dict, but if you want to override the output method name,
        provide this as a string here. This can be useful if you have multiple
        results you want to incorporate into a checkplotdict from a single
        period-finder (e.g. if you ran BLS over several period ranges
        separately).
    Returns
    -------
    dict
        Returns a dict that contains the following items::
            {methodname: {'periods':the period array from lspinfo,
                          'lspval': the periodogram power array from lspinfo,
                          'bestperiod': the best period from lspinfo,
                          'nbestperiods': the 'nbestperiods' list from lspinfo,
                          'nbestlspvals': the 'nbestlspvals' list from lspinfo,
                          'periodogram': base64 encoded string representation of
                                         the periodogram plot}}
        The dict is returned in this format so it can be directly incorporated
        under the period-finder's label `methodname` in a checkplotdict, using
        Python's dict `update()` method.
    '''

    # get the appropriate plot ylabel
    pgramylabel = PLOTYLABELS[lspinfo['method']]

    # get the periods and lspvals from lspinfo
    periods = lspinfo['periods']
    lspvals = lspinfo['lspvals']
    bestperiod = lspinfo['bestperiod']
    nbestperiods = lspinfo['nbestperiods']
    nbestlspvals = lspinfo['nbestlspvals']

    # open the figure instance
    pgramfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)

    # make the plot
    plt.plot(periods,lspvals)
    plt.xscale('log',basex=10)
    plt.xlabel('Period [days]')
    plt.ylabel(pgramylabel)
    plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
                                 bestperiod)
    plt.title(plottitle)

    # show the best five peaks on the plot
    for xbestperiod, xbestpeak in zip(nbestperiods,
                                      nbestlspvals):
        plt.annotate('%.6f' % xbestperiod,
                     xy=(xbestperiod, xbestpeak), xycoords='data',
                     xytext=(0.0,25.0), textcoords='offset points',
                     arrowprops=dict(arrowstyle="->"),fontsize='14.0')

    # make a grid
    plt.grid(color='#a9a9a9',
             alpha=0.9,
             zorder=0,
             linewidth=1.0,
             linestyle=':')

    # render the figure into an in-memory PNG buffer
    pgrampng = StrIO()
    pgramfig.savefig(pgrampng,
                     pad_inches=0.0, format='png')
    plt.close()

    # encode the periodogram PNG to base64
    pgrampng.seek(0)
    pgramb64 = base64.b64encode(pgrampng.read())

    # close the stringio buffer
    pgrampng.close()

    # DRY fix: the original built the identical inner dict in two duplicated
    # if/else branches. Pick the dict key once, then construct the result.
    methodkey = override_pfmethod if override_pfmethod else lspinfo['method']

    # this is the dict to return
    checkplotdict = {
        methodkey: {
            'periods': periods,
            'lspvals': lspvals,
            'bestperiod': bestperiod,
            'nbestperiods': nbestperiods,
            'nbestlspvals': nbestlspvals,
            'periodogram': pgramb64,
        }
    }

    return checkplotdict
constant[This returns the periodogram plot PNG as base64, plus info as a dict.
Parameters
----------
lspinfo : dict
This is an lspinfo dict containing results from a period-finding
function. If it's from an astrobase period-finding function in
periodbase, this will already be in the correct format. To use external
period-finder results with this function, the `lspinfo` dict must be of
the following form, with at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 5 elements each,
e.g. describing the five 'best' (highest power) peaks in the
periodogram.
plotdpi : int
The resolution in DPI of the output periodogram plot to make.
override_pfmethod : str or None
This is used to set a custom label for this periodogram
method. Normally, this is taken from the 'method' key in the input
`lspinfo` dict, but if you want to override the output method name,
provide this as a string here. This can be useful if you have multiple
results you want to incorporate into a checkplotdict from a single
period-finder (e.g. if you ran BLS over several period ranges
separately).
Returns
-------
dict
Returns a dict that contains the following items::
{methodname: {'periods':the period array from lspinfo,
'lspval': the periodogram power array from lspinfo,
'bestperiod': the best period from lspinfo,
'nbestperiods': the 'nbestperiods' list from lspinfo,
'nbestlspvals': the 'nbestlspvals' list from lspinfo,
'periodogram': base64 encoded string representation of
the periodogram plot}}
The dict is returned in this format so it can be directly incorporated
under the period-finder's label `methodname` in a checkplotdict, using
Python's dict `update()` method.
]
variable[pgramylabel] assign[=] call[name[PLOTYLABELS]][call[name[lspinfo]][constant[method]]]
variable[periods] assign[=] call[name[lspinfo]][constant[periods]]
variable[lspvals] assign[=] call[name[lspinfo]][constant[lspvals]]
variable[bestperiod] assign[=] call[name[lspinfo]][constant[bestperiod]]
variable[nbestperiods] assign[=] call[name[lspinfo]][constant[nbestperiods]]
variable[nbestlspvals] assign[=] call[name[lspinfo]][constant[nbestlspvals]]
variable[pgramfig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].plot, parameter[name[periods], name[lspvals]]]
call[name[plt].xscale, parameter[constant[log]]]
call[name[plt].xlabel, parameter[constant[Period [days]]]]
call[name[plt].ylabel, parameter[name[pgramylabel]]]
variable[plottitle] assign[=] binary_operation[constant[%s - %.6f d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1aff4a350>, <ast.Name object at 0x7da1aff4a620>]]]
call[name[plt].title, parameter[name[plottitle]]]
for taget[tuple[[<ast.Name object at 0x7da1aff4a740>, <ast.Name object at 0x7da1aff4a710>]]] in starred[call[name[zip], parameter[name[nbestperiods], name[nbestlspvals]]]] begin[:]
call[name[plt].annotate, parameter[binary_operation[constant[%.6f] <ast.Mod object at 0x7da2590d6920> name[xbestperiod]]]]
call[name[plt].grid, parameter[]]
variable[pgrampng] assign[=] call[name[StrIO], parameter[]]
call[name[pgramfig].savefig, parameter[name[pgrampng]]]
call[name[plt].close, parameter[]]
call[name[pgrampng].seek, parameter[constant[0]]]
variable[pgramb64] assign[=] call[name[base64].b64encode, parameter[call[name[pgrampng].read, parameter[]]]]
call[name[pgrampng].close, parameter[]]
if <ast.UnaryOp object at 0x7da1aff493c0> begin[:]
variable[checkplotdict] assign[=] dictionary[[<ast.Subscript object at 0x7da1aff496f0>], [<ast.Dict object at 0x7da1aff496c0>]]
return[name[checkplotdict]] | keyword[def] identifier[_pkl_periodogram] ( identifier[lspinfo] ,
identifier[plotdpi] = literal[int] ,
identifier[override_pfmethod] = keyword[None] ):
literal[string]
identifier[pgramylabel] = identifier[PLOTYLABELS] [ identifier[lspinfo] [ literal[string] ]]
identifier[periods] = identifier[lspinfo] [ literal[string] ]
identifier[lspvals] = identifier[lspinfo] [ literal[string] ]
identifier[bestperiod] = identifier[lspinfo] [ literal[string] ]
identifier[nbestperiods] = identifier[lspinfo] [ literal[string] ]
identifier[nbestlspvals] = identifier[lspinfo] [ literal[string] ]
identifier[pgramfig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] ), identifier[dpi] = identifier[plotdpi] )
identifier[plt] . identifier[plot] ( identifier[periods] , identifier[lspvals] )
identifier[plt] . identifier[xscale] ( literal[string] , identifier[basex] = literal[int] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( identifier[pgramylabel] )
identifier[plottitle] = literal[string] %( identifier[METHODLABELS] [ identifier[lspinfo] [ literal[string] ]],
identifier[bestperiod] )
identifier[plt] . identifier[title] ( identifier[plottitle] )
keyword[for] identifier[xbestperiod] , identifier[xbestpeak] keyword[in] identifier[zip] ( identifier[nbestperiods] ,
identifier[nbestlspvals] ):
identifier[plt] . identifier[annotate] ( literal[string] % identifier[xbestperiod] ,
identifier[xy] =( identifier[xbestperiod] , identifier[xbestpeak] ), identifier[xycoords] = literal[string] ,
identifier[xytext] =( literal[int] , literal[int] ), identifier[textcoords] = literal[string] ,
identifier[arrowprops] = identifier[dict] ( identifier[arrowstyle] = literal[string] ), identifier[fontsize] = literal[string] )
identifier[plt] . identifier[grid] ( identifier[color] = literal[string] ,
identifier[alpha] = literal[int] ,
identifier[zorder] = literal[int] ,
identifier[linewidth] = literal[int] ,
identifier[linestyle] = literal[string] )
identifier[pgrampng] = identifier[StrIO] ()
identifier[pgramfig] . identifier[savefig] ( identifier[pgrampng] ,
identifier[pad_inches] = literal[int] , identifier[format] = literal[string] )
identifier[plt] . identifier[close] ()
identifier[pgrampng] . identifier[seek] ( literal[int] )
identifier[pgramb64] = identifier[base64] . identifier[b64encode] ( identifier[pgrampng] . identifier[read] ())
identifier[pgrampng] . identifier[close] ()
keyword[if] keyword[not] identifier[override_pfmethod] :
identifier[checkplotdict] ={
identifier[lspinfo] [ literal[string] ]:{
literal[string] : identifier[periods] ,
literal[string] : identifier[lspvals] ,
literal[string] : identifier[bestperiod] ,
literal[string] : identifier[nbestperiods] ,
literal[string] : identifier[nbestlspvals] ,
literal[string] : identifier[pgramb64] ,
}
}
keyword[else] :
identifier[checkplotdict] ={
identifier[override_pfmethod] :{
literal[string] : identifier[periods] ,
literal[string] : identifier[lspvals] ,
literal[string] : identifier[bestperiod] ,
literal[string] : identifier[nbestperiods] ,
literal[string] : identifier[nbestlspvals] ,
literal[string] : identifier[pgramb64] ,
}
}
keyword[return] identifier[checkplotdict] | def _pkl_periodogram(lspinfo, plotdpi=100, override_pfmethod=None):
"""This returns the periodogram plot PNG as base64, plus info as a dict.
Parameters
----------
lspinfo : dict
This is an lspinfo dict containing results from a period-finding
function. If it's from an astrobase period-finding function in
periodbase, this will already be in the correct format. To use external
period-finder results with this function, the `lspinfo` dict must be of
the following form, with at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 5 elements each,
e.g. describing the five 'best' (highest power) peaks in the
periodogram.
plotdpi : int
The resolution in DPI of the output periodogram plot to make.
override_pfmethod : str or None
This is used to set a custom label for this periodogram
method. Normally, this is taken from the 'method' key in the input
`lspinfo` dict, but if you want to override the output method name,
provide this as a string here. This can be useful if you have multiple
results you want to incorporate into a checkplotdict from a single
period-finder (e.g. if you ran BLS over several period ranges
separately).
Returns
-------
dict
Returns a dict that contains the following items::
{methodname: {'periods':the period array from lspinfo,
'lspval': the periodogram power array from lspinfo,
'bestperiod': the best period from lspinfo,
'nbestperiods': the 'nbestperiods' list from lspinfo,
'nbestlspvals': the 'nbestlspvals' list from lspinfo,
'periodogram': base64 encoded string representation of
the periodogram plot}}
The dict is returned in this format so it can be directly incorporated
under the period-finder's label `methodname` in a checkplotdict, using
Python's dict `update()` method.
"""
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# open the figure instance
pgramfig = plt.figure(figsize=(7.5, 4.8), dpi=plotdpi)
# make the plot
plt.plot(periods, lspvals)
plt.xscale('log', basex=10)
plt.xlabel('Period [days]')
plt.ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']], bestperiod)
plt.title(plottitle)
# show the best five peaks on the plot
for (xbestperiod, xbestpeak) in zip(nbestperiods, nbestlspvals):
plt.annotate('%.6f' % xbestperiod, xy=(xbestperiod, xbestpeak), xycoords='data', xytext=(0.0, 25.0), textcoords='offset points', arrowprops=dict(arrowstyle='->'), fontsize='14.0') # depends on [control=['for'], data=[]]
# make a grid
plt.grid(color='#a9a9a9', alpha=0.9, zorder=0, linewidth=1.0, linestyle=':')
# this is the output instance
pgrampng = StrIO()
# bbox_inches='tight',
pgramfig.savefig(pgrampng, pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
pgrampng.seek(0)
pgramb64 = base64.b64encode(pgrampng.read())
# close the stringio buffer
pgrampng.close()
if not override_pfmethod:
# this is the dict to return
checkplotdict = {lspinfo['method']: {'periods': periods, 'lspvals': lspvals, 'bestperiod': bestperiod, 'nbestperiods': nbestperiods, 'nbestlspvals': nbestlspvals, 'periodogram': pgramb64}} # depends on [control=['if'], data=[]]
else:
# this is the dict to return
checkplotdict = {override_pfmethod: {'periods': periods, 'lspvals': lspvals, 'bestperiod': bestperiod, 'nbestperiods': nbestperiods, 'nbestlspvals': nbestlspvals, 'periodogram': pgramb64}}
return checkplotdict |
def _validate_prepostcmd_hook(cls, func: Callable, data_type: Type) -> None:
"""Check parameter and return types for pre and post command hooks."""
signature = inspect.signature(func)
# validate that the callable has the right number of parameters
cls._validate_callable_param_count(func, 1)
# validate the parameter has the right annotation
paramname = list(signature.parameters.keys())[0]
param = signature.parameters[paramname]
if param.annotation != data_type:
raise TypeError('argument 1 of {} has incompatible type {}, expected {}'.format(
func.__name__,
param.annotation,
data_type,
))
# validate the return value has the right annotation
if signature.return_annotation == signature.empty:
raise TypeError('{} does not have a declared return type, expected {}'.format(
func.__name__,
data_type,
))
if signature.return_annotation != data_type:
raise TypeError('{} has incompatible return type {}, expected {}'.format(
func.__name__,
signature.return_annotation,
data_type,
)) | def function[_validate_prepostcmd_hook, parameter[cls, func, data_type]]:
constant[Check parameter and return types for pre and post command hooks.]
variable[signature] assign[=] call[name[inspect].signature, parameter[name[func]]]
call[name[cls]._validate_callable_param_count, parameter[name[func], constant[1]]]
variable[paramname] assign[=] call[call[name[list], parameter[call[name[signature].parameters.keys, parameter[]]]]][constant[0]]
variable[param] assign[=] call[name[signature].parameters][name[paramname]]
if compare[name[param].annotation not_equal[!=] name[data_type]] begin[:]
<ast.Raise object at 0x7da204347e20>
if compare[name[signature].return_annotation equal[==] name[signature].empty] begin[:]
<ast.Raise object at 0x7da204346290>
if compare[name[signature].return_annotation not_equal[!=] name[data_type]] begin[:]
<ast.Raise object at 0x7da204346710> | keyword[def] identifier[_validate_prepostcmd_hook] ( identifier[cls] , identifier[func] : identifier[Callable] , identifier[data_type] : identifier[Type] )-> keyword[None] :
literal[string]
identifier[signature] = identifier[inspect] . identifier[signature] ( identifier[func] )
identifier[cls] . identifier[_validate_callable_param_count] ( identifier[func] , literal[int] )
identifier[paramname] = identifier[list] ( identifier[signature] . identifier[parameters] . identifier[keys] ())[ literal[int] ]
identifier[param] = identifier[signature] . identifier[parameters] [ identifier[paramname] ]
keyword[if] identifier[param] . identifier[annotation] != identifier[data_type] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] (
identifier[func] . identifier[__name__] ,
identifier[param] . identifier[annotation] ,
identifier[data_type] ,
))
keyword[if] identifier[signature] . identifier[return_annotation] == identifier[signature] . identifier[empty] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] (
identifier[func] . identifier[__name__] ,
identifier[data_type] ,
))
keyword[if] identifier[signature] . identifier[return_annotation] != identifier[data_type] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] (
identifier[func] . identifier[__name__] ,
identifier[signature] . identifier[return_annotation] ,
identifier[data_type] ,
)) | def _validate_prepostcmd_hook(cls, func: Callable, data_type: Type) -> None:
"""Check parameter and return types for pre and post command hooks."""
signature = inspect.signature(func)
# validate that the callable has the right number of parameters
cls._validate_callable_param_count(func, 1)
# validate the parameter has the right annotation
paramname = list(signature.parameters.keys())[0]
param = signature.parameters[paramname]
if param.annotation != data_type:
raise TypeError('argument 1 of {} has incompatible type {}, expected {}'.format(func.__name__, param.annotation, data_type)) # depends on [control=['if'], data=['data_type']]
# validate the return value has the right annotation
if signature.return_annotation == signature.empty:
raise TypeError('{} does not have a declared return type, expected {}'.format(func.__name__, data_type)) # depends on [control=['if'], data=[]]
if signature.return_annotation != data_type:
raise TypeError('{} has incompatible return type {}, expected {}'.format(func.__name__, signature.return_annotation, data_type)) # depends on [control=['if'], data=['data_type']] |
def _matrix2dict(matrix, etype=False):
"""Takes an adjacency matrix and returns an adjacency list."""
n = len(matrix)
adj = {k: {} for k in range(n)}
for k in range(n):
for j in range(n):
if matrix[k, j] != 0:
adj[k][j] = {} if not etype else matrix[k, j]
return adj | def function[_matrix2dict, parameter[matrix, etype]]:
constant[Takes an adjacency matrix and returns an adjacency list.]
variable[n] assign[=] call[name[len], parameter[name[matrix]]]
variable[adj] assign[=] <ast.DictComp object at 0x7da1b0051720>
for taget[name[k]] in starred[call[name[range], parameter[name[n]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[name[n]]]] begin[:]
if compare[call[name[matrix]][tuple[[<ast.Name object at 0x7da1b00550f0>, <ast.Name object at 0x7da1b00575e0>]]] not_equal[!=] constant[0]] begin[:]
call[call[name[adj]][name[k]]][name[j]] assign[=] <ast.IfExp object at 0x7da1b00551e0>
return[name[adj]] | keyword[def] identifier[_matrix2dict] ( identifier[matrix] , identifier[etype] = keyword[False] ):
literal[string]
identifier[n] = identifier[len] ( identifier[matrix] )
identifier[adj] ={ identifier[k] :{} keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[n] )}
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[n] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[n] ):
keyword[if] identifier[matrix] [ identifier[k] , identifier[j] ]!= literal[int] :
identifier[adj] [ identifier[k] ][ identifier[j] ]={} keyword[if] keyword[not] identifier[etype] keyword[else] identifier[matrix] [ identifier[k] , identifier[j] ]
keyword[return] identifier[adj] | def _matrix2dict(matrix, etype=False):
"""Takes an adjacency matrix and returns an adjacency list."""
n = len(matrix)
adj = {k: {} for k in range(n)}
for k in range(n):
for j in range(n):
if matrix[k, j] != 0:
adj[k][j] = {} if not etype else matrix[k, j] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['k']]
return adj |
def _set_l4protocol(self, v, load=False):
    """
    Setter method for l4protocol, mapped from YANG variable /brocade_trilloam_rpc/l2traceroute/input/l4protocol (enumeration)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_l4protocol is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_l4protocol() directly.
    YANG Description: Layer 4 protocol, TCP or UDP
    """
    # Unwrap values that carry their own YANG type converter (e.g. a value
    # copied from another YANG-typed object).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce into the generated enumeration type: only the keys
        # 'TCP' (value 1) and 'UDP' (value 2) are accepted.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'UDP': {'value': 2}, u'TCP': {'value': 1}},), is_leaf=True, yang_name="l4protocol", rest_name="l4protocol", parent=self, choice=(u'protocolType', u'IP'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-trilloam', defining_module='brocade-trilloam', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as ValueError with a structured payload describing the
        # expected YANG type, as pyangbind-generated code conventionally does.
        raise ValueError({
            'error-string': """l4protocol must be of a type compatible with enumeration""",
            'defined-type': "brocade-trilloam:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'UDP': {'value': 2}, u'TCP': {'value': 1}},), is_leaf=True, yang_name="l4protocol", rest_name="l4protocol", parent=self, choice=(u'protocolType', u'IP'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-trilloam', defining_module='brocade-trilloam', yang_type='enumeration', is_config=True)""",
        })
    # Name-mangled storage slot read back by the matching getter.
    self.__l4protocol = t
    # NOTE(review): some pyangbind backends register a change hook as
    # '_set'; invoke it if present so listeners see the update.
    if hasattr(self, '_set'):
        self._set()
constant[
Setter method for l4protocol, mapped from YANG variable /brocade_trilloam_rpc/l2traceroute/input/l4protocol (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_l4protocol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_l4protocol() directly.
YANG Description: Layer 4 protocol, TCP or UDP
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20e9b0a00>
name[self].__l4protocol assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_l4protocol] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }},), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[choice] =( literal[string] , literal[string] ), identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[False] , identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__l4protocol] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_l4protocol(self, v, load=False):
"""
Setter method for l4protocol, mapped from YANG variable /brocade_trilloam_rpc/l2traceroute/input/l4protocol (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_l4protocol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_l4protocol() directly.
YANG Description: Layer 4 protocol, TCP or UDP
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'UDP': {'value': 2}, u'TCP': {'value': 1}}), is_leaf=True, yang_name='l4protocol', rest_name='l4protocol', parent=self, choice=(u'protocolType', u'IP'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-trilloam', defining_module='brocade-trilloam', yang_type='enumeration', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'l4protocol must be of a type compatible with enumeration', 'defined-type': 'brocade-trilloam:enumeration', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'UDP\': {\'value\': 2}, u\'TCP\': {\'value\': 1}},), is_leaf=True, yang_name="l4protocol", rest_name="l4protocol", parent=self, choice=(u\'protocolType\', u\'IP\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace=\'urn:brocade.com:mgmt:brocade-trilloam\', defining_module=\'brocade-trilloam\', yang_type=\'enumeration\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__l4protocol = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def _make_area_from_coords(self, coords):
"""Create an appropriate area with the given *coords*."""
if len(coords) == 2:
lon_sn = coords[0].attrs.get('standard_name')
lat_sn = coords[1].attrs.get('standard_name')
if lon_sn == 'longitude' and lat_sn == 'latitude':
key = None
try:
key = (coords[0].data.name, coords[1].data.name)
sdef = self.coords_cache.get(key)
except AttributeError:
sdef = None
if sdef is None:
sdef = SwathDefinition(*coords)
if key is not None:
self.coords_cache[key] = sdef
sensor_str = '_'.join(self.info['sensors'])
shape_str = '_'.join(map(str, coords[0].shape))
sdef.name = "{}_{}_{}_{}".format(sensor_str, shape_str,
coords[0].attrs['name'],
coords[1].attrs['name'])
return sdef
else:
raise ValueError(
'Coordinates info object missing standard_name key: ' +
str(coords))
elif len(coords) != 0:
raise NameError("Don't know what to do with coordinates " + str(
coords)) | def function[_make_area_from_coords, parameter[self, coords]]:
constant[Create an appropriate area with the given *coords*.]
if compare[call[name[len], parameter[name[coords]]] equal[==] constant[2]] begin[:]
variable[lon_sn] assign[=] call[call[name[coords]][constant[0]].attrs.get, parameter[constant[standard_name]]]
variable[lat_sn] assign[=] call[call[name[coords]][constant[1]].attrs.get, parameter[constant[standard_name]]]
if <ast.BoolOp object at 0x7da1b1d6ff40> begin[:]
variable[key] assign[=] constant[None]
<ast.Try object at 0x7da1b1d6f5e0>
if compare[name[sdef] is constant[None]] begin[:]
variable[sdef] assign[=] call[name[SwathDefinition], parameter[<ast.Starred object at 0x7da1b1d6eef0>]]
if compare[name[key] is_not constant[None]] begin[:]
call[name[self].coords_cache][name[key]] assign[=] name[sdef]
variable[sensor_str] assign[=] call[constant[_].join, parameter[call[name[self].info][constant[sensors]]]]
variable[shape_str] assign[=] call[constant[_].join, parameter[call[name[map], parameter[name[str], call[name[coords]][constant[0]].shape]]]]
name[sdef].name assign[=] call[constant[{}_{}_{}_{}].format, parameter[name[sensor_str], name[shape_str], call[call[name[coords]][constant[0]].attrs][constant[name]], call[call[name[coords]][constant[1]].attrs][constant[name]]]]
return[name[sdef]] | keyword[def] identifier[_make_area_from_coords] ( identifier[self] , identifier[coords] ):
literal[string]
keyword[if] identifier[len] ( identifier[coords] )== literal[int] :
identifier[lon_sn] = identifier[coords] [ literal[int] ]. identifier[attrs] . identifier[get] ( literal[string] )
identifier[lat_sn] = identifier[coords] [ literal[int] ]. identifier[attrs] . identifier[get] ( literal[string] )
keyword[if] identifier[lon_sn] == literal[string] keyword[and] identifier[lat_sn] == literal[string] :
identifier[key] = keyword[None]
keyword[try] :
identifier[key] =( identifier[coords] [ literal[int] ]. identifier[data] . identifier[name] , identifier[coords] [ literal[int] ]. identifier[data] . identifier[name] )
identifier[sdef] = identifier[self] . identifier[coords_cache] . identifier[get] ( identifier[key] )
keyword[except] identifier[AttributeError] :
identifier[sdef] = keyword[None]
keyword[if] identifier[sdef] keyword[is] keyword[None] :
identifier[sdef] = identifier[SwathDefinition] (* identifier[coords] )
keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[coords_cache] [ identifier[key] ]= identifier[sdef]
identifier[sensor_str] = literal[string] . identifier[join] ( identifier[self] . identifier[info] [ literal[string] ])
identifier[shape_str] = literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[coords] [ literal[int] ]. identifier[shape] ))
identifier[sdef] . identifier[name] = literal[string] . identifier[format] ( identifier[sensor_str] , identifier[shape_str] ,
identifier[coords] [ literal[int] ]. identifier[attrs] [ literal[string] ],
identifier[coords] [ literal[int] ]. identifier[attrs] [ literal[string] ])
keyword[return] identifier[sdef]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] +
identifier[str] ( identifier[coords] ))
keyword[elif] identifier[len] ( identifier[coords] )!= literal[int] :
keyword[raise] identifier[NameError] ( literal[string] + identifier[str] (
identifier[coords] )) | def _make_area_from_coords(self, coords):
"""Create an appropriate area with the given *coords*."""
if len(coords) == 2:
lon_sn = coords[0].attrs.get('standard_name')
lat_sn = coords[1].attrs.get('standard_name')
if lon_sn == 'longitude' and lat_sn == 'latitude':
key = None
try:
key = (coords[0].data.name, coords[1].data.name)
sdef = self.coords_cache.get(key) # depends on [control=['try'], data=[]]
except AttributeError:
sdef = None # depends on [control=['except'], data=[]]
if sdef is None:
sdef = SwathDefinition(*coords)
if key is not None:
self.coords_cache[key] = sdef # depends on [control=['if'], data=['key']] # depends on [control=['if'], data=['sdef']]
sensor_str = '_'.join(self.info['sensors'])
shape_str = '_'.join(map(str, coords[0].shape))
sdef.name = '{}_{}_{}_{}'.format(sensor_str, shape_str, coords[0].attrs['name'], coords[1].attrs['name'])
return sdef # depends on [control=['if'], data=[]]
else:
raise ValueError('Coordinates info object missing standard_name key: ' + str(coords)) # depends on [control=['if'], data=[]]
elif len(coords) != 0:
raise NameError("Don't know what to do with coordinates " + str(coords)) # depends on [control=['if'], data=[]] |
def allow_exception(self, exc_class):
    """Allow raising this class of exceptions from commands.

    By default a server-side command failure reaches the client as an
    ExternalError built from the exception's string form; the original
    class name travels along but is ignored.  Registering a class here
    makes the client reraise an instance of that same class instead,
    matched by class name.

    The registered class must be constructible from a single string
    argument and should expose a ``msg`` property.

    Args:
        exc_class (class): exception class that is allowed to pass
            from server to client.
    """
    self._allowed_exceptions[exc_class.__name__] = exc_class
constant[Allow raising this class of exceptions from commands.
When a command fails on the server side due to an exception, by
default it is turned into a string and raised on the client side as an
ExternalError. The original class name is sent but ignored. If you
would like to instead raise an instance of the same exception on the
client side, you can pass the exception class object to this method
and instances of that exception will be reraised.
The caveat is that the exception must be creatable with a single
string parameter and it should have a ``msg`` property.
Args:
exc_class (class): A class object with the exception that
we should allow to pass from server to client.
]
variable[name] assign[=] name[exc_class].__name__
call[name[self]._allowed_exceptions][name[name]] assign[=] name[exc_class] | keyword[def] identifier[allow_exception] ( identifier[self] , identifier[exc_class] ):
literal[string]
identifier[name] = identifier[exc_class] . identifier[__name__]
identifier[self] . identifier[_allowed_exceptions] [ identifier[name] ]= identifier[exc_class] | def allow_exception(self, exc_class):
"""Allow raising this class of exceptions from commands.
When a command fails on the server side due to an exception, by
default it is turned into a string and raised on the client side as an
ExternalError. The original class name is sent but ignored. If you
would like to instead raise an instance of the same exception on the
client side, you can pass the exception class object to this method
and instances of that exception will be reraised.
The caveat is that the exception must be creatable with a single
string parameter and it should have a ``msg`` property.
Args:
exc_class (class): A class object with the exception that
we should allow to pass from server to client.
"""
name = exc_class.__name__
self._allowed_exceptions[name] = exc_class |
def _query_params(self):
"""Default query parameters."""
params = {}
if self.generation is not None:
params["generation"] = self.generation
if self.user_project is not None:
params["userProject"] = self.user_project
return params | def function[_query_params, parameter[self]]:
constant[Default query parameters.]
variable[params] assign[=] dictionary[[], []]
if compare[name[self].generation is_not constant[None]] begin[:]
call[name[params]][constant[generation]] assign[=] name[self].generation
if compare[name[self].user_project is_not constant[None]] begin[:]
call[name[params]][constant[userProject]] assign[=] name[self].user_project
return[name[params]] | keyword[def] identifier[_query_params] ( identifier[self] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[self] . identifier[generation] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[self] . identifier[generation]
keyword[if] identifier[self] . identifier[user_project] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[self] . identifier[user_project]
keyword[return] identifier[params] | def _query_params(self):
"""Default query parameters."""
params = {}
if self.generation is not None:
params['generation'] = self.generation # depends on [control=['if'], data=[]]
if self.user_project is not None:
params['userProject'] = self.user_project # depends on [control=['if'], data=[]]
return params |
def debug_pre(f, *args, **kwargs):
    """Automatically log progress on function entry. Logging value: debug.

    *Logging with values contained in the parameters of the decorated function*
    Message (args[0]) may be a string to be formatted with parameters passed to
    the decorated function. Each '{varname}' will be replaced by the value of
    the parameter of the same name.

    *Exceptions:*
    - IndexError and ValueError
    - will be returned if *args contains a string that does not correspond to
    a parameter name of the decorated function, or if there are more '{}'s
    than there are *args.
    """
    # Force debug level and entry-only (prefix) logging, then delegate
    # to the shared decorator builder.
    kwargs['log'] = logging.DEBUG
    kwargs['prefix_only'] = True
    return _stump(f, *args, **kwargs)
constant[Automatically log progress on function entry. Logging value: debug.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da18f09ebc0>], [<ast.Attribute object at 0x7da18f09db70>]]]]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da18f09cee0>], [<ast.Constant object at 0x7da18f09ceb0>]]]]
return[call[name[_stump], parameter[name[f], <ast.Starred object at 0x7da20c76d8a0>]]] | keyword[def] identifier[debug_pre] ( identifier[f] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] . identifier[update] ({ literal[string] : identifier[logging] . identifier[DEBUG] })
identifier[kwargs] . identifier[update] ({ literal[string] : keyword[True] })
keyword[return] identifier[_stump] ( identifier[f] ,* identifier[args] ,** identifier[kwargs] ) | def debug_pre(f, *args, **kwargs):
"""Automatically log progress on function entry. Logging value: debug.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
"""
kwargs.update({'log': logging.DEBUG})
kwargs.update({'prefix_only': True})
return _stump(f, *args, **kwargs) |
def _check_holiday_structure(self, times):
""" To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
"""
if not isinstance(times, list):
raise TypeError("an list is required")
for time in times:
if not isinstance(time, tuple):
raise TypeError("a tuple is required")
if len(time) > 5:
raise TypeError("Target time takes at most 5 arguments"
" ('%d' given)" % len(time))
if len(time) < 5:
raise TypeError("Required argument '%s' (pos '%d')"
" not found" % (TIME_LABEL[len(time)], len(time)))
self._check_time_format(TIME_LABEL, time) | def function[_check_holiday_structure, parameter[self, times]]:
constant[ To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
]
if <ast.UnaryOp object at 0x7da1b1fbb310> begin[:]
<ast.Raise object at 0x7da1b1fba890>
for taget[name[time]] in starred[name[times]] begin[:]
if <ast.UnaryOp object at 0x7da1b1e08550> begin[:]
<ast.Raise object at 0x7da1b1e08ca0>
if compare[call[name[len], parameter[name[time]]] greater[>] constant[5]] begin[:]
<ast.Raise object at 0x7da1b1e0a9e0>
if compare[call[name[len], parameter[name[time]]] less[<] constant[5]] begin[:]
<ast.Raise object at 0x7da1b1e0b400>
call[name[self]._check_time_format, parameter[name[TIME_LABEL], name[time]]] | keyword[def] identifier[_check_holiday_structure] ( identifier[self] , identifier[times] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[times] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[for] identifier[time] keyword[in] identifier[times] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[time] , identifier[tuple] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[len] ( identifier[time] )> literal[int] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] % identifier[len] ( identifier[time] ))
keyword[if] identifier[len] ( identifier[time] )< literal[int] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] %( identifier[TIME_LABEL] [ identifier[len] ( identifier[time] )], identifier[len] ( identifier[time] )))
identifier[self] . identifier[_check_time_format] ( identifier[TIME_LABEL] , identifier[time] ) | def _check_holiday_structure(self, times):
""" To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
"""
if not isinstance(times, list):
raise TypeError('an list is required') # depends on [control=['if'], data=[]]
for time in times:
if not isinstance(time, tuple):
raise TypeError('a tuple is required') # depends on [control=['if'], data=[]]
if len(time) > 5:
raise TypeError("Target time takes at most 5 arguments ('%d' given)" % len(time)) # depends on [control=['if'], data=[]]
if len(time) < 5:
raise TypeError("Required argument '%s' (pos '%d') not found" % (TIME_LABEL[len(time)], len(time))) # depends on [control=['if'], data=[]]
self._check_time_format(TIME_LABEL, time) # depends on [control=['for'], data=['time']] |
def encode(self, fd, mtu, data):
    """
    Encode the supplied data (byte array) and write to
    the media transport file descriptor encapsulated
    as RTP packets. The encoder will calculate the
    required number of SBC frames and encapsulate as
    RTP to fit the MTU size.

    :param int fd: Media transport file descriptor
    :param int mtu: Media transport MTU size as returned
        when the media transport was acquired.
    :param array{byte} data: Data to encode and send
        over the media transport.
    :return:
    """
    # Delegate the whole encode-and-send step to the native codec helper,
    # passing a C copy of the payload plus the RTP timestamp (self.ts) and
    # sequence number (self.seq_num) state kept on this object.
    self.codec.rtp_sbc_encode_to_fd(self.config,
                                    ffi.new('char[]',
                                            data),
                                    len(data),
                                    mtu,
                                    self.ts,
                                    self.seq_num,
                                    fd)
constant[
Encode the supplied data (byte array) and write to
the media transport file descriptor encapsulated
as RTP packets. The encoder will calculate the
required number of SBC frames and encapsulate as
RTP to fit the MTU size.
:param int fd: Media transport file descriptor
:param int mtu: Media transport MTU size as returned
when the media transport was acquired.
:param array{byte} data: Data to encode and send
over the media transport.
:return:
]
call[name[self].codec.rtp_sbc_encode_to_fd, parameter[name[self].config, call[name[ffi].new, parameter[constant[char[]], name[data]]], call[name[len], parameter[name[data]]], name[mtu], name[self].ts, name[self].seq_num, name[fd]]] | keyword[def] identifier[encode] ( identifier[self] , identifier[fd] , identifier[mtu] , identifier[data] ):
literal[string]
identifier[self] . identifier[codec] . identifier[rtp_sbc_encode_to_fd] ( identifier[self] . identifier[config] ,
identifier[ffi] . identifier[new] ( literal[string] ,
identifier[data] ),
identifier[len] ( identifier[data] ),
identifier[mtu] ,
identifier[self] . identifier[ts] ,
identifier[self] . identifier[seq_num] ,
identifier[fd] ) | def encode(self, fd, mtu, data):
"""
Encode the supplied data (byte array) and write to
the media transport file descriptor encapsulated
as RTP packets. The encoder will calculate the
required number of SBC frames and encapsulate as
RTP to fit the MTU size.
:param int fd: Media transport file descriptor
:param int mtu: Media transport MTU size as returned
when the media transport was acquired.
:param array{byte} data: Data to encode and send
over the media transport.
:return:
"""
self.codec.rtp_sbc_encode_to_fd(self.config, ffi.new('char[]', data), len(data), mtu, self.ts, self.seq_num, fd) |
def is_visible(self, selector):
    """Check if an element is visible in the dom or not

    This method will check if the element is displayed or not

    This method might (according
    to the config highlight:element_is_visible)
    highlight the element if it is visible

    This method won't wait until the element is visible or present

    This method won't raise any exception if the element is not visible

    Returns:
        bool: True if the element is visible; False otherwise
    """
    self.debug_log("Is visible (%s)" % selector)

    element = self.find(
        selector,
        raise_exception=False,
        wait_until_present=False,
        wait_until_visible=False
    )

    # Visible means: found in the DOM and currently displayed.
    if element and element.is_displayed(raise_exception=False):
        element.highlight(
            style=BROME_CONFIG['highlight']['element_is_visible']
        )
        self.debug_log("is visible (%s): True" % selector)
        return True

    self.debug_log("is visible (%s): False" % selector)
    return False
constant[Check if an element is visible in the dom or not
This method will check if the element is displayed or not
This method might (according
to the config highlight:element_is_visible)
highlight the element if it is visible
This method won't wait until the element is visible or present
This method won't raise any exception if the element is not visible
Returns:
bool: True if the element is visible; False otherwise
]
call[name[self].debug_log, parameter[binary_operation[constant[Is visible (%s)] <ast.Mod object at 0x7da2590d6920> name[selector]]]]
variable[element] assign[=] call[name[self].find, parameter[name[selector]]]
if name[element] begin[:]
if call[name[element].is_displayed, parameter[]] begin[:]
call[name[element].highlight, parameter[]]
call[name[self].debug_log, parameter[binary_operation[constant[is visible (%s): True] <ast.Mod object at 0x7da2590d6920> name[selector]]]]
return[constant[True]]
call[name[self].debug_log, parameter[binary_operation[constant[is visible (%s): False] <ast.Mod object at 0x7da2590d6920> name[selector]]]]
return[constant[False]] | keyword[def] identifier[is_visible] ( identifier[self] , identifier[selector] ):
literal[string]
identifier[self] . identifier[debug_log] ( literal[string] % identifier[selector] )
identifier[element] = identifier[self] . identifier[find] (
identifier[selector] ,
identifier[raise_exception] = keyword[False] ,
identifier[wait_until_present] = keyword[False] ,
identifier[wait_until_visible] = keyword[False]
)
keyword[if] identifier[element] :
keyword[if] identifier[element] . identifier[is_displayed] ( identifier[raise_exception] = keyword[False] ):
identifier[element] . identifier[highlight] (
identifier[style] = identifier[BROME_CONFIG] [ literal[string] ][ literal[string] ]
)
identifier[self] . identifier[debug_log] ( literal[string] % identifier[selector] )
keyword[return] keyword[True]
identifier[self] . identifier[debug_log] ( literal[string] % identifier[selector] )
keyword[return] keyword[False] | def is_visible(self, selector):
"""Check if an element is visible in the dom or not
This method will check if the element is displayed or not
This method might (according
to the config highlight:element_is_visible)
highlight the element if it is visible
This method won't wait until the element is visible or present
This method won't raise any exception if the element is not visible
Returns:
bool: True if the element is visible; False otherwise
"""
self.debug_log('Is visible (%s)' % selector)
element = self.find(selector, raise_exception=False, wait_until_present=False, wait_until_visible=False)
if element:
if element.is_displayed(raise_exception=False):
element.highlight(style=BROME_CONFIG['highlight']['element_is_visible'])
self.debug_log('is visible (%s): True' % selector)
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.debug_log('is visible (%s): False' % selector)
return False |
def create(self, data, *args, **kwargs):
    """Creates an entity in Mambu

    This method must be implemented in child classes

    Args:
        data (dictionary): dictionary with data to send, this dictionary
                           is specific for each Mambu entity
    """
    # If the calling function's module differs from this object's module,
    # the child class did not override create(); refuse to run the
    # generic version.
    if self.create.__func__.__module__ != self.__module__:
        raise Exception("Child method not implemented")
    # Temporarily switch the (name-mangled) MambuStruct request state to
    # a POST with the given payload, perform the request, then restore
    # the default GET/no-data state so later connects behave normally.
    self._MambuStruct__method = "POST"
    self._MambuStruct__data = data
    self.connect(*args, **kwargs)
    self._MambuStruct__method = "GET"
    self._MambuStruct__data = None
constant[Creates an entity in Mambu
This method must be implemented in child classes
Args:
data (dictionary): dictionary with data to send, this dictionary
is specific for each Mambu entity
]
if compare[name[self].create.__func__.__module__ not_equal[!=] name[self].__module__] begin[:]
<ast.Raise object at 0x7da2054a7c70>
name[self]._MambuStruct__method assign[=] constant[POST]
name[self]._MambuStruct__data assign[=] name[data]
call[name[self].connect, parameter[<ast.Starred object at 0x7da2054a5bd0>]]
name[self]._MambuStruct__method assign[=] constant[GET]
name[self]._MambuStruct__data assign[=] constant[None] | keyword[def] identifier[create] ( identifier[self] , identifier[data] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[create] . identifier[__func__] . identifier[__module__] != identifier[self] . identifier[__module__] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[_MambuStruct__method] = literal[string]
identifier[self] . identifier[_MambuStruct__data] = identifier[data]
identifier[self] . identifier[connect] (* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[_MambuStruct__method] = literal[string]
identifier[self] . identifier[_MambuStruct__data] = keyword[None] | def create(self, data, *args, **kwargs):
"""Creates an entity in Mambu
This method must be implemented in child classes
Args:
data (dictionary): dictionary with data to send, this dictionary
is specific for each Mambu entity
"""
# if module of the function is diferent from the module of the object
# that means create is not implemented in child class
if self.create.__func__.__module__ != self.__module__:
raise Exception('Child method not implemented') # depends on [control=['if'], data=[]]
self._MambuStruct__method = 'POST'
self._MambuStruct__data = data
self.connect(*args, **kwargs)
self._MambuStruct__method = 'GET'
self._MambuStruct__data = None |
def is_valid_image(self, raw_data):
    """Return True if ImageMagick's identify accepts *raw_data* as an image.

    This is not very good for imagemagick because it will say anything is
    valid that it can use as input.

    :param bytes raw_data: raw image bytes to probe.
    :rtype: bool
    """
    with NamedTemporaryFile(mode='wb') as fp:
        fp.write(raw_data)
        fp.flush()
        args = settings.THUMBNAIL_IDENTIFY.split(' ')
        # '[0]' restricts identify to the first frame of multi-frame images.
        args.append(fp.name + '[0]')
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Drain both pipes before checking the exit status: calling
        # p.wait() alone with stdout/stderr=PIPE can deadlock if the
        # child fills a pipe buffer.
        p.communicate()
        return p.returncode == 0
constant[
This is not very good for imagemagick because it will say anything is
valid that it can use as input.
]
with call[name[NamedTemporaryFile], parameter[]] begin[:]
call[name[fp].write, parameter[name[raw_data]]]
call[name[fp].flush, parameter[]]
variable[args] assign[=] call[name[settings].THUMBNAIL_IDENTIFY.split, parameter[constant[ ]]]
call[name[args].append, parameter[binary_operation[name[fp].name + constant[[0]]]]]
variable[p] assign[=] call[name[subprocess].Popen, parameter[name[args]]]
variable[retcode] assign[=] call[name[p].wait, parameter[]]
return[compare[name[retcode] equal[==] constant[0]]] | keyword[def] identifier[is_valid_image] ( identifier[self] , identifier[raw_data] ):
literal[string]
keyword[with] identifier[NamedTemporaryFile] ( identifier[mode] = literal[string] ) keyword[as] identifier[fp] :
identifier[fp] . identifier[write] ( identifier[raw_data] )
identifier[fp] . identifier[flush] ()
identifier[args] = identifier[settings] . identifier[THUMBNAIL_IDENTIFY] . identifier[split] ( literal[string] )
identifier[args] . identifier[append] ( identifier[fp] . identifier[name] + literal[string] )
identifier[p] = identifier[subprocess] . identifier[Popen] ( identifier[args] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] )
identifier[retcode] = identifier[p] . identifier[wait] ()
keyword[return] identifier[retcode] == literal[int] | def is_valid_image(self, raw_data):
"""
This is not very good for imagemagick because it will say anything is
valid that it can use as input.
"""
with NamedTemporaryFile(mode='wb') as fp:
fp.write(raw_data)
fp.flush()
args = settings.THUMBNAIL_IDENTIFY.split(' ')
args.append(fp.name + '[0]')
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
retcode = p.wait() # depends on [control=['with'], data=['fp']]
return retcode == 0 |
def filter_format(filter_template, assertion_values):
    """Interpolate escaped assertion values into an LDAP filter template.

    filter_template
        Bytes string containing %s as placeholder for assertion values.
    assertion_values
        List or tuple of assertion values. Length must match
        count of %s in filter_template.
    """
    if not isinstance(filter_template, bytes):
        # Explicit check instead of ``assert`` so the guard is not
        # silently stripped when Python runs with optimizations (-O).
        raise TypeError(
            'filter_template must be bytes, got %r' % type(filter_template))
    # Escape each value so attacker-controlled input cannot inject
    # LDAP filter metacharacters.
    return filter_template % (
        tuple(map(escape_filter_chars, assertion_values)))
constant[
filter_template
String containing %s as placeholder for assertion values.
assertion_values
List or tuple of assertion values. Length must match
count of %s in filter_template.
]
assert[call[name[isinstance], parameter[name[filter_template], name[bytes]]]]
return[binary_operation[name[filter_template] <ast.Mod object at 0x7da2590d6920> call[name[tuple], parameter[call[name[map], parameter[name[escape_filter_chars], name[assertion_values]]]]]]] | keyword[def] identifier[filter_format] ( identifier[filter_template] , identifier[assertion_values] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[filter_template] , identifier[bytes] )
keyword[return] identifier[filter_template] %(
identifier[tuple] ( identifier[map] ( identifier[escape_filter_chars] , identifier[assertion_values] ))) | def filter_format(filter_template, assertion_values):
"""
filter_template
String containing %s as placeholder for assertion values.
assertion_values
List or tuple of assertion values. Length must match
count of %s in filter_template.
"""
assert isinstance(filter_template, bytes)
return filter_template % tuple(map(escape_filter_chars, assertion_values)) |
def load_tag_library(libname):
    """
    Load a templatetag library on multiple Django versions.
    Returns None if the library isn't loaded.
    """
    # Imported lazily so merely importing this module does not require
    # a configured Django environment.
    from django.template.backends.django import get_installed_libraries
    from django.template.library import InvalidTemplateLibrary
    try:
        # get_installed_libraries() maps library names to module paths;
        # the library object itself is the module's ``register`` attribute.
        module_path = get_installed_libraries()[libname]
        return importlib.import_module(module_path).register
    except (InvalidTemplateLibrary, KeyError):
        # Unknown or broken library: signal failure with None.
        return None
constant[
Load a templatetag library on multiple Django versions.
Returns None if the library isn't loaded.
]
from relative_module[django.template.backends.django] import module[get_installed_libraries]
from relative_module[django.template.library] import module[InvalidTemplateLibrary]
<ast.Try object at 0x7da1b1830910> | keyword[def] identifier[load_tag_library] ( identifier[libname] ):
literal[string]
keyword[from] identifier[django] . identifier[template] . identifier[backends] . identifier[django] keyword[import] identifier[get_installed_libraries]
keyword[from] identifier[django] . identifier[template] . identifier[library] keyword[import] identifier[InvalidTemplateLibrary]
keyword[try] :
identifier[lib] = identifier[get_installed_libraries] ()[ identifier[libname] ]
identifier[lib] = identifier[importlib] . identifier[import_module] ( identifier[lib] ). identifier[register]
keyword[return] identifier[lib]
keyword[except] ( identifier[InvalidTemplateLibrary] , identifier[KeyError] ):
keyword[return] keyword[None] | def load_tag_library(libname):
"""
Load a templatetag library on multiple Django versions.
Returns None if the library isn't loaded.
"""
from django.template.backends.django import get_installed_libraries
from django.template.library import InvalidTemplateLibrary
try:
lib = get_installed_libraries()[libname]
lib = importlib.import_module(lib).register
return lib # depends on [control=['try'], data=[]]
except (InvalidTemplateLibrary, KeyError):
return None # depends on [control=['except'], data=[]] |
def _timedelta_total_seconds(td):
"""Python 2.6 backward compatibility function for timedelta.total_seconds.
:type td: timedelta object
:param td: timedelta object
:rtype: float
:return: The total number of seconds for the given timedelta object.
"""
if hasattr(timedelta, "total_seconds"):
return getattr(td, "total_seconds")()
# Python 2.6 backward compatibility
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6) | def function[_timedelta_total_seconds, parameter[td]]:
constant[Python 2.6 backward compatibility function for timedelta.total_seconds.
:type td: timedelta object
:param td: timedelta object
:rtype: float
:return: The total number of seconds for the given timedelta object.
]
if call[name[hasattr], parameter[name[timedelta], constant[total_seconds]]] begin[:]
return[call[call[name[getattr], parameter[name[td], constant[total_seconds]]], parameter[]]]
return[binary_operation[binary_operation[name[td].microseconds + binary_operation[binary_operation[name[td].seconds + binary_operation[binary_operation[name[td].days * constant[24]] * constant[3600]]] * binary_operation[constant[10] ** constant[6]]]] / call[name[float], parameter[binary_operation[constant[10] ** constant[6]]]]]] | keyword[def] identifier[_timedelta_total_seconds] ( identifier[td] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[timedelta] , literal[string] ):
keyword[return] identifier[getattr] ( identifier[td] , literal[string] )()
keyword[return] ( identifier[td] . identifier[microseconds] +( identifier[td] . identifier[seconds] + identifier[td] . identifier[days] * literal[int] * literal[int] )* literal[int] ** literal[int] )/ identifier[float] ( literal[int] ** literal[int] ) | def _timedelta_total_seconds(td):
"""Python 2.6 backward compatibility function for timedelta.total_seconds.
:type td: timedelta object
:param td: timedelta object
:rtype: float
:return: The total number of seconds for the given timedelta object.
"""
if hasattr(timedelta, 'total_seconds'):
return getattr(td, 'total_seconds')() # depends on [control=['if'], data=[]]
# Python 2.6 backward compatibility
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) |
def do_or_fake_filter(value, formatter):
    """
    call a faker if value is None
    uses:
        {{ myint|or_fake:'randomInt' }}
    """
    # Any falsy value (None, 0, empty string, ...) is replaced by
    # generated fake data from the named formatter.
    return value if value else Faker.getGenerator().format(formatter)
constant[
call a faker if value is None
uses:
{{ myint|or_fake:'randomInt' }}
]
if <ast.UnaryOp object at 0x7da1b11a91e0> begin[:]
variable[value] assign[=] call[call[name[Faker].getGenerator, parameter[]].format, parameter[name[formatter]]]
return[name[value]] | keyword[def] identifier[do_or_fake_filter] ( identifier[value] , identifier[formatter] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
identifier[value] = identifier[Faker] . identifier[getGenerator] (). identifier[format] ( identifier[formatter] )
keyword[return] identifier[value] | def do_or_fake_filter(value, formatter):
"""
call a faker if value is None
uses:
{{ myint|or_fake:'randomInt' }}
"""
if not value:
value = Faker.getGenerator().format(formatter) # depends on [control=['if'], data=[]]
return value |
def select_pane(self, target_pane):
    """
    Return selected :class:`Pane` through ``$ tmux select-pane``.

    Parameters
    ----------
    target_pane : str
        'target_pane', '-U' ,'-D', '-L', '-R', or '-l'.

    Return
    ------
    :class:`Pane`
    """
    # Directional / last-pane flags are relative to this window,
    # so target this window's id and pass the flag through verbatim.
    if target_pane in ('-l', '-U', '-D', '-L', '-R'):
        cmd_args = ('select-pane', '-t%s' % self.id, target_pane)
    else:
        cmd_args = ('select-pane', '-t%s' % target_pane)
    proc = self.cmd(*cmd_args)
    if proc.stderr:
        raise exc.LibTmuxException(proc.stderr)
    return self.attached_pane
constant[
Return selected :class:`Pane` through ``$ tmux select-pane``.
Parameters
----------
target_pane : str
'target_pane', '-U' ,'-D', '-L', '-R', or '-l'.
Return
------
:class:`Pane`
]
if compare[name[target_pane] in list[[<ast.Constant object at 0x7da1b2344400>, <ast.Constant object at 0x7da1b2345870>, <ast.Constant object at 0x7da1b2347a30>, <ast.Constant object at 0x7da1b2347e20>, <ast.Constant object at 0x7da1b23461a0>]]] begin[:]
variable[proc] assign[=] call[name[self].cmd, parameter[constant[select-pane], binary_operation[constant[-t%s] <ast.Mod object at 0x7da2590d6920> name[self].id], name[target_pane]]]
if name[proc].stderr begin[:]
<ast.Raise object at 0x7da18f58fa00>
return[name[self].attached_pane] | keyword[def] identifier[select_pane] ( identifier[self] , identifier[target_pane] ):
literal[string]
keyword[if] identifier[target_pane] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[proc] = identifier[self] . identifier[cmd] ( literal[string] , literal[string] % identifier[self] . identifier[id] , identifier[target_pane] )
keyword[else] :
identifier[proc] = identifier[self] . identifier[cmd] ( literal[string] , literal[string] % identifier[target_pane] )
keyword[if] identifier[proc] . identifier[stderr] :
keyword[raise] identifier[exc] . identifier[LibTmuxException] ( identifier[proc] . identifier[stderr] )
keyword[return] identifier[self] . identifier[attached_pane] | def select_pane(self, target_pane):
"""
Return selected :class:`Pane` through ``$ tmux select-pane``.
Parameters
----------
target_pane : str
'target_pane', '-U' ,'-D', '-L', '-R', or '-l'.
Return
------
:class:`Pane`
"""
if target_pane in ['-l', '-U', '-D', '-L', '-R']:
proc = self.cmd('select-pane', '-t%s' % self.id, target_pane) # depends on [control=['if'], data=['target_pane']]
else:
proc = self.cmd('select-pane', '-t%s' % target_pane)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr) # depends on [control=['if'], data=[]]
return self.attached_pane |
def get_overlay(self, overlay_name):
    """Return overlay as a dictionary.

    :param overlay_name: name of the overlay
    :returns: overlay as a dictionary
    """
    # The manifest maps overlay names to URLs; fetch and decode the JSON.
    overlays = self.http_manifest["overlays"]
    return self._get_json_from_url(overlays[overlay_name])
constant[Return overlay as a dictionary.
:param overlay_name: name of the overlay
:returns: overlay as a dictionary
]
variable[url] assign[=] call[call[name[self].http_manifest][constant[overlays]]][name[overlay_name]]
return[call[name[self]._get_json_from_url, parameter[name[url]]]] | keyword[def] identifier[get_overlay] ( identifier[self] , identifier[overlay_name] ):
literal[string]
identifier[url] = identifier[self] . identifier[http_manifest] [ literal[string] ][ identifier[overlay_name] ]
keyword[return] identifier[self] . identifier[_get_json_from_url] ( identifier[url] ) | def get_overlay(self, overlay_name):
"""Return overlay as a dictionary.
:param overlay_name: name of the overlay
:returns: overlay as a dictionary
"""
url = self.http_manifest['overlays'][overlay_name]
return self._get_json_from_url(url) |
def parse_bugs_details(raw_xml):
    """Parse a Bugilla bugs details XML stream.

    This method returns a generator which parses the given XML,
    producing an iterator of dictionaries. Each dictionary stores
    the information related to a parsed bug.

    If the given XML is invalid or does not contains any bug, the
    method will raise a ParseError exception.

    :param raw_xml: XML string to parse

    :returns: a generator of parsed bugs

    :raises ParseError: raised when an error occurs parsing
        the given XML stream
    """
    parsed = xml_to_dict(raw_xml)
    # A well-formed stream always carries at least one <bug> entry.
    if 'bug' not in parsed:
        raise ParseError(cause="No bugs found. XML stream seems to be invalid.")
    for bug_entry in parsed['bug']:
        yield bug_entry
constant[Parse a Bugilla bugs details XML stream.
This method returns a generator which parses the given XML,
producing an iterator of dictionaries. Each dictionary stores
the information related to a parsed bug.
If the given XML is invalid or does not contains any bug, the
method will raise a ParseError exception.
:param raw_xml: XML string to parse
:returns: a generator of parsed bugs
:raises ParseError: raised when an error occurs parsing
the given XML stream
]
variable[bugs] assign[=] call[name[xml_to_dict], parameter[name[raw_xml]]]
if compare[constant[bug] <ast.NotIn object at 0x7da2590d7190> name[bugs]] begin[:]
variable[cause] assign[=] constant[No bugs found. XML stream seems to be invalid.]
<ast.Raise object at 0x7da1b0381d50>
for taget[name[bug]] in starred[call[name[bugs]][constant[bug]]] begin[:]
<ast.Yield object at 0x7da1b03829b0> | keyword[def] identifier[parse_bugs_details] ( identifier[raw_xml] ):
literal[string]
identifier[bugs] = identifier[xml_to_dict] ( identifier[raw_xml] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[bugs] :
identifier[cause] = literal[string]
keyword[raise] identifier[ParseError] ( identifier[cause] = identifier[cause] )
keyword[for] identifier[bug] keyword[in] identifier[bugs] [ literal[string] ]:
keyword[yield] identifier[bug] | def parse_bugs_details(raw_xml):
"""Parse a Bugilla bugs details XML stream.
This method returns a generator which parses the given XML,
producing an iterator of dictionaries. Each dictionary stores
the information related to a parsed bug.
If the given XML is invalid or does not contains any bug, the
method will raise a ParseError exception.
:param raw_xml: XML string to parse
:returns: a generator of parsed bugs
:raises ParseError: raised when an error occurs parsing
the given XML stream
"""
bugs = xml_to_dict(raw_xml)
if 'bug' not in bugs:
cause = 'No bugs found. XML stream seems to be invalid.'
raise ParseError(cause=cause) # depends on [control=['if'], data=[]]
for bug in bugs['bug']:
yield bug # depends on [control=['for'], data=['bug']] |
def labeller(rows=None, cols=None, multi_line=True,
             default=label_value, **kwargs):
    """
    Return a labeller function

    Parameters
    ----------
    rows : str | function | None
        How to label the rows
    cols : str | function | None
        How to label the columns
    multi_line : bool
        Whether to place each variable on a separate line
    default : function | str
        Fallback labelling function. If it is a string,
        it should be the name of one the labelling
        functions provided by plotnine.
    kwargs : dict
        {variable name : function | string} pairs for
        renaming variables. A function to rename the variable
        or a string name.

    Returns
    -------
    out : function
        Function to do the labelling
    """
    # Sort out the labellers along each dimension
    rows_labeller = as_labeller(rows, default, multi_line)
    cols_labeller = as_labeller(cols, default, multi_line)

    def _labeller(label_info):
        # When there is no variable specific labeller,
        # use that of the dimension
        if label_info._meta['dimension'] == 'rows':
            margin_labeller = rows_labeller
        else:
            margin_labeller = cols_labeller

        # Labelling functions expect string values
        label_info = label_info.astype(str)

        # Each facetting variable is labelled independently.
        # ``items()`` replaces ``iteritems()``, which was removed in
        # pandas 2.0; both yield (column name, column) pairs, and only
        # the name is needed here.
        for name, _ in label_info.items():
            func = as_labeller(kwargs.get(name), margin_labeller)
            new_info = func(label_info[[name]])
            label_info[name] = new_info[name]

        if not multi_line:
            label_info = collapse_label_lines(label_info)

        return label_info

    return _labeller
constant[
Return a labeller function
Parameters
----------
rows : str | function | None
How to label the rows
cols : str | function | None
How to label the columns
multi_line : bool
Whether to place each variable on a separate line
default : function | str
Fallback labelling function. If it is a string,
it should be the name of one the labelling
functions provided by plotnine.
kwargs : dict
{variable name : function | string} pairs for
renaming variables. A function to rename the variable
or a string name.
Returns
-------
out : function
Function to do the labelling
]
variable[rows_labeller] assign[=] call[name[as_labeller], parameter[name[rows], name[default], name[multi_line]]]
variable[cols_labeller] assign[=] call[name[as_labeller], parameter[name[cols], name[default], name[multi_line]]]
def function[_labeller, parameter[label_info]]:
if compare[call[name[label_info]._meta][constant[dimension]] equal[==] constant[rows]] begin[:]
variable[margin_labeller] assign[=] name[rows_labeller]
variable[label_info] assign[=] call[name[label_info].astype, parameter[name[str]]]
for taget[tuple[[<ast.Name object at 0x7da18dc05b10>, <ast.Name object at 0x7da18dc05000>]]] in starred[call[name[label_info].iteritems, parameter[]]] begin[:]
variable[func] assign[=] call[name[as_labeller], parameter[call[name[kwargs].get, parameter[name[name]]], name[margin_labeller]]]
variable[new_info] assign[=] call[name[func], parameter[call[name[label_info]][list[[<ast.Name object at 0x7da18dc061a0>]]]]]
call[name[label_info]][name[name]] assign[=] call[name[new_info]][name[name]]
if <ast.UnaryOp object at 0x7da18dc06980> begin[:]
variable[label_info] assign[=] call[name[collapse_label_lines], parameter[name[label_info]]]
return[name[label_info]]
return[name[_labeller]] | keyword[def] identifier[labeller] ( identifier[rows] = keyword[None] , identifier[cols] = keyword[None] , identifier[multi_line] = keyword[True] ,
identifier[default] = identifier[label_value] ,** identifier[kwargs] ):
literal[string]
identifier[rows_labeller] = identifier[as_labeller] ( identifier[rows] , identifier[default] , identifier[multi_line] )
identifier[cols_labeller] = identifier[as_labeller] ( identifier[cols] , identifier[default] , identifier[multi_line] )
keyword[def] identifier[_labeller] ( identifier[label_info] ):
keyword[if] identifier[label_info] . identifier[_meta] [ literal[string] ]== literal[string] :
identifier[margin_labeller] = identifier[rows_labeller]
keyword[else] :
identifier[margin_labeller] = identifier[cols_labeller]
identifier[label_info] = identifier[label_info] . identifier[astype] ( identifier[str] )
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[label_info] . identifier[iteritems] ():
identifier[func] = identifier[as_labeller] ( identifier[kwargs] . identifier[get] ( identifier[name] ), identifier[margin_labeller] )
identifier[new_info] = identifier[func] ( identifier[label_info] [[ identifier[name] ]])
identifier[label_info] [ identifier[name] ]= identifier[new_info] [ identifier[name] ]
keyword[if] keyword[not] identifier[multi_line] :
identifier[label_info] = identifier[collapse_label_lines] ( identifier[label_info] )
keyword[return] identifier[label_info]
keyword[return] identifier[_labeller] | def labeller(rows=None, cols=None, multi_line=True, default=label_value, **kwargs):
"""
Return a labeller function
Parameters
----------
rows : str | function | None
How to label the rows
cols : str | function | None
How to label the columns
multi_line : bool
Whether to place each variable on a separate line
default : function | str
Fallback labelling function. If it is a string,
it should be the name of one the labelling
functions provided by plotnine.
kwargs : dict
{variable name : function | string} pairs for
renaming variables. A function to rename the variable
or a string name.
Returns
-------
out : function
Function to do the labelling
"""
# Sort out the labellers along each dimension
rows_labeller = as_labeller(rows, default, multi_line)
cols_labeller = as_labeller(cols, default, multi_line)
def _labeller(label_info):
# When there is no variable specific labeller,
# use that of the dimension
if label_info._meta['dimension'] == 'rows':
margin_labeller = rows_labeller # depends on [control=['if'], data=[]]
else:
margin_labeller = cols_labeller
# Labelling functions expect string values
label_info = label_info.astype(str)
# Each facetting variable is labelled independently
for (name, value) in label_info.iteritems():
func = as_labeller(kwargs.get(name), margin_labeller)
new_info = func(label_info[[name]])
label_info[name] = new_info[name] # depends on [control=['for'], data=[]]
if not multi_line:
label_info = collapse_label_lines(label_info) # depends on [control=['if'], data=[]]
return label_info
return _labeller |
def _cull(potential, matches, verbose=0):
    """Cull inappropriate matches. Possible reasons:
        - a duplicate of a previous match
        - not a disk file
        - not executable (non-Windows)
    If 'potential' is approved it is returned and added to 'matches'.
    Otherwise, None is returned.
    """
    path = potential[0]
    # Reject anything that is the same on-disk file as an earlier match.
    if any(_samefile(path, match[0]) for match in matches):
        if verbose:
            sys.stderr.write("duplicate: %s (%s)\n" % potential)
        return None
    # Must be a regular file (not a directory, device, fifo, ...).
    if not stat.S_ISREG(os.stat(path).st_mode):
        if verbose:
            sys.stderr.write("not a regular file: %s (%s)\n" % potential)
        return None
    # Must carry execute permission for the current user.
    if not os.access(path, os.X_OK):
        if verbose:
            sys.stderr.write("no executable access: %s (%s)\n" % potential)
        return None
    matches.append(potential)
    return potential
constant[Cull inappropriate matches. Possible reasons:
- a duplicate of a previous match
- not a disk file
- not executable (non-Windows)
If 'potential' is approved it is returned and added to 'matches'.
Otherwise, None is returned.
]
for taget[name[match]] in starred[name[matches]] begin[:]
if call[name[_samefile], parameter[call[name[potential]][constant[0]], call[name[match]][constant[0]]]] begin[:]
if name[verbose] begin[:]
call[name[sys].stderr.write, parameter[binary_operation[constant[duplicate: %s (%s)
] <ast.Mod object at 0x7da2590d6920> name[potential]]]]
return[constant[None]]
if <ast.UnaryOp object at 0x7da18ede5fc0> begin[:]
if name[verbose] begin[:]
call[name[sys].stderr.write, parameter[binary_operation[constant[not a regular file: %s (%s)
] <ast.Mod object at 0x7da2590d6920> name[potential]]]]
return[constant[None]] | keyword[def] identifier[_cull] ( identifier[potential] , identifier[matches] , identifier[verbose] = literal[int] ):
literal[string]
keyword[for] identifier[match] keyword[in] identifier[matches] :
keyword[if] identifier[_samefile] ( identifier[potential] [ literal[int] ], identifier[match] [ literal[int] ]):
keyword[if] identifier[verbose] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[potential] )
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[stat] . identifier[S_ISREG] ( identifier[os] . identifier[stat] ( identifier[potential] [ literal[int] ]). identifier[st_mode] ):
keyword[if] identifier[verbose] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[potential] )
keyword[elif] keyword[not] identifier[os] . identifier[access] ( identifier[potential] [ literal[int] ], identifier[os] . identifier[X_OK] ):
keyword[if] identifier[verbose] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[potential] )
keyword[else] :
identifier[matches] . identifier[append] ( identifier[potential] )
keyword[return] identifier[potential]
keyword[return] keyword[None] | def _cull(potential, matches, verbose=0):
"""Cull inappropriate matches. Possible reasons:
- a duplicate of a previous match
- not a disk file
- not executable (non-Windows)
If 'potential' is approved it is returned and added to 'matches'.
Otherwise, None is returned.
"""
for match in matches: # don't yield duplicates
if _samefile(potential[0], match[0]):
if verbose:
sys.stderr.write('duplicate: %s (%s)\n' % potential) # depends on [control=['if'], data=[]]
return None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['match']]
if not stat.S_ISREG(os.stat(potential[0]).st_mode):
if verbose:
sys.stderr.write('not a regular file: %s (%s)\n' % potential) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not os.access(potential[0], os.X_OK):
if verbose:
sys.stderr.write('no executable access: %s (%s)\n' % potential) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
matches.append(potential)
return potential
return None |
def random_subset_ids_by_count(self, count_per_class=1):
    """
    Returns a random subset of sample ids of specified size by count,
    within each class.

    Parameters
    ----------
    count_per_class : int
        Exact number of samples per each class.

    Returns
    -------
    subset : list
        Combined list of sample ids from all classes.
    """
    class_sizes = self.class_sizes
    subsets = list()

    if count_per_class < 1:
        warnings.warn('Atleast one sample must be selected from each class')
        return list()
    elif count_per_class >= self.num_samples:
        warnings.warn('All samples requested - returning a copy!')
        return self.keys

    for class_id, class_size in class_sizes.items():
        # samples belonging to the class
        this_class = self.keys_with_value(self.classes, class_id)
        # clipping the requested count to [0, class_size]
        subset_size_this_class = max(0, min(class_size, count_per_class))
        # Guard BEFORE shuffling: the original code called
        # random.shuffle(this_class) first and only then tested for
        # None, so a None result would have raised TypeError instead
        # of producing the intended warning.
        if this_class is None or subset_size_this_class < 1:
            warnings.warn('No subjects from class {} were selected.'.format(class_id))
        else:
            # shuffling the sample order; shuffling works in-place!
            random.shuffle(this_class)
            # use the clamped size (it was computed but ignored before;
            # slicing clamps anyway, so selected ids are unchanged)
            subsets.extend(this_class[0:subset_size_this_class])

    if len(subsets) > 0:
        return subsets
    else:
        warnings.warn('Zero samples were selected. Returning an empty list!')
        return list()
constant[
Returns a random subset of sample ids of specified size by count,
within each class.
Parameters
----------
count_per_class : int
Exact number of samples per each class.
Returns
-------
subset : list
Combined list of sample ids from all classes.
]
variable[class_sizes] assign[=] name[self].class_sizes
variable[subsets] assign[=] call[name[list], parameter[]]
if compare[name[count_per_class] less[<] constant[1]] begin[:]
call[name[warnings].warn, parameter[constant[Atleast one sample must be selected from each class]]]
return[call[name[list], parameter[]]]
for taget[tuple[[<ast.Name object at 0x7da20c6a81c0>, <ast.Name object at 0x7da20c6a95a0>]]] in starred[call[name[class_sizes].items, parameter[]]] begin[:]
variable[this_class] assign[=] call[name[self].keys_with_value, parameter[name[self].classes, name[class_id]]]
call[name[random].shuffle, parameter[name[this_class]]]
variable[subset_size_this_class] assign[=] call[name[max], parameter[constant[0], call[name[min], parameter[name[class_size], name[count_per_class]]]]]
if <ast.BoolOp object at 0x7da18bc71b70> begin[:]
call[name[warnings].warn, parameter[call[constant[No subjects from class {} were selected.].format, parameter[name[class_id]]]]]
if compare[call[name[len], parameter[name[subsets]]] greater[>] constant[0]] begin[:]
return[name[subsets]] | keyword[def] identifier[random_subset_ids_by_count] ( identifier[self] , identifier[count_per_class] = literal[int] ):
literal[string]
identifier[class_sizes] = identifier[self] . identifier[class_sizes]
identifier[subsets] = identifier[list] ()
keyword[if] identifier[count_per_class] < literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] identifier[list] ()
keyword[elif] identifier[count_per_class] >= identifier[self] . identifier[num_samples] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] identifier[self] . identifier[keys]
keyword[for] identifier[class_id] , identifier[class_size] keyword[in] identifier[class_sizes] . identifier[items] ():
identifier[this_class] = identifier[self] . identifier[keys_with_value] ( identifier[self] . identifier[classes] , identifier[class_id] )
identifier[random] . identifier[shuffle] ( identifier[this_class] )
identifier[subset_size_this_class] = identifier[max] ( literal[int] , identifier[min] ( identifier[class_size] , identifier[count_per_class] ))
keyword[if] identifier[subset_size_this_class] < literal[int] keyword[or] identifier[this_class] keyword[is] keyword[None] :
identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( identifier[class_id] ))
keyword[else] :
identifier[subsets_this_class] = identifier[this_class] [ literal[int] : identifier[count_per_class] ]
identifier[subsets] . identifier[extend] ( identifier[subsets_this_class] )
keyword[if] identifier[len] ( identifier[subsets] )> literal[int] :
keyword[return] identifier[subsets]
keyword[else] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] identifier[list] () | def random_subset_ids_by_count(self, count_per_class=1):
"""
Returns a random subset of sample ids of specified size by count,
within each class.
Parameters
----------
count_per_class : int
Exact number of samples per each class.
Returns
-------
subset : list
Combined list of sample ids from all classes.
"""
class_sizes = self.class_sizes
subsets = list()
if count_per_class < 1:
warnings.warn('Atleast one sample must be selected from each class')
return list() # depends on [control=['if'], data=[]]
elif count_per_class >= self.num_samples:
warnings.warn('All samples requested - returning a copy!')
return self.keys # depends on [control=['if'], data=[]]
# seeding the random number generator
# random.seed(random_seed)
for (class_id, class_size) in class_sizes.items():
# samples belonging to the class
this_class = self.keys_with_value(self.classes, class_id)
# shuffling the sample order; shuffling works in-place!
random.shuffle(this_class)
# clipping the range to [0, class_size]
subset_size_this_class = max(0, min(class_size, count_per_class))
if subset_size_this_class < 1 or this_class is None:
# warning if none were selected
warnings.warn('No subjects from class {} were selected.'.format(class_id)) # depends on [control=['if'], data=[]]
else:
subsets_this_class = this_class[0:count_per_class]
subsets.extend(subsets_this_class) # depends on [control=['for'], data=[]]
if len(subsets) > 0:
return subsets # depends on [control=['if'], data=[]]
else:
warnings.warn('Zero samples were selected. Returning an empty list!')
return list() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.