_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q261700 | _distances | validation | def _distances(value_domain, distance_metric, n_v):
"""Distances of the different possible values.
Parameters
----------
value_domain : array_like, with shape (V,)
Possible values V the units can take.
If the level of measurement is not nominal, it must be ordered.
distance_metric : callable
Callable that return the distance of two given values.
n_v : ndarray, with shape (V,)
Number of pairable elements for each value.
Returns
-------
d : ndarray, with shape (V, V)
Distance matrix for each value pair.
"""
return np.array([[distance_metric(v1, v2, i1=i1, i2=i2, n_v=n_v)
for i2, v2 in enumerate(value_domain)]
for i1, v1 in enumerate(value_domain)]) | python | {
"resource": ""
} |
q261701 | _reliability_data_to_value_counts | validation | def _reliability_data_to_value_counts(reliability_data, value_domain):
"""Return the value counts given the reliability data.
Parameters
----------
reliability_data : ndarray, with shape (M, N)
Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters
and N is the unit count.
Missing rates are represented with `np.nan`.
value_domain : array_like, with shape (V,)
Possible values the units can take.
Returns
-------
value_counts : ndarray, with shape (N, V)
Number of coders that assigned a certain value to a determined unit, where N is the number of units
and V is the value count.
"""
return np.array([[sum(1 for rate in unit if rate == v) for v in value_domain] for unit in reliability_data.T]) | python | {
"resource": ""
} |
q261702 | alpha | validation | def alpha(reliability_data=None, value_counts=None, value_domain=None, level_of_measurement='interval',
dtype=np.float64):
"""Compute Krippendorff's alpha.
See https://en.wikipedia.org/wiki/Krippendorff%27s_alpha for more information.
Parameters
----------
reliability_data : array_like, with shape (M, N)
Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters
and N is the unit count.
Missing rates are represented with `np.nan`.
If it's provided then `value_counts` must not be provided.
value_counts : ndarray, with shape (N, V)
Number of coders that assigned a certain value to a determined unit, where N is the number of units
and V is the value count.
If it's provided then `reliability_data` must not be provided.
value_domain : array_like, with shape (V,)
Possible values the units can take.
If the level of measurement is not nominal, it must be ordered.
If `reliability_data` is provided, then the default value is the ordered list of unique rates that appear.
Else, the default value is `list(range(V))`.
level_of_measurement : string or callable
Steven's level of measurement of the variable.
It must be one of 'nominal', 'ordinal', 'interval', 'ratio' or a callable.
dtype : data-type
Result and computation data-type.
Returns
-------
alpha : `dtype`
Scalar value of Krippendorff's alpha of type `dtype`.
Examples
--------
>>> reliability_data = [[np.nan, np.nan, np.nan, np.nan, np.nan, 3, 4, 1, 2, 1, 1, 3, 3, np.nan, 3],
... [1, np.nan, 2, 1, 3, 3, 4, 3, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
... [np.nan, np.nan, 2, 1, 3, 4, 4, np.nan, 2, 1, 1, 3, 3, np.nan, 4]]
>>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='nominal'), 6))
0.691358
>>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='interval'), 6))
0.810845
>>> value_counts = np.array([[1, 0, 0, 0],
... [0, 0, 0, 0],
... [0, 2, 0, 0],
... [2, 0, 0, 0],
... [0, 0, 2, 0],
... [0, 0, 2, 1],
... [0, 0, 0, 3],
... [1, 0, 1, 0],
... [0, 2, 0, 0],
... [2, 0, 0, 0],
... [2, 0, 0, 0],
... [0, 0, 2, 0],
... [0, 0, 2, 0],
... [0, 0, 0, 0],
... [0, 0, 1, 1]])
>>> print(round(alpha(value_counts=value_counts, level_of_measurement='nominal'), 6))
0.691358
>>> # The following examples were extracted from
>>> # https://www.statisticshowto.datasciencecentral.com/wp-content/uploads/2016/07/fulltext.pdf, page 8.
>>> reliability_data = [[1, 2, 3, 3, 2, 1, 4, 1, 2, np.nan, np.nan, np.nan],
... [1, 2, 3, 3, 2, 2, 4, 1, 2, 5, np.nan, 3.],
... [np.nan, 3, 3, 3, 2, 3, 4, 2, 2, 5, 1, np.nan],
... [1, 2, 3, 3, 2, 4, 4, 1, 2, 5, 1, np.nan]]
>>> print(round(alpha(reliability_data, level_of_measurement='ordinal'), 3))
0.815
>>> print(round(alpha(reliability_data, level_of_measurement='ratio'), 3))
0.797
"""
if (reliability_data is None) == (value_counts is None):
raise ValueError("Either reliability_data or value_counts must be provided, but not both.")
# Don't know if it's a list or numpy array. If it's the latter, the truth value is ambiguous. So, ask for None.
if value_counts is None:
if type(reliability_data) is not np.ndarray:
reliability_data = np.array(reliability_data)
value_domain = value_domain or np.unique(reliability_data[~np.isnan(reliability_data)])
value_counts = _reliability_data_to_value_counts(reliability_data, value_domain)
else: # elif reliability_data is None
if value_domain:
assert value_counts.shape[1] == len(value_domain), \
"The value domain should be equal to the number of columns of value_counts."
else:
value_domain = tuple(range(value_counts.shape[1]))
distance_metric = _distance_metric(level_of_measurement)
o = _coincidences(value_counts, value_domain, dtype=dtype)
n_v = np.sum(o, axis=0)
n = np.sum(n_v)
e = _random_coincidences(value_domain, n, n_v)
d = _distances(value_domain, distance_metric, n_v)
return 1 - np.sum(o * d) / np.sum(e * d) | python | {
"resource": ""
} |
q261703 | CDF.inquire | validation | def inquire(self):
"""Maps to fortran CDF_Inquire.
Assigns parameters returned by CDF_Inquire
to pysatCDF instance. Not intended
for regular direct use by user.
"""
name = copy.deepcopy(self.fname)
stats = fortran_cdf.inquire(name)
# break out fortran output into something meaningful
status = stats[0]
if status == 0:
self._num_dims = stats[1]
self._dim_sizes = stats[2]
self._encoding = stats[3]
self._majority = stats[4]
self._max_rec = stats[5]
self._num_r_vars = stats[6]
self._num_z_vars = stats[7]
self._num_attrs = stats[8]
else:
raise IOError(fortran_cdf.statusreporter(status)) | python | {
"resource": ""
} |
q261704 | CDF._read_all_z_variable_info | validation | def _read_all_z_variable_info(self):
"""Gets all CDF z-variable information, not data though.
Maps to calls using var_inquire. Gets information on
data type, number of elements, number of dimensions, etc.
"""
self.z_variable_info = {}
self.z_variable_names_by_num = {}
# call Fortran that grabs all of the basic stats on all of the
# zVariables in one go.
info = fortran_cdf.z_var_all_inquire(self.fname, self._num_z_vars,
len(self.fname))
status = info[0]
data_types = info[1]
num_elems = info[2]
rec_varys = info[3]
dim_varys = info[4]
num_dims = info[5]
dim_sizes = info[6]
rec_nums = info[7]
var_nums = info[8]
var_names = info[9]
if status == 0:
for i in np.arange(len(data_types)):
out = {}
out['data_type'] = data_types[i]
out['num_elems'] = num_elems[i]
out['rec_vary'] = rec_varys[i]
out['dim_varys'] = dim_varys[i]
out['num_dims'] = num_dims[i]
# only looking at first possible extra dimension
out['dim_sizes'] = dim_sizes[i, :1]
if out['dim_sizes'][0] == 0:
out['dim_sizes'][0] += 1
out['rec_num'] = rec_nums[i]
out['var_num'] = var_nums[i]
var_name = ''.join(var_names[i].astype('U'))
out['var_name'] = var_name.rstrip()
self.z_variable_info[out['var_name']] = out
self.z_variable_names_by_num[out['var_num']] = var_name
else:
raise IOError(fortran_cdf.statusreporter(status)) | python | {
"resource": ""
} |
q261705 | CDF.load_all_variables | validation | def load_all_variables(self):
"""Loads all variables from CDF.
Note this routine is called automatically
upon instantiation.
"""
self.data = {}
# need to add r variable names
file_var_names = self.z_variable_info.keys()
# collect variable information for each
# organize it neatly for fortran call
dim_sizes = []
rec_nums = []
data_types = []
names = []
for i, name in enumerate(file_var_names):
dim_sizes.extend(self.z_variable_info[name]['dim_sizes'])
rec_nums.append(self.z_variable_info[name]['rec_num'])
data_types.append(self.z_variable_info[name]['data_type'])
names.append(name.ljust(256))
dim_sizes = np.array(dim_sizes)
rec_nums = np.array(rec_nums)
data_types = np.array(data_types)
# individually load all variables by each data type
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['real4'],
fortran_cdf.get_multi_z_real4)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['float'],
fortran_cdf.get_multi_z_real4)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['real8'],
fortran_cdf.get_multi_z_real8)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['double'],
fortran_cdf.get_multi_z_real8)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['int4'],
fortran_cdf.get_multi_z_int4)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['uint4'],
fortran_cdf.get_multi_z_int4,
data_offset=2 ** 32)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['int2'],
fortran_cdf.get_multi_z_int2)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['uint2'],
fortran_cdf.get_multi_z_int2,
data_offset=2 ** 16)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['int1'],
fortran_cdf.get_multi_z_int1)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['uint1'],
fortran_cdf.get_multi_z_int1,
data_offset=2 ** 8)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['byte'],
fortran_cdf.get_multi_z_int1)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['epoch'],
fortran_cdf.get_multi_z_real8,
epoch=True)
self._call_multi_fortran_z(names, data_types, rec_nums, 2 * dim_sizes,
self.cdf_data_types['epoch16'],
fortran_cdf.get_multi_z_epoch16,
epoch16=True)
self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
self.cdf_data_types['TT2000'],
fortran_cdf.get_multi_z_tt2000,
epoch=True)
# mark data has been loaded
self.data_loaded = True | python | {
"resource": ""
} |
q261706 | CDF._call_multi_fortran_z | validation | def _call_multi_fortran_z(self, names, data_types, rec_nums,
dim_sizes, input_type_code, func,
epoch=False, data_offset=None, epoch16=False):
"""Calls fortran functions to load CDF variable data
Parameters
----------
names : list_like
list of variables names
data_types : list_like
list of all loaded data type codes as used by CDF
rec_nums : list_like
list of record numbers in CDF file. Provided by variable_info
dim_sizes :
list of dimensions as provided by variable_info.
input_type_code : int
Specific type code to load
func : function
Fortran function via python interface that will be used for actual loading.
epoch : bool
Flag indicating type is epoch. Translates things to datetime standard.
data_offset :
Offset value to be applied to data. Required for unsigned integers in CDF.
epoch16 : bool
Flag indicating type is epoch16. Translates things to datetime standard.
"""
# isolate input type code variables from total supplied types
idx, = np.where(data_types == input_type_code)
if len(idx) > 0:
# read all data of a given type at once
max_rec = rec_nums[idx].max()
sub_names = np.array(names)[idx]
sub_sizes = dim_sizes[idx]
status, data = func(self.fname, sub_names.tolist(),
sub_sizes, sub_sizes.sum(), max_rec, len(sub_names))
if status == 0:
# account for quirks of CDF data storage for certain types
if data_offset is not None:
data = data.astype(int)
idx, idy, = np.where(data < 0)
data[idx, idy] += data_offset
if epoch:
# account for difference in seconds between
# CDF epoch and python's epoch, leap year in there
# (datetime(1971,1,2) -
# datetime(1,1,1)).total_seconds()*1000
data -= 62167219200000
data = data.astype('<M8[ms]')
if epoch16:
data[0::2, :] -= 62167219200
data = data[0::2, :] * 1E9 + data[1::2, :] / 1.E3
data = data.astype('datetime64[ns]')
sub_sizes /= 2
# all data of a type has been loaded and tweaked as necessary
# parse through returned array to break out the individual variables
# as appropriate
self._process_return_multi_z(data, sub_names, sub_sizes)
else:
raise IOError(fortran_cdf.statusreporter(status)) | python | {
"resource": ""
} |
q261707 | CDF._read_all_attribute_info | validation | def _read_all_attribute_info(self):
"""Read all attribute properties, g, r, and z attributes"""
num = copy.deepcopy(self._num_attrs)
fname = copy.deepcopy(self.fname)
out = fortran_cdf.inquire_all_attr(fname, num, len(fname))
status = out[0]
names = out[1].astype('U')
scopes = out[2]
max_gentries = out[3]
max_rentries = out[4]
max_zentries = out[5]
attr_nums = out[6]
global_attrs_info = {}
var_attrs_info = {}
if status == 0:
for name, scope, gentry, rentry, zentry, num in zip(names, scopes, max_gentries,
max_rentries, max_zentries,
attr_nums):
name = ''.join(name)
name = name.rstrip()
nug = {}
nug['scope'] = scope
nug['max_gentry'] = gentry
nug['max_rentry'] = rentry
nug['max_zentry'] = zentry
nug['attr_num'] = num
flag = (gentry == 0) & (rentry == 0) & (zentry == 0)
if not flag:
if scope == 1:
global_attrs_info[name] = nug
elif scope == 2:
var_attrs_info[name] = nug
self.global_attrs_info = global_attrs_info
self.var_attrs_info = var_attrs_info
else:
raise IOError(fortran_cdf.statusreporter(status)) | python | {
"resource": ""
} |
q261708 | CDF._call_multi_fortran_z_attr | validation | def _call_multi_fortran_z_attr(self, names, data_types, num_elems,
entry_nums, attr_nums, var_names,
input_type_code, func, data_offset=None):
"""Calls Fortran function that reads attribute data.
data_offset translates unsigned into signed.
If number read in is negative, offset added.
"""
# isolate input type code variables
idx, = np.where(data_types == input_type_code)
if len(idx) > 0:
# maximimum array dimension
max_num = num_elems[idx].max()
sub_num_elems = num_elems[idx]
sub_names = np.array(names)[idx]
sub_var_names = np.array(var_names)[idx]
# zVariable numbers, 'entry' number
sub_entry_nums = entry_nums[idx]
# attribute number
sub_attr_nums = attr_nums[idx]
status, data = func(self.fname, sub_attr_nums, sub_entry_nums,
len(sub_attr_nums), max_num, len(self.fname))
if (status == 0).all():
if data_offset is not None:
data = data.astype(int)
idx, idy, = np.where(data < 0)
data[idx, idy] += data_offset
self._process_return_multi_z_attr(data, sub_names,
sub_var_names, sub_num_elems)
else:
# raise ValueError('CDF Error code :', status)
idx, = np.where(status != 0)
# raise first error
raise IOError(fortran_cdf.statusreporter(status[idx][0])) | python | {
"resource": ""
} |
q261709 | _uptime_linux | validation | def _uptime_linux():
"""Returns uptime in seconds or None, on Linux."""
# With procfs
try:
f = open('/proc/uptime', 'r')
up = float(f.readline().split()[0])
f.close()
return up
except (IOError, ValueError):
pass
# Without procfs (really?)
try:
libc = ctypes.CDLL('libc.so')
except AttributeError:
return None
except OSError:
# Debian and derivatives do the wrong thing because /usr/lib/libc.so
# is a GNU ld script rather than an ELF object. To get around this, we
# have to be more specific.
# We don't want to use ctypes.util.find_library because that creates a
# new process on Linux. We also don't want to try too hard because at
# this point we're already pretty sure this isn't Linux.
try:
libc = ctypes.CDLL('libc.so.6')
except OSError:
return None
if not hasattr(libc, 'sysinfo'):
# Not Linux.
return None
buf = ctypes.create_string_buffer(128) # 64 suffices on 32-bit, whatever.
if libc.sysinfo(buf) < 0:
return None
up = struct.unpack_from('@l', buf.raw)[0]
if up < 0:
up = None
return up | python | {
"resource": ""
} |
q261710 | _boottime_linux | validation | def _boottime_linux():
"""A way to figure out the boot time directly on Linux."""
global __boottime
try:
f = open('/proc/stat', 'r')
for line in f:
if line.startswith('btime'):
__boottime = int(line.split()[1])
if datetime is None:
raise NotImplementedError('datetime module required.')
return datetime.fromtimestamp(__boottime)
except (IOError, IndexError):
return None | python | {
"resource": ""
} |
q261711 | _uptime_amiga | validation | def _uptime_amiga():
"""Returns uptime in seconds or None, on AmigaOS."""
global __boottime
try:
__boottime = os.stat('RAM:').st_ctime
return time.time() - __boottime
except (NameError, OSError):
return None | python | {
"resource": ""
} |
q261712 | _uptime_minix | validation | def _uptime_minix():
"""Returns uptime in seconds or None, on MINIX."""
try:
f = open('/proc/uptime', 'r')
up = float(f.read())
f.close()
return up
except (IOError, ValueError):
return None | python | {
"resource": ""
} |
q261713 | _uptime_plan9 | validation | def _uptime_plan9():
"""Returns uptime in seconds or None, on Plan 9."""
# Apparently Plan 9 only has Python 2.2, which I'm not prepared to
# support. Maybe some Linuxes implement /dev/time, though, someone was
# talking about it somewhere.
try:
# The time file holds one 32-bit number representing the sec-
# onds since start of epoch and three 64-bit numbers, repre-
# senting nanoseconds since start of epoch, clock ticks, and
# clock frequency.
# -- cons(3)
f = open('/dev/time', 'r')
s, ns, ct, cf = f.read().split()
f.close()
return float(ct) / float(cf)
except (IOError, ValueError):
return None | python | {
"resource": ""
} |
q261714 | _uptime_solaris | validation | def _uptime_solaris():
"""Returns uptime in seconds or None, on Solaris."""
global __boottime
try:
kstat = ctypes.CDLL('libkstat.so')
except (AttributeError, OSError):
return None
# kstat doesn't have uptime, but it does have boot time.
# Unfortunately, getting at it isn't perfectly straightforward.
# First, let's pretend to be kstat.h
# Constant
KSTAT_STRLEN = 31 # According to every kstat.h I could find.
# Data structures
class anon_union(ctypes.Union):
# The ``value'' union in kstat_named_t actually has a bunch more
# members, but we're only using it for boot_time, so we only need
# the padding and the one we're actually using.
_fields_ = [('c', ctypes.c_char * 16),
('time', ctypes.c_int)]
class kstat_named_t(ctypes.Structure):
_fields_ = [('name', ctypes.c_char * KSTAT_STRLEN),
('data_type', ctypes.c_char),
('value', anon_union)]
# Function signatures
kstat.kstat_open.restype = ctypes.c_void_p
kstat.kstat_lookup.restype = ctypes.c_void_p
kstat.kstat_lookup.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_char_p]
kstat.kstat_read.restype = ctypes.c_int
kstat.kstat_read.argtypes = [ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
kstat.kstat_data_lookup.restype = ctypes.POINTER(kstat_named_t)
kstat.kstat_data_lookup.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
# Now, let's do something useful.
# Initialise kstat control structure.
kc = kstat.kstat_open()
if not kc:
return None
# We're looking for unix:0:system_misc:boot_time.
ksp = kstat.kstat_lookup(kc, 'unix', 0, 'system_misc')
if ksp and kstat.kstat_read(kc, ksp, None) != -1:
data = kstat.kstat_data_lookup(ksp, 'boot_time')
if data:
__boottime = data.contents.value.time
# Clean-up.
kstat.kstat_close(kc)
if __boottime is not None:
return time.time() - __boottime
return None | python | {
"resource": ""
} |
q261715 | _uptime_syllable | validation | def _uptime_syllable():
"""Returns uptime in seconds or None, on Syllable."""
global __boottime
try:
__boottime = os.stat('/dev/pty/mst/pty0').st_mtime
return time.time() - __boottime
except (NameError, OSError):
return None | python | {
"resource": ""
} |
q261716 | uptime | validation | def uptime():
"""Returns uptime in seconds if even remotely possible, or None if not."""
if __boottime is not None:
return time.time() - __boottime
return {'amiga': _uptime_amiga,
'aros12': _uptime_amiga,
'beos5': _uptime_beos,
'cygwin': _uptime_linux,
'darwin': _uptime_osx,
'haiku1': _uptime_beos,
'linux': _uptime_linux,
'linux-armv71': _uptime_linux,
'linux2': _uptime_linux,
'mac': _uptime_mac,
'minix3': _uptime_minix,
'riscos': _uptime_riscos,
'sunos5': _uptime_solaris,
'syllable': _uptime_syllable,
'win32': _uptime_windows,
'wince': _uptime_windows}.get(sys.platform, _uptime_bsd)() or \
_uptime_bsd() or _uptime_plan9() or _uptime_linux() or \
_uptime_windows() or _uptime_solaris() or _uptime_beos() or \
_uptime_amiga() or _uptime_riscos() or _uptime_posix() or \
_uptime_syllable() or _uptime_mac() or _uptime_osx() | python | {
"resource": ""
} |
q261717 | boottime | validation | def boottime():
"""Returns boot time if remotely possible, or None if not."""
global __boottime
if __boottime is None:
up = uptime()
if up is None:
return None
if __boottime is None:
_boottime_linux()
if datetime is None:
raise RuntimeError('datetime module required.')
return datetime.fromtimestamp(__boottime or time.time() - up) | python | {
"resource": ""
} |
q261718 | _initfile | validation | def _initfile(path, data="dict"):
"""Initialize an empty JSON file."""
data = {} if data.lower() == "dict" else []
# The file will need to be created if it doesn't exist
if not os.path.exists(path): # The file doesn't exist
# Raise exception if the directory that should contain the file doesn't
# exist
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
raise IOError(
("Could not initialize empty JSON file in non-existant "
"directory '{}'").format(os.path.dirname(path))
)
# Write an empty file there
with open(path, "w") as f:
json.dump(data, f)
return True
elif os.path.getsize(path) == 0: # The file is empty
with open(path, "w") as f:
json.dump(data, f)
else: # The file exists and contains content
return False | python | {
"resource": ""
} |
q261719 | _BaseFile._data | validation | def _data(self):
"""A simpler version of data to avoid infinite recursion in some cases.
Don't use this.
"""
if self.is_caching:
return self.cache
with open(self.path, "r") as f:
return json.load(f) | python | {
"resource": ""
} |
q261720 | _BaseFile.data | validation | def data(self, data):
"""Overwrite the file with new data. You probably shouldn't do
this yourself, it's easy to screw up your whole file with this."""
if self.is_caching:
self.cache = data
else:
fcontents = self.file_contents
with open(self.path, "w") as f:
try:
# Write the file. Keep user settings about indentation, etc
indent = self.indent if self.pretty else None
json.dump(data, f, sort_keys=self.sort_keys, indent=indent)
except Exception as e:
# Rollback to prevent data loss
f.seek(0)
f.truncate()
f.write(fcontents)
# And re-raise the exception
raise e
self._updateType() | python | {
"resource": ""
} |
q261721 | _BaseFile._updateType | validation | def _updateType(self):
"""Make sure that the class behaves like the data structure that it
is, so that we don't get a ListFile trying to represent a dict."""
data = self._data()
# Change type if needed
if isinstance(data, dict) and isinstance(self, ListFile):
self.__class__ = DictFile
elif isinstance(data, list) and isinstance(self, DictFile):
self.__class__ = ListFile | python | {
"resource": ""
} |
q261722 | File.with_data | validation | def with_data(path, data):
"""Initialize a new file that starts out with some data. Pass data
as a list, dict, or JSON string.
"""
# De-jsonize data if necessary
if isinstance(data, str):
data = json.loads(data)
# Make sure this is really a new file
if os.path.exists(path):
raise ValueError("File exists, not overwriting data. Set the "
"'data' attribute on a normally-initialized "
"'livejson.File' instance if you really "
"want to do this.")
else:
f = File(path)
f.data = data
return f | python | {
"resource": ""
} |
q261723 | ZabbixPlugin.is_configured | validation | def is_configured(self, project, **kwargs):
"""
Check if plugin is configured.
"""
params = self.get_option
return bool(params('server_host', project) and params('server_port', project)) | python | {
"resource": ""
} |
q261724 | ZabbixPlugin.post_process | validation | def post_process(self, group, event, is_new, is_sample, **kwargs):
"""
Process error.
"""
if not self.is_configured(group.project):
return
host = self.get_option('server_host', group.project)
port = int(self.get_option('server_port', group.project))
prefix = self.get_option('prefix', group.project)
hostname = self.get_option('hostname', group.project) or socket.gethostname()
resolve_age = group.project.get_option('sentry:resolve_age', None)
now = int(time.time())
template = '%s.%%s[%s]' % (prefix, group.project.slug)
level = group.get_level_display()
label = template % level
groups = group.project.group_set.filter(status=STATUS_UNRESOLVED)
if resolve_age:
oldest = timezone.now() - timedelta(hours=int(resolve_age))
groups = groups.filter(last_seen__gt=oldest)
num_errors = groups.filter(level=group.level).count()
metric = Metric(hostname, label, num_errors, now)
log.info('will send %s=%s to zabbix', label, num_errors)
send_to_zabbix([metric], host, port) | python | {
"resource": ""
} |
q261725 | PingTransmitter.ping | validation | def ping(self):
"""
Sending ICMP packets.
:return: ``ping`` command execution result.
:rtype: :py:class:`.PingResult`
:raises ValueError: If parameters not valid.
"""
self.__validate_ping_param()
ping_proc = subprocrunner.SubprocessRunner(self.__get_ping_command())
ping_proc.run()
return PingResult(ping_proc.stdout, ping_proc.stderr, ping_proc.returncode) | python | {
"resource": ""
} |
q261726 | PingParsing.parse | validation | def parse(self, ping_message):
"""
Parse ping command output.
Args:
ping_message (str or :py:class:`~pingparsing.PingResult`):
``ping`` command output.
Returns:
:py:class:`~pingparsing.PingStats`: Parsed result.
"""
try:
# accept PingResult instance as an input
if typepy.is_not_null_string(ping_message.stdout):
ping_message = ping_message.stdout
except AttributeError:
pass
logger.debug("parsing ping result: {}".format(ping_message))
self.__parser = NullPingParser()
if typepy.is_null_string(ping_message):
logger.debug("ping_message is empty")
self.__stats = PingStats()
return self.__stats
ping_lines = _to_unicode(ping_message).splitlines()
parser_class_list = (
LinuxPingParser,
WindowsPingParser,
MacOsPingParser,
AlpineLinuxPingParser,
)
for parser_class in parser_class_list:
self.__parser = parser_class()
try:
self.__stats = self.__parser.parse(ping_lines)
return self.__stats
except ParseError as e:
if e.reason != ParseErrorReason.HEADER_NOT_FOUND:
raise e
except pp.ParseException:
pass
self.__parser = NullPingParser()
return self.__stats | python | {
"resource": ""
} |
q261727 | EmailAddress.send_confirmation | validation | def send_confirmation(self):
"""
Send a verification email for the email address.
"""
confirmation = EmailConfirmation.objects.create(email=self)
confirmation.send() | python | {
"resource": ""
} |
q261728 | EmailAddress.send_duplicate_notification | validation | def send_duplicate_notification(self):
"""
Send a notification about a duplicate signup.
"""
email_utils.send_email(
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[self.email],
subject=_("Registration Attempt"),
template_name="rest_email_auth/emails/duplicate-email",
)
logger.info("Sent duplicate email notification to: %s", self.email) | python | {
"resource": ""
} |
q261729 | EmailAddress.set_primary | validation | def set_primary(self):
"""
Set this email address as the user's primary email.
"""
query = EmailAddress.objects.filter(is_primary=True, user=self.user)
query = query.exclude(pk=self.pk)
# The transaction is atomic so there is never a gap where a user
# has no primary email address.
with transaction.atomic():
query.update(is_primary=False)
self.is_primary = True
self.save()
logger.info(
"Set %s as the primary email address for %s.",
self.email,
self.user,
) | python | {
"resource": ""
} |
q261730 | EmailConfirmation.confirm | validation | def confirm(self):
"""
Mark the instance's email as verified.
"""
self.email.is_verified = True
self.email.save()
signals.email_verified.send(email=self.email, sender=self.__class__)
logger.info("Verified email address: %s", self.email.email) | python | {
"resource": ""
} |
q261731 | EmailConfirmation.is_expired | validation | def is_expired(self):
"""
Determine if the confirmation has expired.
Returns:
bool:
``True`` if the confirmation has expired and ``False``
otherwise.
"""
expiration_time = self.created_at + datetime.timedelta(days=1)
return timezone.now() > expiration_time | python | {
"resource": ""
} |
q261732 | EmailConfirmation.send | validation | def send(self):
"""
Send a verification email to the user.
"""
context = {
"verification_url": app_settings.EMAIL_VERIFICATION_URL.format(
key=self.key
)
}
email_utils.send_email(
context=context,
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[self.email.email],
subject=_("Please Verify Your Email Address"),
template_name="rest_email_auth/emails/verify-email",
)
logger.info(
"Sent confirmation email to %s for user #%d",
self.email.email,
self.email.user.id,
) | python | {
"resource": ""
} |
q261733 | UserFactory._create | validation | def _create(cls, model_class, *args, **kwargs):
"""
Create a new user instance.
Args:
model_class:
The type of model to create an instance of.
args:
Positional arguments to create the instance with.
kwargs:
Keyword arguments to create the instance with.
Returns:
A new user instance of the type specified by
``model_class``.
"""
manager = cls._get_manager(model_class)
return manager.create_user(*args, **kwargs) | python | {
"resource": ""
} |
q261734 | EmailSerializer.create | validation | def create(self, validated_data):
"""
Create a new email and send a confirmation to it.
Returns:
The newly creating ``EmailAddress`` instance.
"""
email_query = models.EmailAddress.objects.filter(
email=self.validated_data["email"]
)
if email_query.exists():
email = email_query.get()
email.send_duplicate_notification()
else:
email = super(EmailSerializer, self).create(validated_data)
email.send_confirmation()
user = validated_data.get("user")
query = models.EmailAddress.objects.filter(
is_primary=True, user=user
)
if not query.exists():
email.set_primary()
return email | python | {
"resource": ""
} |
q261735 | EmailSerializer.update | validation | def update(self, instance, validated_data):
"""
Update the instance the serializer is bound to.
Args:
instance:
The instance the serializer is bound to.
validated_data:
The data to update the serializer with.
Returns:
The updated instance.
"""
is_primary = validated_data.pop("is_primary", False)
instance = super(EmailSerializer, self).update(
instance, validated_data
)
if is_primary:
instance.set_primary()
return instance | python | {
"resource": ""
} |
q261736 | EmailSerializer.validate_is_primary | validation | def validate_is_primary(self, is_primary):
"""
Validate the provided 'is_primary' parameter.
Returns:
The validated 'is_primary' value.
Raises:
serializers.ValidationError:
If the user attempted to mark an unverified email as
their primary email address.
"""
# TODO: Setting 'is_primary' to 'False' should probably not be
# allowed.
if is_primary and not (self.instance and self.instance.is_verified):
raise serializers.ValidationError(
_(
"Unverified email addresses may not be used as the "
"primary address."
)
)
return is_primary | python | {
"resource": ""
} |
q261737 | EmailVerificationSerializer.validate | validation | def validate(self, data):
"""
Validate the provided data.
Returns:
dict:
The validated data.
Raises:
serializers.ValidationError:
If the provided password is invalid.
"""
user = self._confirmation.email.user
if (
app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED
and not user.check_password(data["password"])
):
raise serializers.ValidationError(
_("The provided password is invalid.")
)
# Add email to returned data
data["email"] = self._confirmation.email.email
return data | python | {
"resource": ""
} |
q261738 | EmailVerificationSerializer.validate_key | validation | def validate_key(self, key):
"""
Validate the provided confirmation key.
Returns:
str:
The validated confirmation key.
Raises:
serializers.ValidationError:
If there is no email confirmation with the given key or
the confirmation has expired.
"""
try:
confirmation = models.EmailConfirmation.objects.select_related(
"email__user"
).get(key=key)
except models.EmailConfirmation.DoesNotExist:
raise serializers.ValidationError(
_("The provided verification key is invalid.")
)
if confirmation.is_expired:
raise serializers.ValidationError(
_("That verification code has expired.")
)
# Cache confirmation instance
self._confirmation = confirmation
return key | python | {
"resource": ""
} |
q261739 | PasswordResetRequestSerializer.save | validation | def save(self):
"""
Send out a password reset if the provided data is valid.
If the provided email address exists and is verified, a reset
email is sent to the address.
Returns:
The password reset token if it was returned and ``None``
otherwise.
"""
try:
email = models.EmailAddress.objects.get(
email=self.validated_data["email"], is_verified=True
)
except models.EmailAddress.DoesNotExist:
return None
token = models.PasswordResetToken.objects.create(email=email)
token.send()
return token | python | {
"resource": ""
} |
q261740 | PasswordResetSerializer.save | validation | def save(self):
"""
Reset the user's password if the provided information is valid.
"""
token = models.PasswordResetToken.objects.get(
key=self.validated_data["key"]
)
token.email.user.set_password(self.validated_data["password"])
token.email.user.save()
logger.info("Reset password for %s", token.email.user)
token.delete() | python | {
"resource": ""
} |
q261741 | PasswordResetSerializer.validate_key | validation | def validate_key(self, key):
"""
Validate the provided reset key.
Returns:
The validated key.
Raises:
serializers.ValidationError:
If the provided key does not exist.
"""
if not models.PasswordResetToken.valid_tokens.filter(key=key).exists():
raise serializers.ValidationError(
_("The provided reset token does not exist, or is expired.")
)
return key | python | {
"resource": ""
} |
q261742 | RegistrationSerializer.create | validation | def create(self, validated_data):
"""
Create a new user from the data passed to the serializer.
If the provided email has not been verified yet, the user is
created and a verification email is sent to the address.
Otherwise we send a notification to the email address that
someone attempted to register with an email that's already been
verified.
Args:
validated_data (dict):
The data passed to the serializer after it has been
validated.
Returns:
A new user created from the provided data.
"""
email = validated_data.pop("email")
password = validated_data.pop("password")
# We don't save the user instance yet in case the provided email
# address already exists.
user = get_user_model()(**validated_data)
user.set_password(password)
# We set an ephemeral email property so that it is included in
# the data returned by the serializer.
user.email = email
email_query = models.EmailAddress.objects.filter(email=email)
if email_query.exists():
existing_email = email_query.get()
existing_email.send_duplicate_notification()
else:
user.save()
email_instance = models.EmailAddress.objects.create(
email=email, user=user
)
email_instance.send_confirmation()
signals.user_registered.send(sender=self.__class__, user=user)
return user | python | {
"resource": ""
} |
q261743 | ResendVerificationSerializer.save | validation | def save(self):
"""
Resend a verification email to the provided address.
If the provided email is already verified no action is taken.
"""
try:
email = models.EmailAddress.objects.get(
email=self.validated_data["email"], is_verified=False
)
logger.debug(
"Resending verification email to %s",
self.validated_data["email"],
)
email.send_confirmation()
except models.EmailAddress.DoesNotExist:
logger.debug(
"Not resending verification email to %s because the address "
"doesn't exist in the database.",
self.validated_data["email"],
) | python | {
"resource": ""
} |
q261744 | EmailAddressManager.create | validation | def create(self, *args, **kwargs):
"""
Create a new email address.
"""
is_primary = kwargs.pop("is_primary", False)
with transaction.atomic():
email = super(EmailAddressManager, self).create(*args, **kwargs)
if is_primary:
email.set_primary()
return email | python | {
"resource": ""
} |
q261745 | ValidPasswordResetTokenManager.get_queryset | validation | def get_queryset(self):
"""
Return all unexpired password reset tokens.
"""
oldest = timezone.now() - app_settings.PASSWORD_RESET_EXPIRATION
queryset = super(ValidPasswordResetTokenManager, self).get_queryset()
return queryset.filter(created_at__gt=oldest) | python | {
"resource": ""
} |
q261746 | Command.handle | validation | def handle(self, *args, **kwargs):
"""
Handle execution of the command.
"""
cutoff = timezone.now()
cutoff -= app_settings.CONFIRMATION_EXPIRATION
cutoff -= app_settings.CONFIRMATION_SAVE_PERIOD
queryset = models.EmailConfirmation.objects.filter(
created_at__lte=cutoff
)
count = queryset.count()
queryset.delete()
if count:
self.stdout.write(
self.style.SUCCESS(
"Removed {count} old email confirmation(s)".format(
count=count
)
)
)
else:
self.stdout.write("No email confirmations to remove.") | python | {
"resource": ""
} |
q261747 | BaseBackend.get_user | validation | def get_user(self, user_id):
"""
Get a user by their ID.
Args:
user_id:
The ID of the user to fetch.
Returns:
The user with the specified ID if they exist and ``None``
otherwise.
"""
try:
return get_user_model().objects.get(id=user_id)
except get_user_model().DoesNotExist:
return None | python | {
"resource": ""
} |
q261748 | VerifiedEmailBackend.authenticate | validation | def authenticate(self, request, email=None, password=None, username=None):
"""
Attempt to authenticate a set of credentials.
Args:
request:
The request associated with the authentication attempt.
email:
The user's email address.
password:
The user's password.
username:
An alias for the ``email`` field. This is provided for
compatability with Django's built in authentication
views.
Returns:
The user associated with the provided credentials if they
are valid. Returns ``None`` otherwise.
"""
email = email or username
try:
email_instance = models.EmailAddress.objects.get(
is_verified=True, email=email
)
except models.EmailAddress.DoesNotExist:
return None
user = email_instance.user
if user.check_password(password):
return user
return None | python | {
"resource": ""
} |
q261749 | authenticate | validation | def authenticate(username, password, service='login', encoding='utf-8',
resetcred=True):
"""Returns True if the given username and password authenticate for the
given service. Returns False otherwise.
``username``: the username to authenticate
``password``: the password in plain text
``service``: the PAM service to authenticate against.
Defaults to 'login'
The above parameters can be strings or bytes. If they are strings,
they will be encoded using the encoding given by:
``encoding``: the encoding to use for the above parameters if they
are given as strings. Defaults to 'utf-8'
``resetcred``: Use the pam_setcred() function to
reinitialize the credentials.
Defaults to 'True'.
"""
if sys.version_info >= (3,):
if isinstance(username, str):
username = username.encode(encoding)
if isinstance(password, str):
password = password.encode(encoding)
if isinstance(service, str):
service = service.encode(encoding)
@conv_func
def my_conv(n_messages, messages, p_response, app_data):
"""Simple conversation function that responds to any
prompt where the echo is off with the supplied password"""
# Create an array of n_messages response objects
addr = calloc(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = strdup(password)
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = pam_start(service, username, byref(conv), byref(handle))
if retval != 0:
# TODO: This is not an authentication error, something
# has gone wrong starting up PAM
return False
retval = pam_authenticate(handle, 0)
auth_success = (retval == 0)
# Re-initialize credentials (for Kerberos users, etc)
# Don't check return code of pam_setcred(), it shouldn't matter
# if this fails
if auth_success and resetcred:
retval = pam_setcred(handle, PAM_REINITIALIZE_CRED)
pam_end(handle, retval)
return auth_success | python | {
"resource": ""
} |
q261750 | SerializerSaveView.post | validation | def post(self, request):
"""
Save the provided data using the class' serializer.
Args:
request:
The request being made.
Returns:
An ``APIResponse`` instance. If the request was successful
the response will have a 200 status code and contain the
serializer's data. Otherwise a 400 status code and the
request's errors will be returned.
"""
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | python | {
"resource": ""
} |
q261751 | ReferrerTree.get_repr | validation | def get_repr(self, obj, referent=None):
"""Return an HTML tree block describing the given object."""
objtype = type(obj)
typename = str(objtype.__module__) + "." + objtype.__name__
prettytype = typename.replace("__builtin__.", "")
name = getattr(obj, "__name__", "")
if name:
prettytype = "%s %r" % (prettytype, name)
key = ""
if referent:
key = self.get_refkey(obj, referent)
url = reverse('dowser_trace_object', args=(
typename,
id(obj)
))
return ('<a class="objectid" href="%s">%s</a> '
'<span class="typename">%s</span>%s<br />'
'<span class="repr">%s</span>'
% (url, id(obj), prettytype, key, get_repr(obj, 100))
) | python | {
"resource": ""
} |
q261752 | ReferrerTree.get_refkey | validation | def get_refkey(self, obj, referent):
"""Return the dict key or attribute name of obj which refers to
referent."""
if isinstance(obj, dict):
for k, v in obj.items():
if v is referent:
return " (via its %r key)" % k
for k in dir(obj) + ['__dict__']:
if getattr(obj, k, None) is referent:
return " (via its %r attribute)" % k
return "" | python | {
"resource": ""
} |
q261753 | Tree.walk | validation | def walk(self, maxresults=100, maxdepth=None):
"""Walk the object tree, ignoring duplicates and circular refs."""
log.debug("step")
self.seen = {}
self.ignore(self, self.__dict__, self.obj, self.seen, self._ignore)
# Ignore the calling frame, its builtins, globals and locals
self.ignore_caller()
self.maxdepth = maxdepth
count = 0
log.debug("will iterate results")
for result in self._gen(self.obj):
log.debug("will yeld")
yield result
count += 1
if maxresults and count >= maxresults:
yield 0, 0, "==== Max results reached ===="
return | python | {
"resource": ""
} |
q261754 | get_finder | validation | def get_finder(import_path):
"""
Imports the media fixtures files finder class described by import_path, where
import_path is the full Python path to the class.
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFinder):
raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
(Finder, BaseFinder))
return Finder() | python | {
"resource": ""
} |
q261755 | FileSystemFinder.find | validation | def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``MEDIA_FIXTURES_FILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches | python | {
"resource": ""
} |
q261756 | FileSystemFinder.list | validation | def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage | python | {
"resource": ""
} |
q261757 | AppDirectoriesFinder.list | validation | def list(self, ignore_patterns):
"""
List all files in all app storages.
"""
for storage in six.itervalues(self.storages):
if storage.exists(''): # check if storage location exists
for path in utils.get_files(storage, ignore_patterns):
yield path, storage | python | {
"resource": ""
} |
q261758 | AppDirectoriesFinder.find | validation | def find(self, path, all=False):
"""
Looks for files in the app directories.
"""
matches = []
for app in self.apps:
app_location = self.storages[app].location
if app_location not in searched_locations:
searched_locations.append(app_location)
match = self.find_in_app(app, path)
if match:
if not all:
return match
matches.append(match)
return matches | python | {
"resource": ""
} |
q261759 | AppDirectoriesFinder.find_in_app | validation | def find_in_app(self, app, path):
"""
Find a requested media file in an app's media fixtures locations.
"""
storage = self.storages.get(app, None)
if storage:
# only try to find a file if the source dir actually exists
if storage.exists(path):
matched_path = storage.path(path)
if matched_path:
return matched_path | python | {
"resource": ""
} |
q261760 | Command.set_options | validation | def set_options(self, **options):
"""
Set instance variables based on an options dict
"""
self.interactive = options['interactive']
self.verbosity = options['verbosity']
self.symlink = options['link']
self.clear = options['clear']
self.dry_run = options['dry_run']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~']
self.ignore_patterns = list(set(ignore_patterns))
self.post_process = options['post_process'] | python | {
"resource": ""
} |
q261761 | Command.collect | validation | def collect(self):
"""
Perform the bulk of the work of collectmedia.
Split off from handle() to facilitate testing.
"""
if self.symlink and not self.local:
raise CommandError("Can't symlink to a remote destination.")
if self.clear:
self.clear_dir('')
if self.symlink:
handler = self.link_file
else:
handler = self.copy_file
found_files = OrderedDict()
for finder in get_finders():
for path, storage in finder.list(self.ignore_patterns):
# Prefix the relative path if the source storage contains it
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
if prefixed_path not in found_files:
found_files[prefixed_path] = (storage, path)
handler(path, prefixed_path, storage)
# Here we check if the storage backend has a post_process
# method and pass it the list of modified files.
if self.post_process and hasattr(self.storage, 'post_process'):
processor = self.storage.post_process(found_files,
dry_run=self.dry_run)
for original_path, processed_path, processed in processor:
if isinstance(processed, Exception):
self.stderr.write("Post-processing '%s' failed!" % original_path)
# Add a blank line before the traceback, otherwise it's
# too easy to miss the relevant part of the error message.
self.stderr.write("")
raise processed
if processed:
self.log("Post-processed '%s' as '%s'" %
(original_path, processed_path), level=1)
self.post_processed_files.append(original_path)
else:
self.log("Skipped post-processing '%s'" % original_path)
return {
'modified': self.copied_files + self.symlinked_files,
'unmodified': self.unmodified_files,
'post_processed': self.post_processed_files,
} | python | {
"resource": ""
} |
q261762 | Command.clear_dir | validation | def clear_dir(self, path):
"""
Deletes the given relative path using the destination storage backend.
"""
dirs, files = self.storage.listdir(path)
for f in files:
fpath = os.path.join(path, f)
if self.dry_run:
self.log("Pretending to delete '%s'" %
smart_text(fpath), level=1)
else:
self.log("Deleting '%s'" % smart_text(fpath), level=1)
self.storage.delete(fpath)
for d in dirs:
self.clear_dir(os.path.join(path, d)) | python | {
"resource": ""
} |
q261763 | Command.delete_file | validation | def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if self.storage.exists(prefixed_path):
try:
# When was the target file modified last time?
target_last_modified = \
self.storage.modified_time(prefixed_path)
except (OSError, NotImplementedError, AttributeError):
# The storage doesn't support ``modified_time`` or failed
pass
else:
try:
# When was the source file modified last time?
source_last_modified = source_storage.modified_time(path)
except (OSError, NotImplementedError, AttributeError):
pass
else:
# The full path of the target file
if self.local:
full_path = self.storage.path(prefixed_path)
else:
full_path = None
# Skip the file if the source file is younger
# Avoid sub-second precision (see #14665, #19540)
if (target_last_modified.replace(microsecond=0)
>= source_last_modified.replace(microsecond=0)):
if not ((self.symlink and full_path
and not os.path.islink(full_path)) or
(not self.symlink and full_path
and os.path.islink(full_path))):
if prefixed_path not in self.unmodified_files:
self.unmodified_files.append(prefixed_path)
self.log("Skipping '%s' (not modified)" % path)
return False
# Then delete the existing file if really needed
if self.dry_run:
self.log("Pretending to delete '%s'" % path)
else:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
return True | python | {
"resource": ""
} |
q261764 | Command.link_file | validation | def link_file(self, path, prefixed_path, source_storage):
"""
Attempt to link ``path``
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.symlinked_files:
return self.log("Skipping '%s' (already linked earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally link the file
if self.dry_run:
self.log("Pretending to link '%s'" % source_path, level=1)
else:
self.log("Linking '%s'" % source_path, level=1)
full_path = self.storage.path(prefixed_path)
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
try:
if os.path.lexists(full_path):
os.unlink(full_path)
os.symlink(source_path, full_path)
except AttributeError:
import platform
raise CommandError("Symlinking is not supported by Python %s." %
platform.python_version())
except NotImplementedError:
import platform
raise CommandError("Symlinking is not supported in this "
"platform (%s)." % platform.platform())
except OSError as e:
raise CommandError(e)
if prefixed_path not in self.symlinked_files:
self.symlinked_files.append(prefixed_path) | python | {
"resource": ""
} |
q261765 | Command.copy_file | validation | def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
self.copied_files.append(prefixed_path) | python | {
"resource": ""
} |
q261766 | BaseSpaceContainer.cur_space | validation | def cur_space(self, name=None):
"""Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned.
"""
if name is None:
return self._impl.model.currentspace.interface
else:
self._impl.model.currentspace = self._impl.spaces[name]
return self.cur_space() | python | {
"resource": ""
} |
q261767 | EditableSpaceContainer.new_space | validation | def new_space(self, name=None, bases=None, formula=None, refs=None):
"""Create a child space.
Args:
name (str, optional): Name of the space. Defaults to ``SpaceN``,
where ``N`` is a number determined automatically.
bases (optional): A space or a sequence of spaces to be the base
space(s) of the created space.
formula (optional): Function to specify the parameters of
dynamic child spaces. The signature of this function is used
for setting parameters for dynamic child spaces.
This function should return a mapping of keyword arguments
to be passed to this method when the dynamic child spaces
are created.
Returns:
The new child space.
"""
space = self._impl.model.currentspace = self._impl.new_space(
name=name, bases=get_impls(bases), formula=formula, refs=refs
)
return space.interface | python | {
"resource": ""
} |
q261768 | EditableSpaceContainer.new_space_from_excel | validation | def new_space_from_excel(
self,
book,
range_,
sheet=None,
name=None,
names_row=None,
param_cols=None,
space_param_order=None,
cells_param_order=None,
transpose=False,
names_col=None,
param_rows=None,
):
"""Create a child space from an Excel range.
To use this method, ``openpyxl`` package must be installed.
Args:
book (str): Path to an Excel file.
range_ (str): Range expression, such as "A1", "$G4:$K10",
or named range "NamedRange1".
sheet (str): Sheet name (case ignored).
name (str, optional): Name of the space. Defaults to ``SpaceN``,
where ``N`` is a number determined automatically.
names_row (optional): an index number indicating
what row contains the names of cells and parameters.
Defaults to the top row (0).
param_cols (optional): a sequence of index numbers
indicating parameter columns.
Defaults to only the leftmost column ([0]).
names_col (optional): an index number, starting from 0,
indicating what column contains additional parameters.
param_rows (optional): a sequence of index numbers, starting from
0, indicating rows of additional parameters, in case cells are
defined in two dimensions.
transpose (optional): Defaults to ``False``.
If set to ``True``, "row(s)" and "col(s)" in the parameter
names are interpreted inversely, i.e.
all indexes passed to "row(s)" parameters are interpreted
as column indexes,
and all indexes passed to "col(s)" parameters as row indexes.
space_param_order: a sequence to specify space parameters and
their orders. The elements of the sequence denote the indexes
of ``param_cols`` elements, and optionally the index of
``param_rows`` elements shifted by the length of
``param_cols``. The elements of this parameter and
``cell_param_order`` must not overlap.
cell_param_order (optional): a sequence to reorder the parameters.
The elements of the sequence denote the indexes of
``param_cols`` elements, and optionally the index of
``param_rows`` elements shifted by the length of
``param_cols``. The elements of this parameter and
``cell_space_order`` must not overlap.
Returns:
The new child space created from the Excel range.
"""
space = self._impl.new_space_from_excel(
book,
range_,
sheet,
name,
names_row,
param_cols,
space_param_order,
cells_param_order,
transpose,
names_col,
param_rows,
)
return get_interfaces(space) | python | {
"resource": ""
} |
q261769 | EditableSpaceContainerImpl.new_space | validation | def new_space(
self,
name=None,
bases=None,
formula=None,
*,
refs=None,
source=None,
is_derived=False,
prefix=""
):
"""Create a new child space.
Args:
name (str): Name of the space. If omitted, the space is
created automatically.
bases: If specified, the new space becomes a derived space of
the `base` space.
formula: Function whose parameters used to set space parameters.
refs: a mapping of refs to be added.
arguments: ordered dict of space parameter names to their values.
source: A source module from which cell definitions are read.
prefix: Prefix to the autogenerated name when name is None.
"""
from modelx.core.space import StaticSpaceImpl
if name is None:
name = self.spacenamer.get_next(self.namespace, prefix)
if name in self.namespace:
raise ValueError("Name '%s' already exists." % name)
if not prefix and not is_valid_name(name):
raise ValueError("Invalid name '%s'." % name)
space = self._new_space(
name=name,
formula=formula,
refs=refs,
source=source,
is_derived=is_derived,
)
self._set_space(space)
self.model.spacegraph.add_space(space)
# Set up direct base spaces and mro
if bases is not None:
if isinstance(bases, StaticSpaceImpl):
bases = [bases]
space.add_bases(bases)
return space | python | {
"resource": ""
} |
q261770 | get_node | validation | def get_node(obj, args, kwargs):
"""Create a node from arguments and return it"""
if args is None and kwargs is None:
return (obj,)
if kwargs is None:
kwargs = {}
return obj, _bind_args(obj, args, kwargs) | python | {
"resource": ""
} |
q261771 | node_get_args | validation | def node_get_args(node):
"""Return an ordered mapping from params to args"""
obj = node[OBJ]
key = node[KEY]
boundargs = obj.formula.signature.bind(*key)
boundargs.apply_defaults()
return boundargs.arguments | python | {
"resource": ""
} |
q261772 | get_object | validation | def get_object(name: str):
"""Get a modelx object from its full name."""
# TODO: Duplicate of system.get_object
elms = name.split(".")
parent = get_models()[elms.pop(0)]
while len(elms) > 0:
obj = elms.pop(0)
parent = getattr(parent, obj)
return parent | python | {
"resource": ""
} |
q261773 | _get_node | validation | def _get_node(name: str, args: str):
"""Get node from object name and arg string
Not Used. Left for future reference purpose.
"""
obj = get_object(name)
args = ast.literal_eval(args)
if not isinstance(args, tuple):
args = (args,)
return obj.node(*args) | python | {
"resource": ""
} |
q261774 | custom_showwarning | validation | def custom_showwarning(
message, category, filename="", lineno=-1, file=None, line=None
):
"""Hook to override default showwarning.
https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings
"""
if file is None:
file = sys.stderr
if file is None:
# sys.stderr is None when run with pythonw.exe:
# warnings get lost
return
text = "%s: %s\n" % (category.__name__, message)
try:
file.write(text)
except OSError:
# the file (probably stderr) is invalid - this warning gets lost.
pass | python | {
"resource": ""
} |
q261775 | custom_showtraceback | validation | def custom_showtraceback(
self,
exc_tuple=None,
filename=None,
tb_offset=None,
exception_only=False,
running_compiled_code=False,
):
"""Custom showtraceback for monkey-patching IPython's InteractiveShell
https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook
"""
self.default_showtraceback(
exc_tuple,
filename,
tb_offset,
exception_only=True,
running_compiled_code=running_compiled_code,
) | python | {
"resource": ""
} |
q261776 | CallStack.tracemessage | validation | def tracemessage(self, maxlen=6):
"""
if maxlen > 0, the message is shortened to maxlen traces.
"""
result = ""
for i, value in enumerate(self):
result += "{0}: {1}\n".format(i, get_node_repr(value))
result = result.strip("\n")
lines = result.split("\n")
if maxlen and len(lines) > maxlen:
i = int(maxlen / 2)
lines = lines[:i] + ["..."] + lines[-(maxlen - i) :]
result = "\n".join(lines)
return result | python | {
"resource": ""
} |
q261777 | System.setup_ipython | validation | def setup_ipython(self):
"""Monkey patch shell's error handler.
This method is to monkey-patch the showtraceback method of
IPython's InteractiveShell to
__IPYTHON__ is not detected when starting an IPython kernel,
so this method is called from start_kernel in spyder-modelx.
"""
if self.is_ipysetup:
return
from ipykernel.kernelapp import IPKernelApp
self.shell = IPKernelApp.instance().shell # None in PyCharm console
if not self.shell and is_ipython():
self.shell = get_ipython()
if self.shell:
shell_class = type(self.shell)
shell_class.default_showtraceback = shell_class.showtraceback
shell_class.showtraceback = custom_showtraceback
self.is_ipysetup = True
else:
raise RuntimeError("IPython shell not found.") | python | {
"resource": ""
} |
q261778 | System.restore_ipython | validation | def restore_ipython(self):
"""Restore default IPython showtraceback"""
if not self.is_ipysetup:
return
shell_class = type(self.shell)
shell_class.showtraceback = shell_class.default_showtraceback
del shell_class.default_showtraceback
self.is_ipysetup = False | python | {
"resource": ""
} |
q261779 | System.restore_python | validation | def restore_python(self):
"""Restore Python settings to the original states"""
orig = self.orig_settings
sys.setrecursionlimit(orig["sys.recursionlimit"])
if "sys.tracebacklimit" in orig:
sys.tracebacklimit = orig["sys.tracebacklimit"]
else:
if hasattr(sys, "tracebacklimit"):
del sys.tracebacklimit
if "showwarning" in orig:
warnings.showwarning = orig["showwarning"]
orig.clear()
threading.stack_size() | python | {
"resource": ""
} |
q261780 | System.get_object | validation | def get_object(self, name):
"""Retrieve an object by its absolute name."""
parts = name.split(".")
model_name = parts.pop(0)
return self.models[model_name].get_object(".".join(parts)) | python | {
"resource": ""
} |
q261781 | show_tree | validation | def show_tree(model=None):
"""Display the model tree window.
Args:
model: :class:`Model <modelx.core.model.Model>` object.
Defaults to the current model.
Warnings:
For this function to work with Spyder, *Graphics backend* option
of Spyder must be set to *inline*.
"""
if model is None:
model = mx.cur_model()
view = get_modeltree(model)
app = QApplication.instance()
if not app:
raise RuntimeError("QApplication does not exist.")
view.show()
app.exec_() | python | {
"resource": ""
} |
q261782 | get_interfaces | validation | def get_interfaces(impls):
"""Get interfaces from their implementations."""
if impls is None:
return None
elif isinstance(impls, OrderMixin):
result = OrderedDict()
for name in impls.order:
result[name] = impls[name].interface
return result
elif isinstance(impls, Mapping):
return {name: impls[name].interface for name in impls}
elif isinstance(impls, Sequence):
return [impl.interface for impl in impls]
else:
return impls.interface | python | {
"resource": ""
} |
q261783 | get_impls | validation | def get_impls(interfaces):
"""Get impls from their interfaces."""
if interfaces is None:
return None
elif isinstance(interfaces, Mapping):
return {name: interfaces[name]._impl for name in interfaces}
elif isinstance(interfaces, Sequence):
return [interfaces._impl for interfaces in interfaces]
else:
return interfaces._impl | python | {
"resource": ""
} |
q261784 | Impl.update_lazyevals | validation | def update_lazyevals(self):
"""Update all LazyEvals in self
self.lzy_evals must be set to LazyEval object(s) enough to
update all owned LazyEval objects.
"""
if self.lazy_evals is None:
return
elif isinstance(self.lazy_evals, LazyEval):
self.lazy_evals.get_updated()
else:
for lz in self.lazy_evals:
lz.get_updated() | python | {
"resource": ""
} |
q261785 | Interface._to_attrdict | validation | def _to_attrdict(self, attrs=None):
"""Get extra attributes"""
result = self._baseattrs
for attr in attrs:
if hasattr(self, attr):
result[attr] = getattr(self, attr)._to_attrdict(attrs)
return result | python | {
"resource": ""
} |
q261786 | convert_args | validation | def convert_args(args, kwargs):
"""If args and kwargs contains Cells, Convert them to their values."""
found = False
for arg in args:
if isinstance(arg, Cells):
found = True
break
if found:
args = tuple(
arg.value if isinstance(arg, Cells) else arg for arg in args
)
if kwargs is not None:
for key, arg in kwargs.items():
if isinstance(arg, Cells):
kwargs[key] = arg.value
return args, kwargs | python | {
"resource": ""
} |
q261787 | shareable_parameters | validation | def shareable_parameters(cells):
"""Return parameter names if the parameters are shareable among cells.
Parameters are shareable among multiple cells when all the cells
have the parameters in the same order if they ever have any.
For example, if cells are foo(), bar(x), baz(x, y), then
('x', 'y') are shareable parameters amounts them, as 'x' and 'y'
appear in the same order in the parameter list if they ever appear.
Args:
cells: An iterator yielding cells.
Returns:
None if parameters are not share,
tuple of shareable parameter names,
() if cells are all scalars.
"""
result = []
for c in cells.values():
params = c.formula.parameters
for i in range(min(len(result), len(params))):
if params[i] != result[i]:
return None
for i in range(len(result), len(params)):
result.append(params[i])
return result | python | {
"resource": ""
} |
q261788 | Cells.copy | validation | def copy(self, space=None, name=None):
"""Make a copy of itself and return it."""
return Cells(space=space, name=name, formula=self.formula) | python | {
"resource": ""
} |
q261789 | CellNode.value | validation | def value(self):
"""Return the value of the cells."""
if self.has_value:
return self._impl[OBJ].get_value(self._impl[KEY])
else:
raise ValueError("Value not found") | python | {
"resource": ""
} |
q261790 | _get_col_index | validation | def _get_col_index(name):
"""Convert column name to index."""
index = string.ascii_uppercase.index
col = 0
for c in name.upper():
col = col * 26 + index(c) + 1
return col | python | {
"resource": ""
} |
q261791 | _get_range | validation | def _get_range(book, range_, sheet):
"""Return a range as nested dict of openpyxl cells."""
filename = None
if isinstance(book, str):
filename = book
book = opxl.load_workbook(book, data_only=True)
elif isinstance(book, opxl.Workbook):
pass
else:
raise TypeError
if _is_range_address(range_):
sheet_names = [name.upper() for name in book.sheetnames]
index = sheet_names.index(sheet.upper())
data = book.worksheets[index][range_]
else:
data = _get_namedrange(book, range_, sheet)
if data is None:
raise ValueError(
"Named range '%s' not found in %s" % (range_, filename or book)
)
return data | python | {
"resource": ""
} |
q261792 | read_range | validation | def read_range(filepath, range_expr, sheet=None, dict_generator=None):
"""Read values from an Excel range into a dictionary.
`range_expr` ie either a range address string, such as "A1", "$C$3:$E$5",
or a defined name string for a range, such as "NamedRange1".
If a range address is provided, `sheet` argument must also be provided.
If a named range is provided and `sheet` is not, book level defined name
is searched. If `sheet` is also provided, sheet level defined name for the
specified `sheet` is searched.
If range_expr points to a single cell, its value is returned.
`dictgenerator` is a generator function that yields keys and values of
the returned dictionary. the excel range, as a nested tuple of openpyxl's
Cell objects, is passed to the generator function as its single argument.
If not specified, default generator is used, which maps tuples of row and
column indexes, both starting with 0, to their values.
Args:
filepath (str): Path to an Excel file.
range_epxr (str): Range expression, such as "A1", "$G4:$K10",
or named range "NamedRange1"
sheet (str): Sheet name (case ignored).
None if book level defined range name is passed as `range_epxr`.
dict_generator: A generator function taking a nested tuple of cells
as a single parameter.
Returns:
Nested list containing range values.
"""
def default_generator(cells):
for row_ind, row in enumerate(cells):
for col_ind, cell in enumerate(row):
yield (row_ind, col_ind), cell.value
book = opxl.load_workbook(filepath, data_only=True)
if _is_range_address(range_expr):
sheet_names = [name.upper() for name in book.sheetnames]
index = sheet_names.index(sheet.upper())
cells = book.worksheets[index][range_expr]
else:
cells = _get_namedrange(book, range_expr, sheet)
# In case of a single cell, return its value.
if isinstance(cells, opxl.cell.Cell):
return cells.value
if dict_generator is None:
dict_generator = default_generator
gen = dict_generator(cells)
return {keyval[0]: keyval[1] for keyval in gen} | python | {
"resource": ""
} |
q261793 | _get_namedrange | validation | def _get_namedrange(book, rangename, sheetname=None):
"""Get range from a workbook.
A workbook can contain multiple definitions for a single name,
as a name can be defined for the entire book or for
a particular sheet.
If sheet is None, the book-wide def is searched,
otherwise sheet-local def is looked up.
Args:
book: An openpyxl workbook object.
rangename (str): Range expression, such as "A1", "$G4:$K10",
named range "NamedRange1".
sheetname (str, optional): None for book-wide name def,
sheet name for sheet-local named range.
Returns:
Range object specified by the name.
"""
def cond(namedef):
if namedef.type.upper() == "RANGE":
if namedef.name.upper() == rangename.upper():
if sheetname is None:
if not namedef.localSheetId:
return True
else: # sheet local name
sheet_id = [sht.upper() for sht in book.sheetnames].index(
sheetname.upper()
)
if namedef.localSheetId == sheet_id:
return True
return False
def get_destinations(name_def):
"""Workaround for the bug in DefinedName.destinations"""
from openpyxl.formula import Tokenizer
from openpyxl.utils.cell import SHEETRANGE_RE
if name_def.type == "RANGE":
tok = Tokenizer("=" + name_def.value)
for part in tok.items:
if part.subtype == "RANGE":
m = SHEETRANGE_RE.match(part.value)
if m.group("quoted"):
sheet_name = m.group("quoted")
else:
sheet_name = m.group("notquoted")
yield sheet_name, m.group("cells")
namedef = next(
(item for item in book.defined_names.definedName if cond(item)), None
)
if namedef is None:
return None
dests = get_destinations(namedef)
xlranges = []
sheetnames_upper = [name.upper() for name in book.sheetnames]
for sht, addr in dests:
if sheetname:
sht = sheetname
index = sheetnames_upper.index(sht.upper())
xlranges.append(book.worksheets[index][addr])
if len(xlranges) == 1:
return xlranges[0]
else:
return xlranges | python | {
"resource": ""
} |
q261794 | SpaceGraph.get_mro | validation | def get_mro(self, space):
"""Calculate the Method Resolution Order of bases using the C3 algorithm.
Code modified from
http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/
Args:
bases: sequence of direct base spaces.
Returns:
mro as a list of bases including node itself
"""
seqs = [self.get_mro(base) for base
in self.get_bases(space)] + [list(self.get_bases(space))]
res = []
while True:
non_empty = list(filter(None, seqs))
if not non_empty:
# Nothing left to process, we're done.
res.insert(0, space)
return res
for seq in non_empty: # Find merge candidates among seq heads.
candidate = seq[0]
not_head = [s for s in non_empty if candidate in s[1:]]
if not_head:
# Reject the candidate.
candidate = None
else:
break
if not candidate:
raise TypeError(
"inconsistent hierarchy, no C3 MRO is possible")
res.append(candidate)
for seq in non_empty:
# Remove candidate.
if seq[0] == candidate:
del seq[0] | python | {
"resource": ""
} |
q261795 | _alter_code | validation | def _alter_code(code, **attrs):
"""Create a new code object by altering some of ``code`` attributes
Args:
code: code objcect
attrs: a mapping of names of code object attrs to their values
"""
PyCode_New = ctypes.pythonapi.PyCode_New
PyCode_New.argtypes = (
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.c_int,
ctypes.py_object)
PyCode_New.restype = ctypes.py_object
args = [
[code.co_argcount, 'co_argcount'],
[code.co_kwonlyargcount, 'co_kwonlyargcount'],
[code.co_nlocals, 'co_nlocals'],
[code.co_stacksize, 'co_stacksize'],
[code.co_flags, 'co_flags'],
[code.co_code, 'co_code'],
[code.co_consts, 'co_consts'],
[code.co_names, 'co_names'],
[code.co_varnames, 'co_varnames'],
[code.co_freevars, 'co_freevars'],
[code.co_cellvars, 'co_cellvars'],
[code.co_filename, 'co_filename'],
[code.co_name, 'co_name'],
[code.co_firstlineno, 'co_firstlineno'],
[code.co_lnotab, 'co_lnotab']]
for arg in args:
if arg[1] in attrs:
arg[0] = attrs[arg[1]]
return PyCode_New(
args[0][0], # code.co_argcount,
args[1][0], # code.co_kwonlyargcount,
args[2][0], # code.co_nlocals,
args[3][0], # code.co_stacksize,
args[4][0], # code.co_flags,
args[5][0], # code.co_code,
args[6][0], # code.co_consts,
args[7][0], # code.co_names,
args[8][0], # code.co_varnames,
args[9][0], # code.co_freevars,
args[10][0], # code.co_cellvars,
args[11][0], # code.co_filename,
args[12][0], # code.co_name,
args[13][0], # code.co_firstlineno,
args[14][0]) | python | {
"resource": ""
} |
q261796 | alter_freevars | validation | def alter_freevars(func, globals_=None, **vars):
"""Replace local variables with free variables
Warnings:
This function does not work.
"""
if globals_ is None:
globals_ = func.__globals__
frees = tuple(vars.keys())
oldlocs = func.__code__.co_names
newlocs = tuple(name for name in oldlocs if name not in frees)
code = _alter_code(func.__code__,
co_freevars=frees,
co_names=newlocs,
co_flags=func.__code__.co_flags | inspect.CO_NESTED)
closure = _create_closure(*vars.values())
return FunctionType(code, globals_, closure=closure) | python | {
"resource": ""
} |
q261797 | fix_lamdaline | validation | def fix_lamdaline(source):
"""Remove the last redundant token from lambda expression
lambda x: return x)
^
Return string without irrelevant tokens
returned from inspect.getsource on lamda expr returns
"""
# Using undocumented generate_tokens due to a tokenize.tokenize bug
# See https://bugs.python.org/issue23297
strio = io.StringIO(source)
gen = tokenize.generate_tokens(strio.readline)
tkns = []
try:
for t in gen:
tkns.append(t)
except tokenize.TokenError:
pass
# Find the position of 'lambda'
lambda_pos = [(t.type, t.string) for t in tkns].index(
(tokenize.NAME, "lambda")
)
# Ignore tokes before 'lambda'
tkns = tkns[lambda_pos:]
# Find the position of th las OP
lastop_pos = (
len(tkns) - 1 - [t.type for t in tkns[::-1]].index(tokenize.OP)
)
lastop = tkns[lastop_pos]
# Remove OP from the line
fiedlineno = lastop.start[0]
fixedline = lastop.line[: lastop.start[1]] + lastop.line[lastop.end[1] :]
tkns = tkns[:lastop_pos]
fixedlines = ""
last_lineno = 0
for t in tkns:
if last_lineno == t.start[0]:
continue
elif t.start[0] == fiedlineno:
fixedlines += fixedline
last_lineno = t.start[0]
else:
fixedlines += t.line
last_lineno = t.start[0]
return fixedlines | python | {
"resource": ""
} |
q261798 | find_funcdef | validation | def find_funcdef(source):
"""Find the first FuncDef ast object in source"""
try:
module_node = compile(
source, "<string>", mode="exec", flags=ast.PyCF_ONLY_AST
)
except SyntaxError:
return find_funcdef(fix_lamdaline(source))
for node in ast.walk(module_node):
if isinstance(node, ast.FunctionDef) or isinstance(node, ast.Lambda):
return node
raise ValueError("function definition not found") | python | {
"resource": ""
} |
q261799 | extract_params | validation | def extract_params(source):
"""Extract parameters from a function definition"""
funcdef = find_funcdef(source)
params = []
for node in ast.walk(funcdef.args):
if isinstance(node, ast.arg):
if node.arg not in params:
params.append(node.arg)
return params | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.