code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None | def function[_get_pool, parameter[name, session]]:
constant[
Get XEN resource pool object reference
]
if compare[name[session] is constant[None]] begin[:]
variable[session] assign[=] call[name[_get_session], parameter[]]
variable[pools] assign[=] call[name[session].xenapi.pool.get_all, parameter[]]
for taget[name[pool]] in starred[name[pools]] begin[:]
variable[pool_record] assign[=] call[name[session].xenapi.pool.get_record, parameter[name[pool]]]
if compare[name[name] in call[name[pool_record].get, parameter[constant[name_label]]]] begin[:]
return[name[pool]]
return[constant[None]] | keyword[def] identifier[_get_pool] ( identifier[name] = keyword[None] , identifier[session] = keyword[None] ):
literal[string]
keyword[if] identifier[session] keyword[is] keyword[None] :
identifier[session] = identifier[_get_session] ()
identifier[pools] = identifier[session] . identifier[xenapi] . identifier[pool] . identifier[get_all] ()
keyword[for] identifier[pool] keyword[in] identifier[pools] :
identifier[pool_record] = identifier[session] . identifier[xenapi] . identifier[pool] . identifier[get_record] ( identifier[pool] )
keyword[if] identifier[name] keyword[in] identifier[pool_record] . identifier[get] ( literal[string] ):
keyword[return] identifier[pool]
keyword[return] keyword[None] | def _get_pool(name=None, session=None):
"""
Get XEN resource pool object reference
"""
if session is None:
session = _get_session() # depends on [control=['if'], data=['session']]
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pool']]
return None |
def start_dashboard(self):
        """Start the dashboard.

        Creates dedicated log files for the dashboard, launches the
        dashboard process via ``ray.services.start_dashboard``, records
        the resulting process info in ``self.all_processes``, and — when
        the process actually started — publishes the web UI URL to Redis
        so clients can discover it.
        """
        # Dedicated stdout/stderr log files for the dashboard process.
        stdout_file, stderr_file = self.new_log_files("dashboard", True)
        self._webui_url, process_info = ray.services.start_dashboard(
            self.redis_address,
            self._temp_dir,
            stdout_file=stdout_file,
            stderr_file=stderr_file,
            redis_password=self._ray_params.redis_password)
        # The dashboard must not already be registered as running.
        assert ray_constants.PROCESS_TYPE_DASHBOARD not in self.all_processes
        # process_info may be None (dashboard not started); only record
        # the process and publish the URL when it actually exists.
        if process_info is not None:
            self.all_processes[ray_constants.PROCESS_TYPE_DASHBOARD] = [
                process_info
            ]
            # Publish the dashboard URL under the "webui" Redis hash.
            redis_client = self.create_redis_client()
redis_client.hmset("webui", {"url": self._webui_url}) | def function[start_dashboard, parameter[self]]:
constant[Start the dashboard.]
<ast.Tuple object at 0x7da18eb54160> assign[=] call[name[self].new_log_files, parameter[constant[dashboard], constant[True]]]
<ast.Tuple object at 0x7da18eb579d0> assign[=] call[name[ray].services.start_dashboard, parameter[name[self].redis_address, name[self]._temp_dir]]
assert[compare[name[ray_constants].PROCESS_TYPE_DASHBOARD <ast.NotIn object at 0x7da2590d7190> name[self].all_processes]]
if compare[name[process_info] is_not constant[None]] begin[:]
call[name[self].all_processes][name[ray_constants].PROCESS_TYPE_DASHBOARD] assign[=] list[[<ast.Name object at 0x7da18eb55c60>]]
variable[redis_client] assign[=] call[name[self].create_redis_client, parameter[]]
call[name[redis_client].hmset, parameter[constant[webui], dictionary[[<ast.Constant object at 0x7da1b2346230>], [<ast.Attribute object at 0x7da1b23441f0>]]]] | keyword[def] identifier[start_dashboard] ( identifier[self] ):
literal[string]
identifier[stdout_file] , identifier[stderr_file] = identifier[self] . identifier[new_log_files] ( literal[string] , keyword[True] )
identifier[self] . identifier[_webui_url] , identifier[process_info] = identifier[ray] . identifier[services] . identifier[start_dashboard] (
identifier[self] . identifier[redis_address] ,
identifier[self] . identifier[_temp_dir] ,
identifier[stdout_file] = identifier[stdout_file] ,
identifier[stderr_file] = identifier[stderr_file] ,
identifier[redis_password] = identifier[self] . identifier[_ray_params] . identifier[redis_password] )
keyword[assert] identifier[ray_constants] . identifier[PROCESS_TYPE_DASHBOARD] keyword[not] keyword[in] identifier[self] . identifier[all_processes]
keyword[if] identifier[process_info] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[all_processes] [ identifier[ray_constants] . identifier[PROCESS_TYPE_DASHBOARD] ]=[
identifier[process_info]
]
identifier[redis_client] = identifier[self] . identifier[create_redis_client] ()
identifier[redis_client] . identifier[hmset] ( literal[string] ,{ literal[string] : identifier[self] . identifier[_webui_url] }) | def start_dashboard(self):
"""Start the dashboard."""
(stdout_file, stderr_file) = self.new_log_files('dashboard', True)
(self._webui_url, process_info) = ray.services.start_dashboard(self.redis_address, self._temp_dir, stdout_file=stdout_file, stderr_file=stderr_file, redis_password=self._ray_params.redis_password)
assert ray_constants.PROCESS_TYPE_DASHBOARD not in self.all_processes
if process_info is not None:
self.all_processes[ray_constants.PROCESS_TYPE_DASHBOARD] = [process_info]
redis_client = self.create_redis_client()
redis_client.hmset('webui', {'url': self._webui_url}) # depends on [control=['if'], data=['process_info']] |
def delete(self):
        """Delete the Router.

        Unregisters this router from the environment by name; a zero
        return value from ``EnvDeleteRouter`` indicates the C API call
        failed, in which case a RuntimeError is raised.
        """
        # EnvDeleteRouter returns 0 on failure.
        if lib.EnvDeleteRouter(self._env, self._name.encode()) == 0:
raise RuntimeError("Unable to delete router %s" % self._name) | def function[delete, parameter[self]]:
constant[Delete the Router.]
if compare[call[name[lib].EnvDeleteRouter, parameter[name[self]._env, call[name[self]._name.encode, parameter[]]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18f58f6d0> | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] identifier[lib] . identifier[EnvDeleteRouter] ( identifier[self] . identifier[_env] , identifier[self] . identifier[_name] . identifier[encode] ())== literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[self] . identifier[_name] ) | def delete(self):
"""Delete the Router."""
if lib.EnvDeleteRouter(self._env, self._name.encode()) == 0:
raise RuntimeError('Unable to delete router %s' % self._name) # depends on [control=['if'], data=[]] |
def bootstrap_falsealarmprob(lspinfo,
                             times,
                             mags,
                             errs,
                             nbootstrap=250,
                             magsarefluxes=False,
                             sigclip=10.0,
                             npeaks=None):
    '''Calculates the false alarm probabilities of periodogram peaks using
    bootstrap resampling of the magnitude time series.

    The false alarm probability here is defined as::

        (1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)

    for each best periodogram peak j. The index i is for each bootstrap
    trial. This effectively gives us a significance for the peak. Smaller FAP
    means a better chance that the peak is real.

    The basic idea is to get the number of trial best peaks that are larger
    than the current best peak and divide this by the total number of
    trials. The distribution of these trial best peaks is obtained after
    scrambling the mag values and rerunning the specified periodogram method
    for a bunch of trials.

    `lspinfo` is the output dict from a periodbase periodogram function and
    MUST contain a 'method' key that corresponds to one of the keys in the
    LSPMETHODS dict above. This will let this function know which
    periodogram function to run to generate the bootstrap samples. The
    lspinfo SHOULD also have a 'kwargs' key that corresponds to the input
    keyword arguments for the periodogram function as it was run originally,
    to keep everything the same during the bootstrap runs. If this is
    missing, default values will be used.

    FIXME: this may not be strictly correct; must look more into bootstrap
    significance testing. Also look into if we're doing resampling correctly
    for time series because the samples are not iid. Look into moving block
    bootstrap.

    Parameters
    ----------

    lspinfo : dict
        A dict of period-finder results from one of the period-finders in
        periodbase, or your own functions, provided it's of the form and
        contains at least the keys listed below::

            {'periods': np.array of all periods searched by the period-finder,
             'lspvals': np.array of periodogram power value for each period,
             'bestperiod': a float value that is the period with the highest
                           peak in the periodogram, i.e. the most-likely
                           actual period,
             'method': a three-letter code naming the period-finder used;
                       must be one of the keys in the
                       `astrobase.periodbase.METHODLABELS` dict,
             'nbestperiods': a list of the periods corresponding to
                             periodogram peaks (`nbestlspvals` below) to
                             annotate on the periodogram plot so they can be
                             called out visually,
             'nbestlspvals': a list of the power values associated with
                             periodogram peaks to annotate on the
                             periodogram plot so they can be called out
                             visually; should be the same length as
                             `nbestperiods` above,
             'kwargs': dict of kwargs passed to your own period-finder
                       function}

        If you provide your own function's period-finder results, you should
        add a corresponding key for it to the LSPMETHODS dict above so the
        bootstrap function can use it correctly. Your period-finder function
        should take `times`, `mags`, errs and any extra parameters as kwargs
        and return a dict of the form described above. A small worked
        example::

            from your_module import your_periodfinder_func
            from astrobase import periodbase

            periodbase.LSPMETHODS['your-finder'] = your_periodfinder_func

            # run a period-finder session
            your_pfresults = your_periodfinder_func(times, mags, errs,
                                                    **extra_kwargs)

            # run bootstrap to find FAP
            falsealarm_info = periodbase.bootstrap_falsealarmprob(
                your_pfresults,
                times, mags, errs,
                nbootstrap=250,
                magsarefluxes=False,
            )

    times,mags,errs : np.arrays
        The magnitude/flux time-series to process along with their
        associated measurement errors.

    nbootstrap : int
        The total number of bootstrap trials to run. This is set to 250 by
        default, but should probably be around 1000 for realistic results.

    magsarefluxes : bool
        If True, indicates the input time-series is fluxes and not mags.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed
        using the number provided as the sigma-multiplier to cut out from
        the input time-series.

        If a list of two ints/floats is provided, the function will perform
        an 'asymmetric' sigma-clip. The first element in this list is the
        sigma value to use for fainter flux/mag values; the second element
        in this list is the sigma value to use for brighter flux/mag
        values. For example, `sigclip=[10., 3.]`, will sigclip out greater
        than 10-sigma dimmings and greater than 3-sigma brightenings. Here
        the meaning of "dimming" and "brightening" is set by *physics* (not
        the magnitude system), which is why the `magsarefluxes` kwarg must
        be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through
        to the output.

    npeaks : int or None
        The number of peaks from the list of 'nbestlspvals' in the
        period-finder result dict to run the bootstrap for. If None, all of
        the peaks in this list will have their FAP calculated.

    Returns
    -------

    dict
        Returns a dict of the form::

            {'peaks':allpeaks,
             'periods':allperiods,
             'probabilities':allfaps,
             'alltrialbestpeaks':alltrialbestpeaks}

    '''

    # figure out how many periods to work on
    if (npeaks and (0 < npeaks < len(lspinfo['nbestperiods']))):
        nperiods = npeaks
    else:
        LOGWARNING('npeaks not specified or invalid, '
                   'getting FAP for all %s periodogram peaks' %
                   len(lspinfo['nbestperiods']))
        nperiods = len(lspinfo['nbestperiods'])

    nbestperiods = lspinfo['nbestperiods'][:nperiods]
    nbestpeaks = lspinfo['nbestlspvals'][:nperiods]

    # get rid of nans first and sigclip. NOTE: the clipped arrays are used
    # only to verify there are enough finite points below -- the bootstrap
    # trials pass the raw arrays through, because each period-finder
    # re-applies sigclip itself via the kwargs we hand it.
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)

    allpeaks = []
    allperiods = []
    allfaps = []
    alltrialbestpeaks = []

    # make sure there are enough points to calculate a spectrum
    if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:

        for ind, period, peak in zip(range(len(nbestperiods)),
                                     nbestperiods,
                                     nbestpeaks):

            LOGINFO('peak %s: running %s trials...' % (ind+1, nbootstrap))

            trialbestpeaks = []

            for _trial in range(nbootstrap):

                # get a scrambled index (resampling with replacement)
                tindex = np.random.randint(0,
                                           high=mags.size,
                                           size=mags.size)

                # get the kwargs dict out of the lspinfo; copy it so we
                # don't mutate the caller's lspinfo['kwargs'] in place
                # (the original code updated the shared dict directly)
                if 'kwargs' in lspinfo:
                    kwargs = dict(lspinfo['kwargs'])
                    # update the kwargs with some local stuff
                    kwargs.update({'magsarefluxes':magsarefluxes,
                                   'sigclip':sigclip,
                                   'verbose':False})
                else:
                    kwargs = {'magsarefluxes':magsarefluxes,
                              'sigclip':sigclip,
                              'verbose':False}

                # run the periodogram with scrambled mags and errs
                # and the appropriate keyword arguments
                lspres = LSPMETHODS[lspinfo['method']](
                    times, mags[tindex], errs[tindex],
                    **kwargs
                )
                trialbestpeaks.append(lspres['bestlspval'])

            trialbestpeaks = np.array(trialbestpeaks)
            alltrialbestpeaks.append(trialbestpeaks)

            # calculate the FAP for a trial peak j = FAP[j] =
            # (1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)
            if lspinfo['method'] != 'pdm':
                falsealarmprob = (
                    (1.0 + trialbestpeaks[trialbestpeaks > peak].size) /
                    (trialbestpeaks.size + 1.0)
                )
            # for PDM, we're looking for a peak smaller than the best peak
            # because values closer to 0.0 are more significant
            else:
                falsealarmprob = (
                    (1.0 + trialbestpeaks[trialbestpeaks < peak].size) /
                    (trialbestpeaks.size + 1.0)
                )

            LOGINFO('FAP for peak %s, period: %.6f = %.3g' % (ind+1,
                                                              period,
                                                              falsealarmprob))

            allpeaks.append(peak)
            allperiods.append(period)
            allfaps.append(falsealarmprob)

        return {'peaks':allpeaks,
                'periods':allperiods,
                'probabilities':allfaps,
                'alltrialbestpeaks':alltrialbestpeaks}

    else:
        LOGERROR('not enough mag series points to calculate periodogram')
return None | def function[bootstrap_falsealarmprob, parameter[lspinfo, times, mags, errs, nbootstrap, magsarefluxes, sigclip, npeaks]]:
constant[Calculates the false alarm probabilities of periodogram peaks using
bootstrap resampling of the magnitude time series.
The false alarm probability here is defined as::
(1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)
for each best periodogram peak j. The index i is for each bootstrap
trial. This effectively gives us a significance for the peak. Smaller FAP
means a better chance that the peak is real.
The basic idea is to get the number of trial best peaks that are larger than
the current best peak and divide this by the total number of trials. The
distribution of these trial best peaks is obtained after scrambling the mag
values and rerunning the specified periodogram method for a bunch of trials.
`lspinfo` is the output dict from a periodbase periodogram function and MUST
contain a 'method' key that corresponds to one of the keys in the LSPMETHODS
dict above. This will let this function know which periodogram function to
run to generate the bootstrap samples. The lspinfo SHOULD also have a
'kwargs' key that corresponds to the input keyword arguments for the
periodogram function as it was run originally, to keep everything the same
during the bootstrap runs. If this is missing, default values will be used.
FIXME: this may not be strictly correct; must look more into bootstrap
significance testing. Also look into if we're doing resampling correctly for
time series because the samples are not iid. Look into moving block
bootstrap.
Parameters
----------
lspinfo : dict
A dict of period-finder results from one of the period-finders in
periodbase, or your own functions, provided it's of the form and
contains at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above,
'kwargs': dict of kwargs passed to your own period-finder function}
If you provide your own function's period-finder results, you should add
a corresponding key for it to the LSPMETHODS dict above so the bootstrap
function can use it correctly. Your period-finder function should take
`times`, `mags`, errs and any extra parameters as kwargs and return a
dict of the form described above. A small worked example::
from your_module import your_periodfinder_func
from astrobase import periodbase
periodbase.LSPMETHODS['your-finder'] = your_periodfinder_func
# run a period-finder session
your_pfresults = your_periodfinder_func(times, mags, errs,
**extra_kwargs)
# run bootstrap to find FAP
falsealarm_info = periodbase.bootstrap_falsealarmprob(
your_pfresults,
times, mags, errs,
nbootstrap=250,
magsarefluxes=False,
)
times,mags,errs : np.arrays
The magnitude/flux time-series to process along with their associated
measurement errors.
nbootstrap : int
The total number of bootstrap trials to run. This is set to 250 by
default, but should probably be around 1000 for realistic results.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
npeaks : int or None
The number of peaks from the list of 'nbestlspvals' in the period-finder
result dict to run the bootstrap for. If None, all of the peaks in this
list will have their FAP calculated.
Returns
-------
dict
Returns a dict of the form::
{'peaks':allpeaks,
'periods':allperiods,
'probabilities':allfaps,
'alltrialbestpeaks':alltrialbestpeaks}
]
if <ast.BoolOp object at 0x7da1b00f4850> begin[:]
variable[nperiods] assign[=] name[npeaks]
variable[nbestperiods] assign[=] call[call[name[lspinfo]][constant[nbestperiods]]][<ast.Slice object at 0x7da18fe92f80>]
variable[nbestpeaks] assign[=] call[call[name[lspinfo]][constant[nbestlspvals]]][<ast.Slice object at 0x7da18fe92f20>]
<ast.Tuple object at 0x7da18fe90a90> assign[=] call[name[sigclip_magseries], parameter[name[times], name[mags], name[errs]]]
variable[allpeaks] assign[=] list[[]]
variable[allperiods] assign[=] list[[]]
variable[allfaps] assign[=] list[[]]
variable[alltrialbestpeaks] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18fe90820> begin[:]
for taget[tuple[[<ast.Name object at 0x7da18fe91270>, <ast.Name object at 0x7da18fe91bd0>, <ast.Name object at 0x7da18fe91210>]]] in starred[call[name[zip], parameter[call[name[range], parameter[call[name[len], parameter[name[nbestperiods]]]]], name[nbestperiods], name[nbestpeaks]]]] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[peak %s: running %s trials...] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18fe921a0>, <ast.Name object at 0x7da18fe93970>]]]]]
variable[trialbestpeaks] assign[=] list[[]]
for taget[name[_trial]] in starred[call[name[range], parameter[name[nbootstrap]]]] begin[:]
variable[tindex] assign[=] call[name[np].random.randint, parameter[constant[0]]]
if compare[constant[kwargs] in name[lspinfo]] begin[:]
variable[kwargs] assign[=] call[name[lspinfo]][constant[kwargs]]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da18fe92fe0>, <ast.Constant object at 0x7da18fe92890>, <ast.Constant object at 0x7da18fe92560>], [<ast.Name object at 0x7da18fe90130>, <ast.Name object at 0x7da18fe91930>, <ast.Constant object at 0x7da18fe911b0>]]]]
variable[lspres] assign[=] call[call[name[LSPMETHODS]][call[name[lspinfo]][constant[method]]], parameter[name[times], call[name[mags]][name[tindex]], call[name[errs]][name[tindex]]]]
call[name[trialbestpeaks].append, parameter[call[name[lspres]][constant[bestlspval]]]]
variable[trialbestpeaks] assign[=] call[name[np].array, parameter[name[trialbestpeaks]]]
call[name[alltrialbestpeaks].append, parameter[name[trialbestpeaks]]]
if compare[call[name[lspinfo]][constant[method]] not_equal[!=] constant[pdm]] begin[:]
variable[falsealarmprob] assign[=] binary_operation[binary_operation[constant[1.0] + call[name[trialbestpeaks]][compare[name[trialbestpeaks] greater[>] name[peak]]].size] / binary_operation[name[trialbestpeaks].size + constant[1.0]]]
call[name[LOGINFO], parameter[binary_operation[constant[FAP for peak %s, period: %.6f = %.3g] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18fe91ba0>, <ast.Name object at 0x7da18fe93640>, <ast.Name object at 0x7da20e955ed0>]]]]]
call[name[allpeaks].append, parameter[name[peak]]]
call[name[allperiods].append, parameter[name[period]]]
call[name[allfaps].append, parameter[name[falsealarmprob]]]
return[dictionary[[<ast.Constant object at 0x7da20e954490>, <ast.Constant object at 0x7da20e955a20>, <ast.Constant object at 0x7da20e956950>, <ast.Constant object at 0x7da20e9557e0>], [<ast.Name object at 0x7da20e955c90>, <ast.Name object at 0x7da20e955cc0>, <ast.Name object at 0x7da20e9551e0>, <ast.Name object at 0x7da20e957520>]]] | keyword[def] identifier[bootstrap_falsealarmprob] ( identifier[lspinfo] ,
identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[nbootstrap] = literal[int] ,
identifier[magsarefluxes] = keyword[False] ,
identifier[sigclip] = literal[int] ,
identifier[npeaks] = keyword[None] ):
literal[string]
keyword[if] ( identifier[npeaks] keyword[and] ( literal[int] < identifier[npeaks] < identifier[len] ( identifier[lspinfo] [ literal[string] ]))):
identifier[nperiods] = identifier[npeaks]
keyword[else] :
identifier[LOGWARNING] ( literal[string]
literal[string] %
identifier[len] ( identifier[lspinfo] [ literal[string] ]))
identifier[nperiods] = identifier[len] ( identifier[lspinfo] [ literal[string] ])
identifier[nbestperiods] = identifier[lspinfo] [ literal[string] ][: identifier[nperiods] ]
identifier[nbestpeaks] = identifier[lspinfo] [ literal[string] ][: identifier[nperiods] ]
identifier[stimes] , identifier[smags] , identifier[serrs] = identifier[sigclip_magseries] ( identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[sigclip] = identifier[sigclip] )
identifier[allpeaks] =[]
identifier[allperiods] =[]
identifier[allfaps] =[]
identifier[alltrialbestpeaks] =[]
keyword[if] identifier[len] ( identifier[stimes] )> literal[int] keyword[and] identifier[len] ( identifier[smags] )> literal[int] keyword[and] identifier[len] ( identifier[serrs] )> literal[int] :
keyword[for] identifier[ind] , identifier[period] , identifier[peak] keyword[in] identifier[zip] ( identifier[range] ( identifier[len] ( identifier[nbestperiods] )),
identifier[nbestperiods] ,
identifier[nbestpeaks] ):
identifier[LOGINFO] ( literal[string] %( identifier[ind] + literal[int] , identifier[nbootstrap] ))
identifier[trialbestpeaks] =[]
keyword[for] identifier[_trial] keyword[in] identifier[range] ( identifier[nbootstrap] ):
identifier[tindex] = identifier[np] . identifier[random] . identifier[randint] ( literal[int] ,
identifier[high] = identifier[mags] . identifier[size] ,
identifier[size] = identifier[mags] . identifier[size] )
keyword[if] literal[string] keyword[in] identifier[lspinfo] :
identifier[kwargs] = identifier[lspinfo] [ literal[string] ]
identifier[kwargs] . identifier[update] ({ literal[string] : identifier[magsarefluxes] ,
literal[string] : identifier[sigclip] ,
literal[string] : keyword[False] })
keyword[else] :
identifier[kwargs] ={ literal[string] : identifier[magsarefluxes] ,
literal[string] : identifier[sigclip] ,
literal[string] : keyword[False] }
identifier[lspres] = identifier[LSPMETHODS] [ identifier[lspinfo] [ literal[string] ]](
identifier[times] , identifier[mags] [ identifier[tindex] ], identifier[errs] [ identifier[tindex] ],
** identifier[kwargs]
)
identifier[trialbestpeaks] . identifier[append] ( identifier[lspres] [ literal[string] ])
identifier[trialbestpeaks] = identifier[np] . identifier[array] ( identifier[trialbestpeaks] )
identifier[alltrialbestpeaks] . identifier[append] ( identifier[trialbestpeaks] )
keyword[if] identifier[lspinfo] [ literal[string] ]!= literal[string] :
identifier[falsealarmprob] =(
( literal[int] + identifier[trialbestpeaks] [ identifier[trialbestpeaks] > identifier[peak] ]. identifier[size] )/
( identifier[trialbestpeaks] . identifier[size] + literal[int] )
)
keyword[else] :
identifier[falsealarmprob] =(
( literal[int] + identifier[trialbestpeaks] [ identifier[trialbestpeaks] < identifier[peak] ]. identifier[size] )/
( identifier[trialbestpeaks] . identifier[size] + literal[int] )
)
identifier[LOGINFO] ( literal[string] %( identifier[ind] + literal[int] ,
identifier[period] ,
identifier[falsealarmprob] ))
identifier[allpeaks] . identifier[append] ( identifier[peak] )
identifier[allperiods] . identifier[append] ( identifier[period] )
identifier[allfaps] . identifier[append] ( identifier[falsealarmprob] )
keyword[return] { literal[string] : identifier[allpeaks] ,
literal[string] : identifier[allperiods] ,
literal[string] : identifier[allfaps] ,
literal[string] : identifier[alltrialbestpeaks] }
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] keyword[None] | def bootstrap_falsealarmprob(lspinfo, times, mags, errs, nbootstrap=250, magsarefluxes=False, sigclip=10.0, npeaks=None):
"""Calculates the false alarm probabilities of periodogram peaks using
bootstrap resampling of the magnitude time series.
The false alarm probability here is defined as::
(1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)
for each best periodogram peak j. The index i is for each bootstrap
trial. This effectively gives us a significance for the peak. Smaller FAP
means a better chance that the peak is real.
The basic idea is to get the number of trial best peaks that are larger than
the current best peak and divide this by the total number of trials. The
distribution of these trial best peaks is obtained after scrambling the mag
values and rerunning the specified periodogram method for a bunch of trials.
`lspinfo` is the output dict from a periodbase periodogram function and MUST
contain a 'method' key that corresponds to one of the keys in the LSPMETHODS
dict above. This will let this function know which periodogram function to
run to generate the bootstrap samples. The lspinfo SHOULD also have a
'kwargs' key that corresponds to the input keyword arguments for the
periodogram function as it was run originally, to keep everything the same
during the bootstrap runs. If this is missing, default values will be used.
FIXME: this may not be strictly correct; must look more into bootstrap
significance testing. Also look into if we're doing resampling correctly for
time series because the samples are not iid. Look into moving block
bootstrap.
Parameters
----------
lspinfo : dict
A dict of period-finder results from one of the period-finders in
periodbase, or your own functions, provided it's of the form and
contains at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above,
'kwargs': dict of kwargs passed to your own period-finder function}
If you provide your own function's period-finder results, you should add
a corresponding key for it to the LSPMETHODS dict above so the bootstrap
function can use it correctly. Your period-finder function should take
`times`, `mags`, errs and any extra parameters as kwargs and return a
dict of the form described above. A small worked example::
from your_module import your_periodfinder_func
from astrobase import periodbase
periodbase.LSPMETHODS['your-finder'] = your_periodfinder_func
# run a period-finder session
your_pfresults = your_periodfinder_func(times, mags, errs,
**extra_kwargs)
# run bootstrap to find FAP
falsealarm_info = periodbase.bootstrap_falsealarmprob(
your_pfresults,
times, mags, errs,
nbootstrap=250,
magsarefluxes=False,
)
times,mags,errs : np.arrays
The magnitude/flux time-series to process along with their associated
measurement errors.
nbootstrap : int
The total number of bootstrap trials to run. This is set to 250 by
default, but should probably be around 1000 for realistic results.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
npeaks : int or None
The number of peaks from the list of 'nbestlspvals' in the period-finder
result dict to run the bootstrap for. If None, all of the peaks in this
list will have their FAP calculated.
Returns
-------
dict
Returns a dict of the form::
{'peaks':allpeaks,
'periods':allperiods,
'probabilities':allfaps,
'alltrialbestpeaks':alltrialbestpeaks}
"""
# figure out how many periods to work on
if npeaks and 0 < npeaks < len(lspinfo['nbestperiods']):
nperiods = npeaks # depends on [control=['if'], data=[]]
else:
LOGWARNING('npeaks not specified or invalid, getting FAP for all %s periodogram peaks' % len(lspinfo['nbestperiods']))
nperiods = len(lspinfo['nbestperiods'])
nbestperiods = lspinfo['nbestperiods'][:nperiods]
nbestpeaks = lspinfo['nbestlspvals'][:nperiods]
# get rid of nans first and sigclip
(stimes, smags, serrs) = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=sigclip)
allpeaks = []
allperiods = []
allfaps = []
alltrialbestpeaks = []
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and (len(serrs) > 9):
for (ind, period, peak) in zip(range(len(nbestperiods)), nbestperiods, nbestpeaks):
LOGINFO('peak %s: running %s trials...' % (ind + 1, nbootstrap))
trialbestpeaks = []
for _trial in range(nbootstrap):
# get a scrambled index
tindex = np.random.randint(0, high=mags.size, size=mags.size)
# get the kwargs dict out of the lspinfo
if 'kwargs' in lspinfo:
kwargs = lspinfo['kwargs']
# update the kwargs with some local stuff
kwargs.update({'magsarefluxes': magsarefluxes, 'sigclip': sigclip, 'verbose': False}) # depends on [control=['if'], data=['lspinfo']]
else:
kwargs = {'magsarefluxes': magsarefluxes, 'sigclip': sigclip, 'verbose': False}
# run the periodogram with scrambled mags and errs
# and the appropriate keyword arguments
lspres = LSPMETHODS[lspinfo['method']](times, mags[tindex], errs[tindex], **kwargs)
trialbestpeaks.append(lspres['bestlspval']) # depends on [control=['for'], data=[]]
trialbestpeaks = np.array(trialbestpeaks)
alltrialbestpeaks.append(trialbestpeaks)
# calculate the FAP for a trial peak j = FAP[j] =
# (1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)
if lspinfo['method'] != 'pdm':
falsealarmprob = (1.0 + trialbestpeaks[trialbestpeaks > peak].size) / (trialbestpeaks.size + 1.0) # depends on [control=['if'], data=[]]
else:
# for PDM, we're looking for a peak smaller than the best peak
# because values closer to 0.0 are more significant
falsealarmprob = (1.0 + trialbestpeaks[trialbestpeaks < peak].size) / (trialbestpeaks.size + 1.0)
LOGINFO('FAP for peak %s, period: %.6f = %.3g' % (ind + 1, period, falsealarmprob))
allpeaks.append(peak)
allperiods.append(period)
allfaps.append(falsealarmprob) # depends on [control=['for'], data=[]]
return {'peaks': allpeaks, 'periods': allperiods, 'probabilities': allfaps, 'alltrialbestpeaks': alltrialbestpeaks} # depends on [control=['if'], data=[]]
else:
LOGERROR('not enough mag series points to calculate periodogram')
return None |
def _on_permission_result(self, code, perms, results):
""" Handles a permission request result by passing it to the
handler with the given code.
"""
#: Get the handler for this request
handler = self._permission_requests.get(code, None)
if handler is not None:
del self._permission_requests[code]
#: Invoke that handler with the permission request response
handler(code, perms, results) | def function[_on_permission_result, parameter[self, code, perms, results]]:
constant[ Handles a permission request result by passing it to the
handler with the given code.
]
variable[handler] assign[=] call[name[self]._permission_requests.get, parameter[name[code], constant[None]]]
if compare[name[handler] is_not constant[None]] begin[:]
<ast.Delete object at 0x7da1b1b359c0>
call[name[handler], parameter[name[code], name[perms], name[results]]] | keyword[def] identifier[_on_permission_result] ( identifier[self] , identifier[code] , identifier[perms] , identifier[results] ):
literal[string]
identifier[handler] = identifier[self] . identifier[_permission_requests] . identifier[get] ( identifier[code] , keyword[None] )
keyword[if] identifier[handler] keyword[is] keyword[not] keyword[None] :
keyword[del] identifier[self] . identifier[_permission_requests] [ identifier[code] ]
identifier[handler] ( identifier[code] , identifier[perms] , identifier[results] ) | def _on_permission_result(self, code, perms, results):
""" Handles a permission request result by passing it to the
handler with the given code.
"""
#: Get the handler for this request
handler = self._permission_requests.get(code, None)
if handler is not None:
del self._permission_requests[code]
#: Invoke that handler with the permission request response
handler(code, perms, results) # depends on [control=['if'], data=['handler']] |
def config_attributes(self):
    """
    Helper method used by TorConfig when generating a torrc file.
    """
    # Every hidden service begins with its directory line.
    lines = [('HiddenServiceDir', str(self.dir))]
    # Only emit the group-readable flag when the running Tor supports it.
    if self.conf._supports['HiddenServiceDirGroupReadable'] \
            and self.group_readable:
        lines.append(('HiddenServiceDirGroupReadable', str(1)))
    lines.extend(('HiddenServicePort', str(port)) for port in self.ports)
    if self.version:
        lines.append(('HiddenServiceVersion', str(self.version)))
    lines.extend(
        ('HiddenServiceAuthorizeClient', str(authline))
        for authline in self.authorize_client
    )
    return lines
constant[
Helper method used by TorConfig when generating a torrc file.
]
variable[rtn] assign[=] list[[<ast.Tuple object at 0x7da20e9b1ea0>]]
if <ast.BoolOp object at 0x7da20e9b1600> begin[:]
call[name[rtn].append, parameter[tuple[[<ast.Constant object at 0x7da20e9b05b0>, <ast.Call object at 0x7da20e9b01f0>]]]]
for taget[name[port]] in starred[name[self].ports] begin[:]
call[name[rtn].append, parameter[tuple[[<ast.Constant object at 0x7da20e9b0100>, <ast.Call object at 0x7da20e9b38e0>]]]]
if name[self].version begin[:]
call[name[rtn].append, parameter[tuple[[<ast.Constant object at 0x7da1b07ceb90>, <ast.Call object at 0x7da1b07cc220>]]]]
for taget[name[authline]] in starred[name[self].authorize_client] begin[:]
call[name[rtn].append, parameter[tuple[[<ast.Constant object at 0x7da2044c1ba0>, <ast.Call object at 0x7da2044c07c0>]]]]
return[name[rtn]] | keyword[def] identifier[config_attributes] ( identifier[self] ):
literal[string]
identifier[rtn] =[( literal[string] , identifier[str] ( identifier[self] . identifier[dir] ))]
keyword[if] identifier[self] . identifier[conf] . identifier[_supports] [ literal[string] ] keyword[and] identifier[self] . identifier[group_readable] :
identifier[rtn] . identifier[append] (( literal[string] , identifier[str] ( literal[int] )))
keyword[for] identifier[port] keyword[in] identifier[self] . identifier[ports] :
identifier[rtn] . identifier[append] (( literal[string] , identifier[str] ( identifier[port] )))
keyword[if] identifier[self] . identifier[version] :
identifier[rtn] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[version] )))
keyword[for] identifier[authline] keyword[in] identifier[self] . identifier[authorize_client] :
identifier[rtn] . identifier[append] (( literal[string] , identifier[str] ( identifier[authline] )))
keyword[return] identifier[rtn] | def config_attributes(self):
"""
Helper method used by TorConfig when generating a torrc file.
"""
rtn = [('HiddenServiceDir', str(self.dir))]
if self.conf._supports['HiddenServiceDirGroupReadable'] and self.group_readable:
rtn.append(('HiddenServiceDirGroupReadable', str(1))) # depends on [control=['if'], data=[]]
for port in self.ports:
rtn.append(('HiddenServicePort', str(port))) # depends on [control=['for'], data=['port']]
if self.version:
rtn.append(('HiddenServiceVersion', str(self.version))) # depends on [control=['if'], data=[]]
for authline in self.authorize_client:
rtn.append(('HiddenServiceAuthorizeClient', str(authline))) # depends on [control=['for'], data=['authline']]
return rtn |
def graph(data):
    """Draw a plot of episode ratings vs. episode number.

    Args:
        data: dict with at least 'name' and 'rating' keys, plus whatever
            graphdata() needs to extract per-season ratings and episode
            numbers.
    """
    plt.title('{0} ({1}) '.format(data['name'], data['rating']))
    plt.xlabel('Episode Number')
    plt.ylabel('Ratings')
    # graphdata() returns parallel lists: ratings and episode numbers,
    # one entry per season.
    rf, ef = graphdata(data)
    col = ['red', 'green', 'orange']
    # One line per season, cycling through the three colours.
    # (The unused counter `k = i + 1` from the original was removed.)
    for i, (x, y) in enumerate(zip(ef, rf)):
        plt.plot(x, y, color=col[i % 3])
    x1, x2, y1, y2 = plt.axis()
    # Clamp the rating axis: never above 10, and always show at least
    # the 7..10 band so highly-rated shows are not flattened.
    y2 = 10
    if y1 > 7:
        y1 = 7
    plt.axis([x1, x2, y1, y2])
    plt.show()
constant[Draws graph of rating vs episode number]
variable[title] assign[=] binary_operation[binary_operation[binary_operation[call[name[data]][constant[name]] + constant[ (]] + call[name[data]][constant[rating]]] + constant[) ]]
call[name[plt].title, parameter[name[title]]]
call[name[plt].xlabel, parameter[constant[Episode Number]]]
call[name[plt].ylabel, parameter[constant[Ratings]]]
<ast.Tuple object at 0x7da18c4cfd60> assign[=] call[name[graphdata], parameter[name[data]]]
variable[col] assign[=] list[[<ast.Constant object at 0x7da18c4cc4c0>, <ast.Constant object at 0x7da18c4ce8c0>, <ast.Constant object at 0x7da18c4cc850>]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[rf]]]]]] begin[:]
<ast.Tuple object at 0x7da18c4cfbb0> assign[=] tuple[[<ast.Subscript object at 0x7da2044c0850>, <ast.Subscript object at 0x7da2044c15a0>]]
variable[k] assign[=] binary_operation[name[i] + constant[1]]
call[name[plt].plot, parameter[name[x], name[y]]]
<ast.Tuple object at 0x7da2044c1270> assign[=] call[name[plt].axis, parameter[]]
variable[y2] assign[=] constant[10]
if compare[name[y1] greater[>] constant[7]] begin[:]
variable[y1] assign[=] constant[7]
call[name[plt].axis, parameter[list[[<ast.Name object at 0x7da18eb57340>, <ast.Name object at 0x7da18eb56ad0>, <ast.Name object at 0x7da18eb55c30>, <ast.Name object at 0x7da18eb54b50>]]]]
call[name[plt].show, parameter[]] | keyword[def] identifier[graph] ( identifier[data] ):
literal[string]
identifier[title] = identifier[data] [ literal[string] ]+ literal[string] + identifier[data] [ literal[string] ]+ literal[string]
identifier[plt] . identifier[title] ( identifier[title] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[rf] , identifier[ef] = identifier[graphdata] ( identifier[data] )
identifier[col] =[ literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[rf] )):
identifier[x] , identifier[y] = identifier[ef] [ identifier[i] ], identifier[rf] [ identifier[i] ]
identifier[k] = identifier[i] + literal[int]
identifier[plt] . identifier[plot] ( identifier[x] , identifier[y] , identifier[color] = identifier[col] [ identifier[i] % literal[int] ])
identifier[x1] , identifier[x2] , identifier[y1] , identifier[y2] = identifier[plt] . identifier[axis] ()
identifier[y2] = literal[int]
keyword[if] identifier[y1] > literal[int] :
identifier[y1] = literal[int]
identifier[plt] . identifier[axis] ([ identifier[x1] , identifier[x2] , identifier[y1] , identifier[y2] ])
identifier[plt] . identifier[show] () | def graph(data):
"""Draws graph of rating vs episode number"""
title = data['name'] + ' (' + data['rating'] + ') '
plt.title(title)
plt.xlabel('Episode Number')
plt.ylabel('Ratings')
(rf, ef) = graphdata(data)
col = ['red', 'green', 'orange']
for i in range(len(rf)):
(x, y) = (ef[i], rf[i])
k = i + 1
plt.plot(x, y, color=col[i % 3]) # depends on [control=['for'], data=['i']]
(x1, x2, y1, y2) = plt.axis()
y2 = 10
if y1 > 7:
y1 = 7 # depends on [control=['if'], data=['y1']]
plt.axis([x1, x2, y1, y2])
plt.show() |
def ddp_frames_from_message(self, message):
    """Yield DDP messages from a raw WebSocket message.

    Replies with an ``error`` frame and stops yielding when the outer
    payload is not a valid EJSON list.  Individual frames that fail to
    parse are still yielded (as ``None``) after an ``error`` reply so
    the caller sees every frame.
    """
    # parse message set
    try:
        msgs = ejson.loads(message)
    except ValueError:
        self.reply(
            'error', error=400, reason='Data is not valid EJSON',
        )
        # PEP 479 (Python 3.7+): raising StopIteration inside a generator
        # is converted to RuntimeError; a plain `return` ends iteration.
        return
    if not isinstance(msgs, list):
        self.reply(
            'error', error=400, reason='Invalid EJSON messages',
        )
        return
    # process individual messages
    while msgs:
        # pop raw message from the list
        raw = msgs.pop(0)
        # parse message payload
        try:
            data = ejson.loads(raw)
        except (TypeError, ValueError):
            data = None
        if not isinstance(data, dict):
            self.reply(
                'error', error=400,
                reason='Invalid SockJS DDP payload',
                offendingMessage=raw,
            )
        yield data
        if msgs:
            # yield to other greenlets before processing next msg
            gevent.sleep()
constant[Yield DDP messages from a raw WebSocket message.]
<ast.Try object at 0x7da1b26ae290>
if <ast.UnaryOp object at 0x7da1b26afd60> begin[:]
call[name[self].reply, parameter[constant[error]]]
<ast.Raise object at 0x7da1b26ad4e0>
while name[msgs] begin[:]
variable[raw] assign[=] call[name[msgs].pop, parameter[constant[0]]]
<ast.Try object at 0x7da1b26ad9c0>
if <ast.UnaryOp object at 0x7da20cabedd0> begin[:]
call[name[self].reply, parameter[constant[error]]]
<ast.Yield object at 0x7da20cabf490>
if name[msgs] begin[:]
call[name[gevent].sleep, parameter[]] | keyword[def] identifier[ddp_frames_from_message] ( identifier[self] , identifier[message] ):
literal[string]
keyword[try] :
identifier[msgs] = identifier[ejson] . identifier[loads] ( identifier[message] )
keyword[except] identifier[ValueError] :
identifier[self] . identifier[reply] (
literal[string] , identifier[error] = literal[int] , identifier[reason] = literal[string] ,
)
keyword[raise] identifier[StopIteration]
keyword[if] keyword[not] identifier[isinstance] ( identifier[msgs] , identifier[list] ):
identifier[self] . identifier[reply] (
literal[string] , identifier[error] = literal[int] , identifier[reason] = literal[string] ,
)
keyword[raise] identifier[StopIteration]
keyword[while] identifier[msgs] :
identifier[raw] = identifier[msgs] . identifier[pop] ( literal[int] )
keyword[try] :
identifier[data] = identifier[ejson] . identifier[loads] ( identifier[raw] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
identifier[data] = keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[dict] ):
identifier[self] . identifier[reply] (
literal[string] , identifier[error] = literal[int] ,
identifier[reason] = literal[string] ,
identifier[offendingMessage] = identifier[raw] ,
)
keyword[yield] identifier[data]
keyword[if] identifier[msgs] :
identifier[gevent] . identifier[sleep] () | def ddp_frames_from_message(self, message):
"""Yield DDP messages from a raw WebSocket message."""
# parse message set
try:
msgs = ejson.loads(message) # depends on [control=['try'], data=[]]
except ValueError:
self.reply('error', error=400, reason='Data is not valid EJSON')
raise StopIteration # depends on [control=['except'], data=[]]
if not isinstance(msgs, list):
self.reply('error', error=400, reason='Invalid EJSON messages')
raise StopIteration # depends on [control=['if'], data=[]]
# process individual messages
while msgs:
# pop raw message from the list
raw = msgs.pop(0)
# parse message payload
try:
data = ejson.loads(raw) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
data = None # depends on [control=['except'], data=[]]
if not isinstance(data, dict):
self.reply('error', error=400, reason='Invalid SockJS DDP payload', offendingMessage=raw) # depends on [control=['if'], data=[]]
yield data
if msgs:
# yield to other greenlets before processing next msg
gevent.sleep() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def quote_as2name(unquoted_name):
    """
    Convert an AS2 name from unquoted to quoted format.
    :param unquoted_name: the as2 name in unquoted format
    :return: the as2 name, double-quoted if it contains special characters
    """
    # A name containing a backslash, double quote or space must be quoted.
    needs_quoting = re.search(r'[\\" ]', unquoted_name, re.M) is not None
    if not needs_quoting:
        return unquoted_name
    # email.utils.quote escapes embedded backslashes and double quotes.
    return '"{0}"'.format(email.utils.quote(unquoted_name))
constant[
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
]
if call[name[re].search, parameter[constant[[\\" ]], name[unquoted_name], name[re].M]] begin[:]
return[binary_operation[binary_operation[constant["] + call[name[email].utils.quote, parameter[name[unquoted_name]]]] + constant["]]] | keyword[def] identifier[quote_as2name] ( identifier[unquoted_name] ):
literal[string]
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[unquoted_name] , identifier[re] . identifier[M] ):
keyword[return] literal[string] + identifier[email] . identifier[utils] . identifier[quote] ( identifier[unquoted_name] )+ literal[string]
keyword[else] :
keyword[return] identifier[unquoted_name] | def quote_as2name(unquoted_name):
"""
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
"""
if re.search('[\\\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"' # depends on [control=['if'], data=[]]
else:
return unquoted_name |
def egress(self, envelope, http_headers, operation, binding_options):
    """Overrides the egress function for request logging.

    Args:
        envelope: An Element with the SOAP request data.
        http_headers: A dict of the current http headers.
        operation: The SoapOperation instance.
        binding_options: An options dict for the SOAP binding.

    Returns:
        A tuple of the envelope and headers.
    """
    if self._logger.isEnabledFor(logging.INFO):
        # dict views are not indexable on Python 3 (`.keys()[0]` raises
        # TypeError); next(iter(...)) grabs the first service name on
        # both Python 2 and 3.
        service_name = next(iter(operation.binding.wsdl.services.keys()))
        self._logger.info(_REQUEST_LOG_LINE, service_name, operation.name,
                          binding_options['address'])
    if self._logger.isEnabledFor(logging.DEBUG):
        http_headers_safe = http_headers.copy()
        # Never log raw credentials.
        if self._AUTHORIZATION_HEADER in http_headers_safe:
            http_headers_safe[self._AUTHORIZATION_HEADER] = self._REDACTED
        request_string = etree.tostring(envelope, pretty_print=True)
        # Scrub the developer token from the logged XML as well.
        safe_request = self._DEVELOPER_TOKEN_SUB.sub(
            self._REDACTED, request_string.decode('utf-8'))
        self._logger.debug(
            _REQUEST_XML_LOG_LINE, http_headers_safe, safe_request)
    return envelope, http_headers
constant[Overrides the egress function ror request logging.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
]
if call[name[self]._logger.isEnabledFor, parameter[name[logging].INFO]] begin[:]
variable[service_name] assign[=] call[call[name[operation].binding.wsdl.services.keys, parameter[]]][constant[0]]
call[name[self]._logger.info, parameter[name[_REQUEST_LOG_LINE], name[service_name], name[operation].name, call[name[binding_options]][constant[address]]]]
if call[name[self]._logger.isEnabledFor, parameter[name[logging].DEBUG]] begin[:]
variable[http_headers_safe] assign[=] call[name[http_headers].copy, parameter[]]
if compare[name[self]._AUTHORIZATION_HEADER in name[http_headers_safe]] begin[:]
call[name[http_headers_safe]][name[self]._AUTHORIZATION_HEADER] assign[=] name[self]._REDACTED
variable[request_string] assign[=] call[name[etree].tostring, parameter[name[envelope]]]
variable[safe_request] assign[=] call[name[self]._DEVELOPER_TOKEN_SUB.sub, parameter[name[self]._REDACTED, call[name[request_string].decode, parameter[constant[utf-8]]]]]
call[name[self]._logger.debug, parameter[name[_REQUEST_XML_LOG_LINE], name[http_headers_safe], name[safe_request]]]
return[tuple[[<ast.Name object at 0x7da1b1b0db40>, <ast.Name object at 0x7da1b1b0dc30>]]] | keyword[def] identifier[egress] ( identifier[self] , identifier[envelope] , identifier[http_headers] , identifier[operation] , identifier[binding_options] ):
literal[string]
keyword[if] identifier[self] . identifier[_logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[INFO] ):
identifier[service_name] = identifier[operation] . identifier[binding] . identifier[wsdl] . identifier[services] . identifier[keys] ()[ literal[int] ]
identifier[self] . identifier[_logger] . identifier[info] ( identifier[_REQUEST_LOG_LINE] , identifier[service_name] , identifier[operation] . identifier[name] ,
identifier[binding_options] [ literal[string] ])
keyword[if] identifier[self] . identifier[_logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[DEBUG] ):
identifier[http_headers_safe] = identifier[http_headers] . identifier[copy] ()
keyword[if] identifier[self] . identifier[_AUTHORIZATION_HEADER] keyword[in] identifier[http_headers_safe] :
identifier[http_headers_safe] [ identifier[self] . identifier[_AUTHORIZATION_HEADER] ]= identifier[self] . identifier[_REDACTED]
identifier[request_string] = identifier[etree] . identifier[tostring] ( identifier[envelope] , identifier[pretty_print] = keyword[True] )
identifier[safe_request] = identifier[self] . identifier[_DEVELOPER_TOKEN_SUB] . identifier[sub] (
identifier[self] . identifier[_REDACTED] , identifier[request_string] . identifier[decode] ( literal[string] ))
identifier[self] . identifier[_logger] . identifier[debug] (
identifier[_REQUEST_XML_LOG_LINE] , identifier[http_headers_safe] , identifier[safe_request] )
keyword[return] identifier[envelope] , identifier[http_headers] | def egress(self, envelope, http_headers, operation, binding_options):
"""Overrides the egress function ror request logging.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
"""
if self._logger.isEnabledFor(logging.INFO):
service_name = operation.binding.wsdl.services.keys()[0]
self._logger.info(_REQUEST_LOG_LINE, service_name, operation.name, binding_options['address']) # depends on [control=['if'], data=[]]
if self._logger.isEnabledFor(logging.DEBUG):
http_headers_safe = http_headers.copy()
if self._AUTHORIZATION_HEADER in http_headers_safe:
http_headers_safe[self._AUTHORIZATION_HEADER] = self._REDACTED # depends on [control=['if'], data=['http_headers_safe']]
request_string = etree.tostring(envelope, pretty_print=True)
safe_request = self._DEVELOPER_TOKEN_SUB.sub(self._REDACTED, request_string.decode('utf-8'))
self._logger.debug(_REQUEST_XML_LOG_LINE, http_headers_safe, safe_request) # depends on [control=['if'], data=[]]
return (envelope, http_headers) |
def emojis(self):
    """Retrieves a dictionary of all of the emojis that GitHub supports.
    :returns: dictionary where the key is what would be in between the
        colons and the value is the URL to the image, e.g., ::
        {
            '+1': 'https://github.global.ssl.fastly.net/images/...',
            # ...
        }
    """
    # Build the endpoint URL, issue the GET, and decode the 200 response.
    return self._json(self._get(self._build_url('emojis')), 200)
constant[Retrieves a dictionary of all of the emojis that GitHub supports.
:returns: dictionary where the key is what would be in between the
colons and the value is the URL to the image, e.g., ::
{
'+1': 'https://github.global.ssl.fastly.net/images/...',
# ...
}
]
variable[url] assign[=] call[name[self]._build_url, parameter[constant[emojis]]]
return[call[name[self]._json, parameter[call[name[self]._get, parameter[name[url]]], constant[200]]]] | keyword[def] identifier[emojis] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[_build_url] ( literal[string] )
keyword[return] identifier[self] . identifier[_json] ( identifier[self] . identifier[_get] ( identifier[url] ), literal[int] ) | def emojis(self):
"""Retrieves a dictionary of all of the emojis that GitHub supports.
:returns: dictionary where the key is what would be in between the
colons and the value is the URL to the image, e.g., ::
{
'+1': 'https://github.global.ssl.fastly.net/images/...',
# ...
}
"""
url = self._build_url('emojis')
return self._json(self._get(url), 200) |
def login_required(func):
    """
    A wrapper around the flask_login.login_required.
    But it also checks the presence of the decorator: @no_login_required
    On a "@login_required" class, method containing "@no_login_required" will
    still be able to access without authentication
    :param func:
    :return:
    """
    @functools.wraps(func)
    def decorated_view(*args, **kwargs):
        # Views explicitly tagged @no_login_required bypass the auth check.
        exempt = "no_login_required" in ext.utils.get_decorators_list(func)
        if not exempt and ext.user_not_authenticated():
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return decorated_view
constant[
A wrapper around the flask_login.login_required.
But it also checks the presence of the decorator: @no_login_required
On a "@login_required" class, method containing "@no_login_required" will
still be able to access without authentication
:param func:
:return:
]
def function[decorated_view, parameter[]]:
if <ast.BoolOp object at 0x7da1b1fa8f70> begin[:]
return[call[name[current_app].login_manager.unauthorized, parameter[]]]
return[call[name[func], parameter[<ast.Starred object at 0x7da1b1faa6e0>]]]
return[name[decorated_view]] | keyword[def] identifier[login_required] ( identifier[func] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[decorated_view] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[ext] . identifier[utils] . identifier[get_decorators_list] ( identifier[func] ) keyword[and] identifier[ext] . identifier[user_not_authenticated] ():
keyword[return] identifier[current_app] . identifier[login_manager] . identifier[unauthorized] ()
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[decorated_view] | def login_required(func):
"""
A wrapper around the flask_login.login_required.
But it also checks the presence of the decorator: @no_login_required
On a "@login_required" class, method containing "@no_login_required" will
still be able to access without authentication
:param func:
:return:
"""
@functools.wraps(func)
def decorated_view(*args, **kwargs):
if 'no_login_required' not in ext.utils.get_decorators_list(func) and ext.user_not_authenticated():
return current_app.login_manager.unauthorized() # depends on [control=['if'], data=[]]
return func(*args, **kwargs)
return decorated_view |
def _proper_namespace(self, owner=None, app=None, sharing=None):
"""Produce a namespace sans wildcards for use in entity requests.
This method tries to fill in the fields of the namespace which are `None`
or wildcard (`'-'`) from the entity's namespace. If that fails, it uses
the service's namespace.
:param owner:
:param app:
:param sharing:
:return:
"""
if owner is None and app is None and sharing is None: # No namespace provided
if self._state is not None and 'access' in self._state:
return (self._state.access.owner,
self._state.access.app,
self._state.access.sharing)
else:
return (self.service.namespace['owner'],
self.service.namespace['app'],
self.service.namespace['sharing'])
else:
return (owner,app,sharing) | def function[_proper_namespace, parameter[self, owner, app, sharing]]:
constant[Produce a namespace sans wildcards for use in entity requests.
This method tries to fill in the fields of the namespace which are `None`
or wildcard (`'-'`) from the entity's namespace. If that fails, it uses
the service's namespace.
:param owner:
:param app:
:param sharing:
:return:
]
if <ast.BoolOp object at 0x7da1b18ac910> begin[:]
if <ast.BoolOp object at 0x7da1b18ada50> begin[:]
return[tuple[[<ast.Attribute object at 0x7da1b18af400>, <ast.Attribute object at 0x7da1b1950a00>, <ast.Attribute object at 0x7da1b1953a00>]]] | keyword[def] identifier[_proper_namespace] ( identifier[self] , identifier[owner] = keyword[None] , identifier[app] = keyword[None] , identifier[sharing] = keyword[None] ):
literal[string]
keyword[if] identifier[owner] keyword[is] keyword[None] keyword[and] identifier[app] keyword[is] keyword[None] keyword[and] identifier[sharing] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[_state] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] keyword[in] identifier[self] . identifier[_state] :
keyword[return] ( identifier[self] . identifier[_state] . identifier[access] . identifier[owner] ,
identifier[self] . identifier[_state] . identifier[access] . identifier[app] ,
identifier[self] . identifier[_state] . identifier[access] . identifier[sharing] )
keyword[else] :
keyword[return] ( identifier[self] . identifier[service] . identifier[namespace] [ literal[string] ],
identifier[self] . identifier[service] . identifier[namespace] [ literal[string] ],
identifier[self] . identifier[service] . identifier[namespace] [ literal[string] ])
keyword[else] :
keyword[return] ( identifier[owner] , identifier[app] , identifier[sharing] ) | def _proper_namespace(self, owner=None, app=None, sharing=None):
"""Produce a namespace sans wildcards for use in entity requests.
This method tries to fill in the fields of the namespace which are `None`
or wildcard (`'-'`) from the entity's namespace. If that fails, it uses
the service's namespace.
:param owner:
:param app:
:param sharing:
:return:
"""
if owner is None and app is None and (sharing is None): # No namespace provided
if self._state is not None and 'access' in self._state:
return (self._state.access.owner, self._state.access.app, self._state.access.sharing) # depends on [control=['if'], data=[]]
else:
return (self.service.namespace['owner'], self.service.namespace['app'], self.service.namespace['sharing']) # depends on [control=['if'], data=[]]
else:
return (owner, app, sharing) |
def wait_for_host(self, host):
    """Throttle requests to one host."""
    now = time.time()
    due = self.times.get(host)
    # If this host's previous request reserved a future slot, sleep it off.
    if due is not None and due > now:
        time.sleep(due - now)
        now = time.time()
    # Reserve the next slot a random delay from now.
    delay = random.uniform(self.wait_time_min, self.wait_time_max)
    self.times[host] = now + delay
constant[Throttle requests to one host.]
variable[t] assign[=] call[name[time].time, parameter[]]
if compare[name[host] in name[self].times] begin[:]
variable[due_time] assign[=] call[name[self].times][name[host]]
if compare[name[due_time] greater[>] name[t]] begin[:]
variable[wait] assign[=] binary_operation[name[due_time] - name[t]]
call[name[time].sleep, parameter[name[wait]]]
variable[t] assign[=] call[name[time].time, parameter[]]
variable[wait_time] assign[=] call[name[random].uniform, parameter[name[self].wait_time_min, name[self].wait_time_max]]
call[name[self].times][name[host]] assign[=] binary_operation[name[t] + name[wait_time]] | keyword[def] identifier[wait_for_host] ( identifier[self] , identifier[host] ):
literal[string]
identifier[t] = identifier[time] . identifier[time] ()
keyword[if] identifier[host] keyword[in] identifier[self] . identifier[times] :
identifier[due_time] = identifier[self] . identifier[times] [ identifier[host] ]
keyword[if] identifier[due_time] > identifier[t] :
identifier[wait] = identifier[due_time] - identifier[t]
identifier[time] . identifier[sleep] ( identifier[wait] )
identifier[t] = identifier[time] . identifier[time] ()
identifier[wait_time] = identifier[random] . identifier[uniform] ( identifier[self] . identifier[wait_time_min] , identifier[self] . identifier[wait_time_max] )
identifier[self] . identifier[times] [ identifier[host] ]= identifier[t] + identifier[wait_time] | def wait_for_host(self, host):
"""Throttle requests to one host."""
t = time.time()
if host in self.times:
due_time = self.times[host]
if due_time > t:
wait = due_time - t
time.sleep(wait)
t = time.time() # depends on [control=['if'], data=['due_time', 't']] # depends on [control=['if'], data=['host']]
wait_time = random.uniform(self.wait_time_min, self.wait_time_max)
self.times[host] = t + wait_time |
def get_queryset(self, raw_query=False):
# Call the base implementation
if not self.haystack:
queryset = super(GenList, self).get_queryset()
else:
queryset = SearchQuerySet().models(self.model)
# Optional tweak methods
Mfields = None
MlimitQ = None
MsearchF = None
MsearchQ = None
if hasattr(self, '__fields__'):
Mfields = self.__fields__
if hasattr(self, '__limitQ__'):
MlimitQ = self.__limitQ__
if hasattr(self, '__searchF__'):
MsearchF = self.__searchF__
if hasattr(self, '__searchQ__'):
MsearchQ = self.__searchQ__
self._viewname = self.__module__
# Link to our context and kwargs
context = self.__context
# Update kwargs if json key is present
jsonquerytxt = self.request.GET.get('json', self.request.POST.get('json', None))
if jsonquerytxt is not None:
# Decode json
try:
jsonquery = json.loads(jsonquerytxt)
except json.JSONDecodeError as e:
raise IOError("json argument in your GET/POST parameters is not a valid JSON string")
# Set json context
jsondata = self.set_context_json(jsonquery)
# Get listid
listid = jsondata.pop('listid')
# Get elementid
elementid = jsondata.pop('elementid')
else:
listid = None
elementid = None
jsondata = {}
jsonquery = {}
# Build info for GenModel methods
MODELINF = MODELINFO(self.model, self._appname, self._modelname, self._viewname, self.request, self.user, self.profile, jsonquery, Mfields, MlimitQ, MsearchF, MsearchQ, listid, elementid, self.__kwargs)
# Process the filter
context['filters'] = []
context['filters_obj'] = {}
# Get field list
fields = getattr(self, 'fields', MODELINF.fields())
# Save GET values
context['get'] = []
context['getval'] = {}
for name in jsondata:
struct = {}
struct['name'] = name
if name == 'rowsperpage':
struct['value'] = self.default_rows_per_page
elif name == 'page':
struct['value'] = 1
elif name == 'pages_to_bring':
struct['value'] = 1
else:
struct['value'] = jsondata[name]
context['get'].append(struct)
context['getval'][name] = struct['value']
# Filter on limits
limits = MODELINF.limitQ()
qobjects = None
distinct = False
for name in limits:
if name == 'i_distinct' or name == 'e_distinct':
distinct = True
else:
if qobjects:
qobjects &= limits[name]
else:
qobjects = limits[name]
if qobjects:
queryset = queryset.filter(qobjects)
if hasattr(self, 'annotations'):
if not self.haystack:
# Prepare annotations
if callable(self.annotations):
anot = self.annotations(MODELINF)
else:
anot = self.annotations
# Set annotations
queryset = queryset.annotate(**anot)
else:
raise IOError("Haystack doesn't support annotate")
if distinct:
queryset = queryset.distinct()
# Filters on fields requested by the user request
try:
filters_get = jsondata.get('filters', '{}')
if type(filters_get) == dict:
filters_by_struct = filters_get
else:
filters_by_struct = json.loads(str(filters_get))
except Exception:
filters_by_struct = []
listfilters = {}
# Autofilter system
if self.autofiltering:
listfilters.update(self.autoSearchF(MODELINF))
# List of filters from the MODELINF
listfilters.update(MODELINF.searchF())
# Process the search
filters_struct = {}
for key in filters_by_struct:
# Get the value of the original filter
value = filters_by_struct[key]
# If there is something to filter, filter is not being changed and filter is known by the class
try:
value = int(value)
except ValueError:
pass
except TypeError:
pass
# ORIG if (key in listfilters) and ((value>0) or (type(value) == list)):
# V1 if (value and type(value) == int and key in listfilters) and ((value > 0) or (type(value) == list)):
# V2 if (value and type(value) == int and key in listfilters) or ((value > 0) or (type(value) == list)):
if value and key in listfilters:
# Add the filter to the queryset
rule = listfilters[key]
# Get type
typekind = rule[2]
if type(typekind) == list:
# Compatibility: set typekind and fv in the old fassion
if type(value) == int:
fv = typekind[value - 1][0]
queryset = queryset.filter(rule[1](fv))
typekind = 'select'
elif typekind == 'select':
# Get selected value from rule
if type(value) == int:
fv = rule[3][value - 1][0]
queryset = queryset.filter(rule[1](fv))
elif typekind in ['multiselect', 'multidynamicselect']:
# Get selected values from rule
if type(value) in (list, tuple) and len(value):
qobjects = Q(rule[1](value[0]))
for fvt in value[1:]:
qobjects |= Q(rule[1](fvt))
queryset = queryset.filter(qobjects)
elif typekind in ['daterange', 'input']:
# No arguments
fv = value
queryset = queryset.filter(rule[1](fv))
elif typekind in ['checkbox', ]:
fv = value
queryset = queryset.filter(rule[1](fv))
else:
raise IOError("Wrong typekind '{0}' for filter '{1}'".format(typekind, key))
# Save it in the struct as a valid filter
filters_struct[key] = value
# Rewrite filters_json updated
filters_json = json.dumps(filters_struct)
# Build the clean get for filters
get = context['get']
filters_get = []
for element in get:
if element['name'] not in ['filters']:
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
filters_get.append(struct)
# Add filter_json
struct = {}
struct['name'] = 'filters'
struct['value'] = filters_json
filters_get.append(struct)
context['filters_get'] = filters_get
# Get the list of filters allowed by this class
filters = []
for key in listfilters:
typekind = listfilters[key][2]
if type(typekind) == list:
                # Compatibility: set typekind and fv in the old fashion
choice = [_('All')]
for value in typekind:
choice.append(value[1])
# Decide the choosen field
if key in filters_struct.keys():
value = int(filters_struct[key])
else:
value = 0
typekind = 'select'
argument = choice
elif typekind == 'select':
typevalue = listfilters[key][3]
choice = [_('All')]
for value in typevalue:
choice.append(value[1])
# Decide the choosen field
if key in filters_struct.keys():
value = int(filters_struct[key])
else:
value = 0
# Set choice as the command's argument
argument = choice
elif typekind in ['multiselect', 'multidynamicselect']:
if typekind == 'multiselect':
typevalue = listfilters[key][3]
choice = []
for value in typevalue:
choice.append({'id': value[0], 'label': value[1]})
else:
choice = list(listfilters[key][3:])
choice[1] = reverse_lazy(choice[1], kwargs={'search': 'a'})[:-1]
# Decide the choosen field
if key in filters_struct.keys():
value = filters_struct[key]
else:
value = []
# Set choice as the command's argument
argument = choice
elif typekind in ['daterange', 'input']:
                # Commands without arguments
argument = None
# Get the selected value
if key in filters_struct.keys():
value = filters_struct[key]
else:
value = None
elif typekind in ['checkbox']:
                # Commands without arguments
argument = None
# Get the selected value
if key in filters_struct.keys():
value = filters_struct[key]
else:
value = None
else:
raise IOError("Wrong typekind '{0}' for filter '{1}'".format(typekind, key))
# Build filtertuple
filtertuple = (key, listfilters[key][0], typekind, argument, value)
# Save this filter in the corresponding list
filters.append(filtertuple)
# Save all filters
context['filters'] = filters
# Search filter button
search_filter_button = jsondata.get('search_filter_button', None)
if search_filter_button is not None:
self.search_filter_button = search_filter_button
# Search text in all fields
search = jsondata.get('search', '').lower()
# Remove extra spaces
newlen = len(search)
oldlen = 0
while newlen != oldlen:
oldlen = newlen
search = search.replace(" ", " ")
newlen = len(search)
if len(search) > 0 and search[0] == ' ':
search = search[1:]
if len(search) > 0 and search[-1] == ' ':
search = search[:-1]
# Save in context
context['search'] = search
datetimeQ = None
if len(search) > 0:
# Get ID
tid = None
if 'id:' in search:
tid = search.split(":")[1].split(" ")[0]
# Decide if it is what we expect
try:
tid = int(tid)
except Exception:
tid = None
# Remove the token
if tid:
search = search.replace("id:%s" % (tid), '')
search = search.replace(" ", " ")
# Get PK
tpk = None
if 'pk:' in search:
tpk = search.split(":")[1].split(" ")[0]
# Decide if it is what we expect
try:
tpk = int(tpk)
except Exception:
tpk = None
# Remove the token
if tpk:
search = search.replace("pk:%s" % (tpk), '')
search = search.replace(" ", " ")
# Spaces on front and behind
if len(search) > 0 and search[0] == ' ':
search = search[1:]
if len(search) > 0 and search[-1] == ' ':
search = search[:-1]
searchs = {}
# Autofilter system
if self.autofiltering:
searchs.update(self.autoSearchQ(MODELINF, search))
# Fields to search in from the MODELINF
tmp_search = MODELINF.searchQ(search)
if type(tmp_search) == dict:
searchs.update(tmp_search)
else:
searchs['autoSearchQ'] &= tmp_search
qobjects = {}
qobjectsCustom = {}
for name in searchs:
# Extract the token
qtoken = searchs[name]
if qtoken == 'datetime':
# If it is a datetime
datetimeQ = name
continue
elif (type(qtoken) == str) or (type(qtoken) == list):
# Prepare query
if type(qtoken) == tuple:
(query, func) = qtoken
else:
                    def lambdax(x):
                        # Identity fall-through: when the search rule supplies no
                        # transform callable, each search word is used as-is in the
                        # Q-object lookup built below.
                        return x
func = lambdax
query = qtoken
# If it is a string
if search:
for word in search.split(" "):
# If there is a word to process
if len(word) > 0:
# Build the key for the arguments and set the word as a value for the Q search
if word[0] == '-':
# If negated request
# key="-{}".format(hashlib.md5(word[1:].encode()).hexdigest())
qdict = {'{}'.format(query): func(word[1:])}
qtokens_element = ~Q(**qdict)
else:
# If positive request
# key="-{}".format(hashlib.md5(word[1:].encode()).hexdigest())
qdict = {'{}'.format(query): func(word)}
qtokens_element = Q(**qdict)
# Safe the token
if word in qobjects:
qobjects[word].append(qtokens_element)
else:
qobjects[word] = [qtokens_element]
else:
if qobjectsCustom:
qobjectsCustom |= searchs[name]
else:
qobjectsCustom = searchs[name]
# Build positive/negative
qdata = None
if search and qobjects:
for word in search.split(" "):
if word.split(":")[0] not in ['id', 'pk']:
if word[0] == '-':
negative = True
else:
negative = False
qword = None
for token in qobjects[word]:
if qword:
if negative:
qword &= token
else:
qword |= token
else:
qword = token
if qword:
if qdata:
qdata &= qword
else:
qdata = qword
# Process ID/PK specific searches
if tid:
queryset = queryset.filter(id=tid)
if tpk:
queryset = queryset.filter(pk=tpk)
# Add custom Q-objects
if qobjectsCustom:
queryset = queryset.filter(qobjectsCustom)
# Add word by word search Q-objects
if qdata:
queryset = queryset.filter(qdata)
else:
# Look for datetimeQ field
searchs = MODELINF.searchQ(search)
for name in searchs:
if (searchs[name] == 'datetime'):
datetimeQ = name
continue
# Datetime Q
context['datetimeQ'] = datetimeQ
if datetimeQ:
            # Initialization
f = {}
f['year'] = (1900, 2100, False)
f['month'] = (1, 12, False)
f['day'] = (1, 31, False)
f['hour'] = (0, 23, False)
f['minute'] = (0, 59, False)
f['second'] = (0, 59, False)
date_elements = [None, 'year', 'month', 'day', 'hour', 'minute', 'second']
# Get configuration of dates and set limits to the queryset
for element in date_elements[1:]:
value = jsondata.get(element, None)
if value:
f[element] = (int(value), int(value), True)
if f['year'][2] and f['month'][2] and not f['day'][2]:
(g, lastday) = calendar.monthrange(f['year'][1], f['month'][1])
f['day'] = (f['day'][0], lastday, f['day'][2])
# Limits
date_min = datetime.datetime(f['year'][0], f['month'][0], f['day'][0], f['hour'][0], f['minute'][0], f['second'][0])
date_max = datetime.datetime(f['year'][1], f['month'][1], f['day'][1], f['hour'][1], f['minute'][1], f['second'][1])
qarg1 = {"{}__gte".format(datetimeQ): date_min}
qarg2 = {"{}__lte".format(datetimeQ): date_max}
qarg3 = {datetimeQ: None}
queryset = queryset.filter((Q(**qarg1) & Q(**qarg2)) | Q(**qarg3))
# Find actual deepness
deepness_index = 0
for element in date_elements[1:]:
if f[element][2]:
deepness_index += 1
else:
break
# Get results from dates to set the new order
exclusion = {}
exclusion[datetimeQ] = None
date_results = queryset.exclude(**exclusion).values_list(datetimeQ, flat=True)
            # Remove empty results (useful when the date is allowed to be empty)
if f['day'][0] != f['day'][1]:
if f['month'][0] == f['month'][1]:
date_results = date_results.datetimes(datetimeQ, 'day')
elif f['year'][0] == f['year'][1]:
date_results = date_results.datetimes(datetimeQ, 'month')
else:
date_results = date_results.datetimes(datetimeQ, 'year')
get = context['get']
context['datefilter'] = {}
# Save the deepness
if deepness_index + 1 == len(date_elements):
context['datefilter']['deepness'] = None
else:
context['datefilter']['deepness'] = date_elements[deepness_index + 1]
context['datefilter']['deepnessback'] = []
context['datefilter']['deepnessinit'] = []
for element in get:
if (not element['name'] in date_elements):
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
context['datefilter']['deepnessinit'].append(struct)
context['datefilter']['deepnessback'].append(struct)
elif (element['name'] != date_elements[deepness_index] and f[element['name']][2]):
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
context['datefilter']['deepnessback'].append(struct)
# Build the list of elements
context['datefilter']['data'] = []
for element in date_results:
# Save the data
context['datefilter']['data'].append(element.timetuple()[deepness_index])
context['datefilter']['data'] = list(set(context['datefilter']['data']))
context['datefilter']['data'].sort()
# Prepare the rightnow result
if self.json_worker:
rightnow = {}
for key in ['year', 'month', 'day', 'hour', 'minute', 'second']:
rightnow[key] = (f[key][2] and f[key][0]) or None
else:
if f['month'][2]:
month = monthname(f['month'][0])
else:
month = '__'
if f['hour'][2]:
rightnow = string_concat(grv(f, 'day'), "/", month, "/", grv(f, 'year'), " ", grv(f, 'hour'), ":", grv(f, 'minute'), ":", grv(f, 'second'))
else:
rightnow = string_concat(grv(f, 'day'), "/", month, "/", grv(f, 'year'))
context['datefilter']['rightnow'] = rightnow
else:
context['datefilter'] = None
# Distinct
# queryset=queryset.distinct()
# Ordering field autofill
try:
order_get = jsondata.get('ordering', [])
if type(order_get) == list:
order_by_struct = order_get
else:
order_by_struct = json.loads(str(order_get))
except Exception:
order_by_struct = []
order_by = []
position = {}
counter = 1
# Build the columns structure and the fields list
context['columns'] = []
self.__fields = []
for value in fields:
self.__fields.append(value[0])
# Auto build rules
self.__autorules = self.autorules()
for order in order_by_struct:
name = list(order.keys())[0]
lbl = None
# use __autofields for ordering by alias
for field in self.__autorules:
if "{}:".format(name) in field:
name = field.split(":")[0]
lbl = field.split(":")[1]
break
direction = order[name]
if lbl and not lbl.startswith('get_') and not lbl.endswith('_display'):
name = lbl
if direction == 'asc':
order_by.append("%s" % (remove_getdisplay(name)))
elif direction == 'desc':
order_by.append("-%s" % (remove_getdisplay(name)))
position[name] = counter
counter += 1
if order_by:
queryset = queryset.order_by(*order_by)
else:
if hasattr(self, 'default_ordering'):
if type(self.default_ordering) == list:
queryset = queryset.order_by(*self.default_ordering)
else:
queryset = queryset.order_by(self.default_ordering)
else:
queryset = queryset.order_by("pk")
# Ordering field autofill
sort = {}
for value in fields:
# Get values
if value[0]:
name = value[0].split(":")[0]
order_key = name
type_field = self.get_type_field(value[0].split(":")[-1])
else:
name = value[0]
# not usable fields, example: fields.append((None, _('Selector'))) in airportslist
hash_key = hashlib.md5(value[1].encode()).hexdigest()
order_key = "#{}".format(hash_key)
type_field = None
publicname = value[1]
if len(value) > 2:
size = value[2]
else:
size = None
if len(value) > 3:
align = value[3]
else:
align = None
# filter column
if len(value) > 4:
filter_column = value[4]
else:
filter_column = None
# Process ordering
ordering = []
found = False
for order in order_by_struct:
subname = list(order.keys())[0]
direction = order[subname]
if order_key == subname:
if direction == 'desc':
direction = ''
sort_class = 'headerSortUp'
elif direction == 'asc':
direction = 'desc'
sort_class = 'headerSortDown'
else:
sort_class = ''
direction = 'asc'
found = True
if direction == 'asc' or direction == 'desc':
ordering.append({subname: direction})
if not found:
ordering.append({order_key: 'asc'})
sort_class = ''
# Save the ordering method
sort[order_key] = {}
sort[order_key]['id'] = name
sort[order_key]['name'] = publicname
sort[order_key]['align'] = align
sort[order_key]['type'] = type_field
if filter_column:
sort[order_key]['filter'] = filter_column
if jsonquery is None:
sort[order_key]['size'] = size
sort[order_key]['class'] = sort_class
if order_key and order_key[0] != '*':
sort[order_key]['ordering'] = json.dumps(ordering).replace('"', '\\"')
if order_key in position:
sort[order_key]['position'] = position[order_key]
# Save ordering in the context
if jsonquery is not None:
context['ordering'] = order_by_struct
# Build the columns structure and the fields list
context['columns'] = []
for value in fields:
field = value[0]
if field:
context['columns'].append(sort[field.split(":")[0]])
else:
hash_key = hashlib.md5(value[1].encode()).hexdigest()
field = "#{}".format(hash_key)
# selector
context['columns'].append(sort[field])
# Auto build rules
# self.__autorules = self.autorules()
# Columns
self.__columns = ['pk']
# self.__columns = ['id']
self.__foreignkeys = []
for column in self.model._meta.fields:
self.__columns.append(column.name)
if column.is_relation:
self.__foreignkeys.append(column.name)
# Localfields
self.__related_objects = []
for f in self.model._meta.related_objects:
self.__related_objects.append(f.name)
# Model properties
model_properties = self.__columns + self.__related_objects
# === Queryset optimization ===
# Get autorules ordered
autorules_keys = sorted(self.__autorules.keys())
#
query_renamed = {}
query_optimizer = []
query_verifier = []
query_select_related = []
fields_related_model = []
for rule in autorules_keys:
found = False
# name rule origin
rule_org = rule
# If rule is an alias
rulesp = rule.split(":")
if len(rulesp) == 2:
(alias, rule) = rulesp
else:
alias = rule
# If rule has a foreign key path (check first level attributes only, nfrule = no foreign rule)
nfrule = rule.split("__")
do_select_related = False
model = self.model
if len(nfrule) > 1:
ruletmp = []
field_related_model = []
for n in nfrule:
if model:
for fi in model._meta.fields:
if fi.name == n:
found = True
ruletmp.append(n)
if fi.is_relation:
model = fi.related_model
field_related_model.append(fi.name)
else:
do_select_related = True
model = None
break
if not found or model is None:
break
if field_related_model:
fields_related_model.append("__".join(field_related_model))
if ruletmp != nfrule:
do_select_related = False
elif nfrule[0] in [x.name for x in self.model._meta.fields] or nfrule[0] == 'pk':
found = True
for fi in model._meta.fields:
if fi.name == nfrule[0] and fi.is_relation:
fields_related_model.append(nfrule[0])
if not self.haystack and (do_select_related or rule in self.__foreignkeys):
# Compatibility with Django 1.10
if "__" in rule:
query_select_related.append("__".join(rule.split('__')[0:-1]))
else:
query_select_related.append(rule)
nfrule = nfrule[0]
if nfrule in self.__columns:
############################
                # The restriction below is left commented out: if it is kept and
                # there is a FK, .extra/.values would "never" be used.
                # It is not removed entirely in case a failure shows up later on
                # and this needs to be parameterized in some other way.
############################
# if nfrule not in self.__foreignkeys:
if rule not in fields_related_model:
# Save verifier name
query_verifier.append(rule_org)
# Save renamed field
if alias != rule:
query_renamed[alias] = F(rule)
query_optimizer.append(alias)
else:
# Save final name
query_optimizer.append(rule)
if hasattr(self, 'annotations'):
# Prepare annotations
if callable(self.annotations):
anot = self.annotations(MODELINF)
else:
anot = self.annotations
# Process annotations
for xnfrule in anot.keys():
found = True
if xnfrule not in query_verifier:
query_verifier.append(xnfrule)
query_optimizer.append(xnfrule)
if not found:
query_renamed = {}
query_optimizer = []
query_verifier = []
query_select_related = []
break
for rename in query_renamed.keys():
if rename in model_properties:
if rename in self.__foreignkeys:
msg = "Invalid alias. The alias '{}' is a foreign key from model '{}' inside app '{}'"
elif rename in self.__columns:
msg = "Invalid alias. The alias '{}' is a columns from model '{}' inside app '{}'"
elif rename in self.__related_objects:
msg = "Invalid alias. The alias '{}' is a related object from model '{}' inside app '{}'"
raise Exception(msg.format(rename, self._modelname, self._appname))
if found and query_select_related:
queryset = queryset.select_related(*query_select_related)
# If we got the query_optimizer to optimize everything, use it
# use_extra = False
query_verifier.sort()
autorules_keys.sort()
if found and query_verifier == autorules_keys:
# use_extra = True
if query_renamed:
# queryset=queryset.extra(select=query_renamed).values(*query_optimizer)
queryset = queryset.annotate(**query_renamed).values(*query_optimizer)
else:
queryset = queryset.values(*query_optimizer)
# Custom queryset
if hasattr(self, 'custom_queryset'):
queryset = self.custom_queryset(queryset, MODELINF)
# Internal Codenerix DEBUG for Querysets
"""
raise Exception("FOUND: {} -- __foreignkeys: {} -- __columns: {} -- autorules_keys: {} -- \
query_select_related: {} -- query_renamed: {} -- query_optimizer: {} | use_extra: {}| -- \
query: {} -- meta.fields: {} -- fields_related_model: {} -- query_verifier: {}\
-- ??? {} == {}".format(
found,
self.__foreignkeys, self.__columns, autorules_keys,
query_select_related, query_renamed, query_optimizer,use_extra,
queryset.query,
[x.name for x in self.model._meta.fields],
fields_related_model, query_verifier,
query_verifier.sort(),autorules_keys.sort()
))
#"""
# Check if the user requested to return a raw queryset
if raw_query:
return queryset
else:
# Check the total count of registers + rows per page
total_rows_per_page = jsondata.get('rowsperpage', self.default_rows_per_page)
pages_to_bring = jsondata.get('pages_to_bring', 1)
if total_rows_per_page == 'All' or self.export:
total_rows_per_page = queryset.count()
paginator = Paginator(queryset, total_rows_per_page)
total_registers = paginator.count
# Rows per page
if total_rows_per_page:
try:
total_rows_per_page = int(total_rows_per_page)
except Exception:
total_rows_per_page = 'All'
else:
total_rows_per_page = self.default_rows_per_page
if total_rows_per_page == 'All':
page_number = 1
total_rows_per_page = total_registers
total_rows_per_page_out = _('All')
total_pages = 1
else:
total_rows_per_page = int(total_rows_per_page) # By default 10 rows per page
total_rows_per_page_out = total_rows_per_page
total_pages = int(total_registers / total_rows_per_page)
if total_registers % total_rows_per_page:
total_pages += 1
page_number = jsondata.get('page', 1) # If no page specified use first page
if page_number == 'last':
page_number = total_pages
else:
try:
page_number = int(page_number)
except Exception:
page_number = 1
if page_number < 1:
page_number = 1
if page_number > total_pages:
page_number = total_pages
# Build the list of page counters allowed
choice = {}
c = self.default_rows_per_page
chk = 1
while total_registers >= c:
choice[c] = c
if chk == 1:
# From 5 to 10
c = c * 2
# Next level
chk = 2
elif chk == 2:
# From 10 to 25 (10*2+10/2)
c = c * 2 + int(c / 2)
# Next level
chk = 3
elif chk == 3:
# From 25 to 50
c *= 2
chk = 1
# Don't give a too long choice
if c > 2000:
break
# Add all choice in any case
if settings.ALL_PAGESALLOWED:
choice['All'] = _('All')
# Save the pagination in the structure
context['rowsperpageallowed'] = choice
context['rowsperpage'] = total_rows_per_page_out
context['pages_to_bring'] = pages_to_bring
context['pagenumber'] = page_number
# Get the full number of registers and save it to context
context['total_registers'] = total_registers
if total_rows_per_page == 'All':
# Remove total_rows_per_page if is all
total_rows_per_page = None
context['page_before'] = None
context['page_after'] = None
context['start_register'] = 1
context['showing_registers'] = total_registers
else:
# Page before
if page_number <= 1:
context['page_before'] = None
else:
context['page_before'] = page_number-1
# Page after
if page_number >= total_pages:
context['page_after'] = None
else:
context['page_after'] = page_number+1
# Starting on register number
context['start_register'] = (page_number-1)*total_rows_per_page+1
context['showing_registers'] = total_rows_per_page
# Calculate end
context['end_register'] = min(context['start_register']+context['showing_registers']-1, total_registers)
# Add pagination
regs = []
if paginator.count:
desired_page_number = page_number
try:
range_pages_to_bring = xrange(pages_to_bring)
except NameError:
range_pages_to_bring = range(pages_to_bring)
for p in range_pages_to_bring:
try:
regs += paginator.page(desired_page_number)
desired_page_number += 1
except PageNotAnInteger:
# If page is not an integer, deliver first page.
regs += paginator.page(1)
desired_page_number = 2
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
if pages_to_bring == 1:
regs += paginator.page(paginator.num_pages)
                        # Leave the loop
break
# Fill pages
if total_registers:
context['pages'] = pages(paginator, page_number)
try:
range_fill = xrange(pages_to_bring-1)
except NameError:
range_fill = range(pages_to_bring-1)
for p in range_fill:
page_number += 1
context['pages'] += pages(paginator, page_number)
else:
context['pages'] = []
# Return queryset
return regs | def function[get_queryset, parameter[self, raw_query]]:
if <ast.UnaryOp object at 0x7da1b0d04ca0> begin[:]
variable[queryset] assign[=] call[call[name[super], parameter[name[GenList], name[self]]].get_queryset, parameter[]]
variable[Mfields] assign[=] constant[None]
variable[MlimitQ] assign[=] constant[None]
variable[MsearchF] assign[=] constant[None]
variable[MsearchQ] assign[=] constant[None]
if call[name[hasattr], parameter[name[self], constant[__fields__]]] begin[:]
variable[Mfields] assign[=] name[self].__fields__
if call[name[hasattr], parameter[name[self], constant[__limitQ__]]] begin[:]
variable[MlimitQ] assign[=] name[self].__limitQ__
if call[name[hasattr], parameter[name[self], constant[__searchF__]]] begin[:]
variable[MsearchF] assign[=] name[self].__searchF__
if call[name[hasattr], parameter[name[self], constant[__searchQ__]]] begin[:]
variable[MsearchQ] assign[=] name[self].__searchQ__
name[self]._viewname assign[=] name[self].__module__
variable[context] assign[=] name[self].__context
variable[jsonquerytxt] assign[=] call[name[self].request.GET.get, parameter[constant[json], call[name[self].request.POST.get, parameter[constant[json], constant[None]]]]]
if compare[name[jsonquerytxt] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0d06da0>
variable[jsondata] assign[=] call[name[self].set_context_json, parameter[name[jsonquery]]]
variable[listid] assign[=] call[name[jsondata].pop, parameter[constant[listid]]]
variable[elementid] assign[=] call[name[jsondata].pop, parameter[constant[elementid]]]
variable[MODELINF] assign[=] call[name[MODELINFO], parameter[name[self].model, name[self]._appname, name[self]._modelname, name[self]._viewname, name[self].request, name[self].user, name[self].profile, name[jsonquery], name[Mfields], name[MlimitQ], name[MsearchF], name[MsearchQ], name[listid], name[elementid], name[self].__kwargs]]
call[name[context]][constant[filters]] assign[=] list[[]]
call[name[context]][constant[filters_obj]] assign[=] dictionary[[], []]
variable[fields] assign[=] call[name[getattr], parameter[name[self], constant[fields], call[name[MODELINF].fields, parameter[]]]]
call[name[context]][constant[get]] assign[=] list[[]]
call[name[context]][constant[getval]] assign[=] dictionary[[], []]
for taget[name[name]] in starred[name[jsondata]] begin[:]
variable[struct] assign[=] dictionary[[], []]
call[name[struct]][constant[name]] assign[=] name[name]
if compare[name[name] equal[==] constant[rowsperpage]] begin[:]
call[name[struct]][constant[value]] assign[=] name[self].default_rows_per_page
call[call[name[context]][constant[get]].append, parameter[name[struct]]]
call[call[name[context]][constant[getval]]][name[name]] assign[=] call[name[struct]][constant[value]]
variable[limits] assign[=] call[name[MODELINF].limitQ, parameter[]]
variable[qobjects] assign[=] constant[None]
variable[distinct] assign[=] constant[False]
for taget[name[name]] in starred[name[limits]] begin[:]
if <ast.BoolOp object at 0x7da18bccafb0> begin[:]
variable[distinct] assign[=] constant[True]
if name[qobjects] begin[:]
variable[queryset] assign[=] call[name[queryset].filter, parameter[name[qobjects]]]
if call[name[hasattr], parameter[name[self], constant[annotations]]] begin[:]
if <ast.UnaryOp object at 0x7da18bc72ef0> begin[:]
if call[name[callable], parameter[name[self].annotations]] begin[:]
variable[anot] assign[=] call[name[self].annotations, parameter[name[MODELINF]]]
variable[queryset] assign[=] call[name[queryset].annotate, parameter[]]
if name[distinct] begin[:]
variable[queryset] assign[=] call[name[queryset].distinct, parameter[]]
<ast.Try object at 0x7da18bc73100>
variable[listfilters] assign[=] dictionary[[], []]
if name[self].autofiltering begin[:]
call[name[listfilters].update, parameter[call[name[self].autoSearchF, parameter[name[MODELINF]]]]]
call[name[listfilters].update, parameter[call[name[MODELINF].searchF, parameter[]]]]
variable[filters_struct] assign[=] dictionary[[], []]
for taget[name[key]] in starred[name[filters_by_struct]] begin[:]
variable[value] assign[=] call[name[filters_by_struct]][name[key]]
<ast.Try object at 0x7da18bc71ff0>
if <ast.BoolOp object at 0x7da18bc72b00> begin[:]
variable[rule] assign[=] call[name[listfilters]][name[key]]
variable[typekind] assign[=] call[name[rule]][constant[2]]
if compare[call[name[type], parameter[name[typekind]]] equal[==] name[list]] begin[:]
if compare[call[name[type], parameter[name[value]]] equal[==] name[int]] begin[:]
variable[fv] assign[=] call[call[name[typekind]][binary_operation[name[value] - constant[1]]]][constant[0]]
variable[queryset] assign[=] call[name[queryset].filter, parameter[call[call[name[rule]][constant[1]], parameter[name[fv]]]]]
variable[typekind] assign[=] constant[select]
call[name[filters_struct]][name[key]] assign[=] name[value]
variable[filters_json] assign[=] call[name[json].dumps, parameter[name[filters_struct]]]
variable[get] assign[=] call[name[context]][constant[get]]
variable[filters_get] assign[=] list[[]]
for taget[name[element]] in starred[name[get]] begin[:]
if compare[call[name[element]][constant[name]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18bc718d0>]]] begin[:]
variable[struct] assign[=] dictionary[[], []]
call[name[struct]][constant[name]] assign[=] call[name[element]][constant[name]]
call[name[struct]][constant[value]] assign[=] call[name[element]][constant[value]]
call[name[filters_get].append, parameter[name[struct]]]
variable[struct] assign[=] dictionary[[], []]
call[name[struct]][constant[name]] assign[=] constant[filters]
call[name[struct]][constant[value]] assign[=] name[filters_json]
call[name[filters_get].append, parameter[name[struct]]]
call[name[context]][constant[filters_get]] assign[=] name[filters_get]
variable[filters] assign[=] list[[]]
for taget[name[key]] in starred[name[listfilters]] begin[:]
variable[typekind] assign[=] call[call[name[listfilters]][name[key]]][constant[2]]
if compare[call[name[type], parameter[name[typekind]]] equal[==] name[list]] begin[:]
variable[choice] assign[=] list[[<ast.Call object at 0x7da1b2344b80>]]
for taget[name[value]] in starred[name[typekind]] begin[:]
call[name[choice].append, parameter[call[name[value]][constant[1]]]]
if compare[name[key] in call[name[filters_struct].keys, parameter[]]] begin[:]
variable[value] assign[=] call[name[int], parameter[call[name[filters_struct]][name[key]]]]
variable[typekind] assign[=] constant[select]
variable[argument] assign[=] name[choice]
variable[filtertuple] assign[=] tuple[[<ast.Name object at 0x7da1b23469b0>, <ast.Subscript object at 0x7da1b23453c0>, <ast.Name object at 0x7da1b23476a0>, <ast.Name object at 0x7da1b2345270>, <ast.Name object at 0x7da1b23454b0>]]
call[name[filters].append, parameter[name[filtertuple]]]
call[name[context]][constant[filters]] assign[=] name[filters]
variable[search_filter_button] assign[=] call[name[jsondata].get, parameter[constant[search_filter_button], constant[None]]]
if compare[name[search_filter_button] is_not constant[None]] begin[:]
name[self].search_filter_button assign[=] name[search_filter_button]
variable[search] assign[=] call[call[name[jsondata].get, parameter[constant[search], constant[]]].lower, parameter[]]
variable[newlen] assign[=] call[name[len], parameter[name[search]]]
variable[oldlen] assign[=] constant[0]
while compare[name[newlen] not_equal[!=] name[oldlen]] begin[:]
variable[oldlen] assign[=] name[newlen]
variable[search] assign[=] call[name[search].replace, parameter[constant[ ], constant[ ]]]
variable[newlen] assign[=] call[name[len], parameter[name[search]]]
if <ast.BoolOp object at 0x7da18ede5ed0> begin[:]
variable[search] assign[=] call[name[search]][<ast.Slice object at 0x7da18ede4400>]
if <ast.BoolOp object at 0x7da18ede7b80> begin[:]
variable[search] assign[=] call[name[search]][<ast.Slice object at 0x7da18ede6f80>]
call[name[context]][constant[search]] assign[=] name[search]
variable[datetimeQ] assign[=] constant[None]
if compare[call[name[len], parameter[name[search]]] greater[>] constant[0]] begin[:]
variable[tid] assign[=] constant[None]
if compare[constant[id:] in name[search]] begin[:]
variable[tid] assign[=] call[call[call[call[name[search].split, parameter[constant[:]]]][constant[1]].split, parameter[constant[ ]]]][constant[0]]
<ast.Try object at 0x7da18ede4a60>
if name[tid] begin[:]
variable[search] assign[=] call[name[search].replace, parameter[binary_operation[constant[id:%s] <ast.Mod object at 0x7da2590d6920> name[tid]], constant[]]]
variable[search] assign[=] call[name[search].replace, parameter[constant[ ], constant[ ]]]
variable[tpk] assign[=] constant[None]
if compare[constant[pk:] in name[search]] begin[:]
variable[tpk] assign[=] call[call[call[call[name[search].split, parameter[constant[:]]]][constant[1]].split, parameter[constant[ ]]]][constant[0]]
<ast.Try object at 0x7da18ede66e0>
if name[tpk] begin[:]
variable[search] assign[=] call[name[search].replace, parameter[binary_operation[constant[pk:%s] <ast.Mod object at 0x7da2590d6920> name[tpk]], constant[]]]
variable[search] assign[=] call[name[search].replace, parameter[constant[ ], constant[ ]]]
if <ast.BoolOp object at 0x7da18ede65c0> begin[:]
variable[search] assign[=] call[name[search]][<ast.Slice object at 0x7da18ede7130>]
if <ast.BoolOp object at 0x7da18ede5c60> begin[:]
variable[search] assign[=] call[name[search]][<ast.Slice object at 0x7da18ede7f70>]
variable[searchs] assign[=] dictionary[[], []]
if name[self].autofiltering begin[:]
call[name[searchs].update, parameter[call[name[self].autoSearchQ, parameter[name[MODELINF], name[search]]]]]
variable[tmp_search] assign[=] call[name[MODELINF].searchQ, parameter[name[search]]]
if compare[call[name[type], parameter[name[tmp_search]]] equal[==] name[dict]] begin[:]
call[name[searchs].update, parameter[name[tmp_search]]]
variable[qobjects] assign[=] dictionary[[], []]
variable[qobjectsCustom] assign[=] dictionary[[], []]
for taget[name[name]] in starred[name[searchs]] begin[:]
variable[qtoken] assign[=] call[name[searchs]][name[name]]
if compare[name[qtoken] equal[==] constant[datetime]] begin[:]
variable[datetimeQ] assign[=] name[name]
continue
variable[qdata] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18c4ce3b0> begin[:]
for taget[name[word]] in starred[call[name[search].split, parameter[constant[ ]]]] begin[:]
if compare[call[call[name[word].split, parameter[constant[:]]]][constant[0]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18c4cc2e0>, <ast.Constant object at 0x7da18c4cf4c0>]]] begin[:]
if compare[call[name[word]][constant[0]] equal[==] constant[-]] begin[:]
variable[negative] assign[=] constant[True]
variable[qword] assign[=] constant[None]
for taget[name[token]] in starred[call[name[qobjects]][name[word]]] begin[:]
if name[qword] begin[:]
if name[negative] begin[:]
<ast.AugAssign object at 0x7da18c4cd510>
if name[qword] begin[:]
if name[qdata] begin[:]
<ast.AugAssign object at 0x7da18c4ce2f0>
if name[tid] begin[:]
variable[queryset] assign[=] call[name[queryset].filter, parameter[]]
if name[tpk] begin[:]
variable[queryset] assign[=] call[name[queryset].filter, parameter[]]
if name[qobjectsCustom] begin[:]
variable[queryset] assign[=] call[name[queryset].filter, parameter[name[qobjectsCustom]]]
if name[qdata] begin[:]
variable[queryset] assign[=] call[name[queryset].filter, parameter[name[qdata]]]
call[name[context]][constant[datetimeQ]] assign[=] name[datetimeQ]
if name[datetimeQ] begin[:]
variable[f] assign[=] dictionary[[], []]
call[name[f]][constant[year]] assign[=] tuple[[<ast.Constant object at 0x7da18c4cf490>, <ast.Constant object at 0x7da18c4cf4f0>, <ast.Constant object at 0x7da18c4ce020>]]
call[name[f]][constant[month]] assign[=] tuple[[<ast.Constant object at 0x7da18c4ce380>, <ast.Constant object at 0x7da18c4ced70>, <ast.Constant object at 0x7da18c4cc370>]]
call[name[f]][constant[day]] assign[=] tuple[[<ast.Constant object at 0x7da18c4cc070>, <ast.Constant object at 0x7da18c4ce740>, <ast.Constant object at 0x7da18c4cf2e0>]]
call[name[f]][constant[hour]] assign[=] tuple[[<ast.Constant object at 0x7da18c4ce6b0>, <ast.Constant object at 0x7da18c4ce140>, <ast.Constant object at 0x7da18c4cdc90>]]
call[name[f]][constant[minute]] assign[=] tuple[[<ast.Constant object at 0x7da18c4cc4f0>, <ast.Constant object at 0x7da18c4cec80>, <ast.Constant object at 0x7da18c4cd2d0>]]
call[name[f]][constant[second]] assign[=] tuple[[<ast.Constant object at 0x7da18c4cd690>, <ast.Constant object at 0x7da18c4cc4c0>, <ast.Constant object at 0x7da18c4ce350>]]
variable[date_elements] assign[=] list[[<ast.Constant object at 0x7da18c4ccf40>, <ast.Constant object at 0x7da18c4cdae0>, <ast.Constant object at 0x7da18c4cd3c0>, <ast.Constant object at 0x7da18c4cebc0>, <ast.Constant object at 0x7da18c4ccdc0>, <ast.Constant object at 0x7da18c4cd0f0>, <ast.Constant object at 0x7da18c4ce590>]]
for taget[name[element]] in starred[call[name[date_elements]][<ast.Slice object at 0x7da18c4cfca0>]] begin[:]
variable[value] assign[=] call[name[jsondata].get, parameter[name[element], constant[None]]]
if name[value] begin[:]
call[name[f]][name[element]] assign[=] tuple[[<ast.Call object at 0x7da18c4cf970>, <ast.Call object at 0x7da18c4ce6e0>, <ast.Constant object at 0x7da18c4ccd00>]]
if <ast.BoolOp object at 0x7da18c4ccd90> begin[:]
<ast.Tuple object at 0x7da18c4cccd0> assign[=] call[name[calendar].monthrange, parameter[call[call[name[f]][constant[year]]][constant[1]], call[call[name[f]][constant[month]]][constant[1]]]]
call[name[f]][constant[day]] assign[=] tuple[[<ast.Subscript object at 0x7da18c4cea70>, <ast.Name object at 0x7da18c4ce0e0>, <ast.Subscript object at 0x7da18c4cf730>]]
variable[date_min] assign[=] call[name[datetime].datetime, parameter[call[call[name[f]][constant[year]]][constant[0]], call[call[name[f]][constant[month]]][constant[0]], call[call[name[f]][constant[day]]][constant[0]], call[call[name[f]][constant[hour]]][constant[0]], call[call[name[f]][constant[minute]]][constant[0]], call[call[name[f]][constant[second]]][constant[0]]]]
variable[date_max] assign[=] call[name[datetime].datetime, parameter[call[call[name[f]][constant[year]]][constant[1]], call[call[name[f]][constant[month]]][constant[1]], call[call[name[f]][constant[day]]][constant[1]], call[call[name[f]][constant[hour]]][constant[1]], call[call[name[f]][constant[minute]]][constant[1]], call[call[name[f]][constant[second]]][constant[1]]]]
variable[qarg1] assign[=] dictionary[[<ast.Call object at 0x7da1b0e5bee0>], [<ast.Name object at 0x7da1b0ebe560>]]
variable[qarg2] assign[=] dictionary[[<ast.Call object at 0x7da1b0ebc4f0>], [<ast.Name object at 0x7da1b0ebebf0>]]
variable[qarg3] assign[=] dictionary[[<ast.Name object at 0x7da1b0ebd540>], [<ast.Constant object at 0x7da1b0ebe3e0>]]
variable[queryset] assign[=] call[name[queryset].filter, parameter[binary_operation[binary_operation[call[name[Q], parameter[]] <ast.BitAnd object at 0x7da2590d6b60> call[name[Q], parameter[]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Q], parameter[]]]]]
variable[deepness_index] assign[=] constant[0]
for taget[name[element]] in starred[call[name[date_elements]][<ast.Slice object at 0x7da1b0ebdb10>]] begin[:]
if call[call[name[f]][name[element]]][constant[2]] begin[:]
<ast.AugAssign object at 0x7da1b0ebcee0>
variable[exclusion] assign[=] dictionary[[], []]
call[name[exclusion]][name[datetimeQ]] assign[=] constant[None]
variable[date_results] assign[=] call[call[name[queryset].exclude, parameter[]].values_list, parameter[name[datetimeQ]]]
if compare[call[call[name[f]][constant[day]]][constant[0]] not_equal[!=] call[call[name[f]][constant[day]]][constant[1]]] begin[:]
if compare[call[call[name[f]][constant[month]]][constant[0]] equal[==] call[call[name[f]][constant[month]]][constant[1]]] begin[:]
variable[date_results] assign[=] call[name[date_results].datetimes, parameter[name[datetimeQ], constant[day]]]
variable[get] assign[=] call[name[context]][constant[get]]
call[name[context]][constant[datefilter]] assign[=] dictionary[[], []]
if compare[binary_operation[name[deepness_index] + constant[1]] equal[==] call[name[len], parameter[name[date_elements]]]] begin[:]
call[call[name[context]][constant[datefilter]]][constant[deepness]] assign[=] constant[None]
call[call[name[context]][constant[datefilter]]][constant[deepnessback]] assign[=] list[[]]
call[call[name[context]][constant[datefilter]]][constant[deepnessinit]] assign[=] list[[]]
for taget[name[element]] in starred[name[get]] begin[:]
if <ast.UnaryOp object at 0x7da1b0ebca00> begin[:]
variable[struct] assign[=] dictionary[[], []]
call[name[struct]][constant[name]] assign[=] call[name[element]][constant[name]]
call[name[struct]][constant[value]] assign[=] call[name[element]][constant[value]]
call[call[call[name[context]][constant[datefilter]]][constant[deepnessinit]].append, parameter[name[struct]]]
call[call[call[name[context]][constant[datefilter]]][constant[deepnessback]].append, parameter[name[struct]]]
call[call[name[context]][constant[datefilter]]][constant[data]] assign[=] list[[]]
for taget[name[element]] in starred[name[date_results]] begin[:]
call[call[call[name[context]][constant[datefilter]]][constant[data]].append, parameter[call[call[name[element].timetuple, parameter[]]][name[deepness_index]]]]
call[call[name[context]][constant[datefilter]]][constant[data]] assign[=] call[name[list], parameter[call[name[set], parameter[call[call[name[context]][constant[datefilter]]][constant[data]]]]]]
call[call[call[name[context]][constant[datefilter]]][constant[data]].sort, parameter[]]
if name[self].json_worker begin[:]
variable[rightnow] assign[=] dictionary[[], []]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da18bcc99c0>, <ast.Constant object at 0x7da18bcc80a0>, <ast.Constant object at 0x7da18bcca110>, <ast.Constant object at 0x7da18bccb2b0>, <ast.Constant object at 0x7da18bccb100>, <ast.Constant object at 0x7da18bcc9300>]]] begin[:]
call[name[rightnow]][name[key]] assign[=] <ast.BoolOp object at 0x7da18bcc94e0>
call[call[name[context]][constant[datefilter]]][constant[rightnow]] assign[=] name[rightnow]
<ast.Try object at 0x7da18bccae60>
variable[order_by] assign[=] list[[]]
variable[position] assign[=] dictionary[[], []]
variable[counter] assign[=] constant[1]
call[name[context]][constant[columns]] assign[=] list[[]]
name[self].__fields assign[=] list[[]]
for taget[name[value]] in starred[name[fields]] begin[:]
call[name[self].__fields.append, parameter[call[name[value]][constant[0]]]]
name[self].__autorules assign[=] call[name[self].autorules, parameter[]]
for taget[name[order]] in starred[name[order_by_struct]] begin[:]
variable[name] assign[=] call[call[name[list], parameter[call[name[order].keys, parameter[]]]]][constant[0]]
variable[lbl] assign[=] constant[None]
for taget[name[field]] in starred[name[self].__autorules] begin[:]
if compare[call[constant[{}:].format, parameter[name[name]]] in name[field]] begin[:]
variable[name] assign[=] call[call[name[field].split, parameter[constant[:]]]][constant[0]]
variable[lbl] assign[=] call[call[name[field].split, parameter[constant[:]]]][constant[1]]
break
variable[direction] assign[=] call[name[order]][name[name]]
if <ast.BoolOp object at 0x7da2054a70d0> begin[:]
variable[name] assign[=] name[lbl]
if compare[name[direction] equal[==] constant[asc]] begin[:]
call[name[order_by].append, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[remove_getdisplay], parameter[name[name]]]]]]
call[name[position]][name[name]] assign[=] name[counter]
<ast.AugAssign object at 0x7da2054a6e60>
if name[order_by] begin[:]
variable[queryset] assign[=] call[name[queryset].order_by, parameter[<ast.Starred object at 0x7da2054a5bd0>]]
variable[sort] assign[=] dictionary[[], []]
for taget[name[value]] in starred[name[fields]] begin[:]
if call[name[value]][constant[0]] begin[:]
variable[name] assign[=] call[call[call[name[value]][constant[0]].split, parameter[constant[:]]]][constant[0]]
variable[order_key] assign[=] name[name]
variable[type_field] assign[=] call[name[self].get_type_field, parameter[call[call[call[name[value]][constant[0]].split, parameter[constant[:]]]][<ast.UnaryOp object at 0x7da2054a5390>]]]
variable[publicname] assign[=] call[name[value]][constant[1]]
if compare[call[name[len], parameter[name[value]]] greater[>] constant[2]] begin[:]
variable[size] assign[=] call[name[value]][constant[2]]
if compare[call[name[len], parameter[name[value]]] greater[>] constant[3]] begin[:]
variable[align] assign[=] call[name[value]][constant[3]]
if compare[call[name[len], parameter[name[value]]] greater[>] constant[4]] begin[:]
variable[filter_column] assign[=] call[name[value]][constant[4]]
variable[ordering] assign[=] list[[]]
variable[found] assign[=] constant[False]
for taget[name[order]] in starred[name[order_by_struct]] begin[:]
variable[subname] assign[=] call[call[name[list], parameter[call[name[order].keys, parameter[]]]]][constant[0]]
variable[direction] assign[=] call[name[order]][name[subname]]
if compare[name[order_key] equal[==] name[subname]] begin[:]
if compare[name[direction] equal[==] constant[desc]] begin[:]
variable[direction] assign[=] constant[]
variable[sort_class] assign[=] constant[headerSortUp]
variable[found] assign[=] constant[True]
if <ast.BoolOp object at 0x7da18dc04790> begin[:]
call[name[ordering].append, parameter[dictionary[[<ast.Name object at 0x7da18dc05150>], [<ast.Name object at 0x7da18dc059c0>]]]]
if <ast.UnaryOp object at 0x7da18dc04cd0> begin[:]
call[name[ordering].append, parameter[dictionary[[<ast.Name object at 0x7da18dc06b00>], [<ast.Constant object at 0x7da18dc04760>]]]]
variable[sort_class] assign[=] constant[]
call[name[sort]][name[order_key]] assign[=] dictionary[[], []]
call[call[name[sort]][name[order_key]]][constant[id]] assign[=] name[name]
call[call[name[sort]][name[order_key]]][constant[name]] assign[=] name[publicname]
call[call[name[sort]][name[order_key]]][constant[align]] assign[=] name[align]
call[call[name[sort]][name[order_key]]][constant[type]] assign[=] name[type_field]
if name[filter_column] begin[:]
call[call[name[sort]][name[order_key]]][constant[filter]] assign[=] name[filter_column]
if compare[name[jsonquery] is constant[None]] begin[:]
call[call[name[sort]][name[order_key]]][constant[size]] assign[=] name[size]
call[call[name[sort]][name[order_key]]][constant[class]] assign[=] name[sort_class]
if <ast.BoolOp object at 0x7da18dc07d60> begin[:]
call[call[name[sort]][name[order_key]]][constant[ordering]] assign[=] call[call[name[json].dumps, parameter[name[ordering]]].replace, parameter[constant["], constant[\"]]]
if compare[name[order_key] in name[position]] begin[:]
call[call[name[sort]][name[order_key]]][constant[position]] assign[=] call[name[position]][name[order_key]]
if compare[name[jsonquery] is_not constant[None]] begin[:]
call[name[context]][constant[ordering]] assign[=] name[order_by_struct]
call[name[context]][constant[columns]] assign[=] list[[]]
for taget[name[value]] in starred[name[fields]] begin[:]
variable[field] assign[=] call[name[value]][constant[0]]
if name[field] begin[:]
call[call[name[context]][constant[columns]].append, parameter[call[name[sort]][call[call[name[field].split, parameter[constant[:]]]][constant[0]]]]]
name[self].__columns assign[=] list[[<ast.Constant object at 0x7da18dc07220>]]
name[self].__foreignkeys assign[=] list[[]]
for taget[name[column]] in starred[name[self].model._meta.fields] begin[:]
call[name[self].__columns.append, parameter[name[column].name]]
if name[column].is_relation begin[:]
call[name[self].__foreignkeys.append, parameter[name[column].name]]
name[self].__related_objects assign[=] list[[]]
for taget[name[f]] in starred[name[self].model._meta.related_objects] begin[:]
call[name[self].__related_objects.append, parameter[name[f].name]]
variable[model_properties] assign[=] binary_operation[name[self].__columns + name[self].__related_objects]
variable[autorules_keys] assign[=] call[name[sorted], parameter[call[name[self].__autorules.keys, parameter[]]]]
variable[query_renamed] assign[=] dictionary[[], []]
variable[query_optimizer] assign[=] list[[]]
variable[query_verifier] assign[=] list[[]]
variable[query_select_related] assign[=] list[[]]
variable[fields_related_model] assign[=] list[[]]
for taget[name[rule]] in starred[name[autorules_keys]] begin[:]
variable[found] assign[=] constant[False]
variable[rule_org] assign[=] name[rule]
variable[rulesp] assign[=] call[name[rule].split, parameter[constant[:]]]
if compare[call[name[len], parameter[name[rulesp]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da18dc051b0> assign[=] name[rulesp]
variable[nfrule] assign[=] call[name[rule].split, parameter[constant[__]]]
variable[do_select_related] assign[=] constant[False]
variable[model] assign[=] name[self].model
if compare[call[name[len], parameter[name[nfrule]]] greater[>] constant[1]] begin[:]
variable[ruletmp] assign[=] list[[]]
variable[field_related_model] assign[=] list[[]]
for taget[name[n]] in starred[name[nfrule]] begin[:]
if name[model] begin[:]
for taget[name[fi]] in starred[name[model]._meta.fields] begin[:]
if compare[name[fi].name equal[==] name[n]] begin[:]
variable[found] assign[=] constant[True]
call[name[ruletmp].append, parameter[name[n]]]
if name[fi].is_relation begin[:]
variable[model] assign[=] name[fi].related_model
call[name[field_related_model].append, parameter[name[fi].name]]
break
if <ast.BoolOp object at 0x7da18dc98340> begin[:]
break
if name[field_related_model] begin[:]
call[name[fields_related_model].append, parameter[call[constant[__].join, parameter[name[field_related_model]]]]]
if compare[name[ruletmp] not_equal[!=] name[nfrule]] begin[:]
variable[do_select_related] assign[=] constant[False]
if <ast.BoolOp object at 0x7da18dc9a6e0> begin[:]
if compare[constant[__] in name[rule]] begin[:]
call[name[query_select_related].append, parameter[call[constant[__].join, parameter[call[call[name[rule].split, parameter[constant[__]]]][<ast.Slice object at 0x7da18dc98f10>]]]]]
variable[nfrule] assign[=] call[name[nfrule]][constant[0]]
if compare[name[nfrule] in name[self].__columns] begin[:]
if compare[name[rule] <ast.NotIn object at 0x7da2590d7190> name[fields_related_model]] begin[:]
call[name[query_verifier].append, parameter[name[rule_org]]]
if compare[name[alias] not_equal[!=] name[rule]] begin[:]
call[name[query_renamed]][name[alias]] assign[=] call[name[F], parameter[name[rule]]]
call[name[query_optimizer].append, parameter[name[alias]]]
if call[name[hasattr], parameter[name[self], constant[annotations]]] begin[:]
if call[name[callable], parameter[name[self].annotations]] begin[:]
variable[anot] assign[=] call[name[self].annotations, parameter[name[MODELINF]]]
for taget[name[xnfrule]] in starred[call[name[anot].keys, parameter[]]] begin[:]
variable[found] assign[=] constant[True]
if compare[name[xnfrule] <ast.NotIn object at 0x7da2590d7190> name[query_verifier]] begin[:]
call[name[query_verifier].append, parameter[name[xnfrule]]]
call[name[query_optimizer].append, parameter[name[xnfrule]]]
if <ast.UnaryOp object at 0x7da18dc9b9a0> begin[:]
variable[query_renamed] assign[=] dictionary[[], []]
variable[query_optimizer] assign[=] list[[]]
variable[query_verifier] assign[=] list[[]]
variable[query_select_related] assign[=] list[[]]
break
for taget[name[rename]] in starred[call[name[query_renamed].keys, parameter[]]] begin[:]
if compare[name[rename] in name[model_properties]] begin[:]
if compare[name[rename] in name[self].__foreignkeys] begin[:]
variable[msg] assign[=] constant[Invalid alias. The alias '{}' is a foreign key from model '{}' inside app '{}']
<ast.Raise object at 0x7da18dc98fd0>
if <ast.BoolOp object at 0x7da18dc9bb50> begin[:]
variable[queryset] assign[=] call[name[queryset].select_related, parameter[<ast.Starred object at 0x7da18dc9b460>]]
call[name[query_verifier].sort, parameter[]]
call[name[autorules_keys].sort, parameter[]]
if <ast.BoolOp object at 0x7da18dc9b310> begin[:]
if name[query_renamed] begin[:]
variable[queryset] assign[=] call[call[name[queryset].annotate, parameter[]].values, parameter[<ast.Starred object at 0x7da20c6e72e0>]]
if call[name[hasattr], parameter[name[self], constant[custom_queryset]]] begin[:]
variable[queryset] assign[=] call[name[self].custom_queryset, parameter[name[queryset], name[MODELINF]]]
constant[
raise Exception("FOUND: {} -- __foreignkeys: {} -- __columns: {} -- autorules_keys: {} -- query_select_related: {} -- query_renamed: {} -- query_optimizer: {} | use_extra: {}| -- query: {} -- meta.fields: {} -- fields_related_model: {} -- query_verifier: {} -- ??? {} == {}".format(
found,
self.__foreignkeys, self.__columns, autorules_keys,
query_select_related, query_renamed, query_optimizer,use_extra,
queryset.query,
[x.name for x in self.model._meta.fields],
fields_related_model, query_verifier,
query_verifier.sort(),autorules_keys.sort()
))
#]
if name[raw_query] begin[:]
return[name[queryset]] | keyword[def] identifier[get_queryset] ( identifier[self] , identifier[raw_query] = keyword[False] ):
keyword[if] keyword[not] identifier[self] . identifier[haystack] :
identifier[queryset] = identifier[super] ( identifier[GenList] , identifier[self] ). identifier[get_queryset] ()
keyword[else] :
identifier[queryset] = identifier[SearchQuerySet] (). identifier[models] ( identifier[self] . identifier[model] )
identifier[Mfields] = keyword[None]
identifier[MlimitQ] = keyword[None]
identifier[MsearchF] = keyword[None]
identifier[MsearchQ] = keyword[None]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[Mfields] = identifier[self] . identifier[__fields__]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[MlimitQ] = identifier[self] . identifier[__limitQ__]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[MsearchF] = identifier[self] . identifier[__searchF__]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[MsearchQ] = identifier[self] . identifier[__searchQ__]
identifier[self] . identifier[_viewname] = identifier[self] . identifier[__module__]
identifier[context] = identifier[self] . identifier[__context]
identifier[jsonquerytxt] = identifier[self] . identifier[request] . identifier[GET] . identifier[get] ( literal[string] , identifier[self] . identifier[request] . identifier[POST] . identifier[get] ( literal[string] , keyword[None] ))
keyword[if] identifier[jsonquerytxt] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[jsonquery] = identifier[json] . identifier[loads] ( identifier[jsonquerytxt] )
keyword[except] identifier[json] . identifier[JSONDecodeError] keyword[as] identifier[e] :
keyword[raise] identifier[IOError] ( literal[string] )
identifier[jsondata] = identifier[self] . identifier[set_context_json] ( identifier[jsonquery] )
identifier[listid] = identifier[jsondata] . identifier[pop] ( literal[string] )
identifier[elementid] = identifier[jsondata] . identifier[pop] ( literal[string] )
keyword[else] :
identifier[listid] = keyword[None]
identifier[elementid] = keyword[None]
identifier[jsondata] ={}
identifier[jsonquery] ={}
identifier[MODELINF] = identifier[MODELINFO] ( identifier[self] . identifier[model] , identifier[self] . identifier[_appname] , identifier[self] . identifier[_modelname] , identifier[self] . identifier[_viewname] , identifier[self] . identifier[request] , identifier[self] . identifier[user] , identifier[self] . identifier[profile] , identifier[jsonquery] , identifier[Mfields] , identifier[MlimitQ] , identifier[MsearchF] , identifier[MsearchQ] , identifier[listid] , identifier[elementid] , identifier[self] . identifier[__kwargs] )
identifier[context] [ literal[string] ]=[]
identifier[context] [ literal[string] ]={}
identifier[fields] = identifier[getattr] ( identifier[self] , literal[string] , identifier[MODELINF] . identifier[fields] ())
identifier[context] [ literal[string] ]=[]
identifier[context] [ literal[string] ]={}
keyword[for] identifier[name] keyword[in] identifier[jsondata] :
identifier[struct] ={}
identifier[struct] [ literal[string] ]= identifier[name]
keyword[if] identifier[name] == literal[string] :
identifier[struct] [ literal[string] ]= identifier[self] . identifier[default_rows_per_page]
keyword[elif] identifier[name] == literal[string] :
identifier[struct] [ literal[string] ]= literal[int]
keyword[elif] identifier[name] == literal[string] :
identifier[struct] [ literal[string] ]= literal[int]
keyword[else] :
identifier[struct] [ literal[string] ]= identifier[jsondata] [ identifier[name] ]
identifier[context] [ literal[string] ]. identifier[append] ( identifier[struct] )
identifier[context] [ literal[string] ][ identifier[name] ]= identifier[struct] [ literal[string] ]
identifier[limits] = identifier[MODELINF] . identifier[limitQ] ()
identifier[qobjects] = keyword[None]
identifier[distinct] = keyword[False]
keyword[for] identifier[name] keyword[in] identifier[limits] :
keyword[if] identifier[name] == literal[string] keyword[or] identifier[name] == literal[string] :
identifier[distinct] = keyword[True]
keyword[else] :
keyword[if] identifier[qobjects] :
identifier[qobjects] &= identifier[limits] [ identifier[name] ]
keyword[else] :
identifier[qobjects] = identifier[limits] [ identifier[name] ]
keyword[if] identifier[qobjects] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[qobjects] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] keyword[not] identifier[self] . identifier[haystack] :
keyword[if] identifier[callable] ( identifier[self] . identifier[annotations] ):
identifier[anot] = identifier[self] . identifier[annotations] ( identifier[MODELINF] )
keyword[else] :
identifier[anot] = identifier[self] . identifier[annotations]
identifier[queryset] = identifier[queryset] . identifier[annotate] (** identifier[anot] )
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[if] identifier[distinct] :
identifier[queryset] = identifier[queryset] . identifier[distinct] ()
keyword[try] :
identifier[filters_get] = identifier[jsondata] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[type] ( identifier[filters_get] )== identifier[dict] :
identifier[filters_by_struct] = identifier[filters_get]
keyword[else] :
identifier[filters_by_struct] = identifier[json] . identifier[loads] ( identifier[str] ( identifier[filters_get] ))
keyword[except] identifier[Exception] :
identifier[filters_by_struct] =[]
identifier[listfilters] ={}
keyword[if] identifier[self] . identifier[autofiltering] :
identifier[listfilters] . identifier[update] ( identifier[self] . identifier[autoSearchF] ( identifier[MODELINF] ))
identifier[listfilters] . identifier[update] ( identifier[MODELINF] . identifier[searchF] ())
identifier[filters_struct] ={}
keyword[for] identifier[key] keyword[in] identifier[filters_by_struct] :
identifier[value] = identifier[filters_by_struct] [ identifier[key] ]
keyword[try] :
identifier[value] = identifier[int] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[if] identifier[value] keyword[and] identifier[key] keyword[in] identifier[listfilters] :
identifier[rule] = identifier[listfilters] [ identifier[key] ]
identifier[typekind] = identifier[rule] [ literal[int] ]
keyword[if] identifier[type] ( identifier[typekind] )== identifier[list] :
keyword[if] identifier[type] ( identifier[value] )== identifier[int] :
identifier[fv] = identifier[typekind] [ identifier[value] - literal[int] ][ literal[int] ]
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[rule] [ literal[int] ]( identifier[fv] ))
identifier[typekind] = literal[string]
keyword[elif] identifier[typekind] == literal[string] :
keyword[if] identifier[type] ( identifier[value] )== identifier[int] :
identifier[fv] = identifier[rule] [ literal[int] ][ identifier[value] - literal[int] ][ literal[int] ]
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[rule] [ literal[int] ]( identifier[fv] ))
keyword[elif] identifier[typekind] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[type] ( identifier[value] ) keyword[in] ( identifier[list] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[value] ):
identifier[qobjects] = identifier[Q] ( identifier[rule] [ literal[int] ]( identifier[value] [ literal[int] ]))
keyword[for] identifier[fvt] keyword[in] identifier[value] [ literal[int] :]:
identifier[qobjects] |= identifier[Q] ( identifier[rule] [ literal[int] ]( identifier[fvt] ))
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[qobjects] )
keyword[elif] identifier[typekind] keyword[in] [ literal[string] , literal[string] ]:
identifier[fv] = identifier[value]
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[rule] [ literal[int] ]( identifier[fv] ))
keyword[elif] identifier[typekind] keyword[in] [ literal[string] ,]:
identifier[fv] = identifier[value]
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[rule] [ literal[int] ]( identifier[fv] ))
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[typekind] , identifier[key] ))
identifier[filters_struct] [ identifier[key] ]= identifier[value]
identifier[filters_json] = identifier[json] . identifier[dumps] ( identifier[filters_struct] )
identifier[get] = identifier[context] [ literal[string] ]
identifier[filters_get] =[]
keyword[for] identifier[element] keyword[in] identifier[get] :
keyword[if] identifier[element] [ literal[string] ] keyword[not] keyword[in] [ literal[string] ]:
identifier[struct] ={}
identifier[struct] [ literal[string] ]= identifier[element] [ literal[string] ]
identifier[struct] [ literal[string] ]= identifier[element] [ literal[string] ]
identifier[filters_get] . identifier[append] ( identifier[struct] )
identifier[struct] ={}
identifier[struct] [ literal[string] ]= literal[string]
identifier[struct] [ literal[string] ]= identifier[filters_json]
identifier[filters_get] . identifier[append] ( identifier[struct] )
identifier[context] [ literal[string] ]= identifier[filters_get]
identifier[filters] =[]
keyword[for] identifier[key] keyword[in] identifier[listfilters] :
identifier[typekind] = identifier[listfilters] [ identifier[key] ][ literal[int] ]
keyword[if] identifier[type] ( identifier[typekind] )== identifier[list] :
identifier[choice] =[ identifier[_] ( literal[string] )]
keyword[for] identifier[value] keyword[in] identifier[typekind] :
identifier[choice] . identifier[append] ( identifier[value] [ literal[int] ])
keyword[if] identifier[key] keyword[in] identifier[filters_struct] . identifier[keys] ():
identifier[value] = identifier[int] ( identifier[filters_struct] [ identifier[key] ])
keyword[else] :
identifier[value] = literal[int]
identifier[typekind] = literal[string]
identifier[argument] = identifier[choice]
keyword[elif] identifier[typekind] == literal[string] :
identifier[typevalue] = identifier[listfilters] [ identifier[key] ][ literal[int] ]
identifier[choice] =[ identifier[_] ( literal[string] )]
keyword[for] identifier[value] keyword[in] identifier[typevalue] :
identifier[choice] . identifier[append] ( identifier[value] [ literal[int] ])
keyword[if] identifier[key] keyword[in] identifier[filters_struct] . identifier[keys] ():
identifier[value] = identifier[int] ( identifier[filters_struct] [ identifier[key] ])
keyword[else] :
identifier[value] = literal[int]
identifier[argument] = identifier[choice]
keyword[elif] identifier[typekind] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[typekind] == literal[string] :
identifier[typevalue] = identifier[listfilters] [ identifier[key] ][ literal[int] ]
identifier[choice] =[]
keyword[for] identifier[value] keyword[in] identifier[typevalue] :
identifier[choice] . identifier[append] ({ literal[string] : identifier[value] [ literal[int] ], literal[string] : identifier[value] [ literal[int] ]})
keyword[else] :
identifier[choice] = identifier[list] ( identifier[listfilters] [ identifier[key] ][ literal[int] :])
identifier[choice] [ literal[int] ]= identifier[reverse_lazy] ( identifier[choice] [ literal[int] ], identifier[kwargs] ={ literal[string] : literal[string] })[:- literal[int] ]
keyword[if] identifier[key] keyword[in] identifier[filters_struct] . identifier[keys] ():
identifier[value] = identifier[filters_struct] [ identifier[key] ]
keyword[else] :
identifier[value] =[]
identifier[argument] = identifier[choice]
keyword[elif] identifier[typekind] keyword[in] [ literal[string] , literal[string] ]:
identifier[argument] = keyword[None]
keyword[if] identifier[key] keyword[in] identifier[filters_struct] . identifier[keys] ():
identifier[value] = identifier[filters_struct] [ identifier[key] ]
keyword[else] :
identifier[value] = keyword[None]
keyword[elif] identifier[typekind] keyword[in] [ literal[string] ]:
identifier[argument] = keyword[None]
keyword[if] identifier[key] keyword[in] identifier[filters_struct] . identifier[keys] ():
identifier[value] = identifier[filters_struct] [ identifier[key] ]
keyword[else] :
identifier[value] = keyword[None]
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[typekind] , identifier[key] ))
identifier[filtertuple] =( identifier[key] , identifier[listfilters] [ identifier[key] ][ literal[int] ], identifier[typekind] , identifier[argument] , identifier[value] )
identifier[filters] . identifier[append] ( identifier[filtertuple] )
identifier[context] [ literal[string] ]= identifier[filters]
identifier[search_filter_button] = identifier[jsondata] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[search_filter_button] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[search_filter_button] = identifier[search_filter_button]
identifier[search] = identifier[jsondata] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] ()
identifier[newlen] = identifier[len] ( identifier[search] )
identifier[oldlen] = literal[int]
keyword[while] identifier[newlen] != identifier[oldlen] :
identifier[oldlen] = identifier[newlen]
identifier[search] = identifier[search] . identifier[replace] ( literal[string] , literal[string] )
identifier[newlen] = identifier[len] ( identifier[search] )
keyword[if] identifier[len] ( identifier[search] )> literal[int] keyword[and] identifier[search] [ literal[int] ]== literal[string] :
identifier[search] = identifier[search] [ literal[int] :]
keyword[if] identifier[len] ( identifier[search] )> literal[int] keyword[and] identifier[search] [- literal[int] ]== literal[string] :
identifier[search] = identifier[search] [:- literal[int] ]
identifier[context] [ literal[string] ]= identifier[search]
identifier[datetimeQ] = keyword[None]
keyword[if] identifier[len] ( identifier[search] )> literal[int] :
identifier[tid] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[search] :
identifier[tid] = identifier[search] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[try] :
identifier[tid] = identifier[int] ( identifier[tid] )
keyword[except] identifier[Exception] :
identifier[tid] = keyword[None]
keyword[if] identifier[tid] :
identifier[search] = identifier[search] . identifier[replace] ( literal[string] %( identifier[tid] ), literal[string] )
identifier[search] = identifier[search] . identifier[replace] ( literal[string] , literal[string] )
identifier[tpk] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[search] :
identifier[tpk] = identifier[search] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[try] :
identifier[tpk] = identifier[int] ( identifier[tpk] )
keyword[except] identifier[Exception] :
identifier[tpk] = keyword[None]
keyword[if] identifier[tpk] :
identifier[search] = identifier[search] . identifier[replace] ( literal[string] %( identifier[tpk] ), literal[string] )
identifier[search] = identifier[search] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[len] ( identifier[search] )> literal[int] keyword[and] identifier[search] [ literal[int] ]== literal[string] :
identifier[search] = identifier[search] [ literal[int] :]
keyword[if] identifier[len] ( identifier[search] )> literal[int] keyword[and] identifier[search] [- literal[int] ]== literal[string] :
identifier[search] = identifier[search] [:- literal[int] ]
identifier[searchs] ={}
keyword[if] identifier[self] . identifier[autofiltering] :
identifier[searchs] . identifier[update] ( identifier[self] . identifier[autoSearchQ] ( identifier[MODELINF] , identifier[search] ))
identifier[tmp_search] = identifier[MODELINF] . identifier[searchQ] ( identifier[search] )
keyword[if] identifier[type] ( identifier[tmp_search] )== identifier[dict] :
identifier[searchs] . identifier[update] ( identifier[tmp_search] )
keyword[else] :
identifier[searchs] [ literal[string] ]&= identifier[tmp_search]
identifier[qobjects] ={}
identifier[qobjectsCustom] ={}
keyword[for] identifier[name] keyword[in] identifier[searchs] :
identifier[qtoken] = identifier[searchs] [ identifier[name] ]
keyword[if] identifier[qtoken] == literal[string] :
identifier[datetimeQ] = identifier[name]
keyword[continue]
keyword[elif] ( identifier[type] ( identifier[qtoken] )== identifier[str] ) keyword[or] ( identifier[type] ( identifier[qtoken] )== identifier[list] ):
keyword[if] identifier[type] ( identifier[qtoken] )== identifier[tuple] :
( identifier[query] , identifier[func] )= identifier[qtoken]
keyword[else] :
keyword[def] identifier[lambdax] ( identifier[x] ):
keyword[return] identifier[x]
identifier[func] = identifier[lambdax]
identifier[query] = identifier[qtoken]
keyword[if] identifier[search] :
keyword[for] identifier[word] keyword[in] identifier[search] . identifier[split] ( literal[string] ):
keyword[if] identifier[len] ( identifier[word] )> literal[int] :
keyword[if] identifier[word] [ literal[int] ]== literal[string] :
identifier[qdict] ={ literal[string] . identifier[format] ( identifier[query] ): identifier[func] ( identifier[word] [ literal[int] :])}
identifier[qtokens_element] =~ identifier[Q] (** identifier[qdict] )
keyword[else] :
identifier[qdict] ={ literal[string] . identifier[format] ( identifier[query] ): identifier[func] ( identifier[word] )}
identifier[qtokens_element] = identifier[Q] (** identifier[qdict] )
keyword[if] identifier[word] keyword[in] identifier[qobjects] :
identifier[qobjects] [ identifier[word] ]. identifier[append] ( identifier[qtokens_element] )
keyword[else] :
identifier[qobjects] [ identifier[word] ]=[ identifier[qtokens_element] ]
keyword[else] :
keyword[if] identifier[qobjectsCustom] :
identifier[qobjectsCustom] |= identifier[searchs] [ identifier[name] ]
keyword[else] :
identifier[qobjectsCustom] = identifier[searchs] [ identifier[name] ]
identifier[qdata] = keyword[None]
keyword[if] identifier[search] keyword[and] identifier[qobjects] :
keyword[for] identifier[word] keyword[in] identifier[search] . identifier[split] ( literal[string] ):
keyword[if] identifier[word] . identifier[split] ( literal[string] )[ literal[int] ] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[word] [ literal[int] ]== literal[string] :
identifier[negative] = keyword[True]
keyword[else] :
identifier[negative] = keyword[False]
identifier[qword] = keyword[None]
keyword[for] identifier[token] keyword[in] identifier[qobjects] [ identifier[word] ]:
keyword[if] identifier[qword] :
keyword[if] identifier[negative] :
identifier[qword] &= identifier[token]
keyword[else] :
identifier[qword] |= identifier[token]
keyword[else] :
identifier[qword] = identifier[token]
keyword[if] identifier[qword] :
keyword[if] identifier[qdata] :
identifier[qdata] &= identifier[qword]
keyword[else] :
identifier[qdata] = identifier[qword]
keyword[if] identifier[tid] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[id] = identifier[tid] )
keyword[if] identifier[tpk] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[pk] = identifier[tpk] )
keyword[if] identifier[qobjectsCustom] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[qobjectsCustom] )
keyword[if] identifier[qdata] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[qdata] )
keyword[else] :
identifier[searchs] = identifier[MODELINF] . identifier[searchQ] ( identifier[search] )
keyword[for] identifier[name] keyword[in] identifier[searchs] :
keyword[if] ( identifier[searchs] [ identifier[name] ]== literal[string] ):
identifier[datetimeQ] = identifier[name]
keyword[continue]
identifier[context] [ literal[string] ]= identifier[datetimeQ]
keyword[if] identifier[datetimeQ] :
identifier[f] ={}
identifier[f] [ literal[string] ]=( literal[int] , literal[int] , keyword[False] )
identifier[f] [ literal[string] ]=( literal[int] , literal[int] , keyword[False] )
identifier[f] [ literal[string] ]=( literal[int] , literal[int] , keyword[False] )
identifier[f] [ literal[string] ]=( literal[int] , literal[int] , keyword[False] )
identifier[f] [ literal[string] ]=( literal[int] , literal[int] , keyword[False] )
identifier[f] [ literal[string] ]=( literal[int] , literal[int] , keyword[False] )
identifier[date_elements] =[ keyword[None] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[element] keyword[in] identifier[date_elements] [ literal[int] :]:
identifier[value] = identifier[jsondata] . identifier[get] ( identifier[element] , keyword[None] )
keyword[if] identifier[value] :
identifier[f] [ identifier[element] ]=( identifier[int] ( identifier[value] ), identifier[int] ( identifier[value] ), keyword[True] )
keyword[if] identifier[f] [ literal[string] ][ literal[int] ] keyword[and] identifier[f] [ literal[string] ][ literal[int] ] keyword[and] keyword[not] identifier[f] [ literal[string] ][ literal[int] ]:
( identifier[g] , identifier[lastday] )= identifier[calendar] . identifier[monthrange] ( identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ])
identifier[f] [ literal[string] ]=( identifier[f] [ literal[string] ][ literal[int] ], identifier[lastday] , identifier[f] [ literal[string] ][ literal[int] ])
identifier[date_min] = identifier[datetime] . identifier[datetime] ( identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ])
identifier[date_max] = identifier[datetime] . identifier[datetime] ( identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ], identifier[f] [ literal[string] ][ literal[int] ])
identifier[qarg1] ={ literal[string] . identifier[format] ( identifier[datetimeQ] ): identifier[date_min] }
identifier[qarg2] ={ literal[string] . identifier[format] ( identifier[datetimeQ] ): identifier[date_max] }
identifier[qarg3] ={ identifier[datetimeQ] : keyword[None] }
identifier[queryset] = identifier[queryset] . identifier[filter] (( identifier[Q] (** identifier[qarg1] )& identifier[Q] (** identifier[qarg2] ))| identifier[Q] (** identifier[qarg3] ))
identifier[deepness_index] = literal[int]
keyword[for] identifier[element] keyword[in] identifier[date_elements] [ literal[int] :]:
keyword[if] identifier[f] [ identifier[element] ][ literal[int] ]:
identifier[deepness_index] += literal[int]
keyword[else] :
keyword[break]
identifier[exclusion] ={}
identifier[exclusion] [ identifier[datetimeQ] ]= keyword[None]
identifier[date_results] = identifier[queryset] . identifier[exclude] (** identifier[exclusion] ). identifier[values_list] ( identifier[datetimeQ] , identifier[flat] = keyword[True] )
keyword[if] identifier[f] [ literal[string] ][ literal[int] ]!= identifier[f] [ literal[string] ][ literal[int] ]:
keyword[if] identifier[f] [ literal[string] ][ literal[int] ]== identifier[f] [ literal[string] ][ literal[int] ]:
identifier[date_results] = identifier[date_results] . identifier[datetimes] ( identifier[datetimeQ] , literal[string] )
keyword[elif] identifier[f] [ literal[string] ][ literal[int] ]== identifier[f] [ literal[string] ][ literal[int] ]:
identifier[date_results] = identifier[date_results] . identifier[datetimes] ( identifier[datetimeQ] , literal[string] )
keyword[else] :
identifier[date_results] = identifier[date_results] . identifier[datetimes] ( identifier[datetimeQ] , literal[string] )
identifier[get] = identifier[context] [ literal[string] ]
identifier[context] [ literal[string] ]={}
keyword[if] identifier[deepness_index] + literal[int] == identifier[len] ( identifier[date_elements] ):
identifier[context] [ literal[string] ][ literal[string] ]= keyword[None]
keyword[else] :
identifier[context] [ literal[string] ][ literal[string] ]= identifier[date_elements] [ identifier[deepness_index] + literal[int] ]
identifier[context] [ literal[string] ][ literal[string] ]=[]
identifier[context] [ literal[string] ][ literal[string] ]=[]
keyword[for] identifier[element] keyword[in] identifier[get] :
keyword[if] ( keyword[not] identifier[element] [ literal[string] ] keyword[in] identifier[date_elements] ):
identifier[struct] ={}
identifier[struct] [ literal[string] ]= identifier[element] [ literal[string] ]
identifier[struct] [ literal[string] ]= identifier[element] [ literal[string] ]
identifier[context] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[struct] )
identifier[context] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[struct] )
keyword[elif] ( identifier[element] [ literal[string] ]!= identifier[date_elements] [ identifier[deepness_index] ] keyword[and] identifier[f] [ identifier[element] [ literal[string] ]][ literal[int] ]):
identifier[struct] ={}
identifier[struct] [ literal[string] ]= identifier[element] [ literal[string] ]
identifier[struct] [ literal[string] ]= identifier[element] [ literal[string] ]
identifier[context] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[struct] )
identifier[context] [ literal[string] ][ literal[string] ]=[]
keyword[for] identifier[element] keyword[in] identifier[date_results] :
identifier[context] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[element] . identifier[timetuple] ()[ identifier[deepness_index] ])
identifier[context] [ literal[string] ][ literal[string] ]= identifier[list] ( identifier[set] ( identifier[context] [ literal[string] ][ literal[string] ]))
identifier[context] [ literal[string] ][ literal[string] ]. identifier[sort] ()
keyword[if] identifier[self] . identifier[json_worker] :
identifier[rightnow] ={}
keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[rightnow] [ identifier[key] ]=( identifier[f] [ identifier[key] ][ literal[int] ] keyword[and] identifier[f] [ identifier[key] ][ literal[int] ]) keyword[or] keyword[None]
keyword[else] :
keyword[if] identifier[f] [ literal[string] ][ literal[int] ]:
identifier[month] = identifier[monthname] ( identifier[f] [ literal[string] ][ literal[int] ])
keyword[else] :
identifier[month] = literal[string]
keyword[if] identifier[f] [ literal[string] ][ literal[int] ]:
identifier[rightnow] = identifier[string_concat] ( identifier[grv] ( identifier[f] , literal[string] ), literal[string] , identifier[month] , literal[string] , identifier[grv] ( identifier[f] , literal[string] ), literal[string] , identifier[grv] ( identifier[f] , literal[string] ), literal[string] , identifier[grv] ( identifier[f] , literal[string] ), literal[string] , identifier[grv] ( identifier[f] , literal[string] ))
keyword[else] :
identifier[rightnow] = identifier[string_concat] ( identifier[grv] ( identifier[f] , literal[string] ), literal[string] , identifier[month] , literal[string] , identifier[grv] ( identifier[f] , literal[string] ))
identifier[context] [ literal[string] ][ literal[string] ]= identifier[rightnow]
keyword[else] :
identifier[context] [ literal[string] ]= keyword[None]
keyword[try] :
identifier[order_get] = identifier[jsondata] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[type] ( identifier[order_get] )== identifier[list] :
identifier[order_by_struct] = identifier[order_get]
keyword[else] :
identifier[order_by_struct] = identifier[json] . identifier[loads] ( identifier[str] ( identifier[order_get] ))
keyword[except] identifier[Exception] :
identifier[order_by_struct] =[]
identifier[order_by] =[]
identifier[position] ={}
identifier[counter] = literal[int]
identifier[context] [ literal[string] ]=[]
identifier[self] . identifier[__fields] =[]
keyword[for] identifier[value] keyword[in] identifier[fields] :
identifier[self] . identifier[__fields] . identifier[append] ( identifier[value] [ literal[int] ])
identifier[self] . identifier[__autorules] = identifier[self] . identifier[autorules] ()
keyword[for] identifier[order] keyword[in] identifier[order_by_struct] :
identifier[name] = identifier[list] ( identifier[order] . identifier[keys] ())[ literal[int] ]
identifier[lbl] = keyword[None]
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[__autorules] :
keyword[if] literal[string] . identifier[format] ( identifier[name] ) keyword[in] identifier[field] :
identifier[name] = identifier[field] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[lbl] = identifier[field] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[break]
identifier[direction] = identifier[order] [ identifier[name] ]
keyword[if] identifier[lbl] keyword[and] keyword[not] identifier[lbl] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[lbl] . identifier[endswith] ( literal[string] ):
identifier[name] = identifier[lbl]
keyword[if] identifier[direction] == literal[string] :
identifier[order_by] . identifier[append] ( literal[string] %( identifier[remove_getdisplay] ( identifier[name] )))
keyword[elif] identifier[direction] == literal[string] :
identifier[order_by] . identifier[append] ( literal[string] %( identifier[remove_getdisplay] ( identifier[name] )))
identifier[position] [ identifier[name] ]= identifier[counter]
identifier[counter] += literal[int]
keyword[if] identifier[order_by] :
identifier[queryset] = identifier[queryset] . identifier[order_by] (* identifier[order_by] )
keyword[else] :
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] identifier[type] ( identifier[self] . identifier[default_ordering] )== identifier[list] :
identifier[queryset] = identifier[queryset] . identifier[order_by] (* identifier[self] . identifier[default_ordering] )
keyword[else] :
identifier[queryset] = identifier[queryset] . identifier[order_by] ( identifier[self] . identifier[default_ordering] )
keyword[else] :
identifier[queryset] = identifier[queryset] . identifier[order_by] ( literal[string] )
identifier[sort] ={}
keyword[for] identifier[value] keyword[in] identifier[fields] :
keyword[if] identifier[value] [ literal[int] ]:
identifier[name] = identifier[value] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[order_key] = identifier[name]
identifier[type_field] = identifier[self] . identifier[get_type_field] ( identifier[value] [ literal[int] ]. identifier[split] ( literal[string] )[- literal[int] ])
keyword[else] :
identifier[name] = identifier[value] [ literal[int] ]
identifier[hash_key] = identifier[hashlib] . identifier[md5] ( identifier[value] [ literal[int] ]. identifier[encode] ()). identifier[hexdigest] ()
identifier[order_key] = literal[string] . identifier[format] ( identifier[hash_key] )
identifier[type_field] = keyword[None]
identifier[publicname] = identifier[value] [ literal[int] ]
keyword[if] identifier[len] ( identifier[value] )> literal[int] :
identifier[size] = identifier[value] [ literal[int] ]
keyword[else] :
identifier[size] = keyword[None]
keyword[if] identifier[len] ( identifier[value] )> literal[int] :
identifier[align] = identifier[value] [ literal[int] ]
keyword[else] :
identifier[align] = keyword[None]
keyword[if] identifier[len] ( identifier[value] )> literal[int] :
identifier[filter_column] = identifier[value] [ literal[int] ]
keyword[else] :
identifier[filter_column] = keyword[None]
identifier[ordering] =[]
identifier[found] = keyword[False]
keyword[for] identifier[order] keyword[in] identifier[order_by_struct] :
identifier[subname] = identifier[list] ( identifier[order] . identifier[keys] ())[ literal[int] ]
identifier[direction] = identifier[order] [ identifier[subname] ]
keyword[if] identifier[order_key] == identifier[subname] :
keyword[if] identifier[direction] == literal[string] :
identifier[direction] = literal[string]
identifier[sort_class] = literal[string]
keyword[elif] identifier[direction] == literal[string] :
identifier[direction] = literal[string]
identifier[sort_class] = literal[string]
keyword[else] :
identifier[sort_class] = literal[string]
identifier[direction] = literal[string]
identifier[found] = keyword[True]
keyword[if] identifier[direction] == literal[string] keyword[or] identifier[direction] == literal[string] :
identifier[ordering] . identifier[append] ({ identifier[subname] : identifier[direction] })
keyword[if] keyword[not] identifier[found] :
identifier[ordering] . identifier[append] ({ identifier[order_key] : literal[string] })
identifier[sort_class] = literal[string]
identifier[sort] [ identifier[order_key] ]={}
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[name]
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[publicname]
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[align]
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[type_field]
keyword[if] identifier[filter_column] :
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[filter_column]
keyword[if] identifier[jsonquery] keyword[is] keyword[None] :
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[size]
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[sort_class]
keyword[if] identifier[order_key] keyword[and] identifier[order_key] [ literal[int] ]!= literal[string] :
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[json] . identifier[dumps] ( identifier[ordering] ). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[order_key] keyword[in] identifier[position] :
identifier[sort] [ identifier[order_key] ][ literal[string] ]= identifier[position] [ identifier[order_key] ]
keyword[if] identifier[jsonquery] keyword[is] keyword[not] keyword[None] :
identifier[context] [ literal[string] ]= identifier[order_by_struct]
identifier[context] [ literal[string] ]=[]
keyword[for] identifier[value] keyword[in] identifier[fields] :
identifier[field] = identifier[value] [ literal[int] ]
keyword[if] identifier[field] :
identifier[context] [ literal[string] ]. identifier[append] ( identifier[sort] [ identifier[field] . identifier[split] ( literal[string] )[ literal[int] ]])
keyword[else] :
identifier[hash_key] = identifier[hashlib] . identifier[md5] ( identifier[value] [ literal[int] ]. identifier[encode] ()). identifier[hexdigest] ()
identifier[field] = literal[string] . identifier[format] ( identifier[hash_key] )
identifier[context] [ literal[string] ]. identifier[append] ( identifier[sort] [ identifier[field] ])
identifier[self] . identifier[__columns] =[ literal[string] ]
identifier[self] . identifier[__foreignkeys] =[]
keyword[for] identifier[column] keyword[in] identifier[self] . identifier[model] . identifier[_meta] . identifier[fields] :
identifier[self] . identifier[__columns] . identifier[append] ( identifier[column] . identifier[name] )
keyword[if] identifier[column] . identifier[is_relation] :
identifier[self] . identifier[__foreignkeys] . identifier[append] ( identifier[column] . identifier[name] )
identifier[self] . identifier[__related_objects] =[]
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[model] . identifier[_meta] . identifier[related_objects] :
identifier[self] . identifier[__related_objects] . identifier[append] ( identifier[f] . identifier[name] )
identifier[model_properties] = identifier[self] . identifier[__columns] + identifier[self] . identifier[__related_objects]
identifier[autorules_keys] = identifier[sorted] ( identifier[self] . identifier[__autorules] . identifier[keys] ())
identifier[query_renamed] ={}
identifier[query_optimizer] =[]
identifier[query_verifier] =[]
identifier[query_select_related] =[]
identifier[fields_related_model] =[]
keyword[for] identifier[rule] keyword[in] identifier[autorules_keys] :
identifier[found] = keyword[False]
identifier[rule_org] = identifier[rule]
identifier[rulesp] = identifier[rule] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[rulesp] )== literal[int] :
( identifier[alias] , identifier[rule] )= identifier[rulesp]
keyword[else] :
identifier[alias] = identifier[rule]
identifier[nfrule] = identifier[rule] . identifier[split] ( literal[string] )
identifier[do_select_related] = keyword[False]
identifier[model] = identifier[self] . identifier[model]
keyword[if] identifier[len] ( identifier[nfrule] )> literal[int] :
identifier[ruletmp] =[]
identifier[field_related_model] =[]
keyword[for] identifier[n] keyword[in] identifier[nfrule] :
keyword[if] identifier[model] :
keyword[for] identifier[fi] keyword[in] identifier[model] . identifier[_meta] . identifier[fields] :
keyword[if] identifier[fi] . identifier[name] == identifier[n] :
identifier[found] = keyword[True]
identifier[ruletmp] . identifier[append] ( identifier[n] )
keyword[if] identifier[fi] . identifier[is_relation] :
identifier[model] = identifier[fi] . identifier[related_model]
identifier[field_related_model] . identifier[append] ( identifier[fi] . identifier[name] )
keyword[else] :
identifier[do_select_related] = keyword[True]
identifier[model] = keyword[None]
keyword[break]
keyword[if] keyword[not] identifier[found] keyword[or] identifier[model] keyword[is] keyword[None] :
keyword[break]
keyword[if] identifier[field_related_model] :
identifier[fields_related_model] . identifier[append] ( literal[string] . identifier[join] ( identifier[field_related_model] ))
keyword[if] identifier[ruletmp] != identifier[nfrule] :
identifier[do_select_related] = keyword[False]
keyword[elif] identifier[nfrule] [ literal[int] ] keyword[in] [ identifier[x] . identifier[name] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[model] . identifier[_meta] . identifier[fields] ] keyword[or] identifier[nfrule] [ literal[int] ]== literal[string] :
identifier[found] = keyword[True]
keyword[for] identifier[fi] keyword[in] identifier[model] . identifier[_meta] . identifier[fields] :
keyword[if] identifier[fi] . identifier[name] == identifier[nfrule] [ literal[int] ] keyword[and] identifier[fi] . identifier[is_relation] :
identifier[fields_related_model] . identifier[append] ( identifier[nfrule] [ literal[int] ])
keyword[if] keyword[not] identifier[self] . identifier[haystack] keyword[and] ( identifier[do_select_related] keyword[or] identifier[rule] keyword[in] identifier[self] . identifier[__foreignkeys] ):
keyword[if] literal[string] keyword[in] identifier[rule] :
identifier[query_select_related] . identifier[append] ( literal[string] . identifier[join] ( identifier[rule] . identifier[split] ( literal[string] )[ literal[int] :- literal[int] ]))
keyword[else] :
identifier[query_select_related] . identifier[append] ( identifier[rule] )
identifier[nfrule] = identifier[nfrule] [ literal[int] ]
keyword[if] identifier[nfrule] keyword[in] identifier[self] . identifier[__columns] :
keyword[if] identifier[rule] keyword[not] keyword[in] identifier[fields_related_model] :
identifier[query_verifier] . identifier[append] ( identifier[rule_org] )
keyword[if] identifier[alias] != identifier[rule] :
identifier[query_renamed] [ identifier[alias] ]= identifier[F] ( identifier[rule] )
identifier[query_optimizer] . identifier[append] ( identifier[alias] )
keyword[else] :
identifier[query_optimizer] . identifier[append] ( identifier[rule] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] identifier[callable] ( identifier[self] . identifier[annotations] ):
identifier[anot] = identifier[self] . identifier[annotations] ( identifier[MODELINF] )
keyword[else] :
identifier[anot] = identifier[self] . identifier[annotations]
keyword[for] identifier[xnfrule] keyword[in] identifier[anot] . identifier[keys] ():
identifier[found] = keyword[True]
keyword[if] identifier[xnfrule] keyword[not] keyword[in] identifier[query_verifier] :
identifier[query_verifier] . identifier[append] ( identifier[xnfrule] )
identifier[query_optimizer] . identifier[append] ( identifier[xnfrule] )
keyword[if] keyword[not] identifier[found] :
identifier[query_renamed] ={}
identifier[query_optimizer] =[]
identifier[query_verifier] =[]
identifier[query_select_related] =[]
keyword[break]
keyword[for] identifier[rename] keyword[in] identifier[query_renamed] . identifier[keys] ():
keyword[if] identifier[rename] keyword[in] identifier[model_properties] :
keyword[if] identifier[rename] keyword[in] identifier[self] . identifier[__foreignkeys] :
identifier[msg] = literal[string]
keyword[elif] identifier[rename] keyword[in] identifier[self] . identifier[__columns] :
identifier[msg] = literal[string]
keyword[elif] identifier[rename] keyword[in] identifier[self] . identifier[__related_objects] :
identifier[msg] = literal[string]
keyword[raise] identifier[Exception] ( identifier[msg] . identifier[format] ( identifier[rename] , identifier[self] . identifier[_modelname] , identifier[self] . identifier[_appname] ))
keyword[if] identifier[found] keyword[and] identifier[query_select_related] :
identifier[queryset] = identifier[queryset] . identifier[select_related] (* identifier[query_select_related] )
identifier[query_verifier] . identifier[sort] ()
identifier[autorules_keys] . identifier[sort] ()
keyword[if] identifier[found] keyword[and] identifier[query_verifier] == identifier[autorules_keys] :
keyword[if] identifier[query_renamed] :
identifier[queryset] = identifier[queryset] . identifier[annotate] (** identifier[query_renamed] ). identifier[values] (* identifier[query_optimizer] )
keyword[else] :
identifier[queryset] = identifier[queryset] . identifier[values] (* identifier[query_optimizer] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[queryset] = identifier[self] . identifier[custom_queryset] ( identifier[queryset] , identifier[MODELINF] )
literal[string]
keyword[if] identifier[raw_query] :
keyword[return] identifier[queryset]
keyword[else] :
identifier[total_rows_per_page] = identifier[jsondata] . identifier[get] ( literal[string] , identifier[self] . identifier[default_rows_per_page] )
identifier[pages_to_bring] = identifier[jsondata] . identifier[get] ( literal[string] , literal[int] )
keyword[if] identifier[total_rows_per_page] == literal[string] keyword[or] identifier[self] . identifier[export] :
identifier[total_rows_per_page] = identifier[queryset] . identifier[count] ()
identifier[paginator] = identifier[Paginator] ( identifier[queryset] , identifier[total_rows_per_page] )
identifier[total_registers] = identifier[paginator] . identifier[count]
keyword[if] identifier[total_rows_per_page] :
keyword[try] :
identifier[total_rows_per_page] = identifier[int] ( identifier[total_rows_per_page] )
keyword[except] identifier[Exception] :
identifier[total_rows_per_page] = literal[string]
keyword[else] :
identifier[total_rows_per_page] = identifier[self] . identifier[default_rows_per_page]
keyword[if] identifier[total_rows_per_page] == literal[string] :
identifier[page_number] = literal[int]
identifier[total_rows_per_page] = identifier[total_registers]
identifier[total_rows_per_page_out] = identifier[_] ( literal[string] )
identifier[total_pages] = literal[int]
keyword[else] :
identifier[total_rows_per_page] = identifier[int] ( identifier[total_rows_per_page] )
identifier[total_rows_per_page_out] = identifier[total_rows_per_page]
identifier[total_pages] = identifier[int] ( identifier[total_registers] / identifier[total_rows_per_page] )
keyword[if] identifier[total_registers] % identifier[total_rows_per_page] :
identifier[total_pages] += literal[int]
identifier[page_number] = identifier[jsondata] . identifier[get] ( literal[string] , literal[int] )
keyword[if] identifier[page_number] == literal[string] :
identifier[page_number] = identifier[total_pages]
keyword[else] :
keyword[try] :
identifier[page_number] = identifier[int] ( identifier[page_number] )
keyword[except] identifier[Exception] :
identifier[page_number] = literal[int]
keyword[if] identifier[page_number] < literal[int] :
identifier[page_number] = literal[int]
keyword[if] identifier[page_number] > identifier[total_pages] :
identifier[page_number] = identifier[total_pages]
identifier[choice] ={}
identifier[c] = identifier[self] . identifier[default_rows_per_page]
identifier[chk] = literal[int]
keyword[while] identifier[total_registers] >= identifier[c] :
identifier[choice] [ identifier[c] ]= identifier[c]
keyword[if] identifier[chk] == literal[int] :
identifier[c] = identifier[c] * literal[int]
identifier[chk] = literal[int]
keyword[elif] identifier[chk] == literal[int] :
identifier[c] = identifier[c] * literal[int] + identifier[int] ( identifier[c] / literal[int] )
identifier[chk] = literal[int]
keyword[elif] identifier[chk] == literal[int] :
identifier[c] *= literal[int]
identifier[chk] = literal[int]
keyword[if] identifier[c] > literal[int] :
keyword[break]
keyword[if] identifier[settings] . identifier[ALL_PAGESALLOWED] :
identifier[choice] [ literal[string] ]= identifier[_] ( literal[string] )
identifier[context] [ literal[string] ]= identifier[choice]
identifier[context] [ literal[string] ]= identifier[total_rows_per_page_out]
identifier[context] [ literal[string] ]= identifier[pages_to_bring]
identifier[context] [ literal[string] ]= identifier[page_number]
identifier[context] [ literal[string] ]= identifier[total_registers]
keyword[if] identifier[total_rows_per_page] == literal[string] :
identifier[total_rows_per_page] = keyword[None]
identifier[context] [ literal[string] ]= keyword[None]
identifier[context] [ literal[string] ]= keyword[None]
identifier[context] [ literal[string] ]= literal[int]
identifier[context] [ literal[string] ]= identifier[total_registers]
keyword[else] :
keyword[if] identifier[page_number] <= literal[int] :
identifier[context] [ literal[string] ]= keyword[None]
keyword[else] :
identifier[context] [ literal[string] ]= identifier[page_number] - literal[int]
keyword[if] identifier[page_number] >= identifier[total_pages] :
identifier[context] [ literal[string] ]= keyword[None]
keyword[else] :
identifier[context] [ literal[string] ]= identifier[page_number] + literal[int]
identifier[context] [ literal[string] ]=( identifier[page_number] - literal[int] )* identifier[total_rows_per_page] + literal[int]
identifier[context] [ literal[string] ]= identifier[total_rows_per_page]
identifier[context] [ literal[string] ]= identifier[min] ( identifier[context] [ literal[string] ]+ identifier[context] [ literal[string] ]- literal[int] , identifier[total_registers] )
identifier[regs] =[]
keyword[if] identifier[paginator] . identifier[count] :
identifier[desired_page_number] = identifier[page_number]
keyword[try] :
identifier[range_pages_to_bring] = identifier[xrange] ( identifier[pages_to_bring] )
keyword[except] identifier[NameError] :
identifier[range_pages_to_bring] = identifier[range] ( identifier[pages_to_bring] )
keyword[for] identifier[p] keyword[in] identifier[range_pages_to_bring] :
keyword[try] :
identifier[regs] += identifier[paginator] . identifier[page] ( identifier[desired_page_number] )
identifier[desired_page_number] += literal[int]
keyword[except] identifier[PageNotAnInteger] :
identifier[regs] += identifier[paginator] . identifier[page] ( literal[int] )
identifier[desired_page_number] = literal[int]
keyword[except] identifier[EmptyPage] :
keyword[if] identifier[pages_to_bring] == literal[int] :
identifier[regs] += identifier[paginator] . identifier[page] ( identifier[paginator] . identifier[num_pages] )
keyword[break]
keyword[if] identifier[total_registers] :
identifier[context] [ literal[string] ]= identifier[pages] ( identifier[paginator] , identifier[page_number] )
keyword[try] :
identifier[range_fill] = identifier[xrange] ( identifier[pages_to_bring] - literal[int] )
keyword[except] identifier[NameError] :
identifier[range_fill] = identifier[range] ( identifier[pages_to_bring] - literal[int] )
keyword[for] identifier[p] keyword[in] identifier[range_fill] :
identifier[page_number] += literal[int]
identifier[context] [ literal[string] ]+= identifier[pages] ( identifier[paginator] , identifier[page_number] )
keyword[else] :
identifier[context] [ literal[string] ]=[]
keyword[return] identifier[regs] | def get_queryset(self, raw_query=False):
# Call the base implementation
if not self.haystack:
queryset = super(GenList, self).get_queryset() # depends on [control=['if'], data=[]]
else:
queryset = SearchQuerySet().models(self.model)
# Optional tweak methods
Mfields = None
MlimitQ = None
MsearchF = None
MsearchQ = None
if hasattr(self, '__fields__'):
Mfields = self.__fields__ # depends on [control=['if'], data=[]]
if hasattr(self, '__limitQ__'):
MlimitQ = self.__limitQ__ # depends on [control=['if'], data=[]]
if hasattr(self, '__searchF__'):
MsearchF = self.__searchF__ # depends on [control=['if'], data=[]]
if hasattr(self, '__searchQ__'):
MsearchQ = self.__searchQ__ # depends on [control=['if'], data=[]]
self._viewname = self.__module__
# Link to our context and kwargs
context = self.__context
# Update kwargs if json key is present
jsonquerytxt = self.request.GET.get('json', self.request.POST.get('json', None))
if jsonquerytxt is not None:
# Decode json
try:
jsonquery = json.loads(jsonquerytxt) # depends on [control=['try'], data=[]]
except json.JSONDecodeError as e:
raise IOError('json argument in your GET/POST parameters is not a valid JSON string') # depends on [control=['except'], data=[]]
# Set json context
jsondata = self.set_context_json(jsonquery)
# Get listid
listid = jsondata.pop('listid')
# Get elementid
elementid = jsondata.pop('elementid') # depends on [control=['if'], data=['jsonquerytxt']]
else:
listid = None
elementid = None
jsondata = {}
jsonquery = {}
# Build info for GenModel methods
MODELINF = MODELINFO(self.model, self._appname, self._modelname, self._viewname, self.request, self.user, self.profile, jsonquery, Mfields, MlimitQ, MsearchF, MsearchQ, listid, elementid, self.__kwargs)
# Process the filter
context['filters'] = []
context['filters_obj'] = {}
# Get field list
fields = getattr(self, 'fields', MODELINF.fields())
# Save GET values
context['get'] = []
context['getval'] = {}
for name in jsondata:
struct = {}
struct['name'] = name
if name == 'rowsperpage':
struct['value'] = self.default_rows_per_page # depends on [control=['if'], data=[]]
elif name == 'page':
struct['value'] = 1 # depends on [control=['if'], data=[]]
elif name == 'pages_to_bring':
struct['value'] = 1 # depends on [control=['if'], data=[]]
else:
struct['value'] = jsondata[name]
context['get'].append(struct)
context['getval'][name] = struct['value'] # depends on [control=['for'], data=['name']]
# Filter on limits
limits = MODELINF.limitQ()
qobjects = None
distinct = False
for name in limits:
if name == 'i_distinct' or name == 'e_distinct':
distinct = True # depends on [control=['if'], data=[]]
elif qobjects:
qobjects &= limits[name] # depends on [control=['if'], data=[]]
else:
qobjects = limits[name] # depends on [control=['for'], data=['name']]
if qobjects:
queryset = queryset.filter(qobjects) # depends on [control=['if'], data=[]]
if hasattr(self, 'annotations'):
if not self.haystack:
# Prepare annotations
if callable(self.annotations):
anot = self.annotations(MODELINF) # depends on [control=['if'], data=[]]
else:
anot = self.annotations
# Set annotations
queryset = queryset.annotate(**anot) # depends on [control=['if'], data=[]]
else:
raise IOError("Haystack doesn't support annotate") # depends on [control=['if'], data=[]]
if distinct:
queryset = queryset.distinct() # depends on [control=['if'], data=[]]
# Filters on fields requested by the user request
try:
filters_get = jsondata.get('filters', '{}')
if type(filters_get) == dict:
filters_by_struct = filters_get # depends on [control=['if'], data=[]]
else:
filters_by_struct = json.loads(str(filters_get)) # depends on [control=['try'], data=[]]
except Exception:
filters_by_struct = [] # depends on [control=['except'], data=[]]
listfilters = {}
# Autofilter system
if self.autofiltering:
listfilters.update(self.autoSearchF(MODELINF)) # depends on [control=['if'], data=[]]
# List of filters from the MODELINF
listfilters.update(MODELINF.searchF())
# Process the search
filters_struct = {}
for key in filters_by_struct:
# Get the value of the original filter
value = filters_by_struct[key]
# If there is something to filter, filter is not being changed and filter is known by the class
try:
value = int(value) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
# ORIG if (key in listfilters) and ((value>0) or (type(value) == list)):
# V1 if (value and type(value) == int and key in listfilters) and ((value > 0) or (type(value) == list)):
# V2 if (value and type(value) == int and key in listfilters) or ((value > 0) or (type(value) == list)):
if value and key in listfilters:
# Add the filter to the queryset
rule = listfilters[key]
# Get type
typekind = rule[2]
if type(typekind) == list:
# Compatibility: set typekind and fv in the old fassion
if type(value) == int:
fv = typekind[value - 1][0]
queryset = queryset.filter(rule[1](fv))
typekind = 'select' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif typekind == 'select':
# Get selected value from rule
if type(value) == int:
fv = rule[3][value - 1][0]
queryset = queryset.filter(rule[1](fv)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif typekind in ['multiselect', 'multidynamicselect']:
# Get selected values from rule
if type(value) in (list, tuple) and len(value):
qobjects = Q(rule[1](value[0]))
for fvt in value[1:]:
qobjects |= Q(rule[1](fvt)) # depends on [control=['for'], data=['fvt']]
queryset = queryset.filter(qobjects) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif typekind in ['daterange', 'input']:
# No arguments
fv = value
queryset = queryset.filter(rule[1](fv)) # depends on [control=['if'], data=[]]
elif typekind in ['checkbox']:
fv = value
queryset = queryset.filter(rule[1](fv)) # depends on [control=['if'], data=[]]
else:
raise IOError("Wrong typekind '{0}' for filter '{1}'".format(typekind, key))
# Save it in the struct as a valid filter
filters_struct[key] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
# Rewrite filters_json updated
filters_json = json.dumps(filters_struct)
# Build the clean get for filters
get = context['get']
filters_get = []
for element in get:
if element['name'] not in ['filters']:
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
filters_get.append(struct) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
# Add filter_json
struct = {}
struct['name'] = 'filters'
struct['value'] = filters_json
filters_get.append(struct)
context['filters_get'] = filters_get
# Get the list of filters allowed by this class
filters = []
for key in listfilters:
typekind = listfilters[key][2]
if type(typekind) == list:
# Compatibility: set typekind and fv in the old fassion
choice = [_('All')]
for value in typekind:
choice.append(value[1]) # depends on [control=['for'], data=['value']]
# Decide the choosen field
if key in filters_struct.keys():
value = int(filters_struct[key]) # depends on [control=['if'], data=['key']]
else:
value = 0
typekind = 'select'
argument = choice # depends on [control=['if'], data=[]]
elif typekind == 'select':
typevalue = listfilters[key][3]
choice = [_('All')]
for value in typevalue:
choice.append(value[1]) # depends on [control=['for'], data=['value']]
# Decide the choosen field
if key in filters_struct.keys():
value = int(filters_struct[key]) # depends on [control=['if'], data=['key']]
else:
value = 0
# Set choice as the command's argument
argument = choice # depends on [control=['if'], data=[]]
elif typekind in ['multiselect', 'multidynamicselect']:
if typekind == 'multiselect':
typevalue = listfilters[key][3]
choice = []
for value in typevalue:
choice.append({'id': value[0], 'label': value[1]}) # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]]
else:
choice = list(listfilters[key][3:])
choice[1] = reverse_lazy(choice[1], kwargs={'search': 'a'})[:-1]
# Decide the choosen field
if key in filters_struct.keys():
value = filters_struct[key] # depends on [control=['if'], data=['key']]
else:
value = []
# Set choice as the command's argument
argument = choice # depends on [control=['if'], data=['typekind']]
elif typekind in ['daterange', 'input']:
# Commands withouth arguments
argument = None
# Get the selected value
if key in filters_struct.keys():
value = filters_struct[key] # depends on [control=['if'], data=['key']]
else:
value = None # depends on [control=['if'], data=[]]
elif typekind in ['checkbox']:
# Commands withouth arguments
argument = None
# Get the selected value
if key in filters_struct.keys():
value = filters_struct[key] # depends on [control=['if'], data=['key']]
else:
value = None # depends on [control=['if'], data=[]]
else:
raise IOError("Wrong typekind '{0}' for filter '{1}'".format(typekind, key))
# Build filtertuple
filtertuple = (key, listfilters[key][0], typekind, argument, value)
# Save this filter in the corresponding list
filters.append(filtertuple) # depends on [control=['for'], data=['key']]
# Save all filters
context['filters'] = filters
# Search filter button
search_filter_button = jsondata.get('search_filter_button', None)
if search_filter_button is not None:
self.search_filter_button = search_filter_button # depends on [control=['if'], data=['search_filter_button']]
# Search text in all fields
search = jsondata.get('search', '').lower()
# Remove extra spaces
newlen = len(search)
oldlen = 0
while newlen != oldlen:
oldlen = newlen
search = search.replace(' ', ' ')
newlen = len(search) # depends on [control=['while'], data=['newlen', 'oldlen']]
if len(search) > 0 and search[0] == ' ':
search = search[1:] # depends on [control=['if'], data=[]]
if len(search) > 0 and search[-1] == ' ':
search = search[:-1] # depends on [control=['if'], data=[]]
# Save in context
context['search'] = search
datetimeQ = None
if len(search) > 0:
# Get ID
tid = None
if 'id:' in search:
tid = search.split(':')[1].split(' ')[0]
# Decide if it is what we expect
try:
tid = int(tid) # depends on [control=['try'], data=[]]
except Exception:
tid = None # depends on [control=['except'], data=[]]
# Remove the token
if tid:
search = search.replace('id:%s' % tid, '')
search = search.replace(' ', ' ') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['search']]
# Get PK
tpk = None
if 'pk:' in search:
tpk = search.split(':')[1].split(' ')[0]
# Decide if it is what we expect
try:
tpk = int(tpk) # depends on [control=['try'], data=[]]
except Exception:
tpk = None # depends on [control=['except'], data=[]]
# Remove the token
if tpk:
search = search.replace('pk:%s' % tpk, '')
search = search.replace(' ', ' ') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['search']]
# Spaces on front and behind
if len(search) > 0 and search[0] == ' ':
search = search[1:] # depends on [control=['if'], data=[]]
if len(search) > 0 and search[-1] == ' ':
search = search[:-1] # depends on [control=['if'], data=[]]
searchs = {}
# Autofilter system
if self.autofiltering:
searchs.update(self.autoSearchQ(MODELINF, search)) # depends on [control=['if'], data=[]]
# Fields to search in from the MODELINF
tmp_search = MODELINF.searchQ(search)
if type(tmp_search) == dict:
searchs.update(tmp_search) # depends on [control=['if'], data=[]]
else:
searchs['autoSearchQ'] &= tmp_search
qobjects = {}
qobjectsCustom = {}
for name in searchs:
# Extract the token
qtoken = searchs[name]
if qtoken == 'datetime':
# If it is a datetime
datetimeQ = name
continue # depends on [control=['if'], data=[]]
elif type(qtoken) == str or type(qtoken) == list:
# Prepare query
if type(qtoken) == tuple:
(query, func) = qtoken # depends on [control=['if'], data=[]]
else:
def lambdax(x):
return x
func = lambdax
query = qtoken
# If it is a string
if search:
for word in search.split(' '):
# If there is a word to process
if len(word) > 0:
# Build the key for the arguments and set the word as a value for the Q search
if word[0] == '-':
# If negated request
# key="-{}".format(hashlib.md5(word[1:].encode()).hexdigest())
qdict = {'{}'.format(query): func(word[1:])}
qtokens_element = ~Q(**qdict) # depends on [control=['if'], data=[]]
else:
# If positive request
# key="-{}".format(hashlib.md5(word[1:].encode()).hexdigest())
qdict = {'{}'.format(query): func(word)}
qtokens_element = Q(**qdict)
# Safe the token
if word in qobjects:
qobjects[word].append(qtokens_element) # depends on [control=['if'], data=['word', 'qobjects']]
else:
qobjects[word] = [qtokens_element] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif qobjectsCustom:
qobjectsCustom |= searchs[name] # depends on [control=['if'], data=[]]
else:
qobjectsCustom = searchs[name] # depends on [control=['for'], data=['name']]
# Build positive/negative
qdata = None
if search and qobjects:
for word in search.split(' '):
if word.split(':')[0] not in ['id', 'pk']:
if word[0] == '-':
negative = True # depends on [control=['if'], data=[]]
else:
negative = False
qword = None
for token in qobjects[word]:
if qword:
if negative:
qword &= token # depends on [control=['if'], data=[]]
else:
qword |= token # depends on [control=['if'], data=[]]
else:
qword = token # depends on [control=['for'], data=['token']]
if qword:
if qdata:
qdata &= qword # depends on [control=['if'], data=[]]
else:
qdata = qword # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]]
# Process ID/PK specific searches
if tid:
queryset = queryset.filter(id=tid) # depends on [control=['if'], data=[]]
if tpk:
queryset = queryset.filter(pk=tpk) # depends on [control=['if'], data=[]]
# Add custom Q-objects
if qobjectsCustom:
queryset = queryset.filter(qobjectsCustom) # depends on [control=['if'], data=[]]
# Add word by word search Q-objects
if qdata:
queryset = queryset.filter(qdata) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Look for datetimeQ field
searchs = MODELINF.searchQ(search)
for name in searchs:
if searchs[name] == 'datetime':
datetimeQ = name
continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
# Datetime Q
context['datetimeQ'] = datetimeQ
if datetimeQ:
# Inicialization
f = {}
f['year'] = (1900, 2100, False)
f['month'] = (1, 12, False)
f['day'] = (1, 31, False)
f['hour'] = (0, 23, False)
f['minute'] = (0, 59, False)
f['second'] = (0, 59, False)
date_elements = [None, 'year', 'month', 'day', 'hour', 'minute', 'second']
# Get configuration of dates and set limits to the queryset
for element in date_elements[1:]:
value = jsondata.get(element, None)
if value:
f[element] = (int(value), int(value), True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
if f['year'][2] and f['month'][2] and (not f['day'][2]):
(g, lastday) = calendar.monthrange(f['year'][1], f['month'][1])
f['day'] = (f['day'][0], lastday, f['day'][2]) # depends on [control=['if'], data=[]]
# Limits
date_min = datetime.datetime(f['year'][0], f['month'][0], f['day'][0], f['hour'][0], f['minute'][0], f['second'][0])
date_max = datetime.datetime(f['year'][1], f['month'][1], f['day'][1], f['hour'][1], f['minute'][1], f['second'][1])
qarg1 = {'{}__gte'.format(datetimeQ): date_min}
qarg2 = {'{}__lte'.format(datetimeQ): date_max}
qarg3 = {datetimeQ: None}
queryset = queryset.filter(Q(**qarg1) & Q(**qarg2) | Q(**qarg3))
# Find actual deepness
deepness_index = 0
for element in date_elements[1:]:
if f[element][2]:
deepness_index += 1 # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=['element']]
# Get results from dates to set the new order
exclusion = {}
exclusion[datetimeQ] = None
date_results = queryset.exclude(**exclusion).values_list(datetimeQ, flat=True)
# Remove empty results (usefull when the date is allowed to be empty)
if f['day'][0] != f['day'][1]:
if f['month'][0] == f['month'][1]:
date_results = date_results.datetimes(datetimeQ, 'day') # depends on [control=['if'], data=[]]
elif f['year'][0] == f['year'][1]:
date_results = date_results.datetimes(datetimeQ, 'month') # depends on [control=['if'], data=[]]
else:
date_results = date_results.datetimes(datetimeQ, 'year') # depends on [control=['if'], data=[]]
get = context['get']
context['datefilter'] = {}
# Save the deepness
if deepness_index + 1 == len(date_elements):
context['datefilter']['deepness'] = None # depends on [control=['if'], data=[]]
else:
context['datefilter']['deepness'] = date_elements[deepness_index + 1]
context['datefilter']['deepnessback'] = []
context['datefilter']['deepnessinit'] = []
for element in get:
if not element['name'] in date_elements:
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
context['datefilter']['deepnessinit'].append(struct)
context['datefilter']['deepnessback'].append(struct) # depends on [control=['if'], data=[]]
elif element['name'] != date_elements[deepness_index] and f[element['name']][2]:
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
context['datefilter']['deepnessback'].append(struct) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
# Build the list of elements
context['datefilter']['data'] = []
for element in date_results:
# Save the data
context['datefilter']['data'].append(element.timetuple()[deepness_index]) # depends on [control=['for'], data=['element']]
context['datefilter']['data'] = list(set(context['datefilter']['data']))
context['datefilter']['data'].sort()
# Prepare the rightnow result
if self.json_worker:
rightnow = {}
for key in ['year', 'month', 'day', 'hour', 'minute', 'second']:
rightnow[key] = f[key][2] and f[key][0] or None # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
else:
if f['month'][2]:
month = monthname(f['month'][0]) # depends on [control=['if'], data=[]]
else:
month = '__'
if f['hour'][2]:
rightnow = string_concat(grv(f, 'day'), '/', month, '/', grv(f, 'year'), ' ', grv(f, 'hour'), ':', grv(f, 'minute'), ':', grv(f, 'second')) # depends on [control=['if'], data=[]]
else:
rightnow = string_concat(grv(f, 'day'), '/', month, '/', grv(f, 'year'))
context['datefilter']['rightnow'] = rightnow # depends on [control=['if'], data=[]]
else:
context['datefilter'] = None
# Distinct
# queryset=queryset.distinct()
# Ordering field autofill
try:
order_get = jsondata.get('ordering', [])
if type(order_get) == list:
order_by_struct = order_get # depends on [control=['if'], data=[]]
else:
order_by_struct = json.loads(str(order_get)) # depends on [control=['try'], data=[]]
except Exception:
order_by_struct = [] # depends on [control=['except'], data=[]]
order_by = []
position = {}
counter = 1
# Build the columns structure and the fields list
context['columns'] = []
self.__fields = []
for value in fields:
self.__fields.append(value[0]) # depends on [control=['for'], data=['value']]
# Auto build rules
self.__autorules = self.autorules()
for order in order_by_struct:
name = list(order.keys())[0]
lbl = None
# use __autofields for ordering by alias
for field in self.__autorules:
if '{}:'.format(name) in field:
name = field.split(':')[0]
lbl = field.split(':')[1]
break # depends on [control=['if'], data=['field']] # depends on [control=['for'], data=['field']]
direction = order[name]
if lbl and (not lbl.startswith('get_')) and (not lbl.endswith('_display')):
name = lbl # depends on [control=['if'], data=[]]
if direction == 'asc':
order_by.append('%s' % remove_getdisplay(name)) # depends on [control=['if'], data=[]]
elif direction == 'desc':
order_by.append('-%s' % remove_getdisplay(name)) # depends on [control=['if'], data=[]]
position[name] = counter
counter += 1 # depends on [control=['for'], data=['order']]
if order_by:
queryset = queryset.order_by(*order_by) # depends on [control=['if'], data=[]]
elif hasattr(self, 'default_ordering'):
if type(self.default_ordering) == list:
queryset = queryset.order_by(*self.default_ordering) # depends on [control=['if'], data=[]]
else:
queryset = queryset.order_by(self.default_ordering) # depends on [control=['if'], data=[]]
else:
queryset = queryset.order_by('pk')
# Ordering field autofill
sort = {}
for value in fields:
# Get values
if value[0]:
name = value[0].split(':')[0]
order_key = name
type_field = self.get_type_field(value[0].split(':')[-1]) # depends on [control=['if'], data=[]]
else:
name = value[0]
# not usable fields, example: fields.append((None, _('Selector'))) in airportslist
hash_key = hashlib.md5(value[1].encode()).hexdigest()
order_key = '#{}'.format(hash_key)
type_field = None
publicname = value[1]
if len(value) > 2:
size = value[2] # depends on [control=['if'], data=[]]
else:
size = None
if len(value) > 3:
align = value[3] # depends on [control=['if'], data=[]]
else:
align = None
# filter column
if len(value) > 4:
filter_column = value[4] # depends on [control=['if'], data=[]]
else:
filter_column = None
# Process ordering
ordering = []
found = False
for order in order_by_struct:
subname = list(order.keys())[0]
direction = order[subname]
if order_key == subname:
if direction == 'desc':
direction = ''
sort_class = 'headerSortUp' # depends on [control=['if'], data=['direction']]
elif direction == 'asc':
direction = 'desc'
sort_class = 'headerSortDown' # depends on [control=['if'], data=['direction']]
else:
sort_class = ''
direction = 'asc'
found = True # depends on [control=['if'], data=[]]
if direction == 'asc' or direction == 'desc':
ordering.append({subname: direction}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['order']]
if not found:
ordering.append({order_key: 'asc'})
sort_class = '' # depends on [control=['if'], data=[]]
# Save the ordering method
sort[order_key] = {}
sort[order_key]['id'] = name
sort[order_key]['name'] = publicname
sort[order_key]['align'] = align
sort[order_key]['type'] = type_field
if filter_column:
sort[order_key]['filter'] = filter_column # depends on [control=['if'], data=[]]
if jsonquery is None:
sort[order_key]['size'] = size
sort[order_key]['class'] = sort_class
if order_key and order_key[0] != '*':
sort[order_key]['ordering'] = json.dumps(ordering).replace('"', '\\"') # depends on [control=['if'], data=[]]
if order_key in position:
sort[order_key]['position'] = position[order_key] # depends on [control=['if'], data=['order_key', 'position']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['value']]
# Save ordering in the context
if jsonquery is not None:
context['ordering'] = order_by_struct # depends on [control=['if'], data=[]]
# Build the columns structure and the fields list
context['columns'] = []
for value in fields:
field = value[0]
if field:
context['columns'].append(sort[field.split(':')[0]]) # depends on [control=['if'], data=[]]
else:
hash_key = hashlib.md5(value[1].encode()).hexdigest()
field = '#{}'.format(hash_key)
# selector
context['columns'].append(sort[field]) # depends on [control=['for'], data=['value']]
# Auto build rules
# self.__autorules = self.autorules()
# Columns
self.__columns = ['pk']
# self.__columns = ['id']
self.__foreignkeys = []
for column in self.model._meta.fields:
self.__columns.append(column.name)
if column.is_relation:
self.__foreignkeys.append(column.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']]
# Localfields
self.__related_objects = []
for f in self.model._meta.related_objects:
self.__related_objects.append(f.name) # depends on [control=['for'], data=['f']]
# Model properties
model_properties = self.__columns + self.__related_objects
# === Queryset optimization ===
# Get autorules ordered
autorules_keys = sorted(self.__autorules.keys())
#
query_renamed = {}
query_optimizer = []
query_verifier = []
query_select_related = []
fields_related_model = []
for rule in autorules_keys:
found = False
# name rule origin
rule_org = rule
# If rule is an alias
rulesp = rule.split(':')
if len(rulesp) == 2:
(alias, rule) = rulesp # depends on [control=['if'], data=[]]
else:
alias = rule
# If rule has a foreign key path (check first level attributes only, nfrule = no foreign rule)
nfrule = rule.split('__')
do_select_related = False
model = self.model
if len(nfrule) > 1:
ruletmp = []
field_related_model = []
for n in nfrule:
if model:
for fi in model._meta.fields:
if fi.name == n:
found = True
ruletmp.append(n)
if fi.is_relation:
model = fi.related_model
field_related_model.append(fi.name) # depends on [control=['if'], data=[]]
else:
do_select_related = True
model = None
break # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['fi']] # depends on [control=['if'], data=[]]
if not found or model is None:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
if field_related_model:
fields_related_model.append('__'.join(field_related_model)) # depends on [control=['if'], data=[]]
if ruletmp != nfrule:
do_select_related = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif nfrule[0] in [x.name for x in self.model._meta.fields] or nfrule[0] == 'pk':
found = True
for fi in model._meta.fields:
if fi.name == nfrule[0] and fi.is_relation:
fields_related_model.append(nfrule[0]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fi']] # depends on [control=['if'], data=[]]
if not self.haystack and (do_select_related or rule in self.__foreignkeys):
# Compatibility with Django 1.10
if '__' in rule:
query_select_related.append('__'.join(rule.split('__')[0:-1])) # depends on [control=['if'], data=['rule']]
else:
query_select_related.append(rule) # depends on [control=['if'], data=[]]
nfrule = nfrule[0]
if nfrule in self.__columns:
############################
# dejo comentada la restriccion, si se deja y hay una FK "nunca" usaria .extra ni .value
# no la elimino del todo por si hubiera algun fallo mas adelante,
# y se tuviera que parametrizarse de algun otro modo
############################
# if nfrule not in self.__foreignkeys:
if rule not in fields_related_model:
# Save verifier name
query_verifier.append(rule_org) # depends on [control=['if'], data=[]]
# Save renamed field
if alias != rule:
query_renamed[alias] = F(rule)
query_optimizer.append(alias) # depends on [control=['if'], data=['alias', 'rule']]
else:
# Save final name
query_optimizer.append(rule) # depends on [control=['if'], data=[]]
if hasattr(self, 'annotations'):
# Prepare annotations
if callable(self.annotations):
anot = self.annotations(MODELINF) # depends on [control=['if'], data=[]]
else:
anot = self.annotations
# Process annotations
for xnfrule in anot.keys():
found = True
if xnfrule not in query_verifier:
query_verifier.append(xnfrule)
query_optimizer.append(xnfrule) # depends on [control=['if'], data=['xnfrule', 'query_verifier']] # depends on [control=['for'], data=['xnfrule']] # depends on [control=['if'], data=[]]
if not found:
query_renamed = {}
query_optimizer = []
query_verifier = []
query_select_related = []
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule']]
for rename in query_renamed.keys():
if rename in model_properties:
if rename in self.__foreignkeys:
msg = "Invalid alias. The alias '{}' is a foreign key from model '{}' inside app '{}'" # depends on [control=['if'], data=[]]
elif rename in self.__columns:
msg = "Invalid alias. The alias '{}' is a columns from model '{}' inside app '{}'" # depends on [control=['if'], data=[]]
elif rename in self.__related_objects:
msg = "Invalid alias. The alias '{}' is a related object from model '{}' inside app '{}'" # depends on [control=['if'], data=[]]
raise Exception(msg.format(rename, self._modelname, self._appname)) # depends on [control=['if'], data=['rename']] # depends on [control=['for'], data=['rename']]
if found and query_select_related:
queryset = queryset.select_related(*query_select_related) # depends on [control=['if'], data=[]]
# If we got the query_optimizer to optimize everything, use it
# use_extra = False
query_verifier.sort()
autorules_keys.sort()
if found and query_verifier == autorules_keys:
# use_extra = True
if query_renamed:
# queryset=queryset.extra(select=query_renamed).values(*query_optimizer)
queryset = queryset.annotate(**query_renamed).values(*query_optimizer) # depends on [control=['if'], data=[]]
else:
queryset = queryset.values(*query_optimizer) # depends on [control=['if'], data=[]]
# Custom queryset
if hasattr(self, 'custom_queryset'):
queryset = self.custom_queryset(queryset, MODELINF) # depends on [control=['if'], data=[]]
# Internal Codenerix DEBUG for Querysets
'\n raise Exception("FOUND: {} -- __foreignkeys: {} -- __columns: {} -- autorules_keys: {} -- query_select_related: {} -- query_renamed: {} -- query_optimizer: {} | use_extra: {}| -- query: {} -- meta.fields: {} -- fields_related_model: {} -- query_verifier: {} -- ??? {} == {}".format(\n found,\n self.__foreignkeys, self.__columns, autorules_keys,\n query_select_related, query_renamed, query_optimizer,use_extra,\n queryset.query,\n [x.name for x in self.model._meta.fields],\n fields_related_model, query_verifier,\n query_verifier.sort(),autorules_keys.sort()\n ))\n #'
# Check if the user requested to return a raw queryset
if raw_query:
return queryset # depends on [control=['if'], data=[]]
else:
# Check the total count of registers + rows per page
total_rows_per_page = jsondata.get('rowsperpage', self.default_rows_per_page)
pages_to_bring = jsondata.get('pages_to_bring', 1)
if total_rows_per_page == 'All' or self.export:
total_rows_per_page = queryset.count() # depends on [control=['if'], data=[]]
paginator = Paginator(queryset, total_rows_per_page)
total_registers = paginator.count
# Rows per page
if total_rows_per_page:
try:
total_rows_per_page = int(total_rows_per_page) # depends on [control=['try'], data=[]]
except Exception:
total_rows_per_page = 'All' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
total_rows_per_page = self.default_rows_per_page
if total_rows_per_page == 'All':
page_number = 1
total_rows_per_page = total_registers
total_rows_per_page_out = _('All')
total_pages = 1 # depends on [control=['if'], data=['total_rows_per_page']]
else:
total_rows_per_page = int(total_rows_per_page) # By default 10 rows per page
total_rows_per_page_out = total_rows_per_page
total_pages = int(total_registers / total_rows_per_page)
if total_registers % total_rows_per_page:
total_pages += 1 # depends on [control=['if'], data=[]]
page_number = jsondata.get('page', 1) # If no page specified use first page
if page_number == 'last':
page_number = total_pages # depends on [control=['if'], data=['page_number']]
else:
try:
page_number = int(page_number) # depends on [control=['try'], data=[]]
except Exception:
page_number = 1 # depends on [control=['except'], data=[]]
if page_number < 1:
page_number = 1 # depends on [control=['if'], data=['page_number']]
if page_number > total_pages:
page_number = total_pages # depends on [control=['if'], data=['page_number', 'total_pages']]
# Build the list of page counters allowed
choice = {}
c = self.default_rows_per_page
chk = 1
while total_registers >= c:
choice[c] = c
if chk == 1:
# From 5 to 10
c = c * 2
# Next level
chk = 2 # depends on [control=['if'], data=['chk']]
elif chk == 2:
# From 10 to 25 (10*2+10/2)
c = c * 2 + int(c / 2)
# Next level
chk = 3 # depends on [control=['if'], data=['chk']]
elif chk == 3:
# From 25 to 50
c *= 2
chk = 1 # depends on [control=['if'], data=['chk']]
# Don't give a too long choice
if c > 2000:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['c']]
# Add all choice in any case
if settings.ALL_PAGESALLOWED:
choice['All'] = _('All') # depends on [control=['if'], data=[]]
# Save the pagination in the structure
context['rowsperpageallowed'] = choice
context['rowsperpage'] = total_rows_per_page_out
context['pages_to_bring'] = pages_to_bring
context['pagenumber'] = page_number
# Get the full number of registers and save it to context
context['total_registers'] = total_registers
if total_rows_per_page == 'All':
# Remove total_rows_per_page if is all
total_rows_per_page = None
context['page_before'] = None
context['page_after'] = None
context['start_register'] = 1
context['showing_registers'] = total_registers # depends on [control=['if'], data=['total_rows_per_page']]
else:
# Page before
if page_number <= 1:
context['page_before'] = None # depends on [control=['if'], data=[]]
else:
context['page_before'] = page_number - 1
# Page after
if page_number >= total_pages:
context['page_after'] = None # depends on [control=['if'], data=[]]
else:
context['page_after'] = page_number + 1
# Starting on register number
context['start_register'] = (page_number - 1) * total_rows_per_page + 1
context['showing_registers'] = total_rows_per_page
# Calculate end
context['end_register'] = min(context['start_register'] + context['showing_registers'] - 1, total_registers)
# Add pagination
regs = []
if paginator.count:
desired_page_number = page_number
try:
range_pages_to_bring = xrange(pages_to_bring) # depends on [control=['try'], data=[]]
except NameError:
range_pages_to_bring = range(pages_to_bring) # depends on [control=['except'], data=[]]
for p in range_pages_to_bring:
try:
regs += paginator.page(desired_page_number)
desired_page_number += 1 # depends on [control=['try'], data=[]]
except PageNotAnInteger:
# If page is not an integer, deliver first page.
regs += paginator.page(1)
desired_page_number = 2 # depends on [control=['except'], data=[]]
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
if pages_to_bring == 1:
regs += paginator.page(paginator.num_pages) # depends on [control=['if'], data=[]]
# Leave bucle
break # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Fill pages
if total_registers:
context['pages'] = pages(paginator, page_number)
try:
range_fill = xrange(pages_to_bring - 1) # depends on [control=['try'], data=[]]
except NameError:
range_fill = range(pages_to_bring - 1) # depends on [control=['except'], data=[]]
for p in range_fill:
page_number += 1
context['pages'] += pages(paginator, page_number) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
context['pages'] = []
# Return queryset
return regs |
def find_serial_devices(serial_matcher="ED"):
    """
    Find USB devices whose serial number (partially) matches the given string.

    :param str serial_matcher (optional):
        only device IDs starting with this string are returned
    :rtype: List[str]
    """
    # Query WMI for every USB controller/device association on this machine.
    objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator")
    # Raw string: "\c" is not a valid escape sequence, and non-raw use of it
    # raises a SyntaxWarning on modern Python (a future SyntaxError).
    objSWbemServices = objWMIService.ConnectServer(".", r"root\cimv2")
    items = objSWbemServices.ExecQuery("SELECT * FROM Win32_USBControllerDevice")
    # The device ID is the last 8 characters of the quoted Dependent path.
    ids = (item.Dependent.strip('"')[-8:] for item in items)
    return [e for e in ids if e.startswith(serial_matcher)]
constant[
Finds a list of USB devices where the serial number (partially) matches the given string.
:param str serial_matcher (optional):
only device IDs starting with this string are returned
:rtype: List[str]
]
variable[objWMIService] assign[=] call[name[win32com].client.Dispatch, parameter[constant[WbemScripting.SWbemLocator]]]
variable[objSWbemServices] assign[=] call[name[objWMIService].ConnectServer, parameter[constant[.], constant[root\cimv2]]]
variable[items] assign[=] call[name[objSWbemServices].ExecQuery, parameter[constant[SELECT * FROM Win32_USBControllerDevice]]]
variable[ids] assign[=] <ast.GeneratorExp object at 0x7da1b1bc0e50>
return[<ast.ListComp object at 0x7da1b1bc3a60>] | keyword[def] identifier[find_serial_devices] ( identifier[serial_matcher] = literal[string] ):
literal[string]
identifier[objWMIService] = identifier[win32com] . identifier[client] . identifier[Dispatch] ( literal[string] )
identifier[objSWbemServices] = identifier[objWMIService] . identifier[ConnectServer] ( literal[string] , literal[string] )
identifier[items] = identifier[objSWbemServices] . identifier[ExecQuery] ( literal[string] )
identifier[ids] =( identifier[item] . identifier[Dependent] . identifier[strip] ( literal[string] )[- literal[int] :] keyword[for] identifier[item] keyword[in] identifier[items] )
keyword[return] [ identifier[e] keyword[for] identifier[e] keyword[in] identifier[ids] keyword[if] identifier[e] . identifier[startswith] ( identifier[serial_matcher] )] | def find_serial_devices(serial_matcher='ED'):
"""
Finds a list of USB devices where the serial number (partially) matches the given string.
:param str serial_matcher (optional):
only device IDs starting with this string are returned
:rtype: List[str]
"""
objWMIService = win32com.client.Dispatch('WbemScripting.SWbemLocator')
objSWbemServices = objWMIService.ConnectServer('.', 'root\\cimv2')
items = objSWbemServices.ExecQuery('SELECT * FROM Win32_USBControllerDevice')
ids = (item.Dependent.strip('"')[-8:] for item in items)
return [e for e in ids if e.startswith(serial_matcher)] |
def increase_verbosity():
    """
    Increase the verbosity of the root handler by one defined level.
    Understands custom logging levels like defined by my ``verboselogs``
    module.
    """
    # Collect the distinct configured levels in ascending numeric order.
    levels = sorted(set(find_defined_levels().values()))
    # One step down in the sorted order means "more verbose"; clamp at the
    # most verbose level so repeated calls are safe.
    position = levels.index(get_level()) - 1
    if position < 0:
        position = 0
    set_level(levels[position])
constant[
Increase the verbosity of the root handler by one defined level.
Understands custom logging levels like defined by my ``verboselogs``
module.
]
variable[defined_levels] assign[=] call[name[sorted], parameter[call[name[set], parameter[call[call[name[find_defined_levels], parameter[]].values, parameter[]]]]]]
variable[current_index] assign[=] call[name[defined_levels].index, parameter[call[name[get_level], parameter[]]]]
variable[selected_index] assign[=] call[name[max], parameter[constant[0], binary_operation[name[current_index] - constant[1]]]]
call[name[set_level], parameter[call[name[defined_levels]][name[selected_index]]]] | keyword[def] identifier[increase_verbosity] ():
literal[string]
identifier[defined_levels] = identifier[sorted] ( identifier[set] ( identifier[find_defined_levels] (). identifier[values] ()))
identifier[current_index] = identifier[defined_levels] . identifier[index] ( identifier[get_level] ())
identifier[selected_index] = identifier[max] ( literal[int] , identifier[current_index] - literal[int] )
identifier[set_level] ( identifier[defined_levels] [ identifier[selected_index] ]) | def increase_verbosity():
"""
Increase the verbosity of the root handler by one defined level.
Understands custom logging levels like defined by my ``verboselogs``
module.
"""
defined_levels = sorted(set(find_defined_levels().values()))
current_index = defined_levels.index(get_level())
selected_index = max(0, current_index - 1)
set_level(defined_levels[selected_index]) |
def go_through_dict(key, d, setdefault=None):
    """
    Walk through the nested dictionary `d` along the dot-separated `key`.

    The key is split on every dot that is not escaped with a backslash and
    every level but the last is traversed.

    Parameters
    ----------
    key: str
        The key in the `config` configuration. %(get_value_note)s
    d: dict
        The configuration dictionary containing the key
    setdefault: callable
        If not None and an item is not existent in `d`, it is created by
        calling the given function

    Returns
    -------
    str
        The last level of the key
    dict
        The dictionary in `d` that contains the last level of the key
    """
    # Split on unescaped dots only; an escaped dot ("\.") stays inside a
    # single key level.
    levels = re.compile(r'(?<!\\)\.').split(key)
    current = d
    for level in levels[:-1]:
        if setdefault is None:
            current = current[level]
        else:
            # NOTE: the factory is invoked even when the key already exists,
            # mirroring dict.setdefault's eager-argument semantics.
            current = current.setdefault(level, setdefault())
    return levels[-1], current
constant[
Split up the `key` by . and get the value from the base dictionary `d`
Parameters
----------
key: str
The key in the `config` configuration. %(get_value_note)s
d: dict
The configuration dictionary containing the key
setdefault: callable
If not None and an item is not existent in `d`, it is created by
calling the given function
Returns
-------
str
The last level of the key
dict
The dictionary in `d` that contains the last level of the key
]
variable[patt] assign[=] call[name[re].compile, parameter[constant[(?<!\\)\.]]]
variable[sub_d] assign[=] name[d]
variable[splitted] assign[=] call[name[patt].split, parameter[name[key]]]
variable[n] assign[=] call[name[len], parameter[name[splitted]]]
for taget[tuple[[<ast.Name object at 0x7da2054a7f70>, <ast.Name object at 0x7da2054a49d0>]]] in starred[call[name[enumerate], parameter[name[splitted]]]] begin[:]
if compare[name[i] less[<] binary_operation[name[n] - constant[1]]] begin[:]
if compare[name[setdefault] is_not constant[None]] begin[:]
variable[sub_d] assign[=] call[name[sub_d].setdefault, parameter[name[k], call[name[setdefault], parameter[]]]] | keyword[def] identifier[go_through_dict] ( identifier[key] , identifier[d] , identifier[setdefault] = keyword[None] ):
literal[string]
identifier[patt] = identifier[re] . identifier[compile] ( literal[string] )
identifier[sub_d] = identifier[d]
identifier[splitted] = identifier[patt] . identifier[split] ( identifier[key] )
identifier[n] = identifier[len] ( identifier[splitted] )
keyword[for] identifier[i] , identifier[k] keyword[in] identifier[enumerate] ( identifier[splitted] ):
keyword[if] identifier[i] < identifier[n] - literal[int] :
keyword[if] identifier[setdefault] keyword[is] keyword[not] keyword[None] :
identifier[sub_d] = identifier[sub_d] . identifier[setdefault] ( identifier[k] , identifier[setdefault] ())
keyword[else] :
identifier[sub_d] = identifier[sub_d] [ identifier[k] ]
keyword[else] :
keyword[return] identifier[k] , identifier[sub_d] | def go_through_dict(key, d, setdefault=None):
"""
Split up the `key` by . and get the value from the base dictionary `d`
Parameters
----------
key: str
The key in the `config` configuration. %(get_value_note)s
d: dict
The configuration dictionary containing the key
setdefault: callable
If not None and an item is not existent in `d`, it is created by
calling the given function
Returns
-------
str
The last level of the key
dict
The dictionary in `d` that contains the last level of the key
"""
patt = re.compile('(?<!\\\\)\\.')
sub_d = d
splitted = patt.split(key)
n = len(splitted)
for (i, k) in enumerate(splitted):
if i < n - 1:
if setdefault is not None:
sub_d = sub_d.setdefault(k, setdefault()) # depends on [control=['if'], data=['setdefault']]
else:
sub_d = sub_d[k] # depends on [control=['if'], data=[]]
else:
return (k, sub_d) # depends on [control=['for'], data=[]] |
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    Builds a :class:`Time` from a count of nanoseconds since midnight and,
    when an offset is supplied, localizes it to that fixed-offset zone.

    :param nanoseconds: nanoseconds since midnight
    :param tz: optional UTC offset in seconds; ``None`` yields a naive time
    :return: Time
    """
    seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    # Recombine whole seconds with the leftover nanoseconds as fractional
    # seconds for the Time constructor.
    seconds = (1000000000 * seconds + nanoseconds) / 1000000000
    t = Time(hours, minutes, seconds)
    if tz is None:
        return t
    # FixedOffset takes whole minutes; any sub-minute remainder of the
    # offset is deliberately dropped (previously an unused local).
    zone = FixedOffset(tz // 60)
    return zone.localize(t)
constant[ Hydrator for `Time` and `LocalTime` values.
:param nanoseconds:
:param tz:
:return: Time
]
<ast.Tuple object at 0x7da18f09d5a0> assign[=] call[name[map], parameter[name[int], call[name[divmod], parameter[name[nanoseconds], constant[1000000000]]]]]
<ast.Tuple object at 0x7da1b16d1750> assign[=] call[name[map], parameter[name[int], call[name[divmod], parameter[name[seconds], constant[60]]]]]
<ast.Tuple object at 0x7da20c7962f0> assign[=] call[name[map], parameter[name[int], call[name[divmod], parameter[name[minutes], constant[60]]]]]
variable[seconds] assign[=] binary_operation[binary_operation[binary_operation[constant[1000000000] * name[seconds]] + name[nanoseconds]] / constant[1000000000]]
variable[t] assign[=] call[name[Time], parameter[name[hours], name[minutes], name[seconds]]]
if compare[name[tz] is constant[None]] begin[:]
return[name[t]]
<ast.Tuple object at 0x7da20c7944c0> assign[=] call[name[divmod], parameter[name[tz], constant[60]]]
variable[zone] assign[=] call[name[FixedOffset], parameter[name[tz_offset_minutes]]]
return[call[name[zone].localize, parameter[name[t]]]] | keyword[def] identifier[hydrate_time] ( identifier[nanoseconds] , identifier[tz] = keyword[None] ):
literal[string]
identifier[seconds] , identifier[nanoseconds] = identifier[map] ( identifier[int] , identifier[divmod] ( identifier[nanoseconds] , literal[int] ))
identifier[minutes] , identifier[seconds] = identifier[map] ( identifier[int] , identifier[divmod] ( identifier[seconds] , literal[int] ))
identifier[hours] , identifier[minutes] = identifier[map] ( identifier[int] , identifier[divmod] ( identifier[minutes] , literal[int] ))
identifier[seconds] =( literal[int] * identifier[seconds] + identifier[nanoseconds] )/ literal[int]
identifier[t] = identifier[Time] ( identifier[hours] , identifier[minutes] , identifier[seconds] )
keyword[if] identifier[tz] keyword[is] keyword[None] :
keyword[return] identifier[t]
identifier[tz_offset_minutes] , identifier[tz_offset_seconds] = identifier[divmod] ( identifier[tz] , literal[int] )
identifier[zone] = identifier[FixedOffset] ( identifier[tz_offset_minutes] )
keyword[return] identifier[zone] . identifier[localize] ( identifier[t] ) | def hydrate_time(nanoseconds, tz=None):
""" Hydrator for `Time` and `LocalTime` values.
:param nanoseconds:
:param tz:
:return: Time
"""
(seconds, nanoseconds) = map(int, divmod(nanoseconds, 1000000000))
(minutes, seconds) = map(int, divmod(seconds, 60))
(hours, minutes) = map(int, divmod(minutes, 60))
seconds = (1000000000 * seconds + nanoseconds) / 1000000000
t = Time(hours, minutes, seconds)
if tz is None:
return t # depends on [control=['if'], data=[]]
(tz_offset_minutes, tz_offset_seconds) = divmod(tz, 60)
zone = FixedOffset(tz_offset_minutes)
return zone.localize(t) |
def unpack_data(cls, argument_count, payload):
    """Unpack payload by splitting up the raw payload into list of locator_ids
    :param argument_count: number of locator_ids in payload is equal to argument_count
    :param payload: BytesIO instance with list of concatenated locator_ids, where each locator_id is 8 bytes long
    """
    raw = payload.read()
    # Slice the buffer into consecutive 8-byte locator ids.
    locator_ids = [raw[offset:offset + 8] for offset in range(0, len(raw), 8)]
    return (locator_ids,)
constant[Unpack payload by splitting up the raw payload into list of locator_ids
:param argument_count: number of locator_ids in payload is equal to argument_count
:param payload: BytesIO instance with list of concatenated locator_ids, where each locator_id is 8 bytes long
]
variable[pl] assign[=] call[name[payload].read, parameter[]]
variable[locator_ids] assign[=] <ast.ListComp object at 0x7da20c76f370>
return[tuple[[<ast.Name object at 0x7da20c76c340>]]] | keyword[def] identifier[unpack_data] ( identifier[cls] , identifier[argument_count] , identifier[payload] ):
literal[string]
identifier[pl] = identifier[payload] . identifier[read] ()
identifier[locator_ids] =[ identifier[pl] [ identifier[start] : identifier[start] + literal[int] ] keyword[for] identifier[start] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[pl] ), literal[int] )]
keyword[return] identifier[locator_ids] , | def unpack_data(cls, argument_count, payload):
"""Unpack payload by splitting up the raw payload into list of locator_ids
:param argument_count: number of locator_ids in payload is equal to argument_count
:param payload: BytesIO instance with list of concatenated locator_ids, where each locator_id is 8 bytes long
"""
pl = payload.read()
locator_ids = [pl[start:start + 8] for start in range(0, len(pl), 8)]
return (locator_ids,) |
def map_region(self, addr, length, permissions, init_zero=False):
    """
    Map a number of pages at address `addr` with permissions `permissions`.
    :param addr: address to map the pages at
    :param length: length in bytes of region to map, will be rounded upwards to the page size
    :param permissions: AST of permissions to map, will be a bitvalue representing flags
    :param init_zero: Initialize page with zeros
    """
    # Log the inclusive byte range [addr, addr + length - 1] before mapping.
    l.info("Mapping [%#x, %#x] as %s", addr, addr + length - 1, permissions)
    # All page bookkeeping is delegated to the underlying memory model.
    return self.mem.map_region(addr, length, permissions, init_zero=init_zero)
constant[
Map a number of pages at address `addr` with permissions `permissions`.
:param addr: address to map the pages at
:param length: length in bytes of region to map, will be rounded upwards to the page size
:param permissions: AST of permissions to map, will be a bitvalue representing flags
:param init_zero: Initialize page with zeros
]
call[name[l].info, parameter[constant[Mapping [%#x, %#x] as %s], name[addr], binary_operation[binary_operation[name[addr] + name[length]] - constant[1]], name[permissions]]]
return[call[name[self].mem.map_region, parameter[name[addr], name[length], name[permissions]]]] | keyword[def] identifier[map_region] ( identifier[self] , identifier[addr] , identifier[length] , identifier[permissions] , identifier[init_zero] = keyword[False] ):
literal[string]
identifier[l] . identifier[info] ( literal[string] , identifier[addr] , identifier[addr] + identifier[length] - literal[int] , identifier[permissions] )
keyword[return] identifier[self] . identifier[mem] . identifier[map_region] ( identifier[addr] , identifier[length] , identifier[permissions] , identifier[init_zero] = identifier[init_zero] ) | def map_region(self, addr, length, permissions, init_zero=False):
"""
Map a number of pages at address `addr` with permissions `permissions`.
:param addr: address to map the pages at
:param length: length in bytes of region to map, will be rounded upwards to the page size
:param permissions: AST of permissions to map, will be a bitvalue representing flags
:param init_zero: Initialize page with zeros
"""
l.info('Mapping [%#x, %#x] as %s', addr, addr + length - 1, permissions)
return self.mem.map_region(addr, length, permissions, init_zero=init_zero) |
def _CheckPythonVersionAndDisableWarnings(self):
    """Checks python version, and disables SSL warnings.
    urllib3 will warn on each HTTPS request made by older versions of Python.
    Rather than spamming the user, we print one warning message, then disable
    warnings in urllib3.
    """
    # Only run the check (and emit the warning) once per instance.
    if self._checked_for_old_python_version:
        return
    if sys.version_info[0:3] < (2, 7, 9):
        logger.warning(
            'You are running a version of Python prior to 2.7.9. Your version '
            'of Python has multiple weaknesses in its SSL implementation that '
            'can allow an attacker to read or modify SSL encrypted data. '
            'Please update. Further SSL warnings will be suppressed. See '
            'https://www.python.org/dev/peps/pep-0466/ for more information.')
        # Some distributions de-vendor urllib3 from requests, so we have to
        # check if this has occurred and disable warnings in the correct
        # package.
        # NOTE(review): the module-level `urllib3` binding is presumably None
        # when the standalone package is absent — confirm at the import site.
        urllib3_module = urllib3
        if not urllib3_module:
            # Fall back to the copy vendored inside requests, if any.
            if hasattr(requests, 'packages'):
                urllib3_module = getattr(requests.packages, 'urllib3')
        if urllib3_module and hasattr(urllib3_module, 'disable_warnings'):
            urllib3_module.disable_warnings()
    self._checked_for_old_python_version = True
constant[Checks python version, and disables SSL warnings.
urllib3 will warn on each HTTPS request made by older versions of Python.
Rather than spamming the user, we print one warning message, then disable
warnings in urllib3.
]
if name[self]._checked_for_old_python_version begin[:]
return[None]
if compare[call[name[sys].version_info][<ast.Slice object at 0x7da20c6ab6d0>] less[<] tuple[[<ast.Constant object at 0x7da20c6a9300>, <ast.Constant object at 0x7da20c6a9210>, <ast.Constant object at 0x7da20c6aa440>]]] begin[:]
call[name[logger].warning, parameter[constant[You are running a version of Python prior to 2.7.9. Your version of Python has multiple weaknesses in its SSL implementation that can allow an attacker to read or modify SSL encrypted data. Please update. Further SSL warnings will be suppressed. See https://www.python.org/dev/peps/pep-0466/ for more information.]]]
variable[urllib3_module] assign[=] name[urllib3]
if <ast.UnaryOp object at 0x7da20c6a8a90> begin[:]
if call[name[hasattr], parameter[name[requests], constant[packages]]] begin[:]
variable[urllib3_module] assign[=] call[name[getattr], parameter[name[requests].packages, constant[urllib3]]]
if <ast.BoolOp object at 0x7da20c6a9270> begin[:]
call[name[urllib3_module].disable_warnings, parameter[]]
name[self]._checked_for_old_python_version assign[=] constant[True] | keyword[def] identifier[_CheckPythonVersionAndDisableWarnings] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_checked_for_old_python_version] :
keyword[return]
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] : literal[int] ]<( literal[int] , literal[int] , literal[int] ):
identifier[logger] . identifier[warning] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[urllib3_module] = identifier[urllib3]
keyword[if] keyword[not] identifier[urllib3_module] :
keyword[if] identifier[hasattr] ( identifier[requests] , literal[string] ):
identifier[urllib3_module] = identifier[getattr] ( identifier[requests] . identifier[packages] , literal[string] )
keyword[if] identifier[urllib3_module] keyword[and] identifier[hasattr] ( identifier[urllib3_module] , literal[string] ):
identifier[urllib3_module] . identifier[disable_warnings] ()
identifier[self] . identifier[_checked_for_old_python_version] = keyword[True] | def _CheckPythonVersionAndDisableWarnings(self):
"""Checks python version, and disables SSL warnings.
urllib3 will warn on each HTTPS request made by older versions of Python.
Rather than spamming the user, we print one warning message, then disable
warnings in urllib3.
"""
if self._checked_for_old_python_version:
return # depends on [control=['if'], data=[]]
if sys.version_info[0:3] < (2, 7, 9):
logger.warning('You are running a version of Python prior to 2.7.9. Your version of Python has multiple weaknesses in its SSL implementation that can allow an attacker to read or modify SSL encrypted data. Please update. Further SSL warnings will be suppressed. See https://www.python.org/dev/peps/pep-0466/ for more information.')
# Some distributions de-vendor urllib3 from requests, so we have to
# check if this has occurred and disable warnings in the correct
# package.
urllib3_module = urllib3
if not urllib3_module:
if hasattr(requests, 'packages'):
urllib3_module = getattr(requests.packages, 'urllib3') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if urllib3_module and hasattr(urllib3_module, 'disable_warnings'):
urllib3_module.disable_warnings() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self._checked_for_old_python_version = True |
def get_working_path(self):
    """
    Gets the working path. If the path starts with a ~, this will be replaced by the current user's home path.
    :return:
    """
    self.set_section('Files')
    path = self.option("working_path")
    # Only a *leading* tilde is expanded; "~user" forms are left untouched.
    if not path.startswith('~'):
        return path
    return os.path.expanduser('~') + path[1:]
constant[
Gets the working path. If the path starts with a ~, this will be replaced by the current user's home path.
:return:
]
call[name[self].set_section, parameter[constant[Files]]]
variable[raw_path] assign[=] call[name[self].option, parameter[constant[working_path]]]
if call[name[raw_path].startswith, parameter[constant[~]]] begin[:]
variable[raw_path] assign[=] binary_operation[call[name[os].path.expanduser, parameter[constant[~]]] + call[name[raw_path]][<ast.Slice object at 0x7da18c4cd7b0>]]
return[name[raw_path]] | keyword[def] identifier[get_working_path] ( identifier[self] ):
literal[string]
identifier[self] . identifier[set_section] ( literal[string] )
identifier[raw_path] = identifier[self] . identifier[option] ( literal[string] )
keyword[if] identifier[raw_path] . identifier[startswith] ( literal[string] ):
identifier[raw_path] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )+ identifier[raw_path] [ literal[int] :]
keyword[return] identifier[raw_path] | def get_working_path(self):
"""
Gets the working path. If the path starts with a ~, this will be replaced by the current user's home path.
:return:
"""
self.set_section('Files')
raw_path = self.option('working_path')
if raw_path.startswith('~'):
raw_path = os.path.expanduser('~') + raw_path[1:] # depends on [control=['if'], data=[]]
return raw_path |
def parse_buffer(stream, separator=None):
    """
    Returns a dictionary of the lines of a stream, an array of rows of the
    stream (split by separator), and an array of the columns of the stream
    (also split by separator)
    :param stream:
    :param separator:
    :return: dict
    """
    rows = []
    lines = []
    for line, row in parse_lines(stream, separator):
        lines.append(line)
        rows.append(row)
    # Materialize the transposition: on Python 3 zip() returns a one-shot
    # iterator, but the docstring promises a reusable array of columns.
    cols = list(zip(*rows))
    return {
        'rows': rows,
        'lines': lines,
        'cols': cols,
    }
constant[
Returns a dictionary of the lines of a stream, an array of rows of the
stream (split by separator), and an array of the columns of the stream
(also split by separator)
:param stream:
:param separator:
:return: dict
]
variable[rows] assign[=] list[[]]
variable[lines] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0bd9c60>, <ast.Name object at 0x7da1b0bd8bb0>]]] in starred[call[name[parse_lines], parameter[name[stream], name[separator]]]] begin[:]
call[name[lines].append, parameter[name[line]]]
call[name[rows].append, parameter[name[row]]]
variable[cols] assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b0bd9600>]]
return[dictionary[[<ast.Constant object at 0x7da1b0bdbc40>, <ast.Constant object at 0x7da1b0bdbf10>, <ast.Constant object at 0x7da1b0bda020>], [<ast.Name object at 0x7da1b0bdbb80>, <ast.Name object at 0x7da1b0bd8ac0>, <ast.Name object at 0x7da1b0bd9750>]]] | keyword[def] identifier[parse_buffer] ( identifier[stream] , identifier[separator] = keyword[None] ):
literal[string]
identifier[rows] =[]
identifier[lines] =[]
keyword[for] identifier[line] , identifier[row] keyword[in] identifier[parse_lines] ( identifier[stream] , identifier[separator] ):
identifier[lines] . identifier[append] ( identifier[line] )
identifier[rows] . identifier[append] ( identifier[row] )
identifier[cols] = identifier[zip] (* identifier[rows] )
keyword[return] {
literal[string] : identifier[rows] ,
literal[string] : identifier[lines] ,
literal[string] : identifier[cols] ,
} | def parse_buffer(stream, separator=None):
"""
Returns a dictionary of the lines of a stream, an array of rows of the
stream (split by separator), and an array of the columns of the stream
(also split by separator)
:param stream:
:param separator:
:return: dict
"""
rows = []
lines = []
for (line, row) in parse_lines(stream, separator):
lines.append(line)
rows.append(row) # depends on [control=['for'], data=[]]
cols = zip(*rows)
return {'rows': rows, 'lines': lines, 'cols': cols} |
def char_on_predicate(compiler, cont, test):
    '''return current char and step if @test succeed, where
    @test: a python function with one argument, which tests on one char and return True or False
    @test must be registered with register_function'''
    # Lower the wrapped test to its intermediate-language representation.
    test = test.interlang()
    # Fresh local variables for the emitted code's (text, position) pair.
    text = compiler.new_var(il.ConstLocalVar('text'))
    pos = compiler.new_var(il.ConstLocalVar('pos'))
    if not isinstance(test, il.PyFunction):
        raise DaoCompileTypeError(test)
    # Emitted code: unpack (text, pos) from the parse state; fail at end of
    # input; otherwise apply the predicate to the current character.
    return il.Begin((
        il.AssignFromList(text, pos, il.parse_state),
        il.If(il.Ge(pos, il.Len(text)),
              il.failcont(il.FALSE),
              il.If(il.Call(test, il.GetItem(text, pos)),
                    il.begin(
                        # On match: advance past the character, and register a
                        # fail continuation that restores the old position so
                        # backtracking undoes the step.
                        il.SetParseState(il.Tuple(text, il.add(pos, il.Integer(1)))),
                        il.append_failcont(compiler,
                                           il.SetParseState(il.Tuple(text, pos))),
                        cont(il.GetItem(text, pos))),
                    il.failcont(il.FALSE)))))
constant[return current char and step if @test succeed, where
@test: a python function with one argument, which tests on one char and return True or False
@test must be registered with register_function]
variable[test] assign[=] call[name[test].interlang, parameter[]]
variable[text] assign[=] call[name[compiler].new_var, parameter[call[name[il].ConstLocalVar, parameter[constant[text]]]]]
variable[pos] assign[=] call[name[compiler].new_var, parameter[call[name[il].ConstLocalVar, parameter[constant[pos]]]]]
if <ast.UnaryOp object at 0x7da18c4ce350> begin[:]
<ast.Raise object at 0x7da18c4ce410>
return[call[name[il].Begin, parameter[tuple[[<ast.Call object at 0x7da18c4cd150>, <ast.Call object at 0x7da18c4cffa0>]]]]] | keyword[def] identifier[char_on_predicate] ( identifier[compiler] , identifier[cont] , identifier[test] ):
literal[string]
identifier[test] = identifier[test] . identifier[interlang] ()
identifier[text] = identifier[compiler] . identifier[new_var] ( identifier[il] . identifier[ConstLocalVar] ( literal[string] ))
identifier[pos] = identifier[compiler] . identifier[new_var] ( identifier[il] . identifier[ConstLocalVar] ( literal[string] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[test] , identifier[il] . identifier[PyFunction] ):
keyword[raise] identifier[DaoCompileTypeError] ( identifier[test] )
keyword[return] identifier[il] . identifier[Begin] ((
identifier[il] . identifier[AssignFromList] ( identifier[text] , identifier[pos] , identifier[il] . identifier[parse_state] ),
identifier[il] . identifier[If] ( identifier[il] . identifier[Ge] ( identifier[pos] , identifier[il] . identifier[Len] ( identifier[text] )),
identifier[il] . identifier[failcont] ( identifier[il] . identifier[FALSE] ),
identifier[il] . identifier[If] ( identifier[il] . identifier[Call] ( identifier[test] , identifier[il] . identifier[GetItem] ( identifier[text] , identifier[pos] )),
identifier[il] . identifier[begin] (
identifier[il] . identifier[SetParseState] ( identifier[il] . identifier[Tuple] ( identifier[text] , identifier[il] . identifier[add] ( identifier[pos] , identifier[il] . identifier[Integer] ( literal[int] )))),
identifier[il] . identifier[append_failcont] ( identifier[compiler] ,
identifier[il] . identifier[SetParseState] ( identifier[il] . identifier[Tuple] ( identifier[text] , identifier[pos] ))),
identifier[cont] ( identifier[il] . identifier[GetItem] ( identifier[text] , identifier[pos] ))),
identifier[il] . identifier[failcont] ( identifier[il] . identifier[FALSE] ))))) | def char_on_predicate(compiler, cont, test):
"""return current char and step if @test succeed, where
@test: a python function with one argument, which tests on one char and return True or False
@test must be registered with register_function"""
test = test.interlang()
text = compiler.new_var(il.ConstLocalVar('text'))
pos = compiler.new_var(il.ConstLocalVar('pos'))
if not isinstance(test, il.PyFunction):
raise DaoCompileTypeError(test) # depends on [control=['if'], data=[]]
return il.Begin((il.AssignFromList(text, pos, il.parse_state), il.If(il.Ge(pos, il.Len(text)), il.failcont(il.FALSE), il.If(il.Call(test, il.GetItem(text, pos)), il.begin(il.SetParseState(il.Tuple(text, il.add(pos, il.Integer(1)))), il.append_failcont(compiler, il.SetParseState(il.Tuple(text, pos))), cont(il.GetItem(text, pos))), il.failcont(il.FALSE))))) |
def decision_function(self, X):
""" Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Args:
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns:
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
Note: This is a copy of the method in `sklearn.linear_model.base.LinearClassifierMixin()`,
changed the `check_array` to convert X to boolean.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
# Number of NER features. These can be int
n_ner = len(self.B)
X_fea = X[ : , :-n_ner]
X_ner = X[ : , -n_ner:]
X_fea = check_array(X_fea, dtype=bool, accept_sparse='csr')
X_ner = check_array(X_ner, dtype=int, ensure_min_features=0, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores | def function[decision_function, parameter[self, X]]:
constant[ Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Args:
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns:
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
Note: This is a copy of the method in `sklearn.linear_model.base.LinearClassifierMixin()`,
changed the `check_array` to convert X to boolean.
]
if <ast.BoolOp object at 0x7da20e956aa0> begin[:]
<ast.Raise object at 0x7da20e957f70>
variable[n_ner] assign[=] call[name[len], parameter[name[self].B]]
variable[X_fea] assign[=] call[name[X]][tuple[[<ast.Slice object at 0x7da20e955c00>, <ast.Slice object at 0x7da20e954f70>]]]
variable[X_ner] assign[=] call[name[X]][tuple[[<ast.Slice object at 0x7da20e955e10>, <ast.Slice object at 0x7da20e955ab0>]]]
variable[X_fea] assign[=] call[name[check_array], parameter[name[X_fea]]]
variable[X_ner] assign[=] call[name[check_array], parameter[name[X_ner]]]
variable[n_features] assign[=] call[name[self].coef_.shape][constant[1]]
if compare[call[name[X].shape][constant[1]] not_equal[!=] name[n_features]] begin[:]
<ast.Raise object at 0x7da1b2580e80>
variable[scores] assign[=] binary_operation[call[name[safe_sparse_dot], parameter[name[X], name[self].coef_.T]] + name[self].intercept_]
return[<ast.IfExp object at 0x7da1b2582b30>] | keyword[def] identifier[decision_function] ( identifier[self] , identifier[X] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[or] identifier[self] . identifier[coef_] keyword[is] keyword[None] :
keyword[raise] identifier[NotFittedError] ( literal[string]
literal[string] %{ literal[string] : identifier[type] ( identifier[self] ). identifier[__name__] })
identifier[n_ner] = identifier[len] ( identifier[self] . identifier[B] )
identifier[X_fea] = identifier[X] [:,:- identifier[n_ner] ]
identifier[X_ner] = identifier[X] [:,- identifier[n_ner] :]
identifier[X_fea] = identifier[check_array] ( identifier[X_fea] , identifier[dtype] = identifier[bool] , identifier[accept_sparse] = literal[string] )
identifier[X_ner] = identifier[check_array] ( identifier[X_ner] , identifier[dtype] = identifier[int] , identifier[ensure_min_features] = literal[int] , identifier[accept_sparse] = literal[string] )
identifier[n_features] = identifier[self] . identifier[coef_] . identifier[shape] [ literal[int] ]
keyword[if] identifier[X] . identifier[shape] [ literal[int] ]!= identifier[n_features] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[X] . identifier[shape] [ literal[int] ], identifier[n_features] ))
identifier[scores] = identifier[safe_sparse_dot] ( identifier[X] , identifier[self] . identifier[coef_] . identifier[T] , identifier[dense_output] = keyword[True] )+ identifier[self] . identifier[intercept_]
keyword[return] identifier[scores] . identifier[ravel] () keyword[if] identifier[scores] . identifier[shape] [ literal[int] ]== literal[int] keyword[else] identifier[scores] | def decision_function(self, X):
""" Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Args:
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns:
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
Note: This is a copy of the method in `sklearn.linear_model.base.LinearClassifierMixin()`,
changed the `check_array` to convert X to boolean.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError('This %(name)s instance is not fitted yet' % {'name': type(self).__name__}) # depends on [control=['if'], data=[]] # Number of NER features. These can be int
n_ner = len(self.B)
X_fea = X[:, :-n_ner]
X_ner = X[:, -n_ner:]
X_fea = check_array(X_fea, dtype=bool, accept_sparse='csr')
X_ner = check_array(X_ner, dtype=int, ensure_min_features=0, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError('X has %d features per sample; expecting %d' % (X.shape[1], n_features)) # depends on [control=['if'], data=['n_features']]
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores |
def query(cls, name, type_=Type.String, description=None, required=None, default=None,
minimum=None, maximum=None, enum=None, **options):
"""
Define a query parameter
"""
if minimum is not None and maximum is not None and minimum > maximum:
raise ValueError("Minimum must be less than or equal to the maximum.")
return cls(name, In.Query, type_, None, description,
required=required, default=default,
minimum=minimum, maximum=maximum,
enum=enum, **options) | def function[query, parameter[cls, name, type_, description, required, default, minimum, maximum, enum]]:
constant[
Define a query parameter
]
if <ast.BoolOp object at 0x7da20c6a9fc0> begin[:]
<ast.Raise object at 0x7da1b26adde0>
return[call[name[cls], parameter[name[name], name[In].Query, name[type_], constant[None], name[description]]]] | keyword[def] identifier[query] ( identifier[cls] , identifier[name] , identifier[type_] = identifier[Type] . identifier[String] , identifier[description] = keyword[None] , identifier[required] = keyword[None] , identifier[default] = keyword[None] ,
identifier[minimum] = keyword[None] , identifier[maximum] = keyword[None] , identifier[enum] = keyword[None] ,** identifier[options] ):
literal[string]
keyword[if] identifier[minimum] keyword[is] keyword[not] keyword[None] keyword[and] identifier[maximum] keyword[is] keyword[not] keyword[None] keyword[and] identifier[minimum] > identifier[maximum] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[cls] ( identifier[name] , identifier[In] . identifier[Query] , identifier[type_] , keyword[None] , identifier[description] ,
identifier[required] = identifier[required] , identifier[default] = identifier[default] ,
identifier[minimum] = identifier[minimum] , identifier[maximum] = identifier[maximum] ,
identifier[enum] = identifier[enum] ,** identifier[options] ) | def query(cls, name, type_=Type.String, description=None, required=None, default=None, minimum=None, maximum=None, enum=None, **options):
"""
Define a query parameter
"""
if minimum is not None and maximum is not None and (minimum > maximum):
raise ValueError('Minimum must be less than or equal to the maximum.') # depends on [control=['if'], data=[]]
return cls(name, In.Query, type_, None, description, required=required, default=default, minimum=minimum, maximum=maximum, enum=enum, **options) |
def update_pagenumber(self):
"""300 page number."""
pages = record_get_field_instances(self.record, '300')
for field in pages:
for idx, (key, value) in enumerate(field[0]):
if key == 'a':
field[0][idx] = ('a', "{0} p".format(value)) | def function[update_pagenumber, parameter[self]]:
constant[300 page number.]
variable[pages] assign[=] call[name[record_get_field_instances], parameter[name[self].record, constant[300]]]
for taget[name[field]] in starred[name[pages]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6c7d30>, <ast.Tuple object at 0x7da20c6c6c80>]]] in starred[call[name[enumerate], parameter[call[name[field]][constant[0]]]]] begin[:]
if compare[name[key] equal[==] constant[a]] begin[:]
call[call[name[field]][constant[0]]][name[idx]] assign[=] tuple[[<ast.Constant object at 0x7da20c6c5660>, <ast.Call object at 0x7da20c6c7460>]] | keyword[def] identifier[update_pagenumber] ( identifier[self] ):
literal[string]
identifier[pages] = identifier[record_get_field_instances] ( identifier[self] . identifier[record] , literal[string] )
keyword[for] identifier[field] keyword[in] identifier[pages] :
keyword[for] identifier[idx] ,( identifier[key] , identifier[value] ) keyword[in] identifier[enumerate] ( identifier[field] [ literal[int] ]):
keyword[if] identifier[key] == literal[string] :
identifier[field] [ literal[int] ][ identifier[idx] ]=( literal[string] , literal[string] . identifier[format] ( identifier[value] )) | def update_pagenumber(self):
"""300 page number."""
pages = record_get_field_instances(self.record, '300')
for field in pages:
for (idx, (key, value)) in enumerate(field[0]):
if key == 'a':
field[0][idx] = ('a', '{0} p'.format(value)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['field']] |
def get_language_from_json(language, key):
"""Finds the given language in a json file."""
file_name = os.path.join(
os.path.dirname(__file__),
'languages',
'{0}.json').format(key.lower())
if os.path.exists(file_name):
try:
with open(file_name, 'r', encoding='utf-8') as fh:
languages = json.loads(fh.read())
if languages.get(language.lower()):
return languages[language.lower()]
except:
log.traceback(logging.DEBUG)
return None | def function[get_language_from_json, parameter[language, key]]:
constant[Finds the given language in a json file.]
variable[file_name] assign[=] call[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[languages], constant[{0}.json]]].format, parameter[call[name[key].lower, parameter[]]]]
if call[name[os].path.exists, parameter[name[file_name]]] begin[:]
<ast.Try object at 0x7da20c6c63e0>
return[constant[None]] | keyword[def] identifier[get_language_from_json] ( identifier[language] , identifier[key] ):
literal[string]
identifier[file_name] = identifier[os] . identifier[path] . identifier[join] (
identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ),
literal[string] ,
literal[string] ). identifier[format] ( identifier[key] . identifier[lower] ())
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_name] ):
keyword[try] :
keyword[with] identifier[open] ( identifier[file_name] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[fh] :
identifier[languages] = identifier[json] . identifier[loads] ( identifier[fh] . identifier[read] ())
keyword[if] identifier[languages] . identifier[get] ( identifier[language] . identifier[lower] ()):
keyword[return] identifier[languages] [ identifier[language] . identifier[lower] ()]
keyword[except] :
identifier[log] . identifier[traceback] ( identifier[logging] . identifier[DEBUG] )
keyword[return] keyword[None] | def get_language_from_json(language, key):
"""Finds the given language in a json file."""
file_name = os.path.join(os.path.dirname(__file__), 'languages', '{0}.json').format(key.lower())
if os.path.exists(file_name):
try:
with open(file_name, 'r', encoding='utf-8') as fh:
languages = json.loads(fh.read())
if languages.get(language.lower()):
return languages[language.lower()] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['fh']] # depends on [control=['try'], data=[]]
except:
log.traceback(logging.DEBUG) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return None |
def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_deployment_scale # noqa: E501
partially update scale of the specified Deployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | def function[patch_namespaced_deployment_scale, parameter[self, name, namespace, body]]:
constant[patch_namespaced_deployment_scale # noqa: E501
partially update scale of the specified Deployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].patch_namespaced_deployment_scale_with_http_info, parameter[name[name], name[namespace], name[body]]]] | keyword[def] identifier[patch_namespaced_deployment_scale] ( identifier[self] , identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[patch_namespaced_deployment_scale_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[patch_namespaced_deployment_scale_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs): # noqa: E501
"patch_namespaced_deployment_scale # noqa: E501\n\n partially update scale of the specified Deployment # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the Scale (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param UNKNOWN_BASE_TYPE body: (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: ExtensionsV1beta1Scale\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data |
def delete_contacts(
self,
ids: List[int]
):
"""Use this method to delete contacts from your Telegram address book.
Args:
ids (List of ``int``):
A list of unique identifiers for the target users.
Can be an ID (int), a username (string) or phone number (string).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
contacts = []
for i in ids:
try:
input_user = self.resolve_peer(i)
except PeerIdInvalid:
continue
else:
if isinstance(input_user, types.InputPeerUser):
contacts.append(input_user)
return self.send(
functions.contacts.DeleteContacts(
id=contacts
)
) | def function[delete_contacts, parameter[self, ids]]:
constant[Use this method to delete contacts from your Telegram address book.
Args:
ids (List of ``int``):
A list of unique identifiers for the target users.
Can be an ID (int), a username (string) or phone number (string).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
]
variable[contacts] assign[=] list[[]]
for taget[name[i]] in starred[name[ids]] begin[:]
<ast.Try object at 0x7da20c76e110>
return[call[name[self].send, parameter[call[name[functions].contacts.DeleteContacts, parameter[]]]]] | keyword[def] identifier[delete_contacts] (
identifier[self] ,
identifier[ids] : identifier[List] [ identifier[int] ]
):
literal[string]
identifier[contacts] =[]
keyword[for] identifier[i] keyword[in] identifier[ids] :
keyword[try] :
identifier[input_user] = identifier[self] . identifier[resolve_peer] ( identifier[i] )
keyword[except] identifier[PeerIdInvalid] :
keyword[continue]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[input_user] , identifier[types] . identifier[InputPeerUser] ):
identifier[contacts] . identifier[append] ( identifier[input_user] )
keyword[return] identifier[self] . identifier[send] (
identifier[functions] . identifier[contacts] . identifier[DeleteContacts] (
identifier[id] = identifier[contacts]
)
) | def delete_contacts(self, ids: List[int]):
"""Use this method to delete contacts from your Telegram address book.
Args:
ids (List of ``int``):
A list of unique identifiers for the target users.
Can be an ID (int), a username (string) or phone number (string).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
contacts = []
for i in ids:
try:
input_user = self.resolve_peer(i) # depends on [control=['try'], data=[]]
except PeerIdInvalid:
continue # depends on [control=['except'], data=[]]
else:
if isinstance(input_user, types.InputPeerUser):
contacts.append(input_user) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return self.send(functions.contacts.DeleteContacts(id=contacts)) |
def read_external_annotation(fn):
"""Read file with junctions from some database. This does not have to be the
same splice junction database used with STAR.
Parameters
----------
fn : filename str
File with splice junctions from annotation. The file should have a
header and contain the following columns: 'gene', 'chrom', 'start',
'end', 'strand', 'chrom:start', 'chrom:end', 'donor', 'acceptor',
'intron'.
Returns
-------
extDF : pandas.DataFrame
DataFrame indexed by splice junction
stats : list of strings
Human readable statistics about the external database.
"""
assert os.path.exists(fn)
extDF = pd.read_table(fn, index_col=0, header=0)
total_num = extDF.shape[0]
# In rare cases, a splice junction might be used by more than one gene. For
# my purposes, these cases are confounding, so I will remove all such splice
# junctions.
intron_count = extDF.intron.value_counts()
extDF['intron_count'] = extDF.intron.apply(lambda x: intron_count.ix[x])
extDF = extDF[extDF.intron_count == 1]
extDF = extDF.drop('intron_count', axis=1)
stats = []
stats.append('External database stats')
stats.append('Read external annotation\t{}'.format(fn))
stats.append('Total number of junctions\t{:,}'.format(total_num))
stats.append(('Number of junctions used in only one '
'gene\t{:,}').format(extDF.shape[0]))
return extDF, stats | def function[read_external_annotation, parameter[fn]]:
constant[Read file with junctions from some database. This does not have to be the
same splice junction database used with STAR.
Parameters
----------
fn : filename str
File with splice junctions from annotation. The file should have a
header and contain the following columns: 'gene', 'chrom', 'start',
'end', 'strand', 'chrom:start', 'chrom:end', 'donor', 'acceptor',
'intron'.
Returns
-------
extDF : pandas.DataFrame
DataFrame indexed by splice junction
stats : list of strings
Human readable statistics about the external database.
]
assert[call[name[os].path.exists, parameter[name[fn]]]]
variable[extDF] assign[=] call[name[pd].read_table, parameter[name[fn]]]
variable[total_num] assign[=] call[name[extDF].shape][constant[0]]
variable[intron_count] assign[=] call[name[extDF].intron.value_counts, parameter[]]
call[name[extDF]][constant[intron_count]] assign[=] call[name[extDF].intron.apply, parameter[<ast.Lambda object at 0x7da20c6a9a50>]]
variable[extDF] assign[=] call[name[extDF]][compare[name[extDF].intron_count equal[==] constant[1]]]
variable[extDF] assign[=] call[name[extDF].drop, parameter[constant[intron_count]]]
variable[stats] assign[=] list[[]]
call[name[stats].append, parameter[constant[External database stats]]]
call[name[stats].append, parameter[call[constant[Read external annotation {}].format, parameter[name[fn]]]]]
call[name[stats].append, parameter[call[constant[Total number of junctions {:,}].format, parameter[name[total_num]]]]]
call[name[stats].append, parameter[call[constant[Number of junctions used in only one gene {:,}].format, parameter[call[name[extDF].shape][constant[0]]]]]]
return[tuple[[<ast.Name object at 0x7da1b143d480>, <ast.Name object at 0x7da1b143c250>]]] | keyword[def] identifier[read_external_annotation] ( identifier[fn] ):
literal[string]
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[fn] )
identifier[extDF] = identifier[pd] . identifier[read_table] ( identifier[fn] , identifier[index_col] = literal[int] , identifier[header] = literal[int] )
identifier[total_num] = identifier[extDF] . identifier[shape] [ literal[int] ]
identifier[intron_count] = identifier[extDF] . identifier[intron] . identifier[value_counts] ()
identifier[extDF] [ literal[string] ]= identifier[extDF] . identifier[intron] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[intron_count] . identifier[ix] [ identifier[x] ])
identifier[extDF] = identifier[extDF] [ identifier[extDF] . identifier[intron_count] == literal[int] ]
identifier[extDF] = identifier[extDF] . identifier[drop] ( literal[string] , identifier[axis] = literal[int] )
identifier[stats] =[]
identifier[stats] . identifier[append] ( literal[string] )
identifier[stats] . identifier[append] ( literal[string] . identifier[format] ( identifier[fn] ))
identifier[stats] . identifier[append] ( literal[string] . identifier[format] ( identifier[total_num] ))
identifier[stats] . identifier[append] (( literal[string]
literal[string] ). identifier[format] ( identifier[extDF] . identifier[shape] [ literal[int] ]))
keyword[return] identifier[extDF] , identifier[stats] | def read_external_annotation(fn):
"""Read file with junctions from some database. This does not have to be the
same splice junction database used with STAR.
Parameters
----------
fn : filename str
File with splice junctions from annotation. The file should have a
header and contain the following columns: 'gene', 'chrom', 'start',
'end', 'strand', 'chrom:start', 'chrom:end', 'donor', 'acceptor',
'intron'.
Returns
-------
extDF : pandas.DataFrame
DataFrame indexed by splice junction
stats : list of strings
Human readable statistics about the external database.
"""
assert os.path.exists(fn)
extDF = pd.read_table(fn, index_col=0, header=0)
total_num = extDF.shape[0]
# In rare cases, a splice junction might be used by more than one gene. For
# my purposes, these cases are confounding, so I will remove all such splice
# junctions.
intron_count = extDF.intron.value_counts()
extDF['intron_count'] = extDF.intron.apply(lambda x: intron_count.ix[x])
extDF = extDF[extDF.intron_count == 1]
extDF = extDF.drop('intron_count', axis=1)
stats = []
stats.append('External database stats')
stats.append('Read external annotation\t{}'.format(fn))
stats.append('Total number of junctions\t{:,}'.format(total_num))
stats.append('Number of junctions used in only one gene\t{:,}'.format(extDF.shape[0]))
return (extDF, stats) |
def write(self, s):
"""Writes ``s`` to the terminal output stream
Writes can be disabled by setting the environment variable
`PROGRESSIVE_NOWRITE` to `'True'`
"""
should_write_s = os.getenv('PROGRESSIVE_NOWRITE') != "True"
if should_write_s:
self._stream.write(s) | def function[write, parameter[self, s]]:
constant[Writes ``s`` to the terminal output stream
Writes can be disabled by setting the environment variable
`PROGRESSIVE_NOWRITE` to `'True'`
]
variable[should_write_s] assign[=] compare[call[name[os].getenv, parameter[constant[PROGRESSIVE_NOWRITE]]] not_equal[!=] constant[True]]
if name[should_write_s] begin[:]
call[name[self]._stream.write, parameter[name[s]]] | keyword[def] identifier[write] ( identifier[self] , identifier[s] ):
literal[string]
identifier[should_write_s] = identifier[os] . identifier[getenv] ( literal[string] )!= literal[string]
keyword[if] identifier[should_write_s] :
identifier[self] . identifier[_stream] . identifier[write] ( identifier[s] ) | def write(self, s):
"""Writes ``s`` to the terminal output stream
Writes can be disabled by setting the environment variable
`PROGRESSIVE_NOWRITE` to `'True'`
"""
should_write_s = os.getenv('PROGRESSIVE_NOWRITE') != 'True'
if should_write_s:
self._stream.write(s) # depends on [control=['if'], data=[]] |
def to_dicts(self, ignore_none: bool=True, force_value: bool=True, ignore_empty: bool=False) -> List[dict]:
    """Convert this instance into a list of plain dicts.

    :param ignore_none: Exclude properties whose value is None if True
    :param force_value: Convert values via to_value (default: str()) of the
        inherited ValueTransformer if True
    :param ignore_empty: Exclude properties whose value is empty if True
    :return: List[Dict]

    Usage:

        >>> from owlmixin.samples import Human, Food
        >>> human_dicts = [
        ...     {
        ...         "id": 1,
        ...         "name": "Tom",
        ...         "favorites": [
        ...             {"name": "Apple", "names_by_lang": {"en": "Apple"}}
        ...         ]
        ...     },
        ...     {
        ...         "id": 2,
        ...         "name": "John",
        ...         "favorites": [
        ...             {"name": "Orange", "names_by_lang": {"en": "Orange"}}
        ...         ]
        ...     }
        ... ]
        >>> Human.from_dicts(human_dicts).to_dicts() == human_dicts
        True

    Passing False for ignore_none keeps the None properties:

        >>> f = Food.from_dicts([{"name": "Apple"}]).to_dicts(ignore_none=False)
        >>> f[0]["name"]
        'Apple'
        >>> "names_by_lang" in f[0]
        True
        >>> f[0]["names_by_lang"]

    By default they are dropped:

        >>> f = Food.from_dicts([{"name": "Apple"}]).to_dicts()
        >>> f[0]["name"]
        'Apple'
        >>> "names_by_lang" in f[0]
        False

    Passing True for ignore_empty drops empty properties:

        >>> f = Human.from_dicts([{"id": 1, "name": "Ichiro", "favorites": []}]).to_dicts()
        >>> f[0]["favorites"]
        []
        >>> f = Human.from_dicts([{"id": 1, "name": "Ichiro", "favorites": []}]).to_dicts(ignore_empty=True)
        >>> "favorites" in f[0]
        False
    """
    # Delegate the actual conversion to the shared traversal helper.
    converted = traverse_list(self, ignore_none, force_value, ignore_empty)
    return converted
constant[From instance to dict
:param ignore_none: Properties which is None are excluded if True
:param force_value: Transform to value using to_value (default: str()) of ValueTransformer which inherited if True
:param ignore_empty: Properties which is empty are excluded if True
:return: List[Dict]
Usage:
>>> from owlmixin.samples import Human, Food
>>> human_dicts = [
... {
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple"}}
... ]
... },
... {
... "id": 2,
... "name": "John",
... "favorites": [
... {"name": "Orange", "names_by_lang": {"en": "Orange"}}
... ]
... }
... ]
>>> Human.from_dicts(human_dicts).to_dicts() == human_dicts
True
You can include None properties by specifying False for ignore_none
>>> f = Food.from_dicts([{"name": "Apple"}]).to_dicts(ignore_none=False)
>>> f[0]["name"]
'Apple'
>>> "names_by_lang" in f[0]
True
>>> f[0]["names_by_lang"]
As default
>>> f = Food.from_dicts([{"name": "Apple"}]).to_dicts()
>>> f[0]["name"]
'Apple'
>>> "names_by_lang" in f[0]
False
You can exclude Empty properties by specifying True for ignore_empty
>>> f = Human.from_dicts([{"id": 1, "name": "Ichiro", "favorites": []}]).to_dicts()
>>> f[0]["favorites"]
[]
>>> f = Human.from_dicts([{"id": 1, "name": "Ichiro", "favorites": []}]).to_dicts(ignore_empty=True)
>>> "favorites" in f[0]
False
]
return[call[name[traverse_list], parameter[name[self], name[ignore_none], name[force_value], name[ignore_empty]]]] | keyword[def] identifier[to_dicts] ( identifier[self] , identifier[ignore_none] : identifier[bool] = keyword[True] , identifier[force_value] : identifier[bool] = keyword[True] , identifier[ignore_empty] : identifier[bool] = keyword[False] )-> identifier[List] [ identifier[dict] ]:
literal[string]
keyword[return] identifier[traverse_list] ( identifier[self] , identifier[ignore_none] , identifier[force_value] , identifier[ignore_empty] ) | def to_dicts(self, ignore_none: bool=True, force_value: bool=True, ignore_empty: bool=False) -> List[dict]:
"""From instance to dict
:param ignore_none: Properties which is None are excluded if True
:param force_value: Transform to value using to_value (default: str()) of ValueTransformer which inherited if True
:param ignore_empty: Properties which is empty are excluded if True
:return: List[Dict]
Usage:
>>> from owlmixin.samples import Human, Food
>>> human_dicts = [
... {
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple"}}
... ]
... },
... {
... "id": 2,
... "name": "John",
... "favorites": [
... {"name": "Orange", "names_by_lang": {"en": "Orange"}}
... ]
... }
... ]
>>> Human.from_dicts(human_dicts).to_dicts() == human_dicts
True
You can include None properties by specifying False for ignore_none
>>> f = Food.from_dicts([{"name": "Apple"}]).to_dicts(ignore_none=False)
>>> f[0]["name"]
'Apple'
>>> "names_by_lang" in f[0]
True
>>> f[0]["names_by_lang"]
As default
>>> f = Food.from_dicts([{"name": "Apple"}]).to_dicts()
>>> f[0]["name"]
'Apple'
>>> "names_by_lang" in f[0]
False
You can exclude Empty properties by specifying True for ignore_empty
>>> f = Human.from_dicts([{"id": 1, "name": "Ichiro", "favorites": []}]).to_dicts()
>>> f[0]["favorites"]
[]
>>> f = Human.from_dicts([{"id": 1, "name": "Ichiro", "favorites": []}]).to_dicts(ignore_empty=True)
>>> "favorites" in f[0]
False
"""
return traverse_list(self, ignore_none, force_value, ignore_empty) |
def _send_stream_features(self):
"""Send stream <features/>.
[receiving entity only]"""
self.features = self._make_stream_features()
self._write_element(self.features) | def function[_send_stream_features, parameter[self]]:
constant[Send stream <features/>.
[receiving entity only]]
name[self].features assign[=] call[name[self]._make_stream_features, parameter[]]
call[name[self]._write_element, parameter[name[self].features]] | keyword[def] identifier[_send_stream_features] ( identifier[self] ):
literal[string]
identifier[self] . identifier[features] = identifier[self] . identifier[_make_stream_features] ()
identifier[self] . identifier[_write_element] ( identifier[self] . identifier[features] ) | def _send_stream_features(self):
"""Send stream <features/>.
[receiving entity only]"""
self.features = self._make_stream_features()
self._write_element(self.features) |
def flatten(self, l):
    """Flatten an arbitrarily nested list of lists into a single flat list.

    Only ``list`` instances are expanded; any other element (including
    tuples and strings) is kept as an atom, matching the previous
    behaviour.

    Parameters
    ----------
    l : list
        The (possibly nested) list to flatten.

    Returns
    -------
    list
        A new flat list; ``l`` is not modified.
    """
    # Iterative depth-first traversal.  The previous implementation
    # recursed once per element and sliced the list on every step, which
    # was O(n^2) and raised RecursionError for lists longer than the
    # interpreter's recursion limit.
    flat = []
    stack = [iter(l)]
    while stack:
        try:
            item = next(stack[-1])
        except StopIteration:
            # Current (sub)list exhausted; resume its parent.
            stack.pop()
            continue
        if isinstance(item, list):
            stack.append(iter(item))
        else:
            flat.append(item)
    return flat
constant[Turn a list of lists into a list.]
if compare[name[l] equal[==] list[[]]] begin[:]
return[name[l]]
if call[name[isinstance], parameter[call[name[l]][constant[0]], name[list]]] begin[:]
return[binary_operation[call[name[self].flatten, parameter[call[name[l]][constant[0]]]] + call[name[self].flatten, parameter[call[name[l]][<ast.Slice object at 0x7da18bc73730>]]]]]
return[binary_operation[call[name[l]][<ast.Slice object at 0x7da18bc71540>] + call[name[self].flatten, parameter[call[name[l]][<ast.Slice object at 0x7da18bc72aa0>]]]]] | keyword[def] identifier[flatten] ( identifier[self] , identifier[l] ):
literal[string]
keyword[if] identifier[l] ==[]:
keyword[return] identifier[l]
keyword[if] identifier[isinstance] ( identifier[l] [ literal[int] ], identifier[list] ):
keyword[return] identifier[self] . identifier[flatten] ( identifier[l] [ literal[int] ])+ identifier[self] . identifier[flatten] ( identifier[l] [ literal[int] :])
keyword[return] identifier[l] [: literal[int] ]+ identifier[self] . identifier[flatten] ( identifier[l] [ literal[int] :]) | def flatten(self, l):
"""Turn a list of lists into a list."""
if l == []:
return l # depends on [control=['if'], data=['l']]
if isinstance(l[0], list):
return self.flatten(l[0]) + self.flatten(l[1:]) # depends on [control=['if'], data=[]]
return l[:1] + self.flatten(l[1:]) |
def wrap(s, width=80):
    """
    Insert newlines into the text so that each line fits within the
    user-specified width.

    Parameters
    ----------
    s : str
    width : int

    Returns
    -------
    text : str

    Notes
    -----
    .. versionadded:: 1.1
    """
    wrapped_lines = textwrap.wrap(str(s), width=width)
    return '\n'.join(wrapped_lines)
constant[
Formats the text input with newlines given the user specified width for
each line.
Parameters
----------
s : str
width : int
Returns
-------
text : str
Notes
-----
.. versionadded:: 1.1
]
return[call[constant[
].join, parameter[call[name[textwrap].wrap, parameter[call[name[str], parameter[name[s]]]]]]]] | keyword[def] identifier[wrap] ( identifier[s] , identifier[width] = literal[int] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[textwrap] . identifier[wrap] ( identifier[str] ( identifier[s] ), identifier[width] = identifier[width] )) | def wrap(s, width=80):
"""
Formats the text input with newlines given the user specified width for
each line.
Parameters
----------
s : str
width : int
Returns
-------
text : str
Notes
-----
.. versionadded:: 1.1
"""
return '\n'.join(textwrap.wrap(str(s), width=width)) |
def _ack_coord_handle(
        coord, coord_handle, queue_mapper, msg_tracker, timing_state,
        tile_proc_logger, stats_handler):
    """Acknowledge a processed coordinate on its originating tile queue.

    Shared by the code paths that finish handling a coordinate.  Marks
    *coord_handle* as done with *msg_tracker*; when that completes the whole
    pyramid, the queue job is acknowledged via ``job_done`` and the pyramid
    timing is logged, otherwise a ``job_progress`` heartbeat is sent.

    Returns a ``(queue_handle, error)`` tuple, either of which can be None:
    ``(None, None)`` when there was no queue job to acknowledge, and a
    non-None error when ``job_done``/``job_progress`` raised.
    """
    # returns tuple of (handle, error), either of which can be None
    track_result = msg_tracker.done(coord_handle)
    queue_handle = track_result.queue_handle
    if not queue_handle:
        # No queue job is associated with this coordinate; nothing to ack.
        return None, None
    tile_queue = queue_mapper.get_queue(queue_handle.queue_id)
    assert tile_queue, \
        'Missing tile_queue: %s' % queue_handle.queue_id
    parent_tile = None
    if track_result.all_done:
        # Entire pyramid finished: acknowledge the job on the queue.
        parent_tile = track_result.parent_tile
        try:
            tile_queue.job_done(queue_handle.handle)
        except Exception as e:
            # Log the failure but still hand the error back to the caller.
            stacktrace = format_stacktrace_one_line()
            tile_proc_logger.error_job_done(
                'tile_queue.job_done', e, stacktrace,
                coord, parent_tile,
            )
            return queue_handle, e
        if parent_tile is not None:
            # we completed a tile pyramid and should log appropriately
            start_time = timing_state['start']
            stop_time = convert_seconds_to_millis(time.time())
            tile_proc_logger.log_processed_pyramid(
                parent_tile, start_time, stop_time)
            stats_handler.processed_pyramid(
                parent_tile, start_time, stop_time)
    else:
        # Pyramid not yet complete: emit a progress heartbeat instead.
        try:
            tile_queue.job_progress(queue_handle.handle)
        except Exception as e:
            stacktrace = format_stacktrace_one_line()
            err_details = {"queue_handle": queue_handle.handle}
            if isinstance(e, JobProgressException):
                # Prefer the richer details carried by the exception.
                err_details = e.err_details
            tile_proc_logger.error_job_progress(
                'tile_queue.job_progress', e, stacktrace,
                coord, parent_tile, err_details,
            )
            return queue_handle, e
    return queue_handle, None
constant[share code for acknowledging a coordinate]
variable[track_result] assign[=] call[name[msg_tracker].done, parameter[name[coord_handle]]]
variable[queue_handle] assign[=] name[track_result].queue_handle
if <ast.UnaryOp object at 0x7da1b04a55a0> begin[:]
return[tuple[[<ast.Constant object at 0x7da1b04a4cd0>, <ast.Constant object at 0x7da1b04a6050>]]]
variable[tile_queue] assign[=] call[name[queue_mapper].get_queue, parameter[name[queue_handle].queue_id]]
assert[name[tile_queue]]
variable[parent_tile] assign[=] constant[None]
if name[track_result].all_done begin[:]
variable[parent_tile] assign[=] name[track_result].parent_tile
<ast.Try object at 0x7da1b04a70d0>
if compare[name[parent_tile] is_not constant[None]] begin[:]
variable[start_time] assign[=] call[name[timing_state]][constant[start]]
variable[stop_time] assign[=] call[name[convert_seconds_to_millis], parameter[call[name[time].time, parameter[]]]]
call[name[tile_proc_logger].log_processed_pyramid, parameter[name[parent_tile], name[start_time], name[stop_time]]]
call[name[stats_handler].processed_pyramid, parameter[name[parent_tile], name[start_time], name[stop_time]]]
return[tuple[[<ast.Name object at 0x7da1b04a7250>, <ast.Constant object at 0x7da1b04a4fd0>]]] | keyword[def] identifier[_ack_coord_handle] (
identifier[coord] , identifier[coord_handle] , identifier[queue_mapper] , identifier[msg_tracker] , identifier[timing_state] ,
identifier[tile_proc_logger] , identifier[stats_handler] ):
literal[string]
identifier[track_result] = identifier[msg_tracker] . identifier[done] ( identifier[coord_handle] )
identifier[queue_handle] = identifier[track_result] . identifier[queue_handle]
keyword[if] keyword[not] identifier[queue_handle] :
keyword[return] keyword[None] , keyword[None]
identifier[tile_queue] = identifier[queue_mapper] . identifier[get_queue] ( identifier[queue_handle] . identifier[queue_id] )
keyword[assert] identifier[tile_queue] , literal[string] % identifier[queue_handle] . identifier[queue_id]
identifier[parent_tile] = keyword[None]
keyword[if] identifier[track_result] . identifier[all_done] :
identifier[parent_tile] = identifier[track_result] . identifier[parent_tile]
keyword[try] :
identifier[tile_queue] . identifier[job_done] ( identifier[queue_handle] . identifier[handle] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[stacktrace] = identifier[format_stacktrace_one_line] ()
identifier[tile_proc_logger] . identifier[error_job_done] (
literal[string] , identifier[e] , identifier[stacktrace] ,
identifier[coord] , identifier[parent_tile] ,
)
keyword[return] identifier[queue_handle] , identifier[e]
keyword[if] identifier[parent_tile] keyword[is] keyword[not] keyword[None] :
identifier[start_time] = identifier[timing_state] [ literal[string] ]
identifier[stop_time] = identifier[convert_seconds_to_millis] ( identifier[time] . identifier[time] ())
identifier[tile_proc_logger] . identifier[log_processed_pyramid] (
identifier[parent_tile] , identifier[start_time] , identifier[stop_time] )
identifier[stats_handler] . identifier[processed_pyramid] (
identifier[parent_tile] , identifier[start_time] , identifier[stop_time] )
keyword[else] :
keyword[try] :
identifier[tile_queue] . identifier[job_progress] ( identifier[queue_handle] . identifier[handle] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[stacktrace] = identifier[format_stacktrace_one_line] ()
identifier[err_details] ={ literal[string] : identifier[queue_handle] . identifier[handle] }
keyword[if] identifier[isinstance] ( identifier[e] , identifier[JobProgressException] ):
identifier[err_details] = identifier[e] . identifier[err_details]
identifier[tile_proc_logger] . identifier[error_job_progress] (
literal[string] , identifier[e] , identifier[stacktrace] ,
identifier[coord] , identifier[parent_tile] , identifier[err_details] ,
)
keyword[return] identifier[queue_handle] , identifier[e]
keyword[return] identifier[queue_handle] , keyword[None] | def _ack_coord_handle(coord, coord_handle, queue_mapper, msg_tracker, timing_state, tile_proc_logger, stats_handler):
"""share code for acknowledging a coordinate"""
# returns tuple of (handle, error), either of which can be None
track_result = msg_tracker.done(coord_handle)
queue_handle = track_result.queue_handle
if not queue_handle:
return (None, None) # depends on [control=['if'], data=[]]
tile_queue = queue_mapper.get_queue(queue_handle.queue_id)
assert tile_queue, 'Missing tile_queue: %s' % queue_handle.queue_id
parent_tile = None
if track_result.all_done:
parent_tile = track_result.parent_tile
try:
tile_queue.job_done(queue_handle.handle) # depends on [control=['try'], data=[]]
except Exception as e:
stacktrace = format_stacktrace_one_line()
tile_proc_logger.error_job_done('tile_queue.job_done', e, stacktrace, coord, parent_tile)
return (queue_handle, e) # depends on [control=['except'], data=['e']]
if parent_tile is not None:
# we completed a tile pyramid and should log appropriately
start_time = timing_state['start']
stop_time = convert_seconds_to_millis(time.time())
tile_proc_logger.log_processed_pyramid(parent_tile, start_time, stop_time)
stats_handler.processed_pyramid(parent_tile, start_time, stop_time) # depends on [control=['if'], data=['parent_tile']] # depends on [control=['if'], data=[]]
else:
try:
tile_queue.job_progress(queue_handle.handle) # depends on [control=['try'], data=[]]
except Exception as e:
stacktrace = format_stacktrace_one_line()
err_details = {'queue_handle': queue_handle.handle}
if isinstance(e, JobProgressException):
err_details = e.err_details # depends on [control=['if'], data=[]]
tile_proc_logger.error_job_progress('tile_queue.job_progress', e, stacktrace, coord, parent_tile, err_details)
return (queue_handle, e) # depends on [control=['except'], data=['e']]
return (queue_handle, None) |
def filter_noexpand_columns(columns):
    """Partition *columns* by the noexpand prefix.

    Parameters
    ----------
    columns: sequence of str
        A sequence of strings to be split

    Returns
    -------
    Two lists: the first holds the strings that do not carry the noexpand
    prefix, the second holds those that do, with the prefix stripped off.
    """
    plain = []
    stripped = []
    prefix_len = len(NOEXPAND_PREFIX)
    # Single pass: route each column to the appropriate bucket.
    for column in columns:
        if column.startswith(NOEXPAND_PREFIX):
            stripped.append(column[prefix_len:])
        else:
            plain.append(column)
    return plain, stripped
constant[Return columns not containing and containing the noexpand prefix.
Parameters
----------
columns: sequence of str
A sequence of strings to be split
Returns
-------
Two lists, the first containing strings without the noexpand prefix, the
second containing those that do with the prefix filtered out.
]
variable[prefix_len] assign[=] call[name[len], parameter[name[NOEXPAND_PREFIX]]]
variable[noexpand] assign[=] <ast.ListComp object at 0x7da1b11fc670>
variable[other] assign[=] <ast.ListComp object at 0x7da1b11fc940>
return[tuple[[<ast.Name object at 0x7da1b11fd330>, <ast.Name object at 0x7da1b11fd300>]]] | keyword[def] identifier[filter_noexpand_columns] ( identifier[columns] ):
literal[string]
identifier[prefix_len] = identifier[len] ( identifier[NOEXPAND_PREFIX] )
identifier[noexpand] =[ identifier[c] [ identifier[prefix_len] :] keyword[for] identifier[c] keyword[in] identifier[columns] keyword[if] identifier[c] . identifier[startswith] ( identifier[NOEXPAND_PREFIX] )]
identifier[other] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[columns] keyword[if] keyword[not] identifier[c] . identifier[startswith] ( identifier[NOEXPAND_PREFIX] )]
keyword[return] identifier[other] , identifier[noexpand] | def filter_noexpand_columns(columns):
"""Return columns not containing and containing the noexpand prefix.
Parameters
----------
columns: sequence of str
A sequence of strings to be split
Returns
-------
Two lists, the first containing strings without the noexpand prefix, the
second containing those that do with the prefix filtered out.
"""
prefix_len = len(NOEXPAND_PREFIX)
noexpand = [c[prefix_len:] for c in columns if c.startswith(NOEXPAND_PREFIX)]
other = [c for c in columns if not c.startswith(NOEXPAND_PREFIX)]
return (other, noexpand) |
def obfuscate_global_import_methods(module, tokens, name_generator, table=None):
    """
    Replaces the used methods of globally-imported modules with obfuscated
    equivalents. Updates *tokens* in-place.

    *module* must be the name of the module we're currently obfuscating.
    NOTE(review): *module* is not referenced in the body below -- presumably
    kept for API symmetry with sibling functions; verify before removing.

    If *table* is provided, replacements for import methods will be attempted
    to be looked up there before generating a new unique name.  *table* is
    indexed as ``table[0]``, i.e. the shared lookup dict is its first element.
    """
    # NOTE(review): global_imports is computed but never used below.
    global_imports = analyze.enumerate_global_imports(tokens)
    #print("global_imports: %s" % global_imports)
    local_imports = analyze.enumerate_local_modules(tokens, os.getcwd())
    #print("local_imports: %s" % local_imports)
    # module_methods are dotted "module.method" strings used in the source.
    module_methods = analyze.enumerate_import_methods(tokens)
    #print("module_methods: %s" % module_methods)
    # Make a 1-to-1 mapping dict of module_method<->replacement:
    if table:
        replacement_dict = {}
        for module_method in module_methods:
            # Reuse an existing replacement when one is already in the table.
            if module_method in table[0].keys():
                replacement_dict.update({module_method: table[0][module_method]})
            else:
                replacement_dict.update({module_method: next(name_generator)})
        # Update the global lookup table with the new entries:
        table[0].update(replacement_dict)
    else:
        # No shared table: generate a fresh obfuscated name per method.
        method_map = [next(name_generator) for i in module_methods]
        replacement_dict = dict(zip(module_methods, method_map))
    import_line = False
    # Replace module methods with our obfuscated names in *tokens*
    for module_method in module_methods:
        for index, tok in enumerate(tokens):
            token_type = tok[0]
            token_string = tok[1]
            if token_type != tokenize.NAME:
                continue # Speedup
            # NOTE(review): no-op expression -- looks like leftover
            # debugging; verify it is safe to delete.
            tokens[index+1][1]
            # Match the "<module> . <method>" token triple and collapse it
            # into the single replacement name (the '.' and method tokens
            # are blanked out, not removed, so indices stay valid).
            # NOTE(review): split('.')[1] assumes a single-dot method path;
            # deeper paths like "a.b.c" would only match on "b" -- verify.
            if token_string == module_method.split('.')[0]:
                if tokens[index+1][1] == '.':
                    if tokens[index+2][1] == module_method.split('.')[1]:
                        if table: # Attempt to use an existing value
                            tokens[index][1] = table[0][module_method]
                            tokens[index+1][1] = ""
                            tokens[index+2][1] = ""
                        else:
                            tokens[index][1] = replacement_dict[module_method]
                            tokens[index+1][1] = ""
                            tokens[index+2][1] = ""
    # Insert our map of replacement=what after each respective module import
    # NOTE(review): the loop variable `replacement` is unused;
    # replacement_dict[module_method] is looked up again inside the loop.
    for module_method, replacement in replacement_dict.items():
        indents = []
        index = 0
        for tok in tokens[:]:
            token_type = tok[0]
            token_string = tok[1]
            if token_type == tokenize.NEWLINE:
                import_line = False
            elif token_type == tokenize.INDENT:
                # Track the current indentation stack so the inserted
                # assignment line can be indented to match its context.
                indents.append(tok)
            elif token_type == tokenize.DEDENT:
                indents.pop()
            elif token_string == "import":
                import_line = True
            elif import_line:
                if token_string == module_method.split('.')[0]:
                    # Insert the obfuscation assignment after the import
                    imported_module = ".".join(module_method.split('.')[:-1])
                    if table and imported_module in local_imports:
                        line = "%s=%s.%s\n" % ( # This ends up being 6 tokens
                            replacement_dict[module_method],
                            imported_module,
                            replacement_dict[module_method]
                        )
                    else:
                        line = "%s=%s\n" % ( # This ends up being 6 tokens
                            replacement_dict[module_method], module_method)
                    for indent in indents: # Fix indentation
                        line = "%s%s" % (indent[1], line)
                        index += 1
                    insert_in_next_line(tokens, index, line)
                    index += 6 # To make up for the six tokens we inserted
            index += 1
constant[
Replaces the used methods of globally-imported modules with obfuscated
equivalents. Updates *tokens* in-place.
*module* must be the name of the module we're currently obfuscating
If *table* is provided, replacements for import methods will be attempted
to be looked up there before generating a new unique name.
]
variable[global_imports] assign[=] call[name[analyze].enumerate_global_imports, parameter[name[tokens]]]
variable[local_imports] assign[=] call[name[analyze].enumerate_local_modules, parameter[name[tokens], call[name[os].getcwd, parameter[]]]]
variable[module_methods] assign[=] call[name[analyze].enumerate_import_methods, parameter[name[tokens]]]
if name[table] begin[:]
variable[replacement_dict] assign[=] dictionary[[], []]
for taget[name[module_method]] in starred[name[module_methods]] begin[:]
if compare[name[module_method] in call[call[name[table]][constant[0]].keys, parameter[]]] begin[:]
call[name[replacement_dict].update, parameter[dictionary[[<ast.Name object at 0x7da18f58d1e0>], [<ast.Subscript object at 0x7da18f58c280>]]]]
call[call[name[table]][constant[0]].update, parameter[name[replacement_dict]]]
variable[import_line] assign[=] constant[False]
for taget[name[module_method]] in starred[name[module_methods]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f58e5f0>, <ast.Name object at 0x7da18f58cfd0>]]] in starred[call[name[enumerate], parameter[name[tokens]]]] begin[:]
variable[token_type] assign[=] call[name[tok]][constant[0]]
variable[token_string] assign[=] call[name[tok]][constant[1]]
if compare[name[token_type] not_equal[!=] name[tokenize].NAME] begin[:]
continue
call[call[name[tokens]][binary_operation[name[index] + constant[1]]]][constant[1]]
if compare[name[token_string] equal[==] call[call[name[module_method].split, parameter[constant[.]]]][constant[0]]] begin[:]
if compare[call[call[name[tokens]][binary_operation[name[index] + constant[1]]]][constant[1]] equal[==] constant[.]] begin[:]
if compare[call[call[name[tokens]][binary_operation[name[index] + constant[2]]]][constant[1]] equal[==] call[call[name[module_method].split, parameter[constant[.]]]][constant[1]]] begin[:]
if name[table] begin[:]
call[call[name[tokens]][name[index]]][constant[1]] assign[=] call[call[name[table]][constant[0]]][name[module_method]]
call[call[name[tokens]][binary_operation[name[index] + constant[1]]]][constant[1]] assign[=] constant[]
call[call[name[tokens]][binary_operation[name[index] + constant[2]]]][constant[1]] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da2054a7310>, <ast.Name object at 0x7da2054a64a0>]]] in starred[call[name[replacement_dict].items, parameter[]]] begin[:]
variable[indents] assign[=] list[[]]
variable[index] assign[=] constant[0]
for taget[name[tok]] in starred[call[name[tokens]][<ast.Slice object at 0x7da2054a58d0>]] begin[:]
variable[token_type] assign[=] call[name[tok]][constant[0]]
variable[token_string] assign[=] call[name[tok]][constant[1]]
if compare[name[token_type] equal[==] name[tokenize].NEWLINE] begin[:]
variable[import_line] assign[=] constant[False]
<ast.AugAssign object at 0x7da2054a44f0> | keyword[def] identifier[obfuscate_global_import_methods] ( identifier[module] , identifier[tokens] , identifier[name_generator] , identifier[table] = keyword[None] ):
literal[string]
identifier[global_imports] = identifier[analyze] . identifier[enumerate_global_imports] ( identifier[tokens] )
identifier[local_imports] = identifier[analyze] . identifier[enumerate_local_modules] ( identifier[tokens] , identifier[os] . identifier[getcwd] ())
identifier[module_methods] = identifier[analyze] . identifier[enumerate_import_methods] ( identifier[tokens] )
keyword[if] identifier[table] :
identifier[replacement_dict] ={}
keyword[for] identifier[module_method] keyword[in] identifier[module_methods] :
keyword[if] identifier[module_method] keyword[in] identifier[table] [ literal[int] ]. identifier[keys] ():
identifier[replacement_dict] . identifier[update] ({ identifier[module_method] : identifier[table] [ literal[int] ][ identifier[module_method] ]})
keyword[else] :
identifier[replacement_dict] . identifier[update] ({ identifier[module_method] : identifier[next] ( identifier[name_generator] )})
identifier[table] [ literal[int] ]. identifier[update] ( identifier[replacement_dict] )
keyword[else] :
identifier[method_map] =[ identifier[next] ( identifier[name_generator] ) keyword[for] identifier[i] keyword[in] identifier[module_methods] ]
identifier[replacement_dict] = identifier[dict] ( identifier[zip] ( identifier[module_methods] , identifier[method_map] ))
identifier[import_line] = keyword[False]
keyword[for] identifier[module_method] keyword[in] identifier[module_methods] :
keyword[for] identifier[index] , identifier[tok] keyword[in] identifier[enumerate] ( identifier[tokens] ):
identifier[token_type] = identifier[tok] [ literal[int] ]
identifier[token_string] = identifier[tok] [ literal[int] ]
keyword[if] identifier[token_type] != identifier[tokenize] . identifier[NAME] :
keyword[continue]
identifier[tokens] [ identifier[index] + literal[int] ][ literal[int] ]
keyword[if] identifier[token_string] == identifier[module_method] . identifier[split] ( literal[string] )[ literal[int] ]:
keyword[if] identifier[tokens] [ identifier[index] + literal[int] ][ literal[int] ]== literal[string] :
keyword[if] identifier[tokens] [ identifier[index] + literal[int] ][ literal[int] ]== identifier[module_method] . identifier[split] ( literal[string] )[ literal[int] ]:
keyword[if] identifier[table] :
identifier[tokens] [ identifier[index] ][ literal[int] ]= identifier[table] [ literal[int] ][ identifier[module_method] ]
identifier[tokens] [ identifier[index] + literal[int] ][ literal[int] ]= literal[string]
identifier[tokens] [ identifier[index] + literal[int] ][ literal[int] ]= literal[string]
keyword[else] :
identifier[tokens] [ identifier[index] ][ literal[int] ]= identifier[replacement_dict] [ identifier[module_method] ]
identifier[tokens] [ identifier[index] + literal[int] ][ literal[int] ]= literal[string]
identifier[tokens] [ identifier[index] + literal[int] ][ literal[int] ]= literal[string]
keyword[for] identifier[module_method] , identifier[replacement] keyword[in] identifier[replacement_dict] . identifier[items] ():
identifier[indents] =[]
identifier[index] = literal[int]
keyword[for] identifier[tok] keyword[in] identifier[tokens] [:]:
identifier[token_type] = identifier[tok] [ literal[int] ]
identifier[token_string] = identifier[tok] [ literal[int] ]
keyword[if] identifier[token_type] == identifier[tokenize] . identifier[NEWLINE] :
identifier[import_line] = keyword[False]
keyword[elif] identifier[token_type] == identifier[tokenize] . identifier[INDENT] :
identifier[indents] . identifier[append] ( identifier[tok] )
keyword[elif] identifier[token_type] == identifier[tokenize] . identifier[DEDENT] :
identifier[indents] . identifier[pop] ()
keyword[elif] identifier[token_string] == literal[string] :
identifier[import_line] = keyword[True]
keyword[elif] identifier[import_line] :
keyword[if] identifier[token_string] == identifier[module_method] . identifier[split] ( literal[string] )[ literal[int] ]:
identifier[imported_module] = literal[string] . identifier[join] ( identifier[module_method] . identifier[split] ( literal[string] )[:- literal[int] ])
keyword[if] identifier[table] keyword[and] identifier[imported_module] keyword[in] identifier[local_imports] :
identifier[line] = literal[string] %(
identifier[replacement_dict] [ identifier[module_method] ],
identifier[imported_module] ,
identifier[replacement_dict] [ identifier[module_method] ]
)
keyword[else] :
identifier[line] = literal[string] %(
identifier[replacement_dict] [ identifier[module_method] ], identifier[module_method] )
keyword[for] identifier[indent] keyword[in] identifier[indents] :
identifier[line] = literal[string] %( identifier[indent] [ literal[int] ], identifier[line] )
identifier[index] += literal[int]
identifier[insert_in_next_line] ( identifier[tokens] , identifier[index] , identifier[line] )
identifier[index] += literal[int]
identifier[index] += literal[int] | def obfuscate_global_import_methods(module, tokens, name_generator, table=None):
"""
Replaces the used methods of globally-imported modules with obfuscated
equivalents. Updates *tokens* in-place.
*module* must be the name of the module we're currently obfuscating
If *table* is provided, replacements for import methods will be attempted
to be looked up there before generating a new unique name.
"""
global_imports = analyze.enumerate_global_imports(tokens)
#print("global_imports: %s" % global_imports)
local_imports = analyze.enumerate_local_modules(tokens, os.getcwd())
#print("local_imports: %s" % local_imports)
module_methods = analyze.enumerate_import_methods(tokens)
#print("module_methods: %s" % module_methods)
# Make a 1-to-1 mapping dict of module_method<->replacement:
if table:
replacement_dict = {}
for module_method in module_methods:
if module_method in table[0].keys():
replacement_dict.update({module_method: table[0][module_method]}) # depends on [control=['if'], data=['module_method']]
else:
replacement_dict.update({module_method: next(name_generator)}) # depends on [control=['for'], data=['module_method']]
# Update the global lookup table with the new entries:
table[0].update(replacement_dict) # depends on [control=['if'], data=[]]
else:
method_map = [next(name_generator) for i in module_methods]
replacement_dict = dict(zip(module_methods, method_map))
import_line = False
# Replace module methods with our obfuscated names in *tokens*
for module_method in module_methods:
for (index, tok) in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_type != tokenize.NAME:
continue # Speedup # depends on [control=['if'], data=[]]
tokens[index + 1][1]
if token_string == module_method.split('.')[0]:
if tokens[index + 1][1] == '.':
if tokens[index + 2][1] == module_method.split('.')[1]:
if table: # Attempt to use an existing value
tokens[index][1] = table[0][module_method]
tokens[index + 1][1] = ''
tokens[index + 2][1] = '' # depends on [control=['if'], data=[]]
else:
tokens[index][1] = replacement_dict[module_method]
tokens[index + 1][1] = ''
tokens[index + 2][1] = '' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['module_method']]
# Insert our map of replacement=what after each respective module import
for (module_method, replacement) in replacement_dict.items():
indents = []
index = 0
for tok in tokens[:]:
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.NEWLINE:
import_line = False # depends on [control=['if'], data=[]]
elif token_type == tokenize.INDENT:
indents.append(tok) # depends on [control=['if'], data=[]]
elif token_type == tokenize.DEDENT:
indents.pop() # depends on [control=['if'], data=[]]
elif token_string == 'import':
import_line = True # depends on [control=['if'], data=[]]
elif import_line:
if token_string == module_method.split('.')[0]:
# Insert the obfuscation assignment after the import
imported_module = '.'.join(module_method.split('.')[:-1])
if table and imported_module in local_imports: # This ends up being 6 tokens
line = '%s=%s.%s\n' % (replacement_dict[module_method], imported_module, replacement_dict[module_method]) # depends on [control=['if'], data=[]]
else: # This ends up being 6 tokens
line = '%s=%s\n' % (replacement_dict[module_method], module_method)
for indent in indents: # Fix indentation
line = '%s%s' % (indent[1], line)
index += 1 # depends on [control=['for'], data=['indent']]
insert_in_next_line(tokens, index, line)
index += 6 # To make up for the six tokens we inserted # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
index += 1 # depends on [control=['for'], data=['tok']] # depends on [control=['for'], data=[]] |
def initialize_options(self):
"""Find all files of all locales."""
self.paths = []
self.separators = (',', ':')
self.data_dir = join(here, 'mimesis', 'data')
self.before_total = 0
self.after_total = 0
for root, _, files in os.walk(self.data_dir):
for file in sorted(files):
if splitext(file)[1] == '.json':
self.paths.append(join(
relpath(root, self.data_dir), file)) | def function[initialize_options, parameter[self]]:
constant[Find all files of all locales.]
name[self].paths assign[=] list[[]]
name[self].separators assign[=] tuple[[<ast.Constant object at 0x7da20c6c7280>, <ast.Constant object at 0x7da20c6c5540>]]
name[self].data_dir assign[=] call[name[join], parameter[name[here], constant[mimesis], constant[data]]]
name[self].before_total assign[=] constant[0]
name[self].after_total assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da20c6c6080>, <ast.Name object at 0x7da20c6c5390>, <ast.Name object at 0x7da20c6c4160>]]] in starred[call[name[os].walk, parameter[name[self].data_dir]]] begin[:]
for taget[name[file]] in starred[call[name[sorted], parameter[name[files]]]] begin[:]
if compare[call[call[name[splitext], parameter[name[file]]]][constant[1]] equal[==] constant[.json]] begin[:]
call[name[self].paths.append, parameter[call[name[join], parameter[call[name[relpath], parameter[name[root], name[self].data_dir]], name[file]]]]] | keyword[def] identifier[initialize_options] ( identifier[self] ):
literal[string]
identifier[self] . identifier[paths] =[]
identifier[self] . identifier[separators] =( literal[string] , literal[string] )
identifier[self] . identifier[data_dir] = identifier[join] ( identifier[here] , literal[string] , literal[string] )
identifier[self] . identifier[before_total] = literal[int]
identifier[self] . identifier[after_total] = literal[int]
keyword[for] identifier[root] , identifier[_] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[self] . identifier[data_dir] ):
keyword[for] identifier[file] keyword[in] identifier[sorted] ( identifier[files] ):
keyword[if] identifier[splitext] ( identifier[file] )[ literal[int] ]== literal[string] :
identifier[self] . identifier[paths] . identifier[append] ( identifier[join] (
identifier[relpath] ( identifier[root] , identifier[self] . identifier[data_dir] ), identifier[file] )) | def initialize_options(self):
"""Find all files of all locales."""
self.paths = []
self.separators = (',', ':')
self.data_dir = join(here, 'mimesis', 'data')
self.before_total = 0
self.after_total = 0
for (root, _, files) in os.walk(self.data_dir):
for file in sorted(files):
if splitext(file)[1] == '.json':
self.paths.append(join(relpath(root, self.data_dir), file)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file']] # depends on [control=['for'], data=[]] |
def as_dict(df, ix=':'):
    """Convert *df* to record dict(s), adding a ``datetime`` field for datetime indexes.
    :param df: pandas DataFrame to convert. When its index is a
        ``DatetimeIndex`` the frame is mutated in place: a ``datetime``
        column holding the index values is added before conversion.
    :param ix: record selector. The default ``':'`` returns the full list
        of records; an integer or slice selects from that list.
    :return: list of record dicts (for ``':'`` or a slice) or a single
        record dict (for an integer ``ix``).
    """
    if isinstance(df.index, pd.DatetimeIndex):
        df['datetime'] = df.index
    records = df.to_dict(orient='records')
    # Bug fix: the original indexed the records list with the literal
    # string ':' (the default), which raises TypeError. Treat ':' as
    # "return all records" instead.
    if isinstance(ix, str) and ix == ':':
        return records
    return records[ix]
constant[ converts df to dict and adds a datetime field if df is datetime ]
if call[name[isinstance], parameter[name[df].index, name[pd].DatetimeIndex]] begin[:]
call[name[df]][constant[datetime]] assign[=] name[df].index
return[call[call[name[df].to_dict, parameter[]]][name[ix]]] | keyword[def] identifier[as_dict] ( identifier[df] , identifier[ix] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[df] . identifier[index] , identifier[pd] . identifier[DatetimeIndex] ):
identifier[df] [ literal[string] ]= identifier[df] . identifier[index]
keyword[return] identifier[df] . identifier[to_dict] ( identifier[orient] = literal[string] )[ identifier[ix] ] | def as_dict(df, ix=':'):
""" converts df to dict and adds a datetime field if df is datetime """
if isinstance(df.index, pd.DatetimeIndex):
df['datetime'] = df.index # depends on [control=['if'], data=[]]
return df.to_dict(orient='records')[ix] |
def soap_fault(message=None, actor=None, code=None, detail=None):
    """Build a SOAP Fault message string.
    :param message: Human readable error message (faultstring)
    :param actor: Who discovered the error (faultactor)
    :param code: Error code (faultcode)
    :param detail: More specific error message (detail)
    :return: the SOAP Fault rendered as a string
    """
    # Each sub-element is created only when the corresponding argument
    # is truthy; otherwise it stays None and is omitted from the fault.
    fault_string = soapenv.Fault_faultstring(text=message) if message else None
    fault_actor = soapenv.Fault_faultactor(text=actor) if actor else None
    fault_code = soapenv.Fault_faultcode(text=code) if code else None
    fault_detail = soapenv.Fault_detail(text=detail) if detail else None
    fault = soapenv.Fault(
        faultcode=fault_code,
        faultstring=fault_string,
        faultactor=fault_actor,
        detail=fault_detail,
    )
    return "%s" % fault
constant[ Create a SOAP Fault message
:param message: Human readable error message
:param actor: Who discovered the error
:param code: Error code
:param detail: More specific error message
:return: A SOAP Fault message as a string
]
variable[_string] assign[=] constant[None]
if name[message] begin[:]
variable[_string] assign[=] call[name[soapenv].Fault_faultstring, parameter[]]
if name[actor] begin[:]
variable[_actor] assign[=] call[name[soapenv].Fault_faultactor, parameter[]]
if name[code] begin[:]
variable[_code] assign[=] call[name[soapenv].Fault_faultcode, parameter[]]
if name[detail] begin[:]
variable[_detail] assign[=] call[name[soapenv].Fault_detail, parameter[]]
variable[fault] assign[=] call[name[soapenv].Fault, parameter[]]
return[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[fault]]] | keyword[def] identifier[soap_fault] ( identifier[message] = keyword[None] , identifier[actor] = keyword[None] , identifier[code] = keyword[None] , identifier[detail] = keyword[None] ):
literal[string]
identifier[_string] = identifier[_actor] = identifier[_code] = identifier[_detail] = keyword[None]
keyword[if] identifier[message] :
identifier[_string] = identifier[soapenv] . identifier[Fault_faultstring] ( identifier[text] = identifier[message] )
keyword[if] identifier[actor] :
identifier[_actor] = identifier[soapenv] . identifier[Fault_faultactor] ( identifier[text] = identifier[actor] )
keyword[if] identifier[code] :
identifier[_code] = identifier[soapenv] . identifier[Fault_faultcode] ( identifier[text] = identifier[code] )
keyword[if] identifier[detail] :
identifier[_detail] = identifier[soapenv] . identifier[Fault_detail] ( identifier[text] = identifier[detail] )
identifier[fault] = identifier[soapenv] . identifier[Fault] (
identifier[faultcode] = identifier[_code] ,
identifier[faultstring] = identifier[_string] ,
identifier[faultactor] = identifier[_actor] ,
identifier[detail] = identifier[_detail] ,
)
keyword[return] literal[string] % identifier[fault] | def soap_fault(message=None, actor=None, code=None, detail=None):
""" Create a SOAP Fault message
:param message: Human readable error message
:param actor: Who discovered the error
:param code: Error code
:param detail: More specific error message
:return: A SOAP Fault message as a string
"""
_string = _actor = _code = _detail = None
if message:
_string = soapenv.Fault_faultstring(text=message) # depends on [control=['if'], data=[]]
if actor:
_actor = soapenv.Fault_faultactor(text=actor) # depends on [control=['if'], data=[]]
if code:
_code = soapenv.Fault_faultcode(text=code) # depends on [control=['if'], data=[]]
if detail:
_detail = soapenv.Fault_detail(text=detail) # depends on [control=['if'], data=[]]
fault = soapenv.Fault(faultcode=_code, faultstring=_string, faultactor=_actor, detail=_detail)
return '%s' % fault |
def scan_diff(
self,
diff,
baseline_filename='',
last_commit_hash='',
repo_name='',
):
"""For optimization purposes, our scanning strategy focuses on looking
at incremental differences, rather than re-scanning the codebase every time.
This function supports this, and adds information to self.data.
:type diff: str
:param diff: diff string.
e.g. The output of `git diff <fileA> <fileB>`
:type baseline_filename: str
:param baseline_filename: if there are any baseline secrets, then the baseline
file will have hashes in them. By specifying it, we
can skip this clear exception.
:type last_commit_hash: str
:param last_commit_hash: used for logging only -- the last commit hash we saved
:type repo_name: str
:param repo_name: used for logging only -- the name of the repo
"""
# Local imports, so that we don't need to require unidiff for versions of
# detect-secrets that don't use it.
from unidiff import PatchSet
from unidiff.errors import UnidiffParseError
try:
patch_set = PatchSet.from_string(diff)
except UnidiffParseError: # pragma: no cover
alert = {
'alert': 'UnidiffParseError',
'hash': last_commit_hash,
'repo_name': repo_name,
}
log.error(alert)
raise
if self.exclude_files:
regex = re.compile(self.exclude_files, re.IGNORECASE)
for patch_file in patch_set:
filename = patch_file.path
# If the file matches the exclude_files, we skip it
if self.exclude_files and regex.search(filename):
continue
if filename == baseline_filename:
continue
for results, plugin in self._results_accumulator(filename):
results.update(
self._extract_secrets_from_patch(
patch_file,
plugin,
filename,
),
) | def function[scan_diff, parameter[self, diff, baseline_filename, last_commit_hash, repo_name]]:
constant[For optimization purposes, our scanning strategy focuses on looking
at incremental differences, rather than re-scanning the codebase every time.
This function supports this, and adds information to self.data.
:type diff: str
:param diff: diff string.
e.g. The output of `git diff <fileA> <fileB>`
:type baseline_filename: str
:param baseline_filename: if there are any baseline secrets, then the baseline
file will have hashes in them. By specifying it, we
can skip this clear exception.
:type last_commit_hash: str
:param last_commit_hash: used for logging only -- the last commit hash we saved
:type repo_name: str
:param repo_name: used for logging only -- the name of the repo
]
from relative_module[unidiff] import module[PatchSet]
from relative_module[unidiff.errors] import module[UnidiffParseError]
<ast.Try object at 0x7da20c6e4af0>
if name[self].exclude_files begin[:]
variable[regex] assign[=] call[name[re].compile, parameter[name[self].exclude_files, name[re].IGNORECASE]]
for taget[name[patch_file]] in starred[name[patch_set]] begin[:]
variable[filename] assign[=] name[patch_file].path
if <ast.BoolOp object at 0x7da20c6e76a0> begin[:]
continue
if compare[name[filename] equal[==] name[baseline_filename]] begin[:]
continue
for taget[tuple[[<ast.Name object at 0x7da18f58e050>, <ast.Name object at 0x7da18f58c580>]]] in starred[call[name[self]._results_accumulator, parameter[name[filename]]]] begin[:]
call[name[results].update, parameter[call[name[self]._extract_secrets_from_patch, parameter[name[patch_file], name[plugin], name[filename]]]]] | keyword[def] identifier[scan_diff] (
identifier[self] ,
identifier[diff] ,
identifier[baseline_filename] = literal[string] ,
identifier[last_commit_hash] = literal[string] ,
identifier[repo_name] = literal[string] ,
):
literal[string]
keyword[from] identifier[unidiff] keyword[import] identifier[PatchSet]
keyword[from] identifier[unidiff] . identifier[errors] keyword[import] identifier[UnidiffParseError]
keyword[try] :
identifier[patch_set] = identifier[PatchSet] . identifier[from_string] ( identifier[diff] )
keyword[except] identifier[UnidiffParseError] :
identifier[alert] ={
literal[string] : literal[string] ,
literal[string] : identifier[last_commit_hash] ,
literal[string] : identifier[repo_name] ,
}
identifier[log] . identifier[error] ( identifier[alert] )
keyword[raise]
keyword[if] identifier[self] . identifier[exclude_files] :
identifier[regex] = identifier[re] . identifier[compile] ( identifier[self] . identifier[exclude_files] , identifier[re] . identifier[IGNORECASE] )
keyword[for] identifier[patch_file] keyword[in] identifier[patch_set] :
identifier[filename] = identifier[patch_file] . identifier[path]
keyword[if] identifier[self] . identifier[exclude_files] keyword[and] identifier[regex] . identifier[search] ( identifier[filename] ):
keyword[continue]
keyword[if] identifier[filename] == identifier[baseline_filename] :
keyword[continue]
keyword[for] identifier[results] , identifier[plugin] keyword[in] identifier[self] . identifier[_results_accumulator] ( identifier[filename] ):
identifier[results] . identifier[update] (
identifier[self] . identifier[_extract_secrets_from_patch] (
identifier[patch_file] ,
identifier[plugin] ,
identifier[filename] ,
),
) | def scan_diff(self, diff, baseline_filename='', last_commit_hash='', repo_name=''):
"""For optimization purposes, our scanning strategy focuses on looking
at incremental differences, rather than re-scanning the codebase every time.
This function supports this, and adds information to self.data.
:type diff: str
:param diff: diff string.
e.g. The output of `git diff <fileA> <fileB>`
:type baseline_filename: str
:param baseline_filename: if there are any baseline secrets, then the baseline
file will have hashes in them. By specifying it, we
can skip this clear exception.
:type last_commit_hash: str
:param last_commit_hash: used for logging only -- the last commit hash we saved
:type repo_name: str
:param repo_name: used for logging only -- the name of the repo
"""
# Local imports, so that we don't need to require unidiff for versions of
# detect-secrets that don't use it.
from unidiff import PatchSet
from unidiff.errors import UnidiffParseError
try:
patch_set = PatchSet.from_string(diff) # depends on [control=['try'], data=[]]
except UnidiffParseError: # pragma: no cover
alert = {'alert': 'UnidiffParseError', 'hash': last_commit_hash, 'repo_name': repo_name}
log.error(alert)
raise # depends on [control=['except'], data=[]]
if self.exclude_files:
regex = re.compile(self.exclude_files, re.IGNORECASE) # depends on [control=['if'], data=[]]
for patch_file in patch_set:
filename = patch_file.path
# If the file matches the exclude_files, we skip it
if self.exclude_files and regex.search(filename):
continue # depends on [control=['if'], data=[]]
if filename == baseline_filename:
continue # depends on [control=['if'], data=[]]
for (results, plugin) in self._results_accumulator(filename):
results.update(self._extract_secrets_from_patch(patch_file, plugin, filename)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['patch_file']] |
def show_bgp_peer(self, peer_id, **_params):
"""Fetches information of a certain BGP peer."""
return self.get(self.bgp_peer_path % peer_id,
params=_params) | def function[show_bgp_peer, parameter[self, peer_id]]:
constant[Fetches information of a certain BGP peer.]
return[call[name[self].get, parameter[binary_operation[name[self].bgp_peer_path <ast.Mod object at 0x7da2590d6920> name[peer_id]]]]] | keyword[def] identifier[show_bgp_peer] ( identifier[self] , identifier[peer_id] ,** identifier[_params] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( identifier[self] . identifier[bgp_peer_path] % identifier[peer_id] ,
identifier[params] = identifier[_params] ) | def show_bgp_peer(self, peer_id, **_params):
"""Fetches information of a certain BGP peer."""
return self.get(self.bgp_peer_path % peer_id, params=_params) |
def pip_compile(*packages: str):
    """Run pip-compile to pin down packages, also resolve their transitive dependencies.
    Writes the given package specs to a ``requirements.in`` file inside a
    throw-away working directory, invokes pip-compile on it, and returns
    the decoded output.
    :raises ThothPipCompileError: if pip-compile raises or exits non-zero
    """
    requirements_text = "\n".join(packages)
    with tempfile.TemporaryDirectory() as scratch_dir, cwd(scratch_dir):
        with open("requirements.in", "w") as spec_file:
            spec_file.write(requirements_text)
        try:
            result = CliRunner().invoke(cli, ["requirements.in"], catch_exceptions=False)
        except Exception as exc:
            raise ThothPipCompileError(str(exc)) from exc
        if result.exit_code != 0:
            raise ThothPipCompileError(
                f"pip-compile returned non-zero ({result.exit_code:d}) "
                f"output: {result.output_bytes.decode():s}"
            )
    return result.output_bytes.decode()
constant[Run pip-compile to pin down packages, also resolve their transitive dependencies.]
variable[result] assign[=] constant[None]
variable[packages] assign[=] call[constant[
].join, parameter[name[packages]]]
with call[name[tempfile].TemporaryDirectory, parameter[]] begin[:]
with call[name[open], parameter[constant[requirements.in], constant[w]]] begin[:]
call[name[requirements_file].write, parameter[name[packages]]]
variable[runner] assign[=] call[name[CliRunner], parameter[]]
<ast.Try object at 0x7da1b11a7df0>
if compare[name[result].exit_code not_equal[!=] constant[0]] begin[:]
variable[error_msg] assign[=] <ast.JoinedStr object at 0x7da1b11a62c0>
<ast.Raise object at 0x7da1b11a5330>
return[call[name[result].output_bytes.decode, parameter[]]] | keyword[def] identifier[pip_compile] (* identifier[packages] : identifier[str] ):
literal[string]
identifier[result] = keyword[None]
identifier[packages] = literal[string] . identifier[join] ( identifier[packages] )
keyword[with] identifier[tempfile] . identifier[TemporaryDirectory] () keyword[as] identifier[tmp_dirname] , identifier[cwd] ( identifier[tmp_dirname] ):
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[requirements_file] :
identifier[requirements_file] . identifier[write] ( identifier[packages] )
identifier[runner] = identifier[CliRunner] ()
keyword[try] :
identifier[result] = identifier[runner] . identifier[invoke] ( identifier[cli] ,[ literal[string] ], identifier[catch_exceptions] = keyword[False] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[raise] identifier[ThothPipCompileError] ( identifier[str] ( identifier[exc] )) keyword[from] identifier[exc]
keyword[if] identifier[result] . identifier[exit_code] != literal[int] :
identifier[error_msg] =(
literal[string] literal[string]
)
keyword[raise] identifier[ThothPipCompileError] ( identifier[error_msg] )
keyword[return] identifier[result] . identifier[output_bytes] . identifier[decode] () | def pip_compile(*packages: str):
"""Run pip-compile to pin down packages, also resolve their transitive dependencies."""
result = None
packages = '\n'.join(packages)
with tempfile.TemporaryDirectory() as tmp_dirname, cwd(tmp_dirname):
with open('requirements.in', 'w') as requirements_file:
requirements_file.write(packages) # depends on [control=['with'], data=['requirements_file']]
runner = CliRunner()
try:
result = runner.invoke(cli, ['requirements.in'], catch_exceptions=False) # depends on [control=['try'], data=[]]
except Exception as exc:
raise ThothPipCompileError(str(exc)) from exc # depends on [control=['except'], data=['exc']]
if result.exit_code != 0:
error_msg = f'pip-compile returned non-zero ({result.exit_code:d}) output: {result.output_bytes.decode():s}'
raise ThothPipCompileError(error_msg) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
return result.output_bytes.decode() |
def error(self, msg, repeat_at_end=True):
"""Log a non-fatal error message to stderr, which is repeated at program termination.
:param msg: Message to be printed.
:param repeat_at_end: Set to false if the message should be printed, but not repeated at program termination."""
print(msg, file=sys.stderr)
if repeat_at_end:
self.error_log.append(msg) | def function[error, parameter[self, msg, repeat_at_end]]:
constant[Log a non-fatal error message to stderr, which is repeated at program termination.
:param msg: Message to be printed.
:param repeat_at_end: Set to false if the message should be printed, but not repeated at program termination.]
call[name[print], parameter[name[msg]]]
if name[repeat_at_end] begin[:]
call[name[self].error_log.append, parameter[name[msg]]] | keyword[def] identifier[error] ( identifier[self] , identifier[msg] , identifier[repeat_at_end] = keyword[True] ):
literal[string]
identifier[print] ( identifier[msg] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[if] identifier[repeat_at_end] :
identifier[self] . identifier[error_log] . identifier[append] ( identifier[msg] ) | def error(self, msg, repeat_at_end=True):
"""Log a non-fatal error message to stderr, which is repeated at program termination.
:param msg: Message to be printed.
:param repeat_at_end: Set to false if the message should be printed, but not repeated at program termination."""
print(msg, file=sys.stderr)
if repeat_at_end:
self.error_log.append(msg) # depends on [control=['if'], data=[]] |
def _zeep_to_dict(cls, obj):
"""Convert a zeep object to a dictionary."""
res = serialize_object(obj)
res = cls._get_non_empty_dict(res)
return res | def function[_zeep_to_dict, parameter[cls, obj]]:
constant[Convert a zeep object to a dictionary.]
variable[res] assign[=] call[name[serialize_object], parameter[name[obj]]]
variable[res] assign[=] call[name[cls]._get_non_empty_dict, parameter[name[res]]]
return[name[res]] | keyword[def] identifier[_zeep_to_dict] ( identifier[cls] , identifier[obj] ):
literal[string]
identifier[res] = identifier[serialize_object] ( identifier[obj] )
identifier[res] = identifier[cls] . identifier[_get_non_empty_dict] ( identifier[res] )
keyword[return] identifier[res] | def _zeep_to_dict(cls, obj):
"""Convert a zeep object to a dictionary."""
res = serialize_object(obj)
res = cls._get_non_empty_dict(res)
return res |
def rename(self, new_name):
"""
Rename this collection
"""
new_collection = Collection(self.db, new_name, create=False)
assert not new_collection.exists()
self.db.execute("alter table %s rename to %s" % (self.name, new_name))
self.name = new_name | def function[rename, parameter[self, new_name]]:
constant[
Rename this collection
]
variable[new_collection] assign[=] call[name[Collection], parameter[name[self].db, name[new_name]]]
assert[<ast.UnaryOp object at 0x7da18dc9bd00>]
call[name[self].db.execute, parameter[binary_operation[constant[alter table %s rename to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18dc98be0>, <ast.Name object at 0x7da1b0a48670>]]]]]
name[self].name assign[=] name[new_name] | keyword[def] identifier[rename] ( identifier[self] , identifier[new_name] ):
literal[string]
identifier[new_collection] = identifier[Collection] ( identifier[self] . identifier[db] , identifier[new_name] , identifier[create] = keyword[False] )
keyword[assert] keyword[not] identifier[new_collection] . identifier[exists] ()
identifier[self] . identifier[db] . identifier[execute] ( literal[string] %( identifier[self] . identifier[name] , identifier[new_name] ))
identifier[self] . identifier[name] = identifier[new_name] | def rename(self, new_name):
"""
Rename this collection
"""
new_collection = Collection(self.db, new_name, create=False)
assert not new_collection.exists()
self.db.execute('alter table %s rename to %s' % (self.name, new_name))
self.name = new_name |
def idgen(idbase, tint=None, bits=64):
    '''
    Generate an IRI as a hash of given information, or just make one up if None given
    idbase -- Base URI for generating links (IDs are absolutized against it when set)
    tint -- String that affects the sequence of IDs generated if sent None
    bits -- size of the hash used for each generated ID
    Coroutine protocol: ``next(g)`` (i.e. ``g.send(None)``) derives the ID
    from an internal counter, while ``g.send(text)`` hashes *text* itself,
    so sending the same value always yields the same ID (see doctests).
    >>> from bibframe.contrib.datachefids import idgen
    >>> g = idgen(None)
    >>> next(g) #Or g.send(None)
    'gKNG1b7eySo'
    >>> next(g)
    'cXx7iv67-3E'
    >>> g.send('spam')
    'OZxOEos8e-k'
    >>> next(g)
    'mCFhsaWQ1_0'
    >>> g.send('spam')
    'OZxOEos8e-k'
    >>> g.send('eggs')
    'xQAd4Guk040'
    >>> g.send('')
    'AAAAAAAAAAA'
    '''
    counter = -1
    to_hash = None
    while True:
        if to_hash is None:
            # Nothing was sent in: hash the running counter, optionally
            # salted with tint so different tints give different sequences.
            to_hash = str(counter)
            if tint: to_hash += tint
        to_hash = simple_hashstring(to_hash, bits=bits)
        # Yield the ID (absolutized when idbase is set) and receive the next
        # value to hash; a plain next() sends None, selecting counter mode.
        to_hash = yield iri.absolutize(to_hash, idbase) if idbase else to_hash
        # The counter advances every iteration, sent value or not.
        counter += 1
constant[
Generate an IRI as a hash of given information, or just make one up if None given
idbase -- Base URI for generating links
tint -- String that affects the sequence of IDs generated if sent None
>>> from bibframe.contrib.datachefids import idgen
>>> g = idgen(None)
>>> next(g) #Or g.send(None)
'gKNG1b7eySo'
>>> next(g)
'cXx7iv67-3E'
>>> g.send('spam')
'OZxOEos8e-k'
>>> next(g)
'mCFhsaWQ1_0'
>>> g.send('spam')
'OZxOEos8e-k'
>>> g.send('eggs')
'xQAd4Guk040'
>>> g.send('')
'AAAAAAAAAAA'
]
variable[counter] assign[=] <ast.UnaryOp object at 0x7da20e955d50>
variable[to_hash] assign[=] constant[None]
while constant[True] begin[:]
if compare[name[to_hash] is constant[None]] begin[:]
variable[to_hash] assign[=] call[name[str], parameter[name[counter]]]
if name[tint] begin[:]
<ast.AugAssign object at 0x7da20e957880>
variable[to_hash] assign[=] call[name[simple_hashstring], parameter[name[to_hash]]]
variable[to_hash] assign[=] <ast.Yield object at 0x7da20e957c40>
<ast.AugAssign object at 0x7da20e957b20> | keyword[def] identifier[idgen] ( identifier[idbase] , identifier[tint] = keyword[None] , identifier[bits] = literal[int] ):
literal[string]
identifier[counter] =- literal[int]
identifier[to_hash] = keyword[None]
keyword[while] keyword[True] :
keyword[if] identifier[to_hash] keyword[is] keyword[None] :
identifier[to_hash] = identifier[str] ( identifier[counter] )
keyword[if] identifier[tint] : identifier[to_hash] += identifier[tint]
identifier[to_hash] = identifier[simple_hashstring] ( identifier[to_hash] , identifier[bits] = identifier[bits] )
identifier[to_hash] = keyword[yield] identifier[iri] . identifier[absolutize] ( identifier[to_hash] , identifier[idbase] ) keyword[if] identifier[idbase] keyword[else] identifier[to_hash]
identifier[counter] += literal[int] | def idgen(idbase, tint=None, bits=64):
"""
Generate an IRI as a hash of given information, or just make one up if None given
idbase -- Base URI for generating links
tint -- String that affects the sequence of IDs generated if sent None
>>> from bibframe.contrib.datachefids import idgen
>>> g = idgen(None)
>>> next(g) #Or g.send(None)
'gKNG1b7eySo'
>>> next(g)
'cXx7iv67-3E'
>>> g.send('spam')
'OZxOEos8e-k'
>>> next(g)
'mCFhsaWQ1_0'
>>> g.send('spam')
'OZxOEos8e-k'
>>> g.send('eggs')
'xQAd4Guk040'
>>> g.send('')
'AAAAAAAAAAA'
"""
counter = -1
to_hash = None
while True:
if to_hash is None:
to_hash = str(counter)
if tint:
to_hash += tint # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['to_hash']]
to_hash = simple_hashstring(to_hash, bits=bits)
to_hash = (yield (iri.absolutize(to_hash, idbase) if idbase else to_hash))
counter += 1 # depends on [control=['while'], data=[]] |
def fail_steamid(channel):
    """Creates an embed UI for invalid SteamIDs
    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
    Returns:
        ui (ui_embed.UI): The embed UI object
    """
    # Build and return the error embed directly; the text explains how a
    # user can locate or customize their SteamID.
    return ui_embed.UI(
        channel,
        "That SteamID doesn't exist.",
        "You can get your SteamID by going to your profile page and looking at the url, "
        "or you can set a custom ID by going to edit profile on your profile page.",
        modulename=modulename,
        colour=0x0088FF,
    )
constant[Creates an embed UI for invalid SteamIDs
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
]
variable[gui] assign[=] call[name[ui_embed].UI, parameter[name[channel], constant[That SteamID doesn't exist.], constant[You can get your SteamID by going to your profile page and looking at the url, or you can set a custom ID by going to edit profile on your profile page.]]]
return[name[gui]] | keyword[def] identifier[fail_steamid] ( identifier[channel] ):
literal[string]
identifier[gui] = identifier[ui_embed] . identifier[UI] (
identifier[channel] ,
literal[string] ,
literal[string]
literal[string] ,
identifier[modulename] = identifier[modulename] ,
identifier[colour] = literal[int]
)
keyword[return] identifier[gui] | def fail_steamid(channel):
"""Creates an embed UI for invalid SteamIDs
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
"""
gui = ui_embed.UI(channel, "That SteamID doesn't exist.", 'You can get your SteamID by going to your profile page and looking at the url, or you can set a custom ID by going to edit profile on your profile page.', modulename=modulename, colour=35071)
return gui |
def poll_ignore_interrupts(fds, timeout=None):
    '''Simple wrapper around poll to register file descriptors and
    ignore signals.
    Returns the list of descriptors that became ready, or an empty list
    if the timeout elapsed. ``timeout`` is in seconds; None blocks forever.
    '''
    deadline = None if timeout is None else time.time() + timeout
    event_mask = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
    poller = select.poll()
    for fd in fds:
        poller.register(fd, event_mask)
    while True:
        try:
            wait_ms = None if timeout is None else timeout * 1000
            return [fd for fd, _ in poller.poll(wait_ms)]
        except InterruptedError:
            err = sys.exc_info()[1]
            if err.args[0] != errno.EINTR:
                # Not a signal interruption -- propagate the real error.
                raise
            # Interrupted by a signal: retry with the remaining time.
            if deadline is not None:
                timeout = deadline - time.time()
                if timeout < 0:
                    return []
constant[Simple wrapper around poll to register file descriptors and
ignore signals.]
if compare[name[timeout] is_not constant[None]] begin[:]
variable[end_time] assign[=] binary_operation[call[name[time].time, parameter[]] + name[timeout]]
variable[poller] assign[=] call[name[select].poll, parameter[]]
for taget[name[fd]] in starred[name[fds]] begin[:]
call[name[poller].register, parameter[name[fd], binary_operation[binary_operation[binary_operation[name[select].POLLIN <ast.BitOr object at 0x7da2590d6aa0> name[select].POLLPRI] <ast.BitOr object at 0x7da2590d6aa0> name[select].POLLHUP] <ast.BitOr object at 0x7da2590d6aa0> name[select].POLLERR]]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b2347520> | keyword[def] identifier[poll_ignore_interrupts] ( identifier[fds] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] :
identifier[end_time] = identifier[time] . identifier[time] ()+ identifier[timeout]
identifier[poller] = identifier[select] . identifier[poll] ()
keyword[for] identifier[fd] keyword[in] identifier[fds] :
identifier[poller] . identifier[register] ( identifier[fd] , identifier[select] . identifier[POLLIN] | identifier[select] . identifier[POLLPRI] | identifier[select] . identifier[POLLHUP] | identifier[select] . identifier[POLLERR] )
keyword[while] keyword[True] :
keyword[try] :
identifier[timeout_ms] = keyword[None] keyword[if] identifier[timeout] keyword[is] keyword[None] keyword[else] identifier[timeout] * literal[int]
identifier[results] = identifier[poller] . identifier[poll] ( identifier[timeout_ms] )
keyword[return] [ identifier[afd] keyword[for] identifier[afd] , identifier[_] keyword[in] identifier[results] ]
keyword[except] identifier[InterruptedError] :
identifier[err] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
keyword[if] identifier[err] . identifier[args] [ literal[int] ]== identifier[errno] . identifier[EINTR] :
keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] :
identifier[timeout] = identifier[end_time] - identifier[time] . identifier[time] ()
keyword[if] identifier[timeout] < literal[int] :
keyword[return] []
keyword[else] :
keyword[raise] | def poll_ignore_interrupts(fds, timeout=None):
"""Simple wrapper around poll to register file descriptors and
ignore signals."""
if timeout is not None:
end_time = time.time() + timeout # depends on [control=['if'], data=['timeout']]
poller = select.poll()
for fd in fds:
poller.register(fd, select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR) # depends on [control=['for'], data=['fd']]
while True:
try:
timeout_ms = None if timeout is None else timeout * 1000
results = poller.poll(timeout_ms)
return [afd for (afd, _) in results] # depends on [control=['try'], data=[]]
except InterruptedError:
err = sys.exc_info()[1]
if err.args[0] == errno.EINTR:
# if we loop back we have to subtract the
# amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['timeout']] # depends on [control=['if'], data=[]]
else:
# something else caused the select.error, so
# this actually is an exception.
raise # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def get_info(self):
    """Return plugin information.

    The result maps this plugin's name to a dict with its version, the
    merged info of every sub-plugin, and the relevant config parameters.
    """
    # Collect and merge the info dicts of all registered sub-plugins.
    sub_plugin_info = {}
    for plugin in self.plugins:
        sub_plugin_info.update(plugin.get_info())

    details = {
        "version": self.get_version(),
        "sub-plugins": sub_plugin_info,
        "params": {"multi_plugins": self.conf['multi_plugins']},
    }
    return {self.get_plugin_name(): details}
constant[
Return plugin information.
]
variable[plugin_infos] assign[=] dictionary[[], []]
for taget[name[pc]] in starred[name[self].plugins] begin[:]
call[name[plugin_infos].update, parameter[call[name[pc].get_info, parameter[]]]]
return[dictionary[[<ast.Call object at 0x7da18dc997e0>], [<ast.Dict object at 0x7da18dc983d0>]]] | keyword[def] identifier[get_info] ( identifier[self] ):
literal[string]
identifier[plugin_infos] ={}
keyword[for] identifier[pc] keyword[in] identifier[self] . identifier[plugins] :
identifier[plugin_infos] . identifier[update] ( identifier[pc] . identifier[get_info] ())
keyword[return] {
identifier[self] . identifier[get_plugin_name] ():{
literal[string] : identifier[self] . identifier[get_version] (),
literal[string] : identifier[plugin_infos] ,
literal[string] :{
literal[string] : identifier[self] . identifier[conf] [ literal[string] ]
},
}
} | def get_info(self):
"""
Return plugin information.
"""
plugin_infos = {}
for pc in self.plugins:
plugin_infos.update(pc.get_info()) # depends on [control=['for'], data=['pc']]
return {self.get_plugin_name(): {'version': self.get_version(), 'sub-plugins': plugin_infos, 'params': {'multi_plugins': self.conf['multi_plugins']}}} |
def find_by_extension(extension):
    """
    Find and return a format by extension.

    :param extension: A string describing the extension of the format.
    :returns: The first entry of ``FORMATS`` whose ``extensions`` contains
        ``extension``.
    :raises UnknownFormat: If no registered format claims the extension.
    """
    # Loop variable renamed from ``format`` to ``fmt``: the original name
    # shadowed the ``format`` builtin.
    for fmt in FORMATS:
        if extension in fmt.extensions:
            return fmt
    raise UnknownFormat('No format found with extension "%s"' % extension)
constant[
Find and return a format by extension.
:param extension: A string describing the extension of the format.
]
for taget[name[format]] in starred[name[FORMATS]] begin[:]
if compare[name[extension] in name[format].extensions] begin[:]
return[name[format]]
<ast.Raise object at 0x7da1b27a6d40> | keyword[def] identifier[find_by_extension] ( identifier[extension] ):
literal[string]
keyword[for] identifier[format] keyword[in] identifier[FORMATS] :
keyword[if] identifier[extension] keyword[in] identifier[format] . identifier[extensions] :
keyword[return] identifier[format]
keyword[raise] identifier[UnknownFormat] ( literal[string] % identifier[extension] ) | def find_by_extension(extension):
"""
Find and return a format by extension.
:param extension: A string describing the extension of the format.
"""
for format in FORMATS:
if extension in format.extensions:
return format # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['format']]
raise UnknownFormat('No format found with extension "%s"' % extension) |
def context_complete(self, ctxt):
    """Check for missing data for the required context data.
    Set self.missing_data if it exists and return False.
    Set self.complete if no missing data and return True.
    """
    # Reset state on every invocation so stale results never leak through.
    self.complete = False
    self.missing_data = []
    for key, value in six.iteritems(ctxt):
        if value is not None and value != '':
            continue
        # Record each missing key at most once.
        if key not in self.missing_data:
            self.missing_data.append(key)
    if not self.missing_data:
        self.complete = True
    else:
        self.complete = False
        log('Missing required data: %s' % ' '.join(self.missing_data),
            level=INFO)
    return self.complete
constant[Check for missing data for the required context data.
Set self.missing_data if it exists and return False.
Set self.complete if no missing data and return True.
]
name[self].complete assign[=] constant[False]
name[self].missing_data assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b11a85e0>, <ast.Name object at 0x7da1b11aa170>]]] in starred[call[name[six].iteritems, parameter[name[ctxt]]]] begin[:]
if <ast.BoolOp object at 0x7da18bc71330> begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[self].missing_data] begin[:]
call[name[self].missing_data.append, parameter[name[k]]]
if name[self].missing_data begin[:]
name[self].complete assign[=] constant[False]
call[name[log], parameter[binary_operation[constant[Missing required data: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[name[self].missing_data]]]]]
return[name[self].complete] | keyword[def] identifier[context_complete] ( identifier[self] , identifier[ctxt] ):
literal[string]
identifier[self] . identifier[complete] = keyword[False]
identifier[self] . identifier[missing_data] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[ctxt] ):
keyword[if] identifier[v] keyword[is] keyword[None] keyword[or] identifier[v] == literal[string] :
keyword[if] identifier[k] keyword[not] keyword[in] identifier[self] . identifier[missing_data] :
identifier[self] . identifier[missing_data] . identifier[append] ( identifier[k] )
keyword[if] identifier[self] . identifier[missing_data] :
identifier[self] . identifier[complete] = keyword[False]
identifier[log] ( literal[string] % literal[string] . identifier[join] ( identifier[self] . identifier[missing_data] ),
identifier[level] = identifier[INFO] )
keyword[else] :
identifier[self] . identifier[complete] = keyword[True]
keyword[return] identifier[self] . identifier[complete] | def context_complete(self, ctxt):
"""Check for missing data for the required context data.
Set self.missing_data if it exists and return False.
Set self.complete if no missing data and return True.
"""
# Fresh start
self.complete = False
self.missing_data = []
for (k, v) in six.iteritems(ctxt):
if v is None or v == '':
if k not in self.missing_data:
self.missing_data.append(k) # depends on [control=['if'], data=['k']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if self.missing_data:
self.complete = False
log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) # depends on [control=['if'], data=[]]
else:
self.complete = True
return self.complete |
def Save(self, token=None):
    """Generate a histogram object and store in the specified attribute."""
    series_per_label = {}
    for day_bucket in self.active_days:
        for label in self.categories[day_bucket]:
            # One ClientGraphSeries accumulates all graphs for a label.
            series = series_per_label.setdefault(
                label, rdf_stats.ClientGraphSeries(report_type=self._report_type))
            graph = rdf_stats.Graph(
                title="%s day actives for %s label" % (day_bucket, label))
            counts = self.categories[day_bucket][label]
            for name, value in sorted(iteritems(counts)):
                graph.Append(label=name, y_value=value)
            series.graphs.Append(graph)
    for label, series in iteritems(series_per_label):
        client_report_utils.WriteGraphSeries(series, label, token=token)
constant[Generate a histogram object and store in the specified attribute.]
variable[graph_series_by_label] assign[=] dictionary[[], []]
for taget[name[active_time]] in starred[name[self].active_days] begin[:]
for taget[name[label]] in starred[call[name[self].categories][name[active_time]]] begin[:]
variable[graphs_for_label] assign[=] call[name[graph_series_by_label].setdefault, parameter[name[label], call[name[rdf_stats].ClientGraphSeries, parameter[]]]]
variable[graph] assign[=] call[name[rdf_stats].Graph, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1ced8a0>, <ast.Name object at 0x7da1b1cedff0>]]] in starred[call[name[sorted], parameter[call[name[iteritems], parameter[call[call[name[self].categories][name[active_time]]][name[label]]]]]]] begin[:]
call[name[graph].Append, parameter[]]
call[name[graphs_for_label].graphs.Append, parameter[name[graph]]]
for taget[tuple[[<ast.Name object at 0x7da1b1cedcc0>, <ast.Name object at 0x7da1b1cef010>]]] in starred[call[name[iteritems], parameter[name[graph_series_by_label]]]] begin[:]
call[name[client_report_utils].WriteGraphSeries, parameter[name[graph_series], name[label]]] | keyword[def] identifier[Save] ( identifier[self] , identifier[token] = keyword[None] ):
literal[string]
identifier[graph_series_by_label] ={}
keyword[for] identifier[active_time] keyword[in] identifier[self] . identifier[active_days] :
keyword[for] identifier[label] keyword[in] identifier[self] . identifier[categories] [ identifier[active_time] ]:
identifier[graphs_for_label] = identifier[graph_series_by_label] . identifier[setdefault] (
identifier[label] , identifier[rdf_stats] . identifier[ClientGraphSeries] ( identifier[report_type] = identifier[self] . identifier[_report_type] ))
identifier[graph] = identifier[rdf_stats] . identifier[Graph] ( identifier[title] = literal[string] %
( identifier[active_time] , identifier[label] ))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[iteritems] ( identifier[self] . identifier[categories] [ identifier[active_time] ][ identifier[label] ])):
identifier[graph] . identifier[Append] ( identifier[label] = identifier[k] , identifier[y_value] = identifier[v] )
identifier[graphs_for_label] . identifier[graphs] . identifier[Append] ( identifier[graph] )
keyword[for] identifier[label] , identifier[graph_series] keyword[in] identifier[iteritems] ( identifier[graph_series_by_label] ):
identifier[client_report_utils] . identifier[WriteGraphSeries] ( identifier[graph_series] , identifier[label] , identifier[token] = identifier[token] ) | def Save(self, token=None):
"""Generate a histogram object and store in the specified attribute."""
graph_series_by_label = {}
for active_time in self.active_days:
for label in self.categories[active_time]:
graphs_for_label = graph_series_by_label.setdefault(label, rdf_stats.ClientGraphSeries(report_type=self._report_type))
graph = rdf_stats.Graph(title='%s day actives for %s label' % (active_time, label))
for (k, v) in sorted(iteritems(self.categories[active_time][label])):
graph.Append(label=k, y_value=v) # depends on [control=['for'], data=[]]
graphs_for_label.graphs.Append(graph) # depends on [control=['for'], data=['label']] # depends on [control=['for'], data=['active_time']]
for (label, graph_series) in iteritems(graph_series_by_label):
client_report_utils.WriteGraphSeries(graph_series, label, token=token) # depends on [control=['for'], data=[]] |
def key_pair(i, region):
    """Returns the ith default (aws_key_pair_name, key_pair_path).

    The 0th key pair omits the index from its name; later ones embed it.
    """
    # Build the key-pair name once and derive the .pem path from it,
    # instead of duplicating the two format strings per branch (DRY).
    if i == 0:
        name = "{}_{}".format(RAY, region)
    else:
        name = "{}_{}_{}".format(RAY, i, region)
    return (name, os.path.expanduser("~/.ssh/{}.pem".format(name)))
constant[Returns the ith default (aws_key_pair_name, key_pair_path).]
if compare[name[i] equal[==] constant[0]] begin[:]
return[tuple[[<ast.Call object at 0x7da20e963880>, <ast.Call object at 0x7da20e962980>]]]
return[tuple[[<ast.Call object at 0x7da20e962bc0>, <ast.Call object at 0x7da20e963df0>]]] | keyword[def] identifier[key_pair] ( identifier[i] , identifier[region] ):
literal[string]
keyword[if] identifier[i] == literal[int] :
keyword[return] ( literal[string] . identifier[format] ( identifier[RAY] , identifier[region] ),
identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] . identifier[format] ( identifier[RAY] , identifier[region] )))
keyword[return] ( literal[string] . identifier[format] ( identifier[RAY] , identifier[i] , identifier[region] ),
identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] . identifier[format] ( identifier[RAY] , identifier[i] , identifier[region] ))) | def key_pair(i, region):
"""Returns the ith default (aws_key_pair_name, key_pair_path)."""
if i == 0:
return ('{}_{}'.format(RAY, region), os.path.expanduser('~/.ssh/{}_{}.pem'.format(RAY, region))) # depends on [control=['if'], data=[]]
return ('{}_{}_{}'.format(RAY, i, region), os.path.expanduser('~/.ssh/{}_{}_{}.pem'.format(RAY, i, region))) |
def get_poll_option_formset(self, formset_class):
    """ Returns an instance of the poll option formset to be used in the view. """
    permission_handler = self.request.forum_permission_handler
    allowed = permission_handler.can_create_polls(self.get_forum(), self.request.user)
    if not allowed:
        # Polls are not permitted here; the view gets no formset.
        return None
    return formset_class(**self.get_poll_option_formset_kwargs())
constant[ Returns an instance of the poll option formset to be used in the view. ]
if call[name[self].request.forum_permission_handler.can_create_polls, parameter[call[name[self].get_forum, parameter[]], name[self].request.user]] begin[:]
return[call[name[formset_class], parameter[]]] | keyword[def] identifier[get_poll_option_formset] ( identifier[self] , identifier[formset_class] ):
literal[string]
keyword[if] identifier[self] . identifier[request] . identifier[forum_permission_handler] . identifier[can_create_polls] (
identifier[self] . identifier[get_forum] (), identifier[self] . identifier[request] . identifier[user] ,
):
keyword[return] identifier[formset_class] (** identifier[self] . identifier[get_poll_option_formset_kwargs] ()) | def get_poll_option_formset(self, formset_class):
""" Returns an instance of the poll option formset to be used in the view. """
if self.request.forum_permission_handler.can_create_polls(self.get_forum(), self.request.user):
return formset_class(**self.get_poll_option_formset_kwargs()) # depends on [control=['if'], data=[]] |
def custodian_archive(packages=None):
    """Create a lambda code archive for running custodian.

    Lambda archive currently always includes `c7n` and
    `pkg_resources`. Add additional packages in the mode block.

    Example policy that includes additional packages

    .. code-block:: yaml

        policy:
          name: lambda-archive-example
          resource: s3
          mode:
            packages:
              - botocore

    packages: List of additional packages to include in the lambda archive.
    """
    base_modules = {'c7n', 'pkg_resources'}
    if not packages:
        return PythonPackageArchive(*sorted(base_modules))
    # Merge the extras with the defaults, discarding falsy entries (None, '').
    merged = [name for name in base_modules.union(packages) if name]
    return PythonPackageArchive(*sorted(merged))
constant[Create a lambda code archive for running custodian.
Lambda archive currently always includes `c7n` and
`pkg_resources`. Add additional packages in the mode block.
Example policy that includes additional packages
.. code-block:: yaml
policy:
name: lambda-archive-example
resource: s3
mode:
packages:
- botocore
packages: List of additional packages to include in the lambda archive.
]
variable[modules] assign[=] <ast.Set object at 0x7da2054a4310>
if name[packages] begin[:]
variable[modules] assign[=] call[name[filter], parameter[constant[None], call[name[modules].union, parameter[name[packages]]]]]
return[call[name[PythonPackageArchive], parameter[<ast.Starred object at 0x7da2054a5600>]]] | keyword[def] identifier[custodian_archive] ( identifier[packages] = keyword[None] ):
literal[string]
identifier[modules] ={ literal[string] , literal[string] }
keyword[if] identifier[packages] :
identifier[modules] = identifier[filter] ( keyword[None] , identifier[modules] . identifier[union] ( identifier[packages] ))
keyword[return] identifier[PythonPackageArchive] (* identifier[sorted] ( identifier[modules] )) | def custodian_archive(packages=None):
"""Create a lambda code archive for running custodian.
Lambda archive currently always includes `c7n` and
`pkg_resources`. Add additional packages in the mode block.
Example policy that includes additional packages
.. code-block:: yaml
policy:
name: lambda-archive-example
resource: s3
mode:
packages:
- botocore
packages: List of additional packages to include in the lambda archive.
"""
modules = {'c7n', 'pkg_resources'}
if packages:
modules = filter(None, modules.union(packages)) # depends on [control=['if'], data=[]]
return PythonPackageArchive(*sorted(modules)) |
def _frame_title(self, key, group_size=2, separator='\n'):
    """
    Returns the formatted dimension group strings
    for a particular frame.

    key: the frame's key (a scalar is wrapped into a 1-tuple below).
    group_size: number of dimension labels joined per output group.
    separator: string inserted between the non-empty groups.
    """
    if self.layout_dimensions is not None:
        # Layout dimensions supply their own dimension/value pairs,
        # overriding the incoming key.
        dimensions, key = zip(*self.layout_dimensions.items())
    # NOTE: precedence here is (not dynamic and (...)) or subplot —
    # subplots always suppress the title.
    elif not self.dynamic and (not self.uniform or len(self) == 1) or self.subplot:
        return ''
    else:
        key = key if isinstance(key, tuple) else (key,)
        dimensions = self.dimensions
    dimension_labels = [dim.pprint_value_string(k) for dim, k in
                        zip(dimensions, key)]
    # range(len(dimension_labels)) overshoots the number of groups; the
    # surplus slices are empty strings and get filtered out below.
    groups = [', '.join(dimension_labels[i*group_size:(i+1)*group_size])
              for i in range(len(dimension_labels))]
    return util.bytes_to_unicode(separator.join(g for g in groups if g))
constant[
Returns the formatted dimension group strings
for a particular frame.
]
if compare[name[self].layout_dimensions is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da20c6ab430> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da20c6a82e0>]]
variable[dimension_labels] assign[=] <ast.ListComp object at 0x7da20c6a8ca0>
variable[groups] assign[=] <ast.ListComp object at 0x7da20c6a94e0>
return[call[name[util].bytes_to_unicode, parameter[call[name[separator].join, parameter[<ast.GeneratorExp object at 0x7da20c6a97e0>]]]]] | keyword[def] identifier[_frame_title] ( identifier[self] , identifier[key] , identifier[group_size] = literal[int] , identifier[separator] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[layout_dimensions] keyword[is] keyword[not] keyword[None] :
identifier[dimensions] , identifier[key] = identifier[zip] (* identifier[self] . identifier[layout_dimensions] . identifier[items] ())
keyword[elif] keyword[not] identifier[self] . identifier[dynamic] keyword[and] ( keyword[not] identifier[self] . identifier[uniform] keyword[or] identifier[len] ( identifier[self] )== literal[int] ) keyword[or] identifier[self] . identifier[subplot] :
keyword[return] literal[string]
keyword[else] :
identifier[key] = identifier[key] keyword[if] identifier[isinstance] ( identifier[key] , identifier[tuple] ) keyword[else] ( identifier[key] ,)
identifier[dimensions] = identifier[self] . identifier[dimensions]
identifier[dimension_labels] =[ identifier[dim] . identifier[pprint_value_string] ( identifier[k] ) keyword[for] identifier[dim] , identifier[k] keyword[in]
identifier[zip] ( identifier[dimensions] , identifier[key] )]
identifier[groups] =[ literal[string] . identifier[join] ( identifier[dimension_labels] [ identifier[i] * identifier[group_size] :( identifier[i] + literal[int] )* identifier[group_size] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[dimension_labels] ))]
keyword[return] identifier[util] . identifier[bytes_to_unicode] ( identifier[separator] . identifier[join] ( identifier[g] keyword[for] identifier[g] keyword[in] identifier[groups] keyword[if] identifier[g] )) | def _frame_title(self, key, group_size=2, separator='\n'):
"""
Returns the formatted dimension group strings
for a particular frame.
"""
if self.layout_dimensions is not None:
(dimensions, key) = zip(*self.layout_dimensions.items()) # depends on [control=['if'], data=[]]
elif not self.dynamic and (not self.uniform or len(self) == 1) or self.subplot:
return '' # depends on [control=['if'], data=[]]
else:
key = key if isinstance(key, tuple) else (key,)
dimensions = self.dimensions
dimension_labels = [dim.pprint_value_string(k) for (dim, k) in zip(dimensions, key)]
groups = [', '.join(dimension_labels[i * group_size:(i + 1) * group_size]) for i in range(len(dimension_labels))]
return util.bytes_to_unicode(separator.join((g for g in groups if g))) |
def load_vcf(
        path,
        genome=None,
        reference_vcf_key="reference",
        only_passing=True,
        allow_extended_nucleotides=False,
        include_info=True,
        chunk_size=10 ** 5,
        max_variants=None,
        sort_key=variant_ascending_position_sort_key,
        distinct=True):
    """
    Load reference name and Variant objects from the given VCF filename.

    Both local paths and HTTP(S) URLs are accepted: a remote VCF is first
    downloaded to a temporary file and then parsed from disk via a
    recursive call.

    Parameters
    ----------
    path : str
        Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).

    genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
        Optionally pass in a PyEnsembl Genome object, name of reference, or
        PyEnsembl release version to specify the reference associated with a
        VCF (otherwise infer reference from VCF using reference_vcf_key)

    reference_vcf_key : str, optional
        Name of metadata field which contains path to reference FASTA
        file (default = 'reference')

    only_passing : boolean, optional
        If true, any entries whose FILTER field is not one of "." or "PASS" is
        dropped.

    allow_extended_nucleotides : boolean, default False
        Allow characters other that A,C,T,G in the ref and alt strings.

    include_info : boolean, default True
        Whether to parse the INFO and per-sample columns. If you don't need
        these, set to False for faster parsing.

    chunk_size: int, optional
        Number of records to load in memory at once.

    max_variants : int, optional
        If specified, return only the first max_variants variants.

    sort_key : fn
        Function which maps each element to a sorting criterion.
        Set to None to not to sort the variants.

    distinct : boolean, default True
        Don't keep repeated variants
    """

    require_string(path, "Path or URL to VCF")
    parsed_path = parse_url_or_path(path)

    if parsed_path.scheme and parsed_path.scheme.lower() != "file":
        # pandas.read_table nominally supports HTTP, but it tends to crash on
        # large files and does not support gzip. Switching to the python-based
        # implementation of read_table (with engine="python") helps with some
        # issues but introduces a new set of problems (e.g. the dtype parameter
        # is not accepted). For these reasons, we're currently not attempting
        # to load VCFs over HTTP with pandas directly, and instead download it
        # to a temporary file and open that.
        (filename, headers) = urllib.request.urlretrieve(path)
        try:
            # The downloaded file has no file extension, which confuses pyvcf
            # for gziped files in Python 3. We rename it to have the correct
            # file extension.
            new_filename = "%s.%s" % (
                filename, parsed_path.path.split(".")[-1])
            os.rename(filename, new_filename)
            filename = new_filename

            # Re-enter with the local temp file; all other arguments are
            # forwarded unchanged.
            return load_vcf(
                filename,
                genome=genome,
                reference_vcf_key=reference_vcf_key,
                only_passing=only_passing,
                allow_extended_nucleotides=allow_extended_nucleotides,
                include_info=include_info,
                chunk_size=chunk_size,
                max_variants=max_variants,
                sort_key=sort_key,
                distinct=distinct)
        finally:
            # Always remove the temp file, even if parsing raised.
            logger.info("Removing temporary file: %s", filename)
            os.unlink(filename)

    # Loading a local file.
    # The file will be opened twice: first to parse the header with pyvcf, then
    # by pandas to read the data.

    # PyVCF reads the metadata immediately and stops at the first line with
    # data. We can close the file after that.
    handle = PyVCFReaderFromPathOrURL(path)
    handle.close()
    genome = infer_genome_from_vcf(
        genome,
        handle.vcf_reader,
        reference_vcf_key)

    df_iterator = read_vcf_into_dataframe(
        path,
        include_info=include_info,
        sample_names=handle.vcf_reader.samples if include_info else None,
        chunk_size=chunk_size)

    if include_info:
        def sample_info_parser(unparsed_sample_info_strings, format_string):
            """
            Given a format string like "GT:AD:ADP:DP:FS"
            and a list of sample info strings where each entry is like
            "0/1:3,22:T=3,G=22:25:33", return a dict that maps:
            sample name -> field name -> value. Uses pyvcf to parse the fields.
            """
            return pyvcf_calls_to_sample_info_list(
                handle.vcf_reader._parse_samples(
                    unparsed_sample_info_strings, format_string, None))
    else:
        sample_info_parser = None

    return dataframes_to_variant_collection(
        df_iterator,
        source_path=path,
        info_parser=handle.vcf_reader._parse_info if include_info else None,
        only_passing=only_passing,
        max_variants=max_variants,
        sample_names=handle.vcf_reader.samples if include_info else None,
        sample_info_parser=sample_info_parser,
        variant_kwargs={
            'ensembl': genome,
            'allow_extended_nucleotides': allow_extended_nucleotides},
        variant_collection_kwargs={
            'sort_key': sort_key,
            'distinct': distinct})
constant[
Load reference name and Variant objects from the given VCF filename.
Currently only local files are supported by this function (no http). If you
call this on an HTTP URL, it will fall back to `load_vcf`.
Parameters
----------
path : str
Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).
genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
Optionally pass in a PyEnsembl Genome object, name of reference, or
PyEnsembl release version to specify the reference associated with a
VCF (otherwise infer reference from VCF using reference_vcf_key)
reference_vcf_key : str, optional
Name of metadata field which contains path to reference FASTA
file (default = 'reference')
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
allow_extended_nucleotides : boolean, default False
Allow characters other that A,C,T,G in the ref and alt strings.
include_info : boolean, default True
Whether to parse the INFO and per-sample columns. If you don't need
these, set to False for faster parsing.
chunk_size: int, optional
Number of records to load in memory at once.
max_variants : int, optional
If specified, return only the first max_variants variants.
sort_key : fn
Function which maps each element to a sorting criterion.
Set to None to not to sort the variants.
distinct : boolean, default True
Don't keep repeated variants
]
call[name[require_string], parameter[name[path], constant[Path or URL to VCF]]]
variable[parsed_path] assign[=] call[name[parse_url_or_path], parameter[name[path]]]
if <ast.BoolOp object at 0x7da1b04ec5b0> begin[:]
<ast.Tuple object at 0x7da1b04ed300> assign[=] call[name[urllib].request.urlretrieve, parameter[name[path]]]
<ast.Try object at 0x7da1b04ec670>
variable[handle] assign[=] call[name[PyVCFReaderFromPathOrURL], parameter[name[path]]]
call[name[handle].close, parameter[]]
variable[genome] assign[=] call[name[infer_genome_from_vcf], parameter[name[genome], name[handle].vcf_reader, name[reference_vcf_key]]]
variable[df_iterator] assign[=] call[name[read_vcf_into_dataframe], parameter[name[path]]]
if name[include_info] begin[:]
def function[sample_info_parser, parameter[unparsed_sample_info_strings, format_string]]:
constant[
Given a format string like "GT:AD:ADP:DP:FS"
and a list of sample info strings where each entry is like
"0/1:3,22:T=3,G=22:25:33", return a dict that maps:
sample name -> field name -> value. Uses pyvcf to parse the fields.
]
return[call[name[pyvcf_calls_to_sample_info_list], parameter[call[name[handle].vcf_reader._parse_samples, parameter[name[unparsed_sample_info_strings], name[format_string], constant[None]]]]]]
return[call[name[dataframes_to_variant_collection], parameter[name[df_iterator]]]] | keyword[def] identifier[load_vcf] (
identifier[path] ,
identifier[genome] = keyword[None] ,
identifier[reference_vcf_key] = literal[string] ,
identifier[only_passing] = keyword[True] ,
identifier[allow_extended_nucleotides] = keyword[False] ,
identifier[include_info] = keyword[True] ,
identifier[chunk_size] = literal[int] ** literal[int] ,
identifier[max_variants] = keyword[None] ,
identifier[sort_key] = identifier[variant_ascending_position_sort_key] ,
identifier[distinct] = keyword[True] ):
literal[string]
identifier[require_string] ( identifier[path] , literal[string] )
identifier[parsed_path] = identifier[parse_url_or_path] ( identifier[path] )
keyword[if] identifier[parsed_path] . identifier[scheme] keyword[and] identifier[parsed_path] . identifier[scheme] . identifier[lower] ()!= literal[string] :
( identifier[filename] , identifier[headers] )= identifier[urllib] . identifier[request] . identifier[urlretrieve] ( identifier[path] )
keyword[try] :
identifier[new_filename] = literal[string] %(
identifier[filename] , identifier[parsed_path] . identifier[path] . identifier[split] ( literal[string] )[- literal[int] ])
identifier[os] . identifier[rename] ( identifier[filename] , identifier[new_filename] )
identifier[filename] = identifier[new_filename]
keyword[return] identifier[load_vcf] (
identifier[filename] ,
identifier[genome] = identifier[genome] ,
identifier[reference_vcf_key] = identifier[reference_vcf_key] ,
identifier[only_passing] = identifier[only_passing] ,
identifier[allow_extended_nucleotides] = identifier[allow_extended_nucleotides] ,
identifier[include_info] = identifier[include_info] ,
identifier[chunk_size] = identifier[chunk_size] ,
identifier[max_variants] = identifier[max_variants] ,
identifier[sort_key] = identifier[sort_key] ,
identifier[distinct] = identifier[distinct] )
keyword[finally] :
identifier[logger] . identifier[info] ( literal[string] , identifier[filename] )
identifier[os] . identifier[unlink] ( identifier[filename] )
identifier[handle] = identifier[PyVCFReaderFromPathOrURL] ( identifier[path] )
identifier[handle] . identifier[close] ()
identifier[genome] = identifier[infer_genome_from_vcf] (
identifier[genome] ,
identifier[handle] . identifier[vcf_reader] ,
identifier[reference_vcf_key] )
identifier[df_iterator] = identifier[read_vcf_into_dataframe] (
identifier[path] ,
identifier[include_info] = identifier[include_info] ,
identifier[sample_names] = identifier[handle] . identifier[vcf_reader] . identifier[samples] keyword[if] identifier[include_info] keyword[else] keyword[None] ,
identifier[chunk_size] = identifier[chunk_size] )
keyword[if] identifier[include_info] :
keyword[def] identifier[sample_info_parser] ( identifier[unparsed_sample_info_strings] , identifier[format_string] ):
literal[string]
keyword[return] identifier[pyvcf_calls_to_sample_info_list] (
identifier[handle] . identifier[vcf_reader] . identifier[_parse_samples] (
identifier[unparsed_sample_info_strings] , identifier[format_string] , keyword[None] ))
keyword[else] :
identifier[sample_info_parser] = keyword[None]
keyword[return] identifier[dataframes_to_variant_collection] (
identifier[df_iterator] ,
identifier[source_path] = identifier[path] ,
identifier[info_parser] = identifier[handle] . identifier[vcf_reader] . identifier[_parse_info] keyword[if] identifier[include_info] keyword[else] keyword[None] ,
identifier[only_passing] = identifier[only_passing] ,
identifier[max_variants] = identifier[max_variants] ,
identifier[sample_names] = identifier[handle] . identifier[vcf_reader] . identifier[samples] keyword[if] identifier[include_info] keyword[else] keyword[None] ,
identifier[sample_info_parser] = identifier[sample_info_parser] ,
identifier[variant_kwargs] ={
literal[string] : identifier[genome] ,
literal[string] : identifier[allow_extended_nucleotides] },
identifier[variant_collection_kwargs] ={
literal[string] : identifier[sort_key] ,
literal[string] : identifier[distinct] }) | def load_vcf(path, genome=None, reference_vcf_key='reference', only_passing=True, allow_extended_nucleotides=False, include_info=True, chunk_size=10 ** 5, max_variants=None, sort_key=variant_ascending_position_sort_key, distinct=True):
"""
Load reference name and Variant objects from the given VCF filename.
Currently only local files are supported by this function (no http). If you
call this on an HTTP URL, it will fall back to `load_vcf`.
Parameters
----------
path : str
Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).
genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
Optionally pass in a PyEnsembl Genome object, name of reference, or
PyEnsembl release version to specify the reference associated with a
VCF (otherwise infer reference from VCF using reference_vcf_key)
reference_vcf_key : str, optional
Name of metadata field which contains path to reference FASTA
file (default = 'reference')
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
allow_extended_nucleotides : boolean, default False
Allow characters other that A,C,T,G in the ref and alt strings.
include_info : boolean, default True
Whether to parse the INFO and per-sample columns. If you don't need
these, set to False for faster parsing.
chunk_size: int, optional
Number of records to load in memory at once.
max_variants : int, optional
If specified, return only the first max_variants variants.
sort_key : fn
Function which maps each element to a sorting criterion.
Set to None to not to sort the variants.
distinct : boolean, default True
Don't keep repeated variants
"""
require_string(path, 'Path or URL to VCF')
parsed_path = parse_url_or_path(path)
if parsed_path.scheme and parsed_path.scheme.lower() != 'file':
# pandas.read_table nominally supports HTTP, but it tends to crash on
# large files and does not support gzip. Switching to the python-based
# implementation of read_table (with engine="python") helps with some
# issues but introduces a new set of problems (e.g. the dtype parameter
# is not accepted). For these reasons, we're currently not attempting
# to load VCFs over HTTP with pandas directly, and instead download it
# to a temporary file and open that.
(filename, headers) = urllib.request.urlretrieve(path)
try:
# The downloaded file has no file extension, which confuses pyvcf
# for gziped files in Python 3. We rename it to have the correct
# file extension.
new_filename = '%s.%s' % (filename, parsed_path.path.split('.')[-1])
os.rename(filename, new_filename)
filename = new_filename
return load_vcf(filename, genome=genome, reference_vcf_key=reference_vcf_key, only_passing=only_passing, allow_extended_nucleotides=allow_extended_nucleotides, include_info=include_info, chunk_size=chunk_size, max_variants=max_variants, sort_key=sort_key, distinct=distinct) # depends on [control=['try'], data=[]]
finally:
logger.info('Removing temporary file: %s', filename)
os.unlink(filename) # depends on [control=['if'], data=[]]
# Loading a local file.
# The file will be opened twice: first to parse the header with pyvcf, then
# by pandas to read the data.
# PyVCF reads the metadata immediately and stops at the first line with
# data. We can close the file after that.
handle = PyVCFReaderFromPathOrURL(path)
handle.close()
genome = infer_genome_from_vcf(genome, handle.vcf_reader, reference_vcf_key)
df_iterator = read_vcf_into_dataframe(path, include_info=include_info, sample_names=handle.vcf_reader.samples if include_info else None, chunk_size=chunk_size)
if include_info:
def sample_info_parser(unparsed_sample_info_strings, format_string):
"""
Given a format string like "GT:AD:ADP:DP:FS"
and a list of sample info strings where each entry is like
"0/1:3,22:T=3,G=22:25:33", return a dict that maps:
sample name -> field name -> value. Uses pyvcf to parse the fields.
"""
return pyvcf_calls_to_sample_info_list(handle.vcf_reader._parse_samples(unparsed_sample_info_strings, format_string, None)) # depends on [control=['if'], data=[]]
else:
sample_info_parser = None
return dataframes_to_variant_collection(df_iterator, source_path=path, info_parser=handle.vcf_reader._parse_info if include_info else None, only_passing=only_passing, max_variants=max_variants, sample_names=handle.vcf_reader.samples if include_info else None, sample_info_parser=sample_info_parser, variant_kwargs={'ensembl': genome, 'allow_extended_nucleotides': allow_extended_nucleotides}, variant_collection_kwargs={'sort_key': sort_key, 'distinct': distinct}) |
def a_send_password(password, ctx):
"""Send the password text.
Before sending the password local echo is disabled.
If password not provided it disconnects from the device and raises ConnectionAuthenticationError exception.
"""
if password:
ctx.ctrl.send_command(password, password=True)
return True
else:
ctx.ctrl.disconnect()
raise ConnectionAuthenticationError("Password not provided", ctx.ctrl.hostname) | def function[a_send_password, parameter[password, ctx]]:
constant[Send the password text.
Before sending the password local echo is disabled.
If password not provided it disconnects from the device and raises ConnectionAuthenticationError exception.
]
if name[password] begin[:]
call[name[ctx].ctrl.send_command, parameter[name[password]]]
return[constant[True]] | keyword[def] identifier[a_send_password] ( identifier[password] , identifier[ctx] ):
literal[string]
keyword[if] identifier[password] :
identifier[ctx] . identifier[ctrl] . identifier[send_command] ( identifier[password] , identifier[password] = keyword[True] )
keyword[return] keyword[True]
keyword[else] :
identifier[ctx] . identifier[ctrl] . identifier[disconnect] ()
keyword[raise] identifier[ConnectionAuthenticationError] ( literal[string] , identifier[ctx] . identifier[ctrl] . identifier[hostname] ) | def a_send_password(password, ctx):
"""Send the password text.
Before sending the password local echo is disabled.
If password not provided it disconnects from the device and raises ConnectionAuthenticationError exception.
"""
if password:
ctx.ctrl.send_command(password, password=True)
return True # depends on [control=['if'], data=[]]
else:
ctx.ctrl.disconnect()
raise ConnectionAuthenticationError('Password not provided', ctx.ctrl.hostname) |
def read_relative_file(filename):
"""Returns contents of the given file, which path is supposed relative
to this module."""
with open(join(dirname(abspath(__file__)), filename)) as f:
return f.read() | def function[read_relative_file, parameter[filename]]:
constant[Returns contents of the given file, which path is supposed relative
to this module.]
with call[name[open], parameter[call[name[join], parameter[call[name[dirname], parameter[call[name[abspath], parameter[name[__file__]]]]], name[filename]]]]] begin[:]
return[call[name[f].read, parameter[]]] | keyword[def] identifier[read_relative_file] ( identifier[filename] ):
literal[string]
keyword[with] identifier[open] ( identifier[join] ( identifier[dirname] ( identifier[abspath] ( identifier[__file__] )), identifier[filename] )) keyword[as] identifier[f] :
keyword[return] identifier[f] . identifier[read] () | def read_relative_file(filename):
"""Returns contents of the given file, which path is supposed relative
to this module."""
with open(join(dirname(abspath(__file__)), filename)) as f:
return f.read() # depends on [control=['with'], data=['f']] |
def find(self, entry_id, query=None):
"""
Gets a single entry by ID.
"""
if query is None:
query = {}
if self.content_type_id is not None:
query['content_type'] = self.content_type_id
normalize_select(query)
return super(EntriesProxy, self).find(entry_id, query=query) | def function[find, parameter[self, entry_id, query]]:
constant[
Gets a single entry by ID.
]
if compare[name[query] is constant[None]] begin[:]
variable[query] assign[=] dictionary[[], []]
if compare[name[self].content_type_id is_not constant[None]] begin[:]
call[name[query]][constant[content_type]] assign[=] name[self].content_type_id
call[name[normalize_select], parameter[name[query]]]
return[call[call[name[super], parameter[name[EntriesProxy], name[self]]].find, parameter[name[entry_id]]]] | keyword[def] identifier[find] ( identifier[self] , identifier[entry_id] , identifier[query] = keyword[None] ):
literal[string]
keyword[if] identifier[query] keyword[is] keyword[None] :
identifier[query] ={}
keyword[if] identifier[self] . identifier[content_type_id] keyword[is] keyword[not] keyword[None] :
identifier[query] [ literal[string] ]= identifier[self] . identifier[content_type_id]
identifier[normalize_select] ( identifier[query] )
keyword[return] identifier[super] ( identifier[EntriesProxy] , identifier[self] ). identifier[find] ( identifier[entry_id] , identifier[query] = identifier[query] ) | def find(self, entry_id, query=None):
"""
Gets a single entry by ID.
"""
if query is None:
query = {} # depends on [control=['if'], data=['query']]
if self.content_type_id is not None:
query['content_type'] = self.content_type_id # depends on [control=['if'], data=[]]
normalize_select(query)
return super(EntriesProxy, self).find(entry_id, query=query) |
def send_media_group(
self,
chat_id: Union[int, str],
media: List[Union["pyrogram.InputMediaPhoto", "pyrogram.InputMediaVideo"]],
disable_notification: bool = None,
reply_to_message_id: int = None
):
"""Use this method to send a group of photos or videos as an album.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
media (List of :obj:`InputMediaPhoto` and :obj:`InputMediaVideo`):
A list describing photos and videos to be sent, must include 2–10 items.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
Returns:
On success, a :obj:`Messages <pyrogram.Messages>` object is returned containing all the
single messages sent.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
multi_media = []
for i in media:
style = self.html if i.parse_mode.lower() == "html" else self.markdown
if isinstance(i, pyrogram.InputMediaPhoto):
if os.path.exists(i.media):
while True:
try:
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedPhoto(
file=self.save_file(i.media)
)
)
)
except FloodWait as e:
log.warning("Sleeping for {}s".format(e.x))
time.sleep(e.x)
else:
break
media = types.InputMediaPhoto(
id=types.InputPhoto(
id=media.photo.id,
access_hash=media.photo.access_hash,
file_reference=b""
)
)
else:
try:
decoded = utils.decode(i.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 2:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaPhoto(
id=types.InputPhoto(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
elif isinstance(i, pyrogram.InputMediaVideo):
if os.path.exists(i.media):
while True:
try:
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
file=self.save_file(i.media),
thumb=None if i.thumb is None else self.save_file(i.thumb),
mime_type=self.guess_mime_type(i.media) or "video/mp4",
attributes=[
types.DocumentAttributeVideo(
supports_streaming=i.supports_streaming or None,
duration=i.duration,
w=i.width,
h=i.height
),
types.DocumentAttributeFilename(file_name=os.path.basename(i.media))
]
)
)
)
except FloodWait as e:
log.warning("Sleeping for {}s".format(e.x))
time.sleep(e.x)
else:
break
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
else:
try:
decoded = utils.decode(i.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 4:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
multi_media.append(
types.InputSingleMedia(
media=media,
random_id=self.rnd_id(),
**style.parse(i.caption)
)
)
while True:
try:
r = self.send(
functions.messages.SendMultiMedia(
peer=self.resolve_peer(chat_id),
multi_media=multi_media,
silent=disable_notification or None,
reply_to_msg_id=reply_to_message_id
)
)
except FloodWait as e:
log.warning("Sleeping for {}s".format(e.x))
time.sleep(e.x)
else:
break
return pyrogram.Messages._parse(
self,
types.messages.Messages(
messages=[m.message for m in filter(
lambda u: isinstance(u, (types.UpdateNewMessage, types.UpdateNewChannelMessage)),
r.updates
)],
users=r.users,
chats=r.chats
)
) | def function[send_media_group, parameter[self, chat_id, media, disable_notification, reply_to_message_id]]:
constant[Use this method to send a group of photos or videos as an album.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
media (List of :obj:`InputMediaPhoto` and :obj:`InputMediaVideo`):
A list describing photos and videos to be sent, must include 2–10 items.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
Returns:
On success, a :obj:`Messages <pyrogram.Messages>` object is returned containing all the
single messages sent.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
]
variable[multi_media] assign[=] list[[]]
for taget[name[i]] in starred[name[media]] begin[:]
variable[style] assign[=] <ast.IfExp object at 0x7da1b21d7790>
if call[name[isinstance], parameter[name[i], name[pyrogram].InputMediaPhoto]] begin[:]
if call[name[os].path.exists, parameter[name[i].media]] begin[:]
while constant[True] begin[:]
<ast.Try object at 0x7da1b21d72b0>
variable[media] assign[=] call[name[types].InputMediaPhoto, parameter[]]
call[name[multi_media].append, parameter[call[name[types].InputSingleMedia, parameter[]]]]
while constant[True] begin[:]
<ast.Try object at 0x7da207f00880>
return[call[name[pyrogram].Messages._parse, parameter[name[self], call[name[types].messages.Messages, parameter[]]]]] | keyword[def] identifier[send_media_group] (
identifier[self] ,
identifier[chat_id] : identifier[Union] [ identifier[int] , identifier[str] ],
identifier[media] : identifier[List] [ identifier[Union] [ literal[string] , literal[string] ]],
identifier[disable_notification] : identifier[bool] = keyword[None] ,
identifier[reply_to_message_id] : identifier[int] = keyword[None]
):
literal[string]
identifier[multi_media] =[]
keyword[for] identifier[i] keyword[in] identifier[media] :
identifier[style] = identifier[self] . identifier[html] keyword[if] identifier[i] . identifier[parse_mode] . identifier[lower] ()== literal[string] keyword[else] identifier[self] . identifier[markdown]
keyword[if] identifier[isinstance] ( identifier[i] , identifier[pyrogram] . identifier[InputMediaPhoto] ):
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[i] . identifier[media] ):
keyword[while] keyword[True] :
keyword[try] :
identifier[media] = identifier[self] . identifier[send] (
identifier[functions] . identifier[messages] . identifier[UploadMedia] (
identifier[peer] = identifier[self] . identifier[resolve_peer] ( identifier[chat_id] ),
identifier[media] = identifier[types] . identifier[InputMediaUploadedPhoto] (
identifier[file] = identifier[self] . identifier[save_file] ( identifier[i] . identifier[media] )
)
)
)
keyword[except] identifier[FloodWait] keyword[as] identifier[e] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[e] . identifier[x] ))
identifier[time] . identifier[sleep] ( identifier[e] . identifier[x] )
keyword[else] :
keyword[break]
identifier[media] = identifier[types] . identifier[InputMediaPhoto] (
identifier[id] = identifier[types] . identifier[InputPhoto] (
identifier[id] = identifier[media] . identifier[photo] . identifier[id] ,
identifier[access_hash] = identifier[media] . identifier[photo] . identifier[access_hash] ,
identifier[file_reference] = literal[string]
)
)
keyword[else] :
keyword[try] :
identifier[decoded] = identifier[utils] . identifier[decode] ( identifier[i] . identifier[media] )
identifier[fmt] = literal[string] keyword[if] identifier[len] ( identifier[decoded] )> literal[int] keyword[else] literal[string]
identifier[unpacked] = identifier[struct] . identifier[unpack] ( identifier[fmt] , identifier[decoded] )
keyword[except] ( identifier[AssertionError] , identifier[binascii] . identifier[Error] , identifier[struct] . identifier[error] ):
keyword[raise] identifier[FileIdInvalid] keyword[from] keyword[None]
keyword[else] :
keyword[if] identifier[unpacked] [ literal[int] ]!= literal[int] :
identifier[media_type] = identifier[BaseClient] . identifier[MEDIA_TYPE_ID] . identifier[get] ( identifier[unpacked] [ literal[int] ], keyword[None] )
keyword[if] identifier[media_type] :
keyword[raise] identifier[FileIdInvalid] ( literal[string] . identifier[format] ( identifier[media_type] ))
keyword[else] :
keyword[raise] identifier[FileIdInvalid] ( literal[string] . identifier[format] ( identifier[unpacked] [ literal[int] ]))
identifier[media] = identifier[types] . identifier[InputMediaPhoto] (
identifier[id] = identifier[types] . identifier[InputPhoto] (
identifier[id] = identifier[unpacked] [ literal[int] ],
identifier[access_hash] = identifier[unpacked] [ literal[int] ],
identifier[file_reference] = literal[string]
)
)
keyword[elif] identifier[isinstance] ( identifier[i] , identifier[pyrogram] . identifier[InputMediaVideo] ):
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[i] . identifier[media] ):
keyword[while] keyword[True] :
keyword[try] :
identifier[media] = identifier[self] . identifier[send] (
identifier[functions] . identifier[messages] . identifier[UploadMedia] (
identifier[peer] = identifier[self] . identifier[resolve_peer] ( identifier[chat_id] ),
identifier[media] = identifier[types] . identifier[InputMediaUploadedDocument] (
identifier[file] = identifier[self] . identifier[save_file] ( identifier[i] . identifier[media] ),
identifier[thumb] = keyword[None] keyword[if] identifier[i] . identifier[thumb] keyword[is] keyword[None] keyword[else] identifier[self] . identifier[save_file] ( identifier[i] . identifier[thumb] ),
identifier[mime_type] = identifier[self] . identifier[guess_mime_type] ( identifier[i] . identifier[media] ) keyword[or] literal[string] ,
identifier[attributes] =[
identifier[types] . identifier[DocumentAttributeVideo] (
identifier[supports_streaming] = identifier[i] . identifier[supports_streaming] keyword[or] keyword[None] ,
identifier[duration] = identifier[i] . identifier[duration] ,
identifier[w] = identifier[i] . identifier[width] ,
identifier[h] = identifier[i] . identifier[height]
),
identifier[types] . identifier[DocumentAttributeFilename] ( identifier[file_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[i] . identifier[media] ))
]
)
)
)
keyword[except] identifier[FloodWait] keyword[as] identifier[e] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[e] . identifier[x] ))
identifier[time] . identifier[sleep] ( identifier[e] . identifier[x] )
keyword[else] :
keyword[break]
identifier[media] = identifier[types] . identifier[InputMediaDocument] (
identifier[id] = identifier[types] . identifier[InputDocument] (
identifier[id] = identifier[media] . identifier[document] . identifier[id] ,
identifier[access_hash] = identifier[media] . identifier[document] . identifier[access_hash] ,
identifier[file_reference] = literal[string]
)
)
keyword[else] :
keyword[try] :
identifier[decoded] = identifier[utils] . identifier[decode] ( identifier[i] . identifier[media] )
identifier[fmt] = literal[string] keyword[if] identifier[len] ( identifier[decoded] )> literal[int] keyword[else] literal[string]
identifier[unpacked] = identifier[struct] . identifier[unpack] ( identifier[fmt] , identifier[decoded] )
keyword[except] ( identifier[AssertionError] , identifier[binascii] . identifier[Error] , identifier[struct] . identifier[error] ):
keyword[raise] identifier[FileIdInvalid] keyword[from] keyword[None]
keyword[else] :
keyword[if] identifier[unpacked] [ literal[int] ]!= literal[int] :
identifier[media_type] = identifier[BaseClient] . identifier[MEDIA_TYPE_ID] . identifier[get] ( identifier[unpacked] [ literal[int] ], keyword[None] )
keyword[if] identifier[media_type] :
keyword[raise] identifier[FileIdInvalid] ( literal[string] . identifier[format] ( identifier[media_type] ))
keyword[else] :
keyword[raise] identifier[FileIdInvalid] ( literal[string] . identifier[format] ( identifier[unpacked] [ literal[int] ]))
identifier[media] = identifier[types] . identifier[InputMediaDocument] (
identifier[id] = identifier[types] . identifier[InputDocument] (
identifier[id] = identifier[unpacked] [ literal[int] ],
identifier[access_hash] = identifier[unpacked] [ literal[int] ],
identifier[file_reference] = literal[string]
)
)
identifier[multi_media] . identifier[append] (
identifier[types] . identifier[InputSingleMedia] (
identifier[media] = identifier[media] ,
identifier[random_id] = identifier[self] . identifier[rnd_id] (),
** identifier[style] . identifier[parse] ( identifier[i] . identifier[caption] )
)
)
keyword[while] keyword[True] :
keyword[try] :
identifier[r] = identifier[self] . identifier[send] (
identifier[functions] . identifier[messages] . identifier[SendMultiMedia] (
identifier[peer] = identifier[self] . identifier[resolve_peer] ( identifier[chat_id] ),
identifier[multi_media] = identifier[multi_media] ,
identifier[silent] = identifier[disable_notification] keyword[or] keyword[None] ,
identifier[reply_to_msg_id] = identifier[reply_to_message_id]
)
)
keyword[except] identifier[FloodWait] keyword[as] identifier[e] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[e] . identifier[x] ))
identifier[time] . identifier[sleep] ( identifier[e] . identifier[x] )
keyword[else] :
keyword[break]
keyword[return] identifier[pyrogram] . identifier[Messages] . identifier[_parse] (
identifier[self] ,
identifier[types] . identifier[messages] . identifier[Messages] (
identifier[messages] =[ identifier[m] . identifier[message] keyword[for] identifier[m] keyword[in] identifier[filter] (
keyword[lambda] identifier[u] : identifier[isinstance] ( identifier[u] ,( identifier[types] . identifier[UpdateNewMessage] , identifier[types] . identifier[UpdateNewChannelMessage] )),
identifier[r] . identifier[updates]
)],
identifier[users] = identifier[r] . identifier[users] ,
identifier[chats] = identifier[r] . identifier[chats]
)
) | def send_media_group(self, chat_id: Union[int, str], media: List[Union['pyrogram.InputMediaPhoto', 'pyrogram.InputMediaVideo']], disable_notification: bool=None, reply_to_message_id: int=None):
"""Use this method to send a group of photos or videos as an album.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
media (List of :obj:`InputMediaPhoto` and :obj:`InputMediaVideo`):
A list describing photos and videos to be sent, must include 2–10 items.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
Returns:
On success, a :obj:`Messages <pyrogram.Messages>` object is returned containing all the
single messages sent.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
multi_media = []
for i in media:
style = self.html if i.parse_mode.lower() == 'html' else self.markdown
if isinstance(i, pyrogram.InputMediaPhoto):
if os.path.exists(i.media):
while True:
try:
media = self.send(functions.messages.UploadMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedPhoto(file=self.save_file(i.media)))) # depends on [control=['try'], data=[]]
except FloodWait as e:
log.warning('Sleeping for {}s'.format(e.x))
time.sleep(e.x) # depends on [control=['except'], data=['e']]
else:
break # depends on [control=['while'], data=[]]
media = types.InputMediaPhoto(id=types.InputPhoto(id=media.photo.id, access_hash=media.photo.access_hash, file_reference=b'')) # depends on [control=['if'], data=[]]
else:
try:
decoded = utils.decode(i.media)
fmt = '<iiqqqqi' if len(decoded) > 24 else '<iiqq'
unpacked = struct.unpack(fmt, decoded) # depends on [control=['try'], data=[]]
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None # depends on [control=['except'], data=[]]
else:
if unpacked[0] != 2:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid('The file_id belongs to a {}'.format(media_type)) # depends on [control=['if'], data=[]]
else:
raise FileIdInvalid('Unknown media type: {}'.format(unpacked[0])) # depends on [control=['if'], data=[]]
media = types.InputMediaPhoto(id=types.InputPhoto(id=unpacked[2], access_hash=unpacked[3], file_reference=b'')) # depends on [control=['if'], data=[]]
elif isinstance(i, pyrogram.InputMediaVideo):
if os.path.exists(i.media):
while True:
try:
media = self.send(functions.messages.UploadMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument(file=self.save_file(i.media), thumb=None if i.thumb is None else self.save_file(i.thumb), mime_type=self.guess_mime_type(i.media) or 'video/mp4', attributes=[types.DocumentAttributeVideo(supports_streaming=i.supports_streaming or None, duration=i.duration, w=i.width, h=i.height), types.DocumentAttributeFilename(file_name=os.path.basename(i.media))]))) # depends on [control=['try'], data=[]]
except FloodWait as e:
log.warning('Sleeping for {}s'.format(e.x))
time.sleep(e.x) # depends on [control=['except'], data=['e']]
else:
break # depends on [control=['while'], data=[]]
media = types.InputMediaDocument(id=types.InputDocument(id=media.document.id, access_hash=media.document.access_hash, file_reference=b'')) # depends on [control=['if'], data=[]]
else:
try:
decoded = utils.decode(i.media)
fmt = '<iiqqqqi' if len(decoded) > 24 else '<iiqq'
unpacked = struct.unpack(fmt, decoded) # depends on [control=['try'], data=[]]
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None # depends on [control=['except'], data=[]]
else:
if unpacked[0] != 4:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid('The file_id belongs to a {}'.format(media_type)) # depends on [control=['if'], data=[]]
else:
raise FileIdInvalid('Unknown media type: {}'.format(unpacked[0])) # depends on [control=['if'], data=[]]
media = types.InputMediaDocument(id=types.InputDocument(id=unpacked[2], access_hash=unpacked[3], file_reference=b'')) # depends on [control=['if'], data=[]]
multi_media.append(types.InputSingleMedia(media=media, random_id=self.rnd_id(), **style.parse(i.caption))) # depends on [control=['for'], data=['i']]
while True:
try:
r = self.send(functions.messages.SendMultiMedia(peer=self.resolve_peer(chat_id), multi_media=multi_media, silent=disable_notification or None, reply_to_msg_id=reply_to_message_id)) # depends on [control=['try'], data=[]]
except FloodWait as e:
log.warning('Sleeping for {}s'.format(e.x))
time.sleep(e.x) # depends on [control=['except'], data=['e']]
else:
break # depends on [control=['while'], data=[]]
return pyrogram.Messages._parse(self, types.messages.Messages(messages=[m.message for m in filter(lambda u: isinstance(u, (types.UpdateNewMessage, types.UpdateNewChannelMessage)), r.updates)], users=r.users, chats=r.chats)) |
def publish(self, topic, payload=None, qos=0, retain=False):
"""Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes."""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str) or isinstance(payload, bytearray):
local_payload = payload
elif sys.version_info[0] < 3 and isinstance(payload, unicode):
local_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
local_payload = str(payload)
elif payload is None:
local_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
if local_payload is not None and len(local_payload) > 268435455:
raise ValueError('Payload too large.')
if self._topic_wildcard_len_check(topic) != MQTT_ERR_SUCCESS:
raise ValueError('Publish topic cannot contain wildcards.')
local_mid = self._mid_generate()
if qos == 0:
rc = self._send_publish(local_mid, topic, local_payload, qos, retain, False)
return (rc, local_mid)
else:
message = MQTTMessage()
message.timestamp = time.time()
message.mid = local_mid
message.topic = topic
if local_payload is None or len(local_payload) == 0:
message.payload = None
else:
message.payload = local_payload
message.qos = qos
message.retain = retain
message.dup = False
self._out_message_mutex.acquire()
self._out_messages.append(message)
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
self._inflight_messages = self._inflight_messages+1
if qos == 1:
message.state = mqtt_ms_wait_for_puback
elif qos == 2:
message.state = mqtt_ms_wait_for_pubrec
self._out_message_mutex.release()
rc = self._send_publish(message.mid, message.topic, message.payload, message.qos, message.retain, message.dup)
# remove from inflight messages so it will be send after a connection is made
if rc is MQTT_ERR_NO_CONN:
with self._out_message_mutex:
self._inflight_messages -= 1
message.state = mqtt_ms_publish
return (rc, local_mid)
else:
message.state = mqtt_ms_queued;
self._out_message_mutex.release()
return (MQTT_ERR_SUCCESS, local_mid) | def function[publish, parameter[self, topic, payload, qos, retain]]:
constant[Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes.]
if <ast.BoolOp object at 0x7da20c6aa4a0> begin[:]
<ast.Raise object at 0x7da20c6a9e40>
if <ast.BoolOp object at 0x7da18f00d660> begin[:]
<ast.Raise object at 0x7da18f00e4d0>
if <ast.BoolOp object at 0x7da18f00de70> begin[:]
variable[local_payload] assign[=] name[payload]
if <ast.BoolOp object at 0x7da18f00ea10> begin[:]
<ast.Raise object at 0x7da18f00cb80>
if compare[call[name[self]._topic_wildcard_len_check, parameter[name[topic]]] not_equal[!=] name[MQTT_ERR_SUCCESS]] begin[:]
<ast.Raise object at 0x7da18f00d8a0>
variable[local_mid] assign[=] call[name[self]._mid_generate, parameter[]]
if compare[name[qos] equal[==] constant[0]] begin[:]
variable[rc] assign[=] call[name[self]._send_publish, parameter[name[local_mid], name[topic], name[local_payload], name[qos], name[retain], constant[False]]]
return[tuple[[<ast.Name object at 0x7da18f00ecb0>, <ast.Name object at 0x7da18f00f070>]]] | keyword[def] identifier[publish] ( identifier[self] , identifier[topic] , identifier[payload] = keyword[None] , identifier[qos] = literal[int] , identifier[retain] = keyword[False] ):
literal[string]
keyword[if] identifier[topic] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[topic] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[qos] < literal[int] keyword[or] identifier[qos] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[payload] , identifier[str] ) keyword[or] identifier[isinstance] ( identifier[payload] , identifier[bytearray] ):
identifier[local_payload] = identifier[payload]
keyword[elif] identifier[sys] . identifier[version_info] [ literal[int] ]< literal[int] keyword[and] identifier[isinstance] ( identifier[payload] , identifier[unicode] ):
identifier[local_payload] = identifier[payload]
keyword[elif] identifier[isinstance] ( identifier[payload] , identifier[int] ) keyword[or] identifier[isinstance] ( identifier[payload] , identifier[float] ):
identifier[local_payload] = identifier[str] ( identifier[payload] )
keyword[elif] identifier[payload] keyword[is] keyword[None] :
identifier[local_payload] = keyword[None]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[local_payload] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[local_payload] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[_topic_wildcard_len_check] ( identifier[topic] )!= identifier[MQTT_ERR_SUCCESS] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[local_mid] = identifier[self] . identifier[_mid_generate] ()
keyword[if] identifier[qos] == literal[int] :
identifier[rc] = identifier[self] . identifier[_send_publish] ( identifier[local_mid] , identifier[topic] , identifier[local_payload] , identifier[qos] , identifier[retain] , keyword[False] )
keyword[return] ( identifier[rc] , identifier[local_mid] )
keyword[else] :
identifier[message] = identifier[MQTTMessage] ()
identifier[message] . identifier[timestamp] = identifier[time] . identifier[time] ()
identifier[message] . identifier[mid] = identifier[local_mid]
identifier[message] . identifier[topic] = identifier[topic]
keyword[if] identifier[local_payload] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[local_payload] )== literal[int] :
identifier[message] . identifier[payload] = keyword[None]
keyword[else] :
identifier[message] . identifier[payload] = identifier[local_payload]
identifier[message] . identifier[qos] = identifier[qos]
identifier[message] . identifier[retain] = identifier[retain]
identifier[message] . identifier[dup] = keyword[False]
identifier[self] . identifier[_out_message_mutex] . identifier[acquire] ()
identifier[self] . identifier[_out_messages] . identifier[append] ( identifier[message] )
keyword[if] identifier[self] . identifier[_max_inflight_messages] == literal[int] keyword[or] identifier[self] . identifier[_inflight_messages] < identifier[self] . identifier[_max_inflight_messages] :
identifier[self] . identifier[_inflight_messages] = identifier[self] . identifier[_inflight_messages] + literal[int]
keyword[if] identifier[qos] == literal[int] :
identifier[message] . identifier[state] = identifier[mqtt_ms_wait_for_puback]
keyword[elif] identifier[qos] == literal[int] :
identifier[message] . identifier[state] = identifier[mqtt_ms_wait_for_pubrec]
identifier[self] . identifier[_out_message_mutex] . identifier[release] ()
identifier[rc] = identifier[self] . identifier[_send_publish] ( identifier[message] . identifier[mid] , identifier[message] . identifier[topic] , identifier[message] . identifier[payload] , identifier[message] . identifier[qos] , identifier[message] . identifier[retain] , identifier[message] . identifier[dup] )
keyword[if] identifier[rc] keyword[is] identifier[MQTT_ERR_NO_CONN] :
keyword[with] identifier[self] . identifier[_out_message_mutex] :
identifier[self] . identifier[_inflight_messages] -= literal[int]
identifier[message] . identifier[state] = identifier[mqtt_ms_publish]
keyword[return] ( identifier[rc] , identifier[local_mid] )
keyword[else] :
identifier[message] . identifier[state] = identifier[mqtt_ms_queued] ;
identifier[self] . identifier[_out_message_mutex] . identifier[release] ()
keyword[return] ( identifier[MQTT_ERR_SUCCESS] , identifier[local_mid] ) | def publish(self, topic, payload=None, qos=0, retain=False):
"""Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes."""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.') # depends on [control=['if'], data=[]]
if qos < 0 or qos > 2:
raise ValueError('Invalid QoS level.') # depends on [control=['if'], data=[]]
if isinstance(payload, str) or isinstance(payload, bytearray):
local_payload = payload # depends on [control=['if'], data=[]]
elif sys.version_info[0] < 3 and isinstance(payload, unicode):
local_payload = payload # depends on [control=['if'], data=[]]
elif isinstance(payload, int) or isinstance(payload, float):
local_payload = str(payload) # depends on [control=['if'], data=[]]
elif payload is None:
local_payload = None # depends on [control=['if'], data=[]]
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
if local_payload is not None and len(local_payload) > 268435455:
raise ValueError('Payload too large.') # depends on [control=['if'], data=[]]
if self._topic_wildcard_len_check(topic) != MQTT_ERR_SUCCESS:
raise ValueError('Publish topic cannot contain wildcards.') # depends on [control=['if'], data=[]]
local_mid = self._mid_generate()
if qos == 0:
rc = self._send_publish(local_mid, topic, local_payload, qos, retain, False)
return (rc, local_mid) # depends on [control=['if'], data=['qos']]
else:
message = MQTTMessage()
message.timestamp = time.time()
message.mid = local_mid
message.topic = topic
if local_payload is None or len(local_payload) == 0:
message.payload = None # depends on [control=['if'], data=[]]
else:
message.payload = local_payload
message.qos = qos
message.retain = retain
message.dup = False
self._out_message_mutex.acquire()
self._out_messages.append(message)
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
self._inflight_messages = self._inflight_messages + 1
if qos == 1:
message.state = mqtt_ms_wait_for_puback # depends on [control=['if'], data=[]]
elif qos == 2:
message.state = mqtt_ms_wait_for_pubrec # depends on [control=['if'], data=[]]
self._out_message_mutex.release()
rc = self._send_publish(message.mid, message.topic, message.payload, message.qos, message.retain, message.dup)
# remove from inflight messages so it will be send after a connection is made
if rc is MQTT_ERR_NO_CONN:
with self._out_message_mutex:
self._inflight_messages -= 1
message.state = mqtt_ms_publish # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
return (rc, local_mid) # depends on [control=['if'], data=[]]
else:
message.state = mqtt_ms_queued
self._out_message_mutex.release()
return (MQTT_ERR_SUCCESS, local_mid) |
def safe_dump(data, stream=None, **kwds):
"""implementation of safe dumper using Ordered Dict Yaml Dumper"""
return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds) | def function[safe_dump, parameter[data, stream]]:
constant[implementation of safe dumper using Ordered Dict Yaml Dumper]
return[call[name[yaml].dump, parameter[name[data]]]] | keyword[def] identifier[safe_dump] ( identifier[data] , identifier[stream] = keyword[None] ,** identifier[kwds] ):
literal[string]
keyword[return] identifier[yaml] . identifier[dump] ( identifier[data] , identifier[stream] = identifier[stream] , identifier[Dumper] = identifier[ODYD] ,** identifier[kwds] ) | def safe_dump(data, stream=None, **kwds):
"""implementation of safe dumper using Ordered Dict Yaml Dumper"""
return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds) |
def write_data(X, dir='sim/test', append=False, header='',
varNames={}, Adj=np.array([]), Coupl=np.array([]),
boolRules={}, model='', modelType='', invTimeStep=1):
""" Write simulated data.
Accounts for saving at the same time an ID
and a model file.
"""
# check if output directory exists
if not os.path.exists(dir):
os.makedirs(dir)
# update file with sample ids
filename = dir+'/id.txt'
if os.path.exists(filename):
f = open(filename,'r')
id = int(f.read()) + (0 if append else 1)
f.close()
else:
id = 0
f = open(filename,'w')
id = '{:0>6}'.format(id)
f.write(str(id))
f.close()
# dimension
dim = X.shape[1]
# write files with adjacancy and coupling matrices
if not append:
if False:
if Adj.size > 0:
# due to 'update formulation' of model, there
# is always a diagonal dependence
Adj = np.copy(Adj)
if 'hill' in model:
for i in range(Adj.shape[0]):
Adj[i,i] = 1
np.savetxt(dir+'/adj_'+id+'.txt',Adj,
header=header,
fmt='%d')
if Coupl.size > 0:
np.savetxt(dir+'/coupl_'+id+'.txt',Coupl,
header=header,
fmt='%10.6f')
# write model file
if varNames and Coupl.size > 0:
f = open(dir+'/model_'+id+'.txt','w')
f.write('# For each "variable = ", there must be a right hand side: \n')
f.write('# either an empty string or a python-style logical expression \n')
f.write('# involving variable names, "or", "and", "(", ")". \n')
f.write('# The order of equations matters! \n')
f.write('# \n')
f.write('# modelType = ' + modelType + '\n')
f.write('# invTimeStep = '+ str(invTimeStep) + '\n')
f.write('# \n')
f.write('# boolean update rules: \n')
for rule in boolRules.items():
f.write(rule[0] + ' = ' + rule[1] + '\n')
# write coupling via names
f.write('# coupling list: \n')
names = list(varNames.keys())
for gp in range(dim):
for g in range(dim):
if np.abs(Coupl[gp,g]) > 1e-10:
f.write('{:10} '.format(names[gp])
+ '{:10} '.format(names[g])
+ '{:10.3} '.format(Coupl[gp,g]) + '\n')
f.close()
# write simulated data
# the binary mode option in the following line is a fix for python 3
# variable names
if varNames:
header += '{:>2} '.format('it')
for v in varNames.keys():
header += '{:>7} '.format(v)
f = open(dir+'/sim_'+id+'.txt','ab' if append else 'wb')
np.savetxt(f,np.c_[np.arange(0,X.shape[0]),X],header=('' if append else header),
fmt=['%4.f']+['%7.4f' for i in range(dim)])
f.close() | def function[write_data, parameter[X, dir, append, header, varNames, Adj, Coupl, boolRules, model, modelType, invTimeStep]]:
constant[ Write simulated data.
Accounts for saving at the same time an ID
and a model file.
]
if <ast.UnaryOp object at 0x7da18f7225f0> begin[:]
call[name[os].makedirs, parameter[name[dir]]]
variable[filename] assign[=] binary_operation[name[dir] + constant[/id.txt]]
if call[name[os].path.exists, parameter[name[filename]]] begin[:]
variable[f] assign[=] call[name[open], parameter[name[filename], constant[r]]]
variable[id] assign[=] binary_operation[call[name[int], parameter[call[name[f].read, parameter[]]]] + <ast.IfExp object at 0x7da18f723160>]
call[name[f].close, parameter[]]
variable[f] assign[=] call[name[open], parameter[name[filename], constant[w]]]
variable[id] assign[=] call[constant[{:0>6}].format, parameter[name[id]]]
call[name[f].write, parameter[call[name[str], parameter[name[id]]]]]
call[name[f].close, parameter[]]
variable[dim] assign[=] call[name[X].shape][constant[1]]
if <ast.UnaryOp object at 0x7da18f720070> begin[:]
if constant[False] begin[:]
if compare[name[Adj].size greater[>] constant[0]] begin[:]
variable[Adj] assign[=] call[name[np].copy, parameter[name[Adj]]]
if compare[constant[hill] in name[model]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[Adj].shape][constant[0]]]]] begin[:]
call[name[Adj]][tuple[[<ast.Name object at 0x7da18f721e10>, <ast.Name object at 0x7da18f723ee0>]]] assign[=] constant[1]
call[name[np].savetxt, parameter[binary_operation[binary_operation[binary_operation[name[dir] + constant[/adj_]] + name[id]] + constant[.txt]], name[Adj]]]
if compare[name[Coupl].size greater[>] constant[0]] begin[:]
call[name[np].savetxt, parameter[binary_operation[binary_operation[binary_operation[name[dir] + constant[/coupl_]] + name[id]] + constant[.txt]], name[Coupl]]]
if <ast.BoolOp object at 0x7da18f721db0> begin[:]
variable[f] assign[=] call[name[open], parameter[binary_operation[binary_operation[binary_operation[name[dir] + constant[/model_]] + name[id]] + constant[.txt]], constant[w]]]
call[name[f].write, parameter[constant[# For each "variable = ", there must be a right hand side:
]]]
call[name[f].write, parameter[constant[# either an empty string or a python-style logical expression
]]]
call[name[f].write, parameter[constant[# involving variable names, "or", "and", "(", ")".
]]]
call[name[f].write, parameter[constant[# The order of equations matters!
]]]
call[name[f].write, parameter[constant[#
]]]
call[name[f].write, parameter[binary_operation[binary_operation[constant[# modelType = ] + name[modelType]] + constant[
]]]]
call[name[f].write, parameter[binary_operation[binary_operation[constant[# invTimeStep = ] + call[name[str], parameter[name[invTimeStep]]]] + constant[
]]]]
call[name[f].write, parameter[constant[#
]]]
call[name[f].write, parameter[constant[# boolean update rules:
]]]
for taget[name[rule]] in starred[call[name[boolRules].items, parameter[]]] begin[:]
call[name[f].write, parameter[binary_operation[binary_operation[binary_operation[call[name[rule]][constant[0]] + constant[ = ]] + call[name[rule]][constant[1]]] + constant[
]]]]
call[name[f].write, parameter[constant[# coupling list:
]]]
variable[names] assign[=] call[name[list], parameter[call[name[varNames].keys, parameter[]]]]
for taget[name[gp]] in starred[call[name[range], parameter[name[dim]]]] begin[:]
for taget[name[g]] in starred[call[name[range], parameter[name[dim]]]] begin[:]
if compare[call[name[np].abs, parameter[call[name[Coupl]][tuple[[<ast.Name object at 0x7da2045648e0>, <ast.Name object at 0x7da2045642e0>]]]]] greater[>] constant[1e-10]] begin[:]
call[name[f].write, parameter[binary_operation[binary_operation[binary_operation[call[constant[{:10} ].format, parameter[call[name[names]][name[gp]]]] + call[constant[{:10} ].format, parameter[call[name[names]][name[g]]]]] + call[constant[{:10.3} ].format, parameter[call[name[Coupl]][tuple[[<ast.Name object at 0x7da2045643a0>, <ast.Name object at 0x7da204566470>]]]]]] + constant[
]]]]
call[name[f].close, parameter[]]
if name[varNames] begin[:]
<ast.AugAssign object at 0x7da204566350>
for taget[name[v]] in starred[call[name[varNames].keys, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da204565db0>
variable[f] assign[=] call[name[open], parameter[binary_operation[binary_operation[binary_operation[name[dir] + constant[/sim_]] + name[id]] + constant[.txt]], <ast.IfExp object at 0x7da204565240>]]
call[name[np].savetxt, parameter[name[f], call[name[np].c_][tuple[[<ast.Call object at 0x7da204567160>, <ast.Name object at 0x7da204566230>]]]]]
call[name[f].close, parameter[]] | keyword[def] identifier[write_data] ( identifier[X] , identifier[dir] = literal[string] , identifier[append] = keyword[False] , identifier[header] = literal[string] ,
identifier[varNames] ={}, identifier[Adj] = identifier[np] . identifier[array] ([]), identifier[Coupl] = identifier[np] . identifier[array] ([]),
identifier[boolRules] ={}, identifier[model] = literal[string] , identifier[modelType] = literal[string] , identifier[invTimeStep] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dir] ):
identifier[os] . identifier[makedirs] ( identifier[dir] )
identifier[filename] = identifier[dir] + literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
identifier[id] = identifier[int] ( identifier[f] . identifier[read] ())+( literal[int] keyword[if] identifier[append] keyword[else] literal[int] )
identifier[f] . identifier[close] ()
keyword[else] :
identifier[id] = literal[int]
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
identifier[id] = literal[string] . identifier[format] ( identifier[id] )
identifier[f] . identifier[write] ( identifier[str] ( identifier[id] ))
identifier[f] . identifier[close] ()
identifier[dim] = identifier[X] . identifier[shape] [ literal[int] ]
keyword[if] keyword[not] identifier[append] :
keyword[if] keyword[False] :
keyword[if] identifier[Adj] . identifier[size] > literal[int] :
identifier[Adj] = identifier[np] . identifier[copy] ( identifier[Adj] )
keyword[if] literal[string] keyword[in] identifier[model] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[Adj] . identifier[shape] [ literal[int] ]):
identifier[Adj] [ identifier[i] , identifier[i] ]= literal[int]
identifier[np] . identifier[savetxt] ( identifier[dir] + literal[string] + identifier[id] + literal[string] , identifier[Adj] ,
identifier[header] = identifier[header] ,
identifier[fmt] = literal[string] )
keyword[if] identifier[Coupl] . identifier[size] > literal[int] :
identifier[np] . identifier[savetxt] ( identifier[dir] + literal[string] + identifier[id] + literal[string] , identifier[Coupl] ,
identifier[header] = identifier[header] ,
identifier[fmt] = literal[string] )
keyword[if] identifier[varNames] keyword[and] identifier[Coupl] . identifier[size] > literal[int] :
identifier[f] = identifier[open] ( identifier[dir] + literal[string] + identifier[id] + literal[string] , literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] + identifier[modelType] + literal[string] )
identifier[f] . identifier[write] ( literal[string] + identifier[str] ( identifier[invTimeStep] )+ literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
keyword[for] identifier[rule] keyword[in] identifier[boolRules] . identifier[items] ():
identifier[f] . identifier[write] ( identifier[rule] [ literal[int] ]+ literal[string] + identifier[rule] [ literal[int] ]+ literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[names] = identifier[list] ( identifier[varNames] . identifier[keys] ())
keyword[for] identifier[gp] keyword[in] identifier[range] ( identifier[dim] ):
keyword[for] identifier[g] keyword[in] identifier[range] ( identifier[dim] ):
keyword[if] identifier[np] . identifier[abs] ( identifier[Coupl] [ identifier[gp] , identifier[g] ])> literal[int] :
identifier[f] . identifier[write] ( literal[string] . identifier[format] ( identifier[names] [ identifier[gp] ])
+ literal[string] . identifier[format] ( identifier[names] [ identifier[g] ])
+ literal[string] . identifier[format] ( identifier[Coupl] [ identifier[gp] , identifier[g] ])+ literal[string] )
identifier[f] . identifier[close] ()
keyword[if] identifier[varNames] :
identifier[header] += literal[string] . identifier[format] ( literal[string] )
keyword[for] identifier[v] keyword[in] identifier[varNames] . identifier[keys] ():
identifier[header] += literal[string] . identifier[format] ( identifier[v] )
identifier[f] = identifier[open] ( identifier[dir] + literal[string] + identifier[id] + literal[string] , literal[string] keyword[if] identifier[append] keyword[else] literal[string] )
identifier[np] . identifier[savetxt] ( identifier[f] , identifier[np] . identifier[c_] [ identifier[np] . identifier[arange] ( literal[int] , identifier[X] . identifier[shape] [ literal[int] ]), identifier[X] ], identifier[header] =( literal[string] keyword[if] identifier[append] keyword[else] identifier[header] ),
identifier[fmt] =[ literal[string] ]+[ literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[dim] )])
identifier[f] . identifier[close] () | def write_data(X, dir='sim/test', append=False, header='', varNames={}, Adj=np.array([]), Coupl=np.array([]), boolRules={}, model='', modelType='', invTimeStep=1):
""" Write simulated data.
Accounts for saving at the same time an ID
and a model file.
"""
# check if output directory exists
if not os.path.exists(dir):
os.makedirs(dir) # depends on [control=['if'], data=[]]
# update file with sample ids
filename = dir + '/id.txt'
if os.path.exists(filename):
f = open(filename, 'r')
id = int(f.read()) + (0 if append else 1)
f.close() # depends on [control=['if'], data=[]]
else:
id = 0
f = open(filename, 'w')
id = '{:0>6}'.format(id)
f.write(str(id))
f.close()
# dimension
dim = X.shape[1]
# write files with adjacancy and coupling matrices
if not append:
if False:
if Adj.size > 0:
# due to 'update formulation' of model, there
# is always a diagonal dependence
Adj = np.copy(Adj)
if 'hill' in model:
for i in range(Adj.shape[0]):
Adj[i, i] = 1 # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
np.savetxt(dir + '/adj_' + id + '.txt', Adj, header=header, fmt='%d') # depends on [control=['if'], data=[]]
if Coupl.size > 0:
np.savetxt(dir + '/coupl_' + id + '.txt', Coupl, header=header, fmt='%10.6f') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# write model file
if varNames and Coupl.size > 0:
f = open(dir + '/model_' + id + '.txt', 'w')
f.write('# For each "variable = ", there must be a right hand side: \n')
f.write('# either an empty string or a python-style logical expression \n')
f.write('# involving variable names, "or", "and", "(", ")". \n')
f.write('# The order of equations matters! \n')
f.write('# \n')
f.write('# modelType = ' + modelType + '\n')
f.write('# invTimeStep = ' + str(invTimeStep) + '\n')
f.write('# \n')
f.write('# boolean update rules: \n')
for rule in boolRules.items():
f.write(rule[0] + ' = ' + rule[1] + '\n') # depends on [control=['for'], data=['rule']]
# write coupling via names
f.write('# coupling list: \n')
names = list(varNames.keys())
for gp in range(dim):
for g in range(dim):
if np.abs(Coupl[gp, g]) > 1e-10:
f.write('{:10} '.format(names[gp]) + '{:10} '.format(names[g]) + '{:10.3} '.format(Coupl[gp, g]) + '\n') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['g']] # depends on [control=['for'], data=['gp']]
f.close() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# write simulated data
# the binary mode option in the following line is a fix for python 3
# variable names
if varNames:
header += '{:>2} '.format('it')
for v in varNames.keys():
header += '{:>7} '.format(v) # depends on [control=['for'], data=['v']] # depends on [control=['if'], data=[]]
f = open(dir + '/sim_' + id + '.txt', 'ab' if append else 'wb')
np.savetxt(f, np.c_[np.arange(0, X.shape[0]), X], header='' if append else header, fmt=['%4.f'] + ['%7.4f' for i in range(dim)])
f.close() |
def _Close(self):
"""Closes the file-like object."""
self._cpio_archive_file_entry = None
self._cpio_archive_file = None
self._file_system.Close()
self._file_system = None | def function[_Close, parameter[self]]:
constant[Closes the file-like object.]
name[self]._cpio_archive_file_entry assign[=] constant[None]
name[self]._cpio_archive_file assign[=] constant[None]
call[name[self]._file_system.Close, parameter[]]
name[self]._file_system assign[=] constant[None] | keyword[def] identifier[_Close] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_cpio_archive_file_entry] = keyword[None]
identifier[self] . identifier[_cpio_archive_file] = keyword[None]
identifier[self] . identifier[_file_system] . identifier[Close] ()
identifier[self] . identifier[_file_system] = keyword[None] | def _Close(self):
"""Closes the file-like object."""
self._cpio_archive_file_entry = None
self._cpio_archive_file = None
self._file_system.Close()
self._file_system = None |
def downstream(self, f, n=1):
"""find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
"""
if f.strand == -1:
return self.left(f, n)
return self.right(f, n) | def function[downstream, parameter[self, f, n]]:
constant[find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
]
if compare[name[f].strand equal[==] <ast.UnaryOp object at 0x7da18dc9acb0>] begin[:]
return[call[name[self].left, parameter[name[f], name[n]]]]
return[call[name[self].right, parameter[name[f], name[n]]]] | keyword[def] identifier[downstream] ( identifier[self] , identifier[f] , identifier[n] = literal[int] ):
literal[string]
keyword[if] identifier[f] . identifier[strand] ==- literal[int] :
keyword[return] identifier[self] . identifier[left] ( identifier[f] , identifier[n] )
keyword[return] identifier[self] . identifier[right] ( identifier[f] , identifier[n] ) | def downstream(self, f, n=1):
"""find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
"""
if f.strand == -1:
return self.left(f, n) # depends on [control=['if'], data=[]]
return self.right(f, n) |
def read_igor_VJ_palindrome_parameters(self, params_file_name):
"""Read V and J palindrome parameters from file.
Sets the attributes max_delV_palindrome and max_delJ_palindrome.
Parameters
----------
params_file_name : str
File name for an IGoR parameter file of a VJ generative model.
"""
params_file = open(params_file_name, 'r')
in_delV = False
in_delJ = False
for line in params_file:
if line.startswith('#Deletion;V_gene;'):
in_delV = True
in_delJ = False
elif line.startswith('#Deletion;J_gene;'):
in_delV = False
in_delJ = True
elif any([in_delV, in_delJ]) and line.startswith('%'):
if int(line.split(';')[-1]) == 0:
if in_delV:
self.max_delV_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
elif in_delJ:
self.max_delJ_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
else:
in_delV = False
in_delJ = False | def function[read_igor_VJ_palindrome_parameters, parameter[self, params_file_name]]:
constant[Read V and J palindrome parameters from file.
Sets the attributes max_delV_palindrome and max_delJ_palindrome.
Parameters
----------
params_file_name : str
File name for an IGoR parameter file of a VJ generative model.
]
variable[params_file] assign[=] call[name[open], parameter[name[params_file_name], constant[r]]]
variable[in_delV] assign[=] constant[False]
variable[in_delJ] assign[=] constant[False]
for taget[name[line]] in starred[name[params_file]] begin[:]
if call[name[line].startswith, parameter[constant[#Deletion;V_gene;]]] begin[:]
variable[in_delV] assign[=] constant[True]
variable[in_delJ] assign[=] constant[False] | keyword[def] identifier[read_igor_VJ_palindrome_parameters] ( identifier[self] , identifier[params_file_name] ):
literal[string]
identifier[params_file] = identifier[open] ( identifier[params_file_name] , literal[string] )
identifier[in_delV] = keyword[False]
identifier[in_delJ] = keyword[False]
keyword[for] identifier[line] keyword[in] identifier[params_file] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[in_delV] = keyword[True]
identifier[in_delJ] = keyword[False]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[in_delV] = keyword[False]
identifier[in_delJ] = keyword[True]
keyword[elif] identifier[any] ([ identifier[in_delV] , identifier[in_delJ] ]) keyword[and] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[int] ( identifier[line] . identifier[split] ( literal[string] )[- literal[int] ])== literal[int] :
keyword[if] identifier[in_delV] :
identifier[self] . identifier[max_delV_palindrome] = identifier[np] . identifier[abs] ( identifier[int] ( identifier[line] . identifier[lstrip] ( literal[string] ). identifier[split] ( literal[string] )[ literal[int] ]))
keyword[elif] identifier[in_delJ] :
identifier[self] . identifier[max_delJ_palindrome] = identifier[np] . identifier[abs] ( identifier[int] ( identifier[line] . identifier[lstrip] ( literal[string] ). identifier[split] ( literal[string] )[ literal[int] ]))
keyword[else] :
identifier[in_delV] = keyword[False]
identifier[in_delJ] = keyword[False] | def read_igor_VJ_palindrome_parameters(self, params_file_name):
"""Read V and J palindrome parameters from file.
Sets the attributes max_delV_palindrome and max_delJ_palindrome.
Parameters
----------
params_file_name : str
File name for an IGoR parameter file of a VJ generative model.
"""
params_file = open(params_file_name, 'r')
in_delV = False
in_delJ = False
for line in params_file:
if line.startswith('#Deletion;V_gene;'):
in_delV = True
in_delJ = False # depends on [control=['if'], data=[]]
elif line.startswith('#Deletion;J_gene;'):
in_delV = False
in_delJ = True # depends on [control=['if'], data=[]]
elif any([in_delV, in_delJ]) and line.startswith('%'):
if int(line.split(';')[-1]) == 0:
if in_delV:
self.max_delV_palindrome = np.abs(int(line.lstrip('%').split(';')[0])) # depends on [control=['if'], data=[]]
elif in_delJ:
self.max_delJ_palindrome = np.abs(int(line.lstrip('%').split(';')[0])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
in_delV = False
in_delJ = False # depends on [control=['for'], data=['line']] |
def add_missing_particles(st, rad='calc', tries=50, **kwargs):
"""
Attempts to add missing particles to the state.
Operates by:
(1) featuring the difference image using feature_guess,
(2) attempting to add the featured positions using check_add_particles.
Parameters
----------
st : :class:`peri.states.State`
The state to check adding particles to.
rad : Float or 'calc', optional
The radius of the newly-added particles and of the feature size for
featuring. Default is 'calc', which uses the median of the state's
current radii.
tries : Int, optional
How many particles to attempt to add. Only tries to add the first
``tries`` particles, in order of mass. Default is 50.
Other Parameters
----------------
invert : Bool, optional
Whether to invert the image. Default is ``True``, i.e. dark particles
minmass : Float or None, optionals
The minimum mass/masscut of a particle. Default is ``None``=calcualted
by ``feature_guess``.
use_tp : Bool, optional
Whether to use trackpy in feature_guess. Default is False, since
trackpy cuts out particles at the edge.
do_opt : Bool, optional
Whether to optimize the particle position before checking if it
should be kept. Default is True (optimizes position).
im_change_frac : Float, optional
How good the change in error needs to be relative to the change
in the difference image. Default is 0.2; i.e. if the error does
not decrease by 20% of the change in the difference image, do
not add the particle.
min_derr : Float or '3sig', optional
The minimal improvement in error to add a particle. Default
is ``'3sig' = 3*st.sigma``.
Returns
-------
accepts : Int
The number of added particles
new_poses : [N,3] list
List of the positions of the added particles. If ``do_opt==True``,
then these positions will differ from the input 'guess'.
"""
if rad == 'calc':
rad = guess_add_radii(st)
guess, npart = feature_guess(st, rad, **kwargs)
tries = np.min([tries, npart])
accepts, new_poses = check_add_particles(
st, guess[:tries], rad=rad, **kwargs)
return accepts, new_poses | def function[add_missing_particles, parameter[st, rad, tries]]:
constant[
Attempts to add missing particles to the state.
Operates by:
(1) featuring the difference image using feature_guess,
(2) attempting to add the featured positions using check_add_particles.
Parameters
----------
st : :class:`peri.states.State`
The state to check adding particles to.
rad : Float or 'calc', optional
The radius of the newly-added particles and of the feature size for
featuring. Default is 'calc', which uses the median of the state's
current radii.
tries : Int, optional
How many particles to attempt to add. Only tries to add the first
``tries`` particles, in order of mass. Default is 50.
Other Parameters
----------------
invert : Bool, optional
Whether to invert the image. Default is ``True``, i.e. dark particles
minmass : Float or None, optionals
The minimum mass/masscut of a particle. Default is ``None``=calcualted
by ``feature_guess``.
use_tp : Bool, optional
Whether to use trackpy in feature_guess. Default is False, since
trackpy cuts out particles at the edge.
do_opt : Bool, optional
Whether to optimize the particle position before checking if it
should be kept. Default is True (optimizes position).
im_change_frac : Float, optional
How good the change in error needs to be relative to the change
in the difference image. Default is 0.2; i.e. if the error does
not decrease by 20% of the change in the difference image, do
not add the particle.
min_derr : Float or '3sig', optional
The minimal improvement in error to add a particle. Default
is ``'3sig' = 3*st.sigma``.
Returns
-------
accepts : Int
The number of added particles
new_poses : [N,3] list
List of the positions of the added particles. If ``do_opt==True``,
then these positions will differ from the input 'guess'.
]
if compare[name[rad] equal[==] constant[calc]] begin[:]
variable[rad] assign[=] call[name[guess_add_radii], parameter[name[st]]]
<ast.Tuple object at 0x7da1b00f6830> assign[=] call[name[feature_guess], parameter[name[st], name[rad]]]
variable[tries] assign[=] call[name[np].min, parameter[list[[<ast.Name object at 0x7da1b00f5360>, <ast.Name object at 0x7da1b00f5ae0>]]]]
<ast.Tuple object at 0x7da1b00f5990> assign[=] call[name[check_add_particles], parameter[name[st], call[name[guess]][<ast.Slice object at 0x7da1b00f54b0>]]]
return[tuple[[<ast.Name object at 0x7da1b00f4f40>, <ast.Name object at 0x7da1b00f4100>]]] | keyword[def] identifier[add_missing_particles] ( identifier[st] , identifier[rad] = literal[string] , identifier[tries] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[rad] == literal[string] :
identifier[rad] = identifier[guess_add_radii] ( identifier[st] )
identifier[guess] , identifier[npart] = identifier[feature_guess] ( identifier[st] , identifier[rad] ,** identifier[kwargs] )
identifier[tries] = identifier[np] . identifier[min] ([ identifier[tries] , identifier[npart] ])
identifier[accepts] , identifier[new_poses] = identifier[check_add_particles] (
identifier[st] , identifier[guess] [: identifier[tries] ], identifier[rad] = identifier[rad] ,** identifier[kwargs] )
keyword[return] identifier[accepts] , identifier[new_poses] | def add_missing_particles(st, rad='calc', tries=50, **kwargs):
"""
Attempts to add missing particles to the state.
Operates by:
(1) featuring the difference image using feature_guess,
(2) attempting to add the featured positions using check_add_particles.
Parameters
----------
st : :class:`peri.states.State`
The state to check adding particles to.
rad : Float or 'calc', optional
The radius of the newly-added particles and of the feature size for
featuring. Default is 'calc', which uses the median of the state's
current radii.
tries : Int, optional
How many particles to attempt to add. Only tries to add the first
``tries`` particles, in order of mass. Default is 50.
Other Parameters
----------------
invert : Bool, optional
Whether to invert the image. Default is ``True``, i.e. dark particles
minmass : Float or None, optionals
The minimum mass/masscut of a particle. Default is ``None``=calcualted
by ``feature_guess``.
use_tp : Bool, optional
Whether to use trackpy in feature_guess. Default is False, since
trackpy cuts out particles at the edge.
do_opt : Bool, optional
Whether to optimize the particle position before checking if it
should be kept. Default is True (optimizes position).
im_change_frac : Float, optional
How good the change in error needs to be relative to the change
in the difference image. Default is 0.2; i.e. if the error does
not decrease by 20% of the change in the difference image, do
not add the particle.
min_derr : Float or '3sig', optional
The minimal improvement in error to add a particle. Default
is ``'3sig' = 3*st.sigma``.
Returns
-------
accepts : Int
The number of added particles
new_poses : [N,3] list
List of the positions of the added particles. If ``do_opt==True``,
then these positions will differ from the input 'guess'.
"""
if rad == 'calc':
rad = guess_add_radii(st) # depends on [control=['if'], data=['rad']]
(guess, npart) = feature_guess(st, rad, **kwargs)
tries = np.min([tries, npart])
(accepts, new_poses) = check_add_particles(st, guess[:tries], rad=rad, **kwargs)
return (accepts, new_poses) |
def unbind(meta, name=None, dynamo_name=None) -> None:
"""Unconditionally remove any columns or indexes bound to the given name or dynamo_name.
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key=email)
for dynamo_name in ("id", "e", "by_email"):
bloop.models.unbind(User.Meta, dynamo_name=dynamo_name)
assert not User.Meta.columns
assert not User.Meta.indexes
assert not User.Meta.keys
.. warning::
This method does not pre- or post- validate the model with the requested changes. You are responsible
for ensuring the model still has a hash key, that required columns exist for each index, etc.
:param meta: model.Meta to remove the columns or indexes from
:param name: column or index name to unbind by. Default is None.
:param dynamo_name: column or index name to unbind by. Default is None.
"""
if name is not None:
columns = {x for x in meta.columns if x.name == name}
indexes = {x for x in meta.indexes if x.name == name}
elif dynamo_name is not None:
columns = {x for x in meta.columns if x.dynamo_name == dynamo_name}
indexes = {x for x in meta.indexes if x.dynamo_name == dynamo_name}
else:
raise RuntimeError("Must provide name= or dynamo_name= to unbind from meta")
# Nothing in bloop should allow name or dynamo_name
# collisions to exist, so this is either a bug or
# the user manually hacked up meta.
assert len(columns) <= 1
assert len(indexes) <= 1
assert not (columns and indexes)
if columns:
[column] = columns
meta.columns.remove(column)
# If these don't line up, there's likely a bug in bloop
# or the user manually hacked up columns_by_name
expect_same = meta.columns_by_name[column.name]
assert expect_same is column
meta.columns_by_name.pop(column.name)
if column in meta.keys:
meta.keys.remove(column)
if meta.hash_key is column:
meta.hash_key = None
if meta.range_key is column:
meta.range_key = None
delattr(meta.model, column.name)
if indexes:
[index] = indexes
meta.indexes.remove(index)
if index in meta.gsis:
meta.gsis.remove(index)
if index in meta.lsis:
meta.lsis.remove(index)
delattr(meta.model, index.name) | def function[unbind, parameter[meta, name, dynamo_name]]:
constant[Unconditionally remove any columns or indexes bound to the given name or dynamo_name.
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key=email)
for dynamo_name in ("id", "e", "by_email"):
bloop.models.unbind(User.Meta, dynamo_name=dynamo_name)
assert not User.Meta.columns
assert not User.Meta.indexes
assert not User.Meta.keys
.. warning::
This method does not pre- or post- validate the model with the requested changes. You are responsible
for ensuring the model still has a hash key, that required columns exist for each index, etc.
:param meta: model.Meta to remove the columns or indexes from
:param name: column or index name to unbind by. Default is None.
:param dynamo_name: column or index name to unbind by. Default is None.
]
if compare[name[name] is_not constant[None]] begin[:]
variable[columns] assign[=] <ast.SetComp object at 0x7da1b0f28550>
variable[indexes] assign[=] <ast.SetComp object at 0x7da1b0f29840>
assert[compare[call[name[len], parameter[name[columns]]] less_or_equal[<=] constant[1]]]
assert[compare[call[name[len], parameter[name[indexes]]] less_or_equal[<=] constant[1]]]
assert[<ast.UnaryOp object at 0x7da1b0fade10>]
if name[columns] begin[:]
<ast.List object at 0x7da1b0fae620> assign[=] name[columns]
call[name[meta].columns.remove, parameter[name[column]]]
variable[expect_same] assign[=] call[name[meta].columns_by_name][name[column].name]
assert[compare[name[expect_same] is name[column]]]
call[name[meta].columns_by_name.pop, parameter[name[column].name]]
if compare[name[column] in name[meta].keys] begin[:]
call[name[meta].keys.remove, parameter[name[column]]]
if compare[name[meta].hash_key is name[column]] begin[:]
name[meta].hash_key assign[=] constant[None]
if compare[name[meta].range_key is name[column]] begin[:]
name[meta].range_key assign[=] constant[None]
call[name[delattr], parameter[name[meta].model, name[column].name]]
if name[indexes] begin[:]
<ast.List object at 0x7da1b0fac340> assign[=] name[indexes]
call[name[meta].indexes.remove, parameter[name[index]]]
if compare[name[index] in name[meta].gsis] begin[:]
call[name[meta].gsis.remove, parameter[name[index]]]
if compare[name[index] in name[meta].lsis] begin[:]
call[name[meta].lsis.remove, parameter[name[index]]]
call[name[delattr], parameter[name[meta].model, name[index].name]] | keyword[def] identifier[unbind] ( identifier[meta] , identifier[name] = keyword[None] , identifier[dynamo_name] = keyword[None] )-> keyword[None] :
literal[string]
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[columns] ={ identifier[x] keyword[for] identifier[x] keyword[in] identifier[meta] . identifier[columns] keyword[if] identifier[x] . identifier[name] == identifier[name] }
identifier[indexes] ={ identifier[x] keyword[for] identifier[x] keyword[in] identifier[meta] . identifier[indexes] keyword[if] identifier[x] . identifier[name] == identifier[name] }
keyword[elif] identifier[dynamo_name] keyword[is] keyword[not] keyword[None] :
identifier[columns] ={ identifier[x] keyword[for] identifier[x] keyword[in] identifier[meta] . identifier[columns] keyword[if] identifier[x] . identifier[dynamo_name] == identifier[dynamo_name] }
identifier[indexes] ={ identifier[x] keyword[for] identifier[x] keyword[in] identifier[meta] . identifier[indexes] keyword[if] identifier[x] . identifier[dynamo_name] == identifier[dynamo_name] }
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[assert] identifier[len] ( identifier[columns] )<= literal[int]
keyword[assert] identifier[len] ( identifier[indexes] )<= literal[int]
keyword[assert] keyword[not] ( identifier[columns] keyword[and] identifier[indexes] )
keyword[if] identifier[columns] :
[ identifier[column] ]= identifier[columns]
identifier[meta] . identifier[columns] . identifier[remove] ( identifier[column] )
identifier[expect_same] = identifier[meta] . identifier[columns_by_name] [ identifier[column] . identifier[name] ]
keyword[assert] identifier[expect_same] keyword[is] identifier[column]
identifier[meta] . identifier[columns_by_name] . identifier[pop] ( identifier[column] . identifier[name] )
keyword[if] identifier[column] keyword[in] identifier[meta] . identifier[keys] :
identifier[meta] . identifier[keys] . identifier[remove] ( identifier[column] )
keyword[if] identifier[meta] . identifier[hash_key] keyword[is] identifier[column] :
identifier[meta] . identifier[hash_key] = keyword[None]
keyword[if] identifier[meta] . identifier[range_key] keyword[is] identifier[column] :
identifier[meta] . identifier[range_key] = keyword[None]
identifier[delattr] ( identifier[meta] . identifier[model] , identifier[column] . identifier[name] )
keyword[if] identifier[indexes] :
[ identifier[index] ]= identifier[indexes]
identifier[meta] . identifier[indexes] . identifier[remove] ( identifier[index] )
keyword[if] identifier[index] keyword[in] identifier[meta] . identifier[gsis] :
identifier[meta] . identifier[gsis] . identifier[remove] ( identifier[index] )
keyword[if] identifier[index] keyword[in] identifier[meta] . identifier[lsis] :
identifier[meta] . identifier[lsis] . identifier[remove] ( identifier[index] )
identifier[delattr] ( identifier[meta] . identifier[model] , identifier[index] . identifier[name] ) | def unbind(meta, name=None, dynamo_name=None) -> None:
"""Unconditionally remove any columns or indexes bound to the given name or dynamo_name.
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key=email)
for dynamo_name in ("id", "e", "by_email"):
bloop.models.unbind(User.Meta, dynamo_name=dynamo_name)
assert not User.Meta.columns
assert not User.Meta.indexes
assert not User.Meta.keys
.. warning::
This method does not pre- or post- validate the model with the requested changes. You are responsible
for ensuring the model still has a hash key, that required columns exist for each index, etc.
:param meta: model.Meta to remove the columns or indexes from
:param name: column or index name to unbind by. Default is None.
:param dynamo_name: column or index name to unbind by. Default is None.
"""
if name is not None:
columns = {x for x in meta.columns if x.name == name}
indexes = {x for x in meta.indexes if x.name == name} # depends on [control=['if'], data=['name']]
elif dynamo_name is not None:
columns = {x for x in meta.columns if x.dynamo_name == dynamo_name}
indexes = {x for x in meta.indexes if x.dynamo_name == dynamo_name} # depends on [control=['if'], data=['dynamo_name']]
else:
raise RuntimeError('Must provide name= or dynamo_name= to unbind from meta')
# Nothing in bloop should allow name or dynamo_name
# collisions to exist, so this is either a bug or
# the user manually hacked up meta.
assert len(columns) <= 1
assert len(indexes) <= 1
assert not (columns and indexes)
if columns:
[column] = columns
meta.columns.remove(column)
# If these don't line up, there's likely a bug in bloop
# or the user manually hacked up columns_by_name
expect_same = meta.columns_by_name[column.name]
assert expect_same is column
meta.columns_by_name.pop(column.name)
if column in meta.keys:
meta.keys.remove(column) # depends on [control=['if'], data=['column']]
if meta.hash_key is column:
meta.hash_key = None # depends on [control=['if'], data=[]]
if meta.range_key is column:
meta.range_key = None # depends on [control=['if'], data=[]]
delattr(meta.model, column.name) # depends on [control=['if'], data=[]]
if indexes:
[index] = indexes
meta.indexes.remove(index)
if index in meta.gsis:
meta.gsis.remove(index) # depends on [control=['if'], data=['index']]
if index in meta.lsis:
meta.lsis.remove(index) # depends on [control=['if'], data=['index']]
delattr(meta.model, index.name) # depends on [control=['if'], data=[]] |
def asDictionary(self):
""" returns object as dictionary """
template = {
"type" : "uniqueValue",
"field1" : self._field1,
"field2" : self._field2,
"field3" : self._field3,
"fieldDelimiter" : self._fieldDelimiter,
"defaultSymbol" : self._defaultSymbol.asDictionary,
"defaultLabel" : self._defaultLabel,
"uniqueValueInfos" : self._uniqueValueInfos,
"rotationType": self._rotationType,
"rotationExpression": self._rotationExpression
}
return template | def function[asDictionary, parameter[self]]:
constant[ returns object as dictionary ]
variable[template] assign[=] dictionary[[<ast.Constant object at 0x7da1b12286a0>, <ast.Constant object at 0x7da1b1228970>, <ast.Constant object at 0x7da1b1228160>, <ast.Constant object at 0x7da1b12282b0>, <ast.Constant object at 0x7da1b1228520>, <ast.Constant object at 0x7da1b1228910>, <ast.Constant object at 0x7da1b1228400>, <ast.Constant object at 0x7da1b12b9de0>, <ast.Constant object at 0x7da1b12bb3d0>, <ast.Constant object at 0x7da1b12b9bd0>], [<ast.Constant object at 0x7da1b12bb610>, <ast.Attribute object at 0x7da1b12b98a0>, <ast.Attribute object at 0x7da1b12b9840>, <ast.Attribute object at 0x7da1b12b8d30>, <ast.Attribute object at 0x7da1b12b92a0>, <ast.Attribute object at 0x7da1b12ba4a0>, <ast.Attribute object at 0x7da1b12b8f70>, <ast.Attribute object at 0x7da1b12ba950>, <ast.Attribute object at 0x7da1b12b95a0>, <ast.Attribute object at 0x7da1b12ba5c0>]]
return[name[template]] | keyword[def] identifier[asDictionary] ( identifier[self] ):
literal[string]
identifier[template] ={
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[_field1] ,
literal[string] : identifier[self] . identifier[_field2] ,
literal[string] : identifier[self] . identifier[_field3] ,
literal[string] : identifier[self] . identifier[_fieldDelimiter] ,
literal[string] : identifier[self] . identifier[_defaultSymbol] . identifier[asDictionary] ,
literal[string] : identifier[self] . identifier[_defaultLabel] ,
literal[string] : identifier[self] . identifier[_uniqueValueInfos] ,
literal[string] : identifier[self] . identifier[_rotationType] ,
literal[string] : identifier[self] . identifier[_rotationExpression]
}
keyword[return] identifier[template] | def asDictionary(self):
""" returns object as dictionary """
template = {'type': 'uniqueValue', 'field1': self._field1, 'field2': self._field2, 'field3': self._field3, 'fieldDelimiter': self._fieldDelimiter, 'defaultSymbol': self._defaultSymbol.asDictionary, 'defaultLabel': self._defaultLabel, 'uniqueValueInfos': self._uniqueValueInfos, 'rotationType': self._rotationType, 'rotationExpression': self._rotationExpression}
return template |
def load_remote_system(url, format=None):
'''Load a system from the remote location specified by *url*.
**Example**
::
load_remote_system('https://raw.github.com/chemlab/chemlab-testdata/master/naclwater.gro')
'''
filename, headers = urlretrieve(url)
return load_system(filename, format=format) | def function[load_remote_system, parameter[url, format]]:
constant[Load a system from the remote location specified by *url*.
**Example**
::
load_remote_system('https://raw.github.com/chemlab/chemlab-testdata/master/naclwater.gro')
]
<ast.Tuple object at 0x7da2054a7be0> assign[=] call[name[urlretrieve], parameter[name[url]]]
return[call[name[load_system], parameter[name[filename]]]] | keyword[def] identifier[load_remote_system] ( identifier[url] , identifier[format] = keyword[None] ):
literal[string]
identifier[filename] , identifier[headers] = identifier[urlretrieve] ( identifier[url] )
keyword[return] identifier[load_system] ( identifier[filename] , identifier[format] = identifier[format] ) | def load_remote_system(url, format=None):
"""Load a system from the remote location specified by *url*.
**Example**
::
load_remote_system('https://raw.github.com/chemlab/chemlab-testdata/master/naclwater.gro')
"""
(filename, headers) = urlretrieve(url)
return load_system(filename, format=format) |
def plot(self,*args,**kwargs):
"""
NAME:
plot
PURPOSE:
plot the angles vs. each other, to check whether the isochrone
approximation is good
INPUT:
Either:
a) R,vR,vT,z,vz:
floats: phase-space value for single object
b) Orbit instance
type= ('araz') type of plot to make
a) 'araz': az vs. ar, with color-coded aphi
b) 'araphi': aphi vs. ar, with color-coded az
c) 'azaphi': aphi vs. az, with color-coded ar
d) 'jr': cumulative average of jr with time, to assess convergence
e) 'lz': same as 'jr' but for lz
f) 'jz': same as 'jr' but for jz
deperiod= (False), if True, de-period the angles
downsample= (False) if True, downsample what's plotted to 400 points
+plot kwargs
OUTPUT:
plot to output
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
#Kwargs
type= kwargs.pop('type','araz')
deperiod= kwargs.pop('deperiod',False)
downsample= kwargs.pop('downsample',False)
#Parse input
R,vR,vT,z,vz,phi= self._parse_args('a' in type,False,*args)
#Use self._aAI to calculate the actions and angles in the isochrone potential
acfs= self._aAI._actionsFreqsAngles(R.flatten(),
vR.flatten(),
vT.flatten(),
z.flatten(),
vz.flatten(),
phi.flatten())
if type == 'jr' or type == 'lz' or type == 'jz':
jrI= nu.reshape(acfs[0],R.shape)[:,:-1]
jzI= nu.reshape(acfs[2],R.shape)[:,:-1]
anglerI= nu.reshape(acfs[6],R.shape)
anglezI= nu.reshape(acfs[8],R.shape)
danglerI= ((nu.roll(anglerI,-1,axis=1)-anglerI) % _TWOPI)[:,:-1]
danglezI= ((nu.roll(anglezI,-1,axis=1)-anglezI) % _TWOPI)[:,:-1]
if True:
sumFunc= nu.cumsum
jr= sumFunc(jrI*danglerI,axis=1)/sumFunc(danglerI,axis=1)
jz= sumFunc(jzI*danglezI,axis=1)/sumFunc(danglezI,axis=1)
lzI= nu.reshape(acfs[1],R.shape)[:,:-1]
anglephiI= nu.reshape(acfs[7],R.shape)
danglephiI= ((nu.roll(anglephiI,-1,axis=1)-anglephiI) % _TWOPI)[:,:-1]
lz= sumFunc(lzI*danglephiI,axis=1)/sumFunc(danglephiI,axis=1)
from galpy.orbit import Orbit
if isinstance(args[0],Orbit) and hasattr(args[0]._orb,'t'):
ts= args[0]._orb.t[:-1]
else:
ts= self._tsJ[:-1]
if type == 'jr':
if downsample:
plotx= ts[::int(round(self._ntintJ//400))]
ploty= jr[0,::int(round(self._ntintJ//400))]/jr[0,-1]
plotz= anglerI[0,:-1:int(round(self._ntintJ//400))]
else:
plotx= ts
ploty= jr[0,:]/jr[0,-1]
plotz= anglerI[0,:-1]
bovy_plot.bovy_plot(plotx,ploty,
c=plotz,
s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$t$',
ylabel=r'$J^A_R / \langle J^A_R \rangle$',
clabel=r'$\theta^A_R$',
vmin=0.,vmax=2.*nu.pi,
crange=[0.,2.*nu.pi],
colorbar=True,
**kwargs)
elif type == 'lz':
if downsample:
plotx= ts[::int(round(self._ntintJ//400))]
ploty= lz[0,::int(round(self._ntintJ//400))]/lz[0,-1]
plotz= anglephiI[0,:-1:int(round(self._ntintJ//400))]
else:
plotx= ts
ploty= lz[0,:]/lz[0,-1]
plotz= anglephiI[0,:-1]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$t$',
ylabel=r'$L^A_Z / \langle L^A_Z \rangle$',
clabel=r'$\theta^A_\phi$',
vmin=0.,vmax=2.*nu.pi,
crange=[0.,2.*nu.pi],
colorbar=True,
**kwargs)
elif type == 'jz':
if downsample:
plotx= ts[::int(round(self._ntintJ//400))]
ploty= jz[0,::int(round(self._ntintJ//400))]/jz[0,-1]
plotz= anglezI[0,:-1:int(round(self._ntintJ//400))]
else:
plotx= ts
ploty= jz[0,:]/jz[0,-1]
plotz= anglezI[0,:-1]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$t$',
ylabel=r'$J^A_Z / \langle J^A_Z \rangle$',
clabel=r'$\theta^A_Z$',
vmin=0.,vmax=2.*nu.pi,
crange=[0.,2.*nu.pi],
colorbar=True,
**kwargs)
else:
if deperiod:
if 'ar' in type:
angleRT= dePeriod(nu.reshape(acfs[6],R.shape))
else:
angleRT= nu.reshape(acfs[6],R.shape)
if 'aphi' in type:
acfs7= nu.reshape(acfs[7],R.shape)
negFreqIndx= nu.median(acfs7-nu.roll(acfs7,1,axis=1),axis=1) < 0. #anglephi is decreasing
anglephiT= nu.empty(acfs7.shape)
anglephiT[negFreqIndx,:]= dePeriod(_TWOPI-acfs7[negFreqIndx,:])
negFreqPhi= nu.zeros(R.shape[0],dtype='bool')
negFreqPhi[negFreqIndx]= True
anglephiT[True^negFreqIndx,:]= dePeriod(acfs7[True^negFreqIndx,:])
else:
anglephiT= nu.reshape(acfs[7],R.shape)
if 'az' in type:
angleZT= dePeriod(nu.reshape(acfs[8],R.shape))
else:
angleZT= nu.reshape(acfs[8],R.shape)
xrange= None
yrange= None
else:
angleRT= nu.reshape(acfs[6],R.shape)
anglephiT= nu.reshape(acfs[7],R.shape)
angleZT= nu.reshape(acfs[8],R.shape)
xrange= [-0.5,2.*nu.pi+0.5]
yrange= [-0.5,2.*nu.pi+0.5]
vmin, vmax= 0.,2.*nu.pi
crange= [vmin,vmax]
if type == 'araz':
if downsample:
plotx= angleRT[0,::int(round(self._ntintJ//400))]
ploty= angleZT[0,::int(round(self._ntintJ//400))]
plotz= anglephiT[0,::int(round(self._ntintJ//400))]
else:
plotx= angleRT[0,:]
ploty= angleZT[0,:]
plotz= anglephiT[0,:]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$\theta^A_R$',
ylabel=r'$\theta^A_Z$',
clabel=r'$\theta^A_\phi$',
xrange=xrange,yrange=yrange,
vmin=vmin,vmax=vmax,
crange=crange,
colorbar=True,
**kwargs)
elif type == 'araphi':
if downsample:
plotx= angleRT[0,::int(round(self._ntintJ//400))]
ploty= anglephiT[0,::int(round(self._ntintJ//400))]
plotz= angleZT[0,::int(round(self._ntintJ//400))]
else:
plotx= angleRT[0,:]
ploty= anglephiT[0,:]
plotz= angleZT[0,:]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$\theta^A_R$',
clabel=r'$\theta^A_Z$',
ylabel=r'$\theta^A_\phi$',
xrange=xrange,yrange=yrange,
vmin=vmin,vmax=vmax,
crange=crange,
colorbar=True,
**kwargs)
elif type == 'azaphi':
if downsample:
plotx= angleZT[0,::int(round(self._ntintJ//400))]
ploty= anglephiT[0,::int(round(self._ntintJ//400))]
plotz= angleRT[0,::int(round(self._ntintJ//400))]
else:
plotx= angleZT[0,:]
ploty= anglephiT[0,:]
plotz= angleRT[0,:]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
clabel=r'$\theta^A_R$',
xlabel=r'$\theta^A_Z$',
ylabel=r'$\theta^A_\phi$',
xrange=xrange,yrange=yrange,
vmin=vmin,vmax=vmax,
crange=crange,
colorbar=True,
**kwargs)
return None | def function[plot, parameter[self]]:
constant[
NAME:
plot
PURPOSE:
plot the angles vs. each other, to check whether the isochrone
approximation is good
INPUT:
Either:
a) R,vR,vT,z,vz:
floats: phase-space value for single object
b) Orbit instance
type= ('araz') type of plot to make
a) 'araz': az vs. ar, with color-coded aphi
b) 'araphi': aphi vs. ar, with color-coded az
c) 'azaphi': aphi vs. az, with color-coded ar
d) 'jr': cumulative average of jr with time, to assess convergence
e) 'lz': same as 'jr' but for lz
f) 'jz': same as 'jr' but for jz
deperiod= (False), if True, de-period the angles
downsample= (False) if True, downsample what's plotted to 400 points
+plot kwargs
OUTPUT:
plot to output
HISTORY:
2013-09-10 - Written - Bovy (IAS)
]
variable[type] assign[=] call[name[kwargs].pop, parameter[constant[type], constant[araz]]]
variable[deperiod] assign[=] call[name[kwargs].pop, parameter[constant[deperiod], constant[False]]]
variable[downsample] assign[=] call[name[kwargs].pop, parameter[constant[downsample], constant[False]]]
<ast.Tuple object at 0x7da1b0dc7c40> assign[=] call[name[self]._parse_args, parameter[compare[constant[a] in name[type]], constant[False], <ast.Starred object at 0x7da1b0dc7910>]]
variable[acfs] assign[=] call[name[self]._aAI._actionsFreqsAngles, parameter[call[name[R].flatten, parameter[]], call[name[vR].flatten, parameter[]], call[name[vT].flatten, parameter[]], call[name[z].flatten, parameter[]], call[name[vz].flatten, parameter[]], call[name[phi].flatten, parameter[]]]]
if <ast.BoolOp object at 0x7da1b0dc74f0> begin[:]
variable[jrI] assign[=] call[call[name[nu].reshape, parameter[call[name[acfs]][constant[0]], name[R].shape]]][tuple[[<ast.Slice object at 0x7da1b0dc6b00>, <ast.Slice object at 0x7da1b0dc6aa0>]]]
variable[jzI] assign[=] call[call[name[nu].reshape, parameter[call[name[acfs]][constant[2]], name[R].shape]]][tuple[[<ast.Slice object at 0x7da1b0dc6770>, <ast.Slice object at 0x7da1b0dc6800>]]]
variable[anglerI] assign[=] call[name[nu].reshape, parameter[call[name[acfs]][constant[6]], name[R].shape]]
variable[anglezI] assign[=] call[name[nu].reshape, parameter[call[name[acfs]][constant[8]], name[R].shape]]
variable[danglerI] assign[=] call[binary_operation[binary_operation[call[name[nu].roll, parameter[name[anglerI], <ast.UnaryOp object at 0x7da1b0dc6170>]] - name[anglerI]] <ast.Mod object at 0x7da2590d6920> name[_TWOPI]]][tuple[[<ast.Slice object at 0x7da1b0dc5f60>, <ast.Slice object at 0x7da1b0dc5f90>]]]
variable[danglezI] assign[=] call[binary_operation[binary_operation[call[name[nu].roll, parameter[name[anglezI], <ast.UnaryOp object at 0x7da1b0dc5ea0>]] - name[anglezI]] <ast.Mod object at 0x7da2590d6920> name[_TWOPI]]][tuple[[<ast.Slice object at 0x7da1b0dc5a50>, <ast.Slice object at 0x7da1b0dc5c30>]]]
if constant[True] begin[:]
variable[sumFunc] assign[=] name[nu].cumsum
variable[jr] assign[=] binary_operation[call[name[sumFunc], parameter[binary_operation[name[jrI] * name[danglerI]]]] / call[name[sumFunc], parameter[name[danglerI]]]]
variable[jz] assign[=] binary_operation[call[name[sumFunc], parameter[binary_operation[name[jzI] * name[danglezI]]]] / call[name[sumFunc], parameter[name[danglezI]]]]
variable[lzI] assign[=] call[call[name[nu].reshape, parameter[call[name[acfs]][constant[1]], name[R].shape]]][tuple[[<ast.Slice object at 0x7da1b0dc7250>, <ast.Slice object at 0x7da1b0dc57b0>]]]
variable[anglephiI] assign[=] call[name[nu].reshape, parameter[call[name[acfs]][constant[7]], name[R].shape]]
variable[danglephiI] assign[=] call[binary_operation[binary_operation[call[name[nu].roll, parameter[name[anglephiI], <ast.UnaryOp object at 0x7da1b0dc5390>]] - name[anglephiI]] <ast.Mod object at 0x7da2590d6920> name[_TWOPI]]][tuple[[<ast.Slice object at 0x7da1b0dc5360>, <ast.Slice object at 0x7da1b0dc5210>]]]
variable[lz] assign[=] binary_operation[call[name[sumFunc], parameter[binary_operation[name[lzI] * name[danglephiI]]]] / call[name[sumFunc], parameter[name[danglephiI]]]]
from relative_module[galpy.orbit] import module[Orbit]
if <ast.BoolOp object at 0x7da1b0dc4e20> begin[:]
variable[ts] assign[=] call[call[name[args]][constant[0]]._orb.t][<ast.Slice object at 0x7da1b0dc4970>]
if compare[name[type] equal[==] constant[jr]] begin[:]
if name[downsample] begin[:]
variable[plotx] assign[=] call[name[ts]][<ast.Slice object at 0x7da1b0dc4700>]
variable[ploty] assign[=] binary_operation[call[name[jr]][tuple[[<ast.Constant object at 0x7da18fe932e0>, <ast.Slice object at 0x7da18fe91f00>]]] / call[name[jr]][tuple[[<ast.Constant object at 0x7da18fe92f50>, <ast.UnaryOp object at 0x7da18fe91930>]]]]
variable[plotz] assign[=] call[name[anglerI]][tuple[[<ast.Constant object at 0x7da18fe92b90>, <ast.Slice object at 0x7da18fe92ef0>]]]
call[name[bovy_plot].bovy_plot, parameter[name[plotx], name[ploty]]]
return[constant[None]] | keyword[def] identifier[plot] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[type] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[deperiod] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[downsample] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[R] , identifier[vR] , identifier[vT] , identifier[z] , identifier[vz] , identifier[phi] = identifier[self] . identifier[_parse_args] ( literal[string] keyword[in] identifier[type] , keyword[False] ,* identifier[args] )
identifier[acfs] = identifier[self] . identifier[_aAI] . identifier[_actionsFreqsAngles] ( identifier[R] . identifier[flatten] (),
identifier[vR] . identifier[flatten] (),
identifier[vT] . identifier[flatten] (),
identifier[z] . identifier[flatten] (),
identifier[vz] . identifier[flatten] (),
identifier[phi] . identifier[flatten] ())
keyword[if] identifier[type] == literal[string] keyword[or] identifier[type] == literal[string] keyword[or] identifier[type] == literal[string] :
identifier[jrI] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )[:,:- literal[int] ]
identifier[jzI] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )[:,:- literal[int] ]
identifier[anglerI] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[anglezI] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[danglerI] =(( identifier[nu] . identifier[roll] ( identifier[anglerI] ,- literal[int] , identifier[axis] = literal[int] )- identifier[anglerI] )% identifier[_TWOPI] )[:,:- literal[int] ]
identifier[danglezI] =(( identifier[nu] . identifier[roll] ( identifier[anglezI] ,- literal[int] , identifier[axis] = literal[int] )- identifier[anglezI] )% identifier[_TWOPI] )[:,:- literal[int] ]
keyword[if] keyword[True] :
identifier[sumFunc] = identifier[nu] . identifier[cumsum]
identifier[jr] = identifier[sumFunc] ( identifier[jrI] * identifier[danglerI] , identifier[axis] = literal[int] )/ identifier[sumFunc] ( identifier[danglerI] , identifier[axis] = literal[int] )
identifier[jz] = identifier[sumFunc] ( identifier[jzI] * identifier[danglezI] , identifier[axis] = literal[int] )/ identifier[sumFunc] ( identifier[danglezI] , identifier[axis] = literal[int] )
identifier[lzI] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )[:,:- literal[int] ]
identifier[anglephiI] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[danglephiI] =(( identifier[nu] . identifier[roll] ( identifier[anglephiI] ,- literal[int] , identifier[axis] = literal[int] )- identifier[anglephiI] )% identifier[_TWOPI] )[:,:- literal[int] ]
identifier[lz] = identifier[sumFunc] ( identifier[lzI] * identifier[danglephiI] , identifier[axis] = literal[int] )/ identifier[sumFunc] ( identifier[danglephiI] , identifier[axis] = literal[int] )
keyword[from] identifier[galpy] . identifier[orbit] keyword[import] identifier[Orbit]
keyword[if] identifier[isinstance] ( identifier[args] [ literal[int] ], identifier[Orbit] ) keyword[and] identifier[hasattr] ( identifier[args] [ literal[int] ]. identifier[_orb] , literal[string] ):
identifier[ts] = identifier[args] [ literal[int] ]. identifier[_orb] . identifier[t] [:- literal[int] ]
keyword[else] :
identifier[ts] = identifier[self] . identifier[_tsJ] [:- literal[int] ]
keyword[if] identifier[type] == literal[string] :
keyword[if] identifier[downsample] :
identifier[plotx] = identifier[ts] [:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[ploty] = identifier[jr] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]/ identifier[jr] [ literal[int] ,- literal[int] ]
identifier[plotz] = identifier[anglerI] [ literal[int] ,:- literal[int] : identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
keyword[else] :
identifier[plotx] = identifier[ts]
identifier[ploty] = identifier[jr] [ literal[int] ,:]/ identifier[jr] [ literal[int] ,- literal[int] ]
identifier[plotz] = identifier[anglerI] [ literal[int] ,:- literal[int] ]
identifier[bovy_plot] . identifier[bovy_plot] ( identifier[plotx] , identifier[ploty] ,
identifier[c] = identifier[plotz] ,
identifier[s] = literal[int] ,
identifier[scatter] = keyword[True] ,
identifier[edgecolor] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[clabel] = literal[string] ,
identifier[vmin] = literal[int] , identifier[vmax] = literal[int] * identifier[nu] . identifier[pi] ,
identifier[crange] =[ literal[int] , literal[int] * identifier[nu] . identifier[pi] ],
identifier[colorbar] = keyword[True] ,
** identifier[kwargs] )
keyword[elif] identifier[type] == literal[string] :
keyword[if] identifier[downsample] :
identifier[plotx] = identifier[ts] [:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[ploty] = identifier[lz] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]/ identifier[lz] [ literal[int] ,- literal[int] ]
identifier[plotz] = identifier[anglephiI] [ literal[int] ,:- literal[int] : identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
keyword[else] :
identifier[plotx] = identifier[ts]
identifier[ploty] = identifier[lz] [ literal[int] ,:]/ identifier[lz] [ literal[int] ,- literal[int] ]
identifier[plotz] = identifier[anglephiI] [ literal[int] ,:- literal[int] ]
identifier[bovy_plot] . identifier[bovy_plot] ( identifier[plotx] , identifier[ploty] , identifier[c] = identifier[plotz] , identifier[s] = literal[int] ,
identifier[scatter] = keyword[True] ,
identifier[edgecolor] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[clabel] = literal[string] ,
identifier[vmin] = literal[int] , identifier[vmax] = literal[int] * identifier[nu] . identifier[pi] ,
identifier[crange] =[ literal[int] , literal[int] * identifier[nu] . identifier[pi] ],
identifier[colorbar] = keyword[True] ,
** identifier[kwargs] )
keyword[elif] identifier[type] == literal[string] :
keyword[if] identifier[downsample] :
identifier[plotx] = identifier[ts] [:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[ploty] = identifier[jz] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]/ identifier[jz] [ literal[int] ,- literal[int] ]
identifier[plotz] = identifier[anglezI] [ literal[int] ,:- literal[int] : identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
keyword[else] :
identifier[plotx] = identifier[ts]
identifier[ploty] = identifier[jz] [ literal[int] ,:]/ identifier[jz] [ literal[int] ,- literal[int] ]
identifier[plotz] = identifier[anglezI] [ literal[int] ,:- literal[int] ]
identifier[bovy_plot] . identifier[bovy_plot] ( identifier[plotx] , identifier[ploty] , identifier[c] = identifier[plotz] , identifier[s] = literal[int] ,
identifier[scatter] = keyword[True] ,
identifier[edgecolor] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[clabel] = literal[string] ,
identifier[vmin] = literal[int] , identifier[vmax] = literal[int] * identifier[nu] . identifier[pi] ,
identifier[crange] =[ literal[int] , literal[int] * identifier[nu] . identifier[pi] ],
identifier[colorbar] = keyword[True] ,
** identifier[kwargs] )
keyword[else] :
keyword[if] identifier[deperiod] :
keyword[if] literal[string] keyword[in] identifier[type] :
identifier[angleRT] = identifier[dePeriod] ( identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] ))
keyword[else] :
identifier[angleRT] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
keyword[if] literal[string] keyword[in] identifier[type] :
identifier[acfs7] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[negFreqIndx] = identifier[nu] . identifier[median] ( identifier[acfs7] - identifier[nu] . identifier[roll] ( identifier[acfs7] , literal[int] , identifier[axis] = literal[int] ), identifier[axis] = literal[int] )< literal[int]
identifier[anglephiT] = identifier[nu] . identifier[empty] ( identifier[acfs7] . identifier[shape] )
identifier[anglephiT] [ identifier[negFreqIndx] ,:]= identifier[dePeriod] ( identifier[_TWOPI] - identifier[acfs7] [ identifier[negFreqIndx] ,:])
identifier[negFreqPhi] = identifier[nu] . identifier[zeros] ( identifier[R] . identifier[shape] [ literal[int] ], identifier[dtype] = literal[string] )
identifier[negFreqPhi] [ identifier[negFreqIndx] ]= keyword[True]
identifier[anglephiT] [ keyword[True] ^ identifier[negFreqIndx] ,:]= identifier[dePeriod] ( identifier[acfs7] [ keyword[True] ^ identifier[negFreqIndx] ,:])
keyword[else] :
identifier[anglephiT] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
keyword[if] literal[string] keyword[in] identifier[type] :
identifier[angleZT] = identifier[dePeriod] ( identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] ))
keyword[else] :
identifier[angleZT] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[xrange] = keyword[None]
identifier[yrange] = keyword[None]
keyword[else] :
identifier[angleRT] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[anglephiT] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[angleZT] = identifier[nu] . identifier[reshape] ( identifier[acfs] [ literal[int] ], identifier[R] . identifier[shape] )
identifier[xrange] =[- literal[int] , literal[int] * identifier[nu] . identifier[pi] + literal[int] ]
identifier[yrange] =[- literal[int] , literal[int] * identifier[nu] . identifier[pi] + literal[int] ]
identifier[vmin] , identifier[vmax] = literal[int] , literal[int] * identifier[nu] . identifier[pi]
identifier[crange] =[ identifier[vmin] , identifier[vmax] ]
keyword[if] identifier[type] == literal[string] :
keyword[if] identifier[downsample] :
identifier[plotx] = identifier[angleRT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[ploty] = identifier[angleZT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[plotz] = identifier[anglephiT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
keyword[else] :
identifier[plotx] = identifier[angleRT] [ literal[int] ,:]
identifier[ploty] = identifier[angleZT] [ literal[int] ,:]
identifier[plotz] = identifier[anglephiT] [ literal[int] ,:]
identifier[bovy_plot] . identifier[bovy_plot] ( identifier[plotx] , identifier[ploty] , identifier[c] = identifier[plotz] , identifier[s] = literal[int] ,
identifier[scatter] = keyword[True] ,
identifier[edgecolor] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[clabel] = literal[string] ,
identifier[xrange] = identifier[xrange] , identifier[yrange] = identifier[yrange] ,
identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] ,
identifier[crange] = identifier[crange] ,
identifier[colorbar] = keyword[True] ,
** identifier[kwargs] )
keyword[elif] identifier[type] == literal[string] :
keyword[if] identifier[downsample] :
identifier[plotx] = identifier[angleRT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[ploty] = identifier[anglephiT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[plotz] = identifier[angleZT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
keyword[else] :
identifier[plotx] = identifier[angleRT] [ literal[int] ,:]
identifier[ploty] = identifier[anglephiT] [ literal[int] ,:]
identifier[plotz] = identifier[angleZT] [ literal[int] ,:]
identifier[bovy_plot] . identifier[bovy_plot] ( identifier[plotx] , identifier[ploty] , identifier[c] = identifier[plotz] , identifier[s] = literal[int] ,
identifier[scatter] = keyword[True] ,
identifier[edgecolor] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[clabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[xrange] = identifier[xrange] , identifier[yrange] = identifier[yrange] ,
identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] ,
identifier[crange] = identifier[crange] ,
identifier[colorbar] = keyword[True] ,
** identifier[kwargs] )
keyword[elif] identifier[type] == literal[string] :
keyword[if] identifier[downsample] :
identifier[plotx] = identifier[angleZT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[ploty] = identifier[anglephiT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
identifier[plotz] = identifier[angleRT] [ literal[int] ,:: identifier[int] ( identifier[round] ( identifier[self] . identifier[_ntintJ] // literal[int] ))]
keyword[else] :
identifier[plotx] = identifier[angleZT] [ literal[int] ,:]
identifier[ploty] = identifier[anglephiT] [ literal[int] ,:]
identifier[plotz] = identifier[angleRT] [ literal[int] ,:]
identifier[bovy_plot] . identifier[bovy_plot] ( identifier[plotx] , identifier[ploty] , identifier[c] = identifier[plotz] , identifier[s] = literal[int] ,
identifier[scatter] = keyword[True] ,
identifier[edgecolor] = literal[string] ,
identifier[clabel] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[xrange] = identifier[xrange] , identifier[yrange] = identifier[yrange] ,
identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] ,
identifier[crange] = identifier[crange] ,
identifier[colorbar] = keyword[True] ,
** identifier[kwargs] )
keyword[return] keyword[None] | def plot(self, *args, **kwargs):
"""
NAME:
plot
PURPOSE:
plot the angles vs. each other, to check whether the isochrone
approximation is good
INPUT:
Either:
a) R,vR,vT,z,vz:
floats: phase-space value for single object
b) Orbit instance
type= ('araz') type of plot to make
a) 'araz': az vs. ar, with color-coded aphi
b) 'araphi': aphi vs. ar, with color-coded az
c) 'azaphi': aphi vs. az, with color-coded ar
d) 'jr': cumulative average of jr with time, to assess convergence
e) 'lz': same as 'jr' but for lz
f) 'jz': same as 'jr' but for jz
deperiod= (False), if True, de-period the angles
downsample= (False) if True, downsample what's plotted to 400 points
+plot kwargs
OUTPUT:
plot to output
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
#Kwargs
type = kwargs.pop('type', 'araz')
deperiod = kwargs.pop('deperiod', False)
downsample = kwargs.pop('downsample', False)
#Parse input
(R, vR, vT, z, vz, phi) = self._parse_args('a' in type, False, *args)
#Use self._aAI to calculate the actions and angles in the isochrone potential
acfs = self._aAI._actionsFreqsAngles(R.flatten(), vR.flatten(), vT.flatten(), z.flatten(), vz.flatten(), phi.flatten())
if type == 'jr' or type == 'lz' or type == 'jz':
jrI = nu.reshape(acfs[0], R.shape)[:, :-1]
jzI = nu.reshape(acfs[2], R.shape)[:, :-1]
anglerI = nu.reshape(acfs[6], R.shape)
anglezI = nu.reshape(acfs[8], R.shape)
danglerI = ((nu.roll(anglerI, -1, axis=1) - anglerI) % _TWOPI)[:, :-1]
danglezI = ((nu.roll(anglezI, -1, axis=1) - anglezI) % _TWOPI)[:, :-1]
if True:
sumFunc = nu.cumsum # depends on [control=['if'], data=[]]
jr = sumFunc(jrI * danglerI, axis=1) / sumFunc(danglerI, axis=1)
jz = sumFunc(jzI * danglezI, axis=1) / sumFunc(danglezI, axis=1)
lzI = nu.reshape(acfs[1], R.shape)[:, :-1]
anglephiI = nu.reshape(acfs[7], R.shape)
danglephiI = ((nu.roll(anglephiI, -1, axis=1) - anglephiI) % _TWOPI)[:, :-1]
lz = sumFunc(lzI * danglephiI, axis=1) / sumFunc(danglephiI, axis=1)
from galpy.orbit import Orbit
if isinstance(args[0], Orbit) and hasattr(args[0]._orb, 't'):
ts = args[0]._orb.t[:-1] # depends on [control=['if'], data=[]]
else:
ts = self._tsJ[:-1]
if type == 'jr':
if downsample:
plotx = ts[::int(round(self._ntintJ // 400))]
ploty = jr[0, ::int(round(self._ntintJ // 400))] / jr[0, -1]
plotz = anglerI[0, :-1:int(round(self._ntintJ // 400))] # depends on [control=['if'], data=[]]
else:
plotx = ts
ploty = jr[0, :] / jr[0, -1]
plotz = anglerI[0, :-1]
bovy_plot.bovy_plot(plotx, ploty, c=plotz, s=20.0, scatter=True, edgecolor='none', xlabel='$t$', ylabel='$J^A_R / \\langle J^A_R \\rangle$', clabel='$\\theta^A_R$', vmin=0.0, vmax=2.0 * nu.pi, crange=[0.0, 2.0 * nu.pi], colorbar=True, **kwargs) # depends on [control=['if'], data=[]]
elif type == 'lz':
if downsample:
plotx = ts[::int(round(self._ntintJ // 400))]
ploty = lz[0, ::int(round(self._ntintJ // 400))] / lz[0, -1]
plotz = anglephiI[0, :-1:int(round(self._ntintJ // 400))] # depends on [control=['if'], data=[]]
else:
plotx = ts
ploty = lz[0, :] / lz[0, -1]
plotz = anglephiI[0, :-1]
bovy_plot.bovy_plot(plotx, ploty, c=plotz, s=20.0, scatter=True, edgecolor='none', xlabel='$t$', ylabel='$L^A_Z / \\langle L^A_Z \\rangle$', clabel='$\\theta^A_\\phi$', vmin=0.0, vmax=2.0 * nu.pi, crange=[0.0, 2.0 * nu.pi], colorbar=True, **kwargs) # depends on [control=['if'], data=[]]
elif type == 'jz':
if downsample:
plotx = ts[::int(round(self._ntintJ // 400))]
ploty = jz[0, ::int(round(self._ntintJ // 400))] / jz[0, -1]
plotz = anglezI[0, :-1:int(round(self._ntintJ // 400))] # depends on [control=['if'], data=[]]
else:
plotx = ts
ploty = jz[0, :] / jz[0, -1]
plotz = anglezI[0, :-1]
bovy_plot.bovy_plot(plotx, ploty, c=plotz, s=20.0, scatter=True, edgecolor='none', xlabel='$t$', ylabel='$J^A_Z / \\langle J^A_Z \\rangle$', clabel='$\\theta^A_Z$', vmin=0.0, vmax=2.0 * nu.pi, crange=[0.0, 2.0 * nu.pi], colorbar=True, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if deperiod:
if 'ar' in type:
angleRT = dePeriod(nu.reshape(acfs[6], R.shape)) # depends on [control=['if'], data=[]]
else:
angleRT = nu.reshape(acfs[6], R.shape)
if 'aphi' in type:
acfs7 = nu.reshape(acfs[7], R.shape)
negFreqIndx = nu.median(acfs7 - nu.roll(acfs7, 1, axis=1), axis=1) < 0.0 #anglephi is decreasing
anglephiT = nu.empty(acfs7.shape)
anglephiT[negFreqIndx, :] = dePeriod(_TWOPI - acfs7[negFreqIndx, :])
negFreqPhi = nu.zeros(R.shape[0], dtype='bool')
negFreqPhi[negFreqIndx] = True
anglephiT[True ^ negFreqIndx, :] = dePeriod(acfs7[True ^ negFreqIndx, :]) # depends on [control=['if'], data=[]]
else:
anglephiT = nu.reshape(acfs[7], R.shape)
if 'az' in type:
angleZT = dePeriod(nu.reshape(acfs[8], R.shape)) # depends on [control=['if'], data=[]]
else:
angleZT = nu.reshape(acfs[8], R.shape)
xrange = None
yrange = None # depends on [control=['if'], data=[]]
else:
angleRT = nu.reshape(acfs[6], R.shape)
anglephiT = nu.reshape(acfs[7], R.shape)
angleZT = nu.reshape(acfs[8], R.shape)
xrange = [-0.5, 2.0 * nu.pi + 0.5]
yrange = [-0.5, 2.0 * nu.pi + 0.5]
(vmin, vmax) = (0.0, 2.0 * nu.pi)
crange = [vmin, vmax]
if type == 'araz':
if downsample:
plotx = angleRT[0, ::int(round(self._ntintJ // 400))]
ploty = angleZT[0, ::int(round(self._ntintJ // 400))]
plotz = anglephiT[0, ::int(round(self._ntintJ // 400))] # depends on [control=['if'], data=[]]
else:
plotx = angleRT[0, :]
ploty = angleZT[0, :]
plotz = anglephiT[0, :]
bovy_plot.bovy_plot(plotx, ploty, c=plotz, s=20.0, scatter=True, edgecolor='none', xlabel='$\\theta^A_R$', ylabel='$\\theta^A_Z$', clabel='$\\theta^A_\\phi$', xrange=xrange, yrange=yrange, vmin=vmin, vmax=vmax, crange=crange, colorbar=True, **kwargs) # depends on [control=['if'], data=[]]
elif type == 'araphi':
if downsample:
plotx = angleRT[0, ::int(round(self._ntintJ // 400))]
ploty = anglephiT[0, ::int(round(self._ntintJ // 400))]
plotz = angleZT[0, ::int(round(self._ntintJ // 400))] # depends on [control=['if'], data=[]]
else:
plotx = angleRT[0, :]
ploty = anglephiT[0, :]
plotz = angleZT[0, :]
bovy_plot.bovy_plot(plotx, ploty, c=plotz, s=20.0, scatter=True, edgecolor='none', xlabel='$\\theta^A_R$', clabel='$\\theta^A_Z$', ylabel='$\\theta^A_\\phi$', xrange=xrange, yrange=yrange, vmin=vmin, vmax=vmax, crange=crange, colorbar=True, **kwargs) # depends on [control=['if'], data=[]]
elif type == 'azaphi':
if downsample:
plotx = angleZT[0, ::int(round(self._ntintJ // 400))]
ploty = anglephiT[0, ::int(round(self._ntintJ // 400))]
plotz = angleRT[0, ::int(round(self._ntintJ // 400))] # depends on [control=['if'], data=[]]
else:
plotx = angleZT[0, :]
ploty = anglephiT[0, :]
plotz = angleRT[0, :]
bovy_plot.bovy_plot(plotx, ploty, c=plotz, s=20.0, scatter=True, edgecolor='none', clabel='$\\theta^A_R$', xlabel='$\\theta^A_Z$', ylabel='$\\theta^A_\\phi$', xrange=xrange, yrange=yrange, vmin=vmin, vmax=vmax, crange=crange, colorbar=True, **kwargs) # depends on [control=['if'], data=[]]
return None |
def specific_gravity(self, value):
""" Set the relative weight of the solid """
value = clean_float(value)
if value is None:
return
specific_gravity = self._calc_specific_gravity()
if specific_gravity is not None and not ct.isclose(specific_gravity, value, rel_tol=self._tolerance):
raise ModelError("specific gravity is inconsistent with set unit_dry_weight and void_ratio")
self._specific_gravity = float(value)
self.stack.append(("specific_gravity", float(value)))
self.recompute_all_weights_and_void() | def function[specific_gravity, parameter[self, value]]:
constant[ Set the relative weight of the solid ]
variable[value] assign[=] call[name[clean_float], parameter[name[value]]]
if compare[name[value] is constant[None]] begin[:]
return[None]
variable[specific_gravity] assign[=] call[name[self]._calc_specific_gravity, parameter[]]
if <ast.BoolOp object at 0x7da20e9b30a0> begin[:]
<ast.Raise object at 0x7da20e9b2710>
name[self]._specific_gravity assign[=] call[name[float], parameter[name[value]]]
call[name[self].stack.append, parameter[tuple[[<ast.Constant object at 0x7da20e9b3610>, <ast.Call object at 0x7da20e9b2800>]]]]
call[name[self].recompute_all_weights_and_void, parameter[]] | keyword[def] identifier[specific_gravity] ( identifier[self] , identifier[value] ):
literal[string]
identifier[value] = identifier[clean_float] ( identifier[value] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return]
identifier[specific_gravity] = identifier[self] . identifier[_calc_specific_gravity] ()
keyword[if] identifier[specific_gravity] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[ct] . identifier[isclose] ( identifier[specific_gravity] , identifier[value] , identifier[rel_tol] = identifier[self] . identifier[_tolerance] ):
keyword[raise] identifier[ModelError] ( literal[string] )
identifier[self] . identifier[_specific_gravity] = identifier[float] ( identifier[value] )
identifier[self] . identifier[stack] . identifier[append] (( literal[string] , identifier[float] ( identifier[value] )))
identifier[self] . identifier[recompute_all_weights_and_void] () | def specific_gravity(self, value):
""" Set the relative weight of the solid """
value = clean_float(value)
if value is None:
return # depends on [control=['if'], data=[]]
specific_gravity = self._calc_specific_gravity()
if specific_gravity is not None and (not ct.isclose(specific_gravity, value, rel_tol=self._tolerance)):
raise ModelError('specific gravity is inconsistent with set unit_dry_weight and void_ratio') # depends on [control=['if'], data=[]]
self._specific_gravity = float(value)
self.stack.append(('specific_gravity', float(value)))
self.recompute_all_weights_and_void() |
def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER):
'''
Set the RelayIpList list for the SMTP virtual server.
Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve
the existing list you wish to set from a pre-configured server.
For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate
an actual relay IP list similar to the following:
.. code-block:: cfg
['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0',
'0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0',
'0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0',
'255.255.255.255', '127.0.0.1']
.. note::
Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter
with an empty access list configured, and setting an empty list/tuple corresponds to the
more permissive 'All except the list below' GUI parameter.
:param str addresses: A list of the relay IPs. The order of the list is important.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']"
'''
setting = 'RelayIpList'
formatted_addresses = list()
current_addresses = get_relay_ip_list(server)
if list(addresses) == current_addresses:
_LOG.debug('%s already contains the provided addresses.', setting)
return True
if addresses:
# The WMI input data needs to be in the format used by RelayIpList. Order
# is also important due to the way RelayIpList orders the address list.
if addresses[0] is None:
formatted_addresses = None
else:
for address in addresses:
for octet in address.split('.'):
formatted_addresses.append(octet)
_LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses)
_set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server)
new_addresses = get_relay_ip_list(server)
ret = list(addresses) == new_addresses
if ret:
_LOG.debug('%s configured successfully: %s', setting, addresses)
return ret
_LOG.error('Unable to configure %s with value: %s', setting, addresses)
return ret | def function[set_relay_ip_list, parameter[addresses, server]]:
constant[
Set the RelayIpList list for the SMTP virtual server.
Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve
the existing list you wish to set from a pre-configured server.
For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate
an actual relay IP list similar to the following:
.. code-block:: cfg
['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0',
'0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0',
'0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0',
'255.255.255.255', '127.0.0.1']
.. note::
Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter
with an empty access list configured, and setting an empty list/tuple corresponds to the
more permissive 'All except the list below' GUI parameter.
:param str addresses: A list of the relay IPs. The order of the list is important.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']"
]
variable[setting] assign[=] constant[RelayIpList]
variable[formatted_addresses] assign[=] call[name[list], parameter[]]
variable[current_addresses] assign[=] call[name[get_relay_ip_list], parameter[name[server]]]
if compare[call[name[list], parameter[name[addresses]]] equal[==] name[current_addresses]] begin[:]
call[name[_LOG].debug, parameter[constant[%s already contains the provided addresses.], name[setting]]]
return[constant[True]]
if name[addresses] begin[:]
if compare[call[name[addresses]][constant[0]] is constant[None]] begin[:]
variable[formatted_addresses] assign[=] constant[None]
call[name[_LOG].debug, parameter[constant[Formatted %s addresses: %s], name[setting], name[formatted_addresses]]]
call[name[_set_wmi_setting], parameter[constant[IIsSmtpServerSetting], name[setting], name[formatted_addresses], name[server]]]
variable[new_addresses] assign[=] call[name[get_relay_ip_list], parameter[name[server]]]
variable[ret] assign[=] compare[call[name[list], parameter[name[addresses]]] equal[==] name[new_addresses]]
if name[ret] begin[:]
call[name[_LOG].debug, parameter[constant[%s configured successfully: %s], name[setting], name[addresses]]]
return[name[ret]]
call[name[_LOG].error, parameter[constant[Unable to configure %s with value: %s], name[setting], name[addresses]]]
return[name[ret]] | keyword[def] identifier[set_relay_ip_list] ( identifier[addresses] = keyword[None] , identifier[server] = identifier[_DEFAULT_SERVER] ):
literal[string]
identifier[setting] = literal[string]
identifier[formatted_addresses] = identifier[list] ()
identifier[current_addresses] = identifier[get_relay_ip_list] ( identifier[server] )
keyword[if] identifier[list] ( identifier[addresses] )== identifier[current_addresses] :
identifier[_LOG] . identifier[debug] ( literal[string] , identifier[setting] )
keyword[return] keyword[True]
keyword[if] identifier[addresses] :
keyword[if] identifier[addresses] [ literal[int] ] keyword[is] keyword[None] :
identifier[formatted_addresses] = keyword[None]
keyword[else] :
keyword[for] identifier[address] keyword[in] identifier[addresses] :
keyword[for] identifier[octet] keyword[in] identifier[address] . identifier[split] ( literal[string] ):
identifier[formatted_addresses] . identifier[append] ( identifier[octet] )
identifier[_LOG] . identifier[debug] ( literal[string] , identifier[setting] , identifier[formatted_addresses] )
identifier[_set_wmi_setting] ( literal[string] , identifier[setting] , identifier[formatted_addresses] , identifier[server] )
identifier[new_addresses] = identifier[get_relay_ip_list] ( identifier[server] )
identifier[ret] = identifier[list] ( identifier[addresses] )== identifier[new_addresses]
keyword[if] identifier[ret] :
identifier[_LOG] . identifier[debug] ( literal[string] , identifier[setting] , identifier[addresses] )
keyword[return] identifier[ret]
identifier[_LOG] . identifier[error] ( literal[string] , identifier[setting] , identifier[addresses] )
keyword[return] identifier[ret] | def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER):
"""
Set the RelayIpList list for the SMTP virtual server.
Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve
the existing list you wish to set from a pre-configured server.
For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate
an actual relay IP list similar to the following:
.. code-block:: cfg
['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0',
'0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0',
'0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0',
'255.255.255.255', '127.0.0.1']
.. note::
Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter
with an empty access list configured, and setting an empty list/tuple corresponds to the
more permissive 'All except the list below' GUI parameter.
:param str addresses: A list of the relay IPs. The order of the list is important.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']"
"""
setting = 'RelayIpList'
formatted_addresses = list()
current_addresses = get_relay_ip_list(server)
if list(addresses) == current_addresses:
_LOG.debug('%s already contains the provided addresses.', setting)
return True # depends on [control=['if'], data=[]]
if addresses:
# The WMI input data needs to be in the format used by RelayIpList. Order
# is also important due to the way RelayIpList orders the address list.
if addresses[0] is None:
formatted_addresses = None # depends on [control=['if'], data=[]]
else:
for address in addresses:
for octet in address.split('.'):
formatted_addresses.append(octet) # depends on [control=['for'], data=['octet']] # depends on [control=['for'], data=['address']] # depends on [control=['if'], data=[]]
_LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses)
_set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server)
new_addresses = get_relay_ip_list(server)
ret = list(addresses) == new_addresses
if ret:
_LOG.debug('%s configured successfully: %s', setting, addresses)
return ret # depends on [control=['if'], data=[]]
_LOG.error('Unable to configure %s with value: %s', setting, addresses)
return ret |
def split(self, n):
    """Evenly split this range into contiguous, non overlapping subranges.

    Args:
      n: number of splits.

    Returns:
      a list of contiguous, non overlapping sub PropertyRanges. Maybe less than
      n when not enough subranges.
    """
    new_range_filters = []
    # Every filter produced below applies to the same property name as the
    # range's start filter.
    name = self.start[0]
    prop_cls = self.prop.__class__
    if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
        # Discrete properties: the split function also receives whether the
        # range ends are inclusive, and its first splitpoint replaces the
        # original start bound.
        splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
            self.start[2], self.end[2], n,
            self.start[1] == ">=", self.end[1] == "<=")
        start_filter = (name, ">=", splitpoints[0])
        for p in splitpoints[1:]:
            end_filter = (name, "<", p)
            new_range_filters.append([start_filter, end_filter])
            # Next subrange starts where this one ended (half-open intervals).
            start_filter = (name, ">=", p)
    else:
        # Continuous properties: keep the range's original start filter and
        # cut at each computed splitpoint.
        splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
            self.start[2], self.end[2], n)
        start_filter = self.start
        for p in splitpoints:
            end_filter = (name, "<", p)
            new_range_filters.append([start_filter, end_filter])
            start_filter = (name, ">=", p)
    # Close the final subrange with this range's original end filter.
    new_range_filters.append([start_filter, self.end])
    # Each subrange keeps the equality filters shared by the whole range.
    for f in new_range_filters:
        f.extend(self._equality_filters)
    return [self.__class__(f, self.model_class_path) for f in new_range_filters]
constant[Evenly split this range into contiguous, non overlapping subranges.
Args:
n: number of splits.
Returns:
a list of contiguous, non overlapping sub PropertyRanges. Maybe less than
n when not enough subranges.
]
variable[new_range_filters] assign[=] list[[]]
variable[name] assign[=] call[name[self].start][constant[0]]
variable[prop_cls] assign[=] name[self].prop.__class__
if compare[name[prop_cls] in name[_DISCRETE_PROPERTY_SPLIT_FUNCTIONS]] begin[:]
variable[splitpoints] assign[=] call[call[name[_DISCRETE_PROPERTY_SPLIT_FUNCTIONS]][name[prop_cls]], parameter[call[name[self].start][constant[2]], call[name[self].end][constant[2]], name[n], compare[call[name[self].start][constant[1]] equal[==] constant[>=]], compare[call[name[self].end][constant[1]] equal[==] constant[<=]]]]
variable[start_filter] assign[=] tuple[[<ast.Name object at 0x7da20c6c6740>, <ast.Constant object at 0x7da20c6c5870>, <ast.Subscript object at 0x7da20c6c5120>]]
for taget[name[p]] in starred[call[name[splitpoints]][<ast.Slice object at 0x7da20c6c6080>]] begin[:]
variable[end_filter] assign[=] tuple[[<ast.Name object at 0x7da20c6c62c0>, <ast.Constant object at 0x7da20c6c5060>, <ast.Name object at 0x7da20c6c4cd0>]]
call[name[new_range_filters].append, parameter[list[[<ast.Name object at 0x7da20c6c5030>, <ast.Name object at 0x7da20c9907c0>]]]]
variable[start_filter] assign[=] tuple[[<ast.Name object at 0x7da20c991f30>, <ast.Constant object at 0x7da20c993760>, <ast.Name object at 0x7da20c993880>]]
for taget[name[f]] in starred[name[new_range_filters]] begin[:]
call[name[f].extend, parameter[name[self]._equality_filters]]
return[<ast.ListComp object at 0x7da20c992710>] | keyword[def] identifier[split] ( identifier[self] , identifier[n] ):
literal[string]
identifier[new_range_filters] =[]
identifier[name] = identifier[self] . identifier[start] [ literal[int] ]
identifier[prop_cls] = identifier[self] . identifier[prop] . identifier[__class__]
keyword[if] identifier[prop_cls] keyword[in] identifier[_DISCRETE_PROPERTY_SPLIT_FUNCTIONS] :
identifier[splitpoints] = identifier[_DISCRETE_PROPERTY_SPLIT_FUNCTIONS] [ identifier[prop_cls] ](
identifier[self] . identifier[start] [ literal[int] ], identifier[self] . identifier[end] [ literal[int] ], identifier[n] ,
identifier[self] . identifier[start] [ literal[int] ]== literal[string] , identifier[self] . identifier[end] [ literal[int] ]== literal[string] )
identifier[start_filter] =( identifier[name] , literal[string] , identifier[splitpoints] [ literal[int] ])
keyword[for] identifier[p] keyword[in] identifier[splitpoints] [ literal[int] :]:
identifier[end_filter] =( identifier[name] , literal[string] , identifier[p] )
identifier[new_range_filters] . identifier[append] ([ identifier[start_filter] , identifier[end_filter] ])
identifier[start_filter] =( identifier[name] , literal[string] , identifier[p] )
keyword[else] :
identifier[splitpoints] = identifier[_CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS] [ identifier[prop_cls] ](
identifier[self] . identifier[start] [ literal[int] ], identifier[self] . identifier[end] [ literal[int] ], identifier[n] )
identifier[start_filter] = identifier[self] . identifier[start]
keyword[for] identifier[p] keyword[in] identifier[splitpoints] :
identifier[end_filter] =( identifier[name] , literal[string] , identifier[p] )
identifier[new_range_filters] . identifier[append] ([ identifier[start_filter] , identifier[end_filter] ])
identifier[start_filter] =( identifier[name] , literal[string] , identifier[p] )
identifier[new_range_filters] . identifier[append] ([ identifier[start_filter] , identifier[self] . identifier[end] ])
keyword[for] identifier[f] keyword[in] identifier[new_range_filters] :
identifier[f] . identifier[extend] ( identifier[self] . identifier[_equality_filters] )
keyword[return] [ identifier[self] . identifier[__class__] ( identifier[f] , identifier[self] . identifier[model_class_path] ) keyword[for] identifier[f] keyword[in] identifier[new_range_filters] ] | def split(self, n):
"""Evenly split this range into contiguous, non overlapping subranges.
Args:
n: number of splits.
Returns:
a list of contiguous, non overlapping sub PropertyRanges. Maybe less than
n when not enough subranges.
"""
new_range_filters = []
name = self.start[0]
prop_cls = self.prop.__class__
if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](self.start[2], self.end[2], n, self.start[1] == '>=', self.end[1] == '<=')
start_filter = (name, '>=', splitpoints[0])
for p in splitpoints[1:]:
end_filter = (name, '<', p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, '>=', p) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=['prop_cls', '_DISCRETE_PROPERTY_SPLIT_FUNCTIONS']]
else:
splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](self.start[2], self.end[2], n)
start_filter = self.start
for p in splitpoints:
end_filter = (name, '<', p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, '>=', p) # depends on [control=['for'], data=['p']]
new_range_filters.append([start_filter, self.end])
for f in new_range_filters:
f.extend(self._equality_filters) # depends on [control=['for'], data=['f']]
return [self.__class__(f, self.model_class_path) for f in new_range_filters] |
def __run(self):
    """Drive one full processing pass.

    Runs setup, downloads the WARC archive referenced by
    ``self.__warc_download_url`` and hands the downloaded file over to the
    warc.gz processing step.

    :return:
    """
    self.__setup()
    downloaded_warc = self.__download(self.__warc_download_url)
    self.__process_warc_gz_file(downloaded_warc)
constant[
Main execution method, which consists of: get an up-to-date list of WARC files, and for each of them: download
and extract articles. Each article is checked against a filter. Finally, for each valid article the method
on_valid_article_extracted will be invoked after the extraction of the article has completed.
:return:
]
call[name[self].__setup, parameter[]]
variable[local_path_name] assign[=] call[name[self].__download, parameter[name[self].__warc_download_url]]
call[name[self].__process_warc_gz_file, parameter[name[local_path_name]]] | keyword[def] identifier[__run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__setup] ()
identifier[local_path_name] = identifier[self] . identifier[__download] ( identifier[self] . identifier[__warc_download_url] )
identifier[self] . identifier[__process_warc_gz_file] ( identifier[local_path_name] ) | def __run(self):
"""
Main execution method, which consists of: get an up-to-date list of WARC files, and for each of them: download
and extract articles. Each article is checked against a filter. Finally, for each valid article the method
on_valid_article_extracted will be invoked after the extraction of the article has completed.
:return:
"""
self.__setup()
local_path_name = self.__download(self.__warc_download_url)
self.__process_warc_gz_file(local_path_name) |
def delete(block_id):
    """Processing block detail resource.

    Attempts to delete the processing block identified by ``block_id`` and
    returns a (body, status) pair describing the outcome.
    """
    root = get_root_url()
    # Both outcomes carry the same navigation links.
    links = dict(list='{}/processing-blocks'.format(root),
                 home='{}'.format(root))
    try:
        DB.delete_processing_block(block_id)
        body = dict(message='Deleted block',
                    id='{}'.format(block_id),
                    links=links)
        return body, HTTPStatus.OK
    except RuntimeError as error:
        # NOTE(review): the failure path also answers HTTPStatus.OK — the
        # error is conveyed in the body only. Preserved as-is; confirm this
        # is the intended API contract.
        body = dict(error='Failed to delete Processing Block: {}'.
                    format(block_id),
                    reason=str(error),
                    links=links)
        return body, HTTPStatus.OK
constant[Processing block detail resource.]
variable[_url] assign[=] call[name[get_root_url], parameter[]]
<ast.Try object at 0x7da18f00ddb0> | keyword[def] identifier[delete] ( identifier[block_id] ):
literal[string]
identifier[_url] = identifier[get_root_url] ()
keyword[try] :
identifier[DB] . identifier[delete_processing_block] ( identifier[block_id] )
identifier[response] = identifier[dict] ( identifier[message] = literal[string] ,
identifier[id] = literal[string] . identifier[format] ( identifier[block_id] ),
identifier[links] = identifier[dict] ( identifier[list] = literal[string] . identifier[format] ( identifier[_url] ),
identifier[home] = literal[string] . identifier[format] ( identifier[_url] )))
keyword[return] identifier[response] , identifier[HTTPStatus] . identifier[OK]
keyword[except] identifier[RuntimeError] keyword[as] identifier[error] :
identifier[response] = identifier[dict] ( identifier[error] = literal[string] .
identifier[format] ( identifier[block_id] ),
identifier[reason] = identifier[str] ( identifier[error] ),
identifier[links] = identifier[dict] ( identifier[list] = literal[string] . identifier[format] ( identifier[_url] ),
identifier[home] = literal[string] . identifier[format] ( identifier[_url] )))
keyword[return] identifier[response] , identifier[HTTPStatus] . identifier[OK] | def delete(block_id):
"""Processing block detail resource."""
_url = get_root_url()
try:
DB.delete_processing_block(block_id)
response = dict(message='Deleted block', id='{}'.format(block_id), links=dict(list='{}/processing-blocks'.format(_url), home='{}'.format(_url)))
return (response, HTTPStatus.OK) # depends on [control=['try'], data=[]]
except RuntimeError as error:
response = dict(error='Failed to delete Processing Block: {}'.format(block_id), reason=str(error), links=dict(list='{}/processing-blocks'.format(_url), home='{}'.format(_url)))
return (response, HTTPStatus.OK) # depends on [control=['except'], data=['error']] |
def com_google_fonts_check_name_rfn(ttFont):
    """Name table strings must not contain the string 'Reserved Font Name'."""
    found_rfn = False
    for record in ttFont["name"].names:
        value = record.toUnicode()
        if "reserved font name" not in value.lower():
            continue
        found_rfn = True
        yield WARN, ('Name table entry ("{}")'
                     ' contains "Reserved Font Name".'
                     ' This is an error except in a few specific'
                     ' rare cases.').format(value)
    if not found_rfn:
        yield PASS, ('None of the name table strings'
                     ' contain "Reserved Font Name".')
constant[Name table strings must not contain the string 'Reserved Font Name'.]
variable[failed] assign[=] constant[False]
for taget[name[entry]] in starred[call[name[ttFont]][constant[name]].names] begin[:]
variable[string] assign[=] call[name[entry].toUnicode, parameter[]]
if compare[constant[reserved font name] in call[name[string].lower, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b1240340>
variable[failed] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b12417b0> begin[:]
<ast.Yield object at 0x7da1b1243f70> | keyword[def] identifier[com_google_fonts_check_name_rfn] ( identifier[ttFont] ):
literal[string]
identifier[failed] = keyword[False]
keyword[for] identifier[entry] keyword[in] identifier[ttFont] [ literal[string] ]. identifier[names] :
identifier[string] = identifier[entry] . identifier[toUnicode] ()
keyword[if] literal[string] keyword[in] identifier[string] . identifier[lower] ():
keyword[yield] identifier[WARN] ,( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[string] )
identifier[failed] = keyword[True]
keyword[if] keyword[not] identifier[failed] :
keyword[yield] identifier[PASS] ,( literal[string]
literal[string] ) | def com_google_fonts_check_name_rfn(ttFont):
"""Name table strings must not contain the string 'Reserved Font Name'."""
failed = False
for entry in ttFont['name'].names:
string = entry.toUnicode()
if 'reserved font name' in string.lower():
yield (WARN, 'Name table entry ("{}") contains "Reserved Font Name". This is an error except in a few specific rare cases.'.format(string))
failed = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']]
if not failed:
yield (PASS, 'None of the name table strings contain "Reserved Font Name".') # depends on [control=['if'], data=[]] |
def get_live_url(con_pool,
                 method,
                 host,
                 url,
                 headers,
                 retries=1,
                 redirect=True,
                 body=None,
                 service_name=None):
    """
    Return a connection from the pool and perform an HTTP request.

    Fires the ``rest_request`` and ``rest_request_passfail`` signals with
    timing information after the request completes.

    :param con_pool:
        is the http connection pool associated with the service
    :param method:
        HTTP request method (such as GET, POST, PUT, etc.)
    :param host:
        the url of the server host (not used by this function itself)
    :param headers:
        headers to include with the request
    :param body:
        the POST, PUT, PATCH body of the request
    """
    read_timeout = con_pool.timeout.read_timeout
    started = time.time()
    response = con_pool.urlopen(method, url, body=body,
                                headers=headers, redirect=redirect,
                                retries=retries, timeout=read_timeout)
    elapsed = time.time() - started
    hostname = socket.gethostname()
    rest_request.send(sender='restclients',
                      url=url,
                      request_time=elapsed,
                      hostname=hostname,
                      service_name=service_name)
    rest_request_passfail.send(sender='restclients',
                               url=url,
                               success=True,
                               hostname=hostname,
                               service_name=service_name)
    return response
constant[
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT, PATCH body of the request
]
variable[timeout] assign[=] name[con_pool].timeout.read_timeout
variable[start_time] assign[=] call[name[time].time, parameter[]]
variable[response] assign[=] call[name[con_pool].urlopen, parameter[name[method], name[url]]]
variable[request_time] assign[=] binary_operation[call[name[time].time, parameter[]] - name[start_time]]
call[name[rest_request].send, parameter[]]
call[name[rest_request_passfail].send, parameter[]]
return[name[response]] | keyword[def] identifier[get_live_url] ( identifier[con_pool] ,
identifier[method] ,
identifier[host] ,
identifier[url] ,
identifier[headers] ,
identifier[retries] = literal[int] ,
identifier[redirect] = keyword[True] ,
identifier[body] = keyword[None] ,
identifier[service_name] = keyword[None] ):
literal[string]
identifier[timeout] = identifier[con_pool] . identifier[timeout] . identifier[read_timeout]
identifier[start_time] = identifier[time] . identifier[time] ()
identifier[response] = identifier[con_pool] . identifier[urlopen] ( identifier[method] , identifier[url] , identifier[body] = identifier[body] ,
identifier[headers] = identifier[headers] , identifier[redirect] = identifier[redirect] ,
identifier[retries] = identifier[retries] , identifier[timeout] = identifier[timeout] )
identifier[request_time] = identifier[time] . identifier[time] ()- identifier[start_time]
identifier[rest_request] . identifier[send] ( identifier[sender] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[request_time] = identifier[request_time] ,
identifier[hostname] = identifier[socket] . identifier[gethostname] (),
identifier[service_name] = identifier[service_name] )
identifier[rest_request_passfail] . identifier[send] ( identifier[sender] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[success] = keyword[True] ,
identifier[hostname] = identifier[socket] . identifier[gethostname] (),
identifier[service_name] = identifier[service_name] )
keyword[return] identifier[response] | def get_live_url(con_pool, method, host, url, headers, retries=1, redirect=True, body=None, service_name=None):
"""
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT, PATCH body of the request
"""
timeout = con_pool.timeout.read_timeout
start_time = time.time()
response = con_pool.urlopen(method, url, body=body, headers=headers, redirect=redirect, retries=retries, timeout=timeout)
request_time = time.time() - start_time
rest_request.send(sender='restclients', url=url, request_time=request_time, hostname=socket.gethostname(), service_name=service_name)
rest_request_passfail.send(sender='restclients', url=url, success=True, hostname=socket.gethostname(), service_name=service_name)
return response |
def clone(self, default):
    """
    Make a copy of this parameter, supplying a different default.

    @type default: C{unicode} or C{NoneType}
    @param default: A value which will be initially presented in the view
    as the value for this parameter, or C{None} if no such value is to be
    presented.

    @rtype: L{Parameter}
    """
    cls = self.__class__
    return cls(self.name, self.type, self.coercer, self.label,
               self.description, default, self.viewFactory)
constant[
Make a copy of this parameter, supplying a different default.
@type default: C{unicode} or C{NoneType}
@param default: A value which will be initially presented in the view
as the value for this parameter, or C{None} if no such value is to be
presented.
@rtype: L{Parameter}
]
return[call[name[self].__class__, parameter[name[self].name, name[self].type, name[self].coercer, name[self].label, name[self].description, name[default], name[self].viewFactory]]] | keyword[def] identifier[clone] ( identifier[self] , identifier[default] ):
literal[string]
keyword[return] identifier[self] . identifier[__class__] (
identifier[self] . identifier[name] ,
identifier[self] . identifier[type] ,
identifier[self] . identifier[coercer] ,
identifier[self] . identifier[label] ,
identifier[self] . identifier[description] ,
identifier[default] ,
identifier[self] . identifier[viewFactory] ) | def clone(self, default):
"""
Make a copy of this parameter, supplying a different default.
@type default: C{unicode} or C{NoneType}
@param default: A value which will be initially presented in the view
as the value for this parameter, or C{None} if no such value is to be
presented.
@rtype: L{Parameter}
"""
return self.__class__(self.name, self.type, self.coercer, self.label, self.description, default, self.viewFactory) |
def set_option(name, option):
    """
    Set the given LLVM "command-line" option.

    For example set_option("test", "-debug-pass=Structure") would display
    all optimization passes when generating code.
    """
    encoded_name = _encode_string(name)
    encoded_option = _encode_string(option)
    ffi.lib.LLVMPY_SetCommandLine(encoded_name, encoded_option)
constant[
Set the given LLVM "command-line" option.
For example set_option("test", "-debug-pass=Structure") would display
all optimization passes when generating code.
]
call[name[ffi].lib.LLVMPY_SetCommandLine, parameter[call[name[_encode_string], parameter[name[name]]], call[name[_encode_string], parameter[name[option]]]]] | keyword[def] identifier[set_option] ( identifier[name] , identifier[option] ):
literal[string]
identifier[ffi] . identifier[lib] . identifier[LLVMPY_SetCommandLine] ( identifier[_encode_string] ( identifier[name] ),
identifier[_encode_string] ( identifier[option] )) | def set_option(name, option):
"""
Set the given LLVM "command-line" option.
For example set_option("test", "-debug-pass=Structure") would display
all optimization passes when generating code.
"""
ffi.lib.LLVMPY_SetCommandLine(_encode_string(name), _encode_string(option)) |
def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):
    """Set the permissions and ownership of a path.

    Args:
      path: string, the path for which owner ID and group ID needs to be setup.
      mode: octal string, the permissions to set on the path.
      uid: int, the owner ID to be set for the path.
      gid: int, the group ID to be set for the path.
      mkdir: bool, True if the directory needs to be created.
    """
    must_create = mkdir and not os.path.exists(path)
    if must_create:
        # No mode given: fall back to 0o777 (the process umask still applies).
        os.mkdir(path, mode if mode else 0o777)
    elif mode:
        os.chmod(path, mode)
    if uid and gid:
        os.chown(path, uid, gid)
    _SetSELinuxContext(path)
constant[Set the permissions and ownership of a path.
Args:
path: string, the path for which owner ID and group ID needs to be setup.
mode: octal string, the permissions to set on the path.
uid: int, the owner ID to be set for the path.
gid: int, the group ID to be set for the path.
mkdir: bool, True if the directory needs to be created.
]
if <ast.BoolOp object at 0x7da204960640> begin[:]
call[name[os].mkdir, parameter[name[path], <ast.BoolOp object at 0x7da204961030>]]
if <ast.BoolOp object at 0x7da204960f40> begin[:]
call[name[os].chown, parameter[name[path], name[uid], name[gid]]]
call[name[_SetSELinuxContext], parameter[name[path]]] | keyword[def] identifier[SetPermissions] ( identifier[path] , identifier[mode] = keyword[None] , identifier[uid] = keyword[None] , identifier[gid] = keyword[None] , identifier[mkdir] = keyword[False] ):
literal[string]
keyword[if] identifier[mkdir] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[os] . identifier[mkdir] ( identifier[path] , identifier[mode] keyword[or] literal[int] )
keyword[elif] identifier[mode] :
identifier[os] . identifier[chmod] ( identifier[path] , identifier[mode] )
keyword[if] identifier[uid] keyword[and] identifier[gid] :
identifier[os] . identifier[chown] ( identifier[path] , identifier[uid] , identifier[gid] )
identifier[_SetSELinuxContext] ( identifier[path] ) | def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):
"""Set the permissions and ownership of a path.
Args:
path: string, the path for which owner ID and group ID needs to be setup.
mode: octal string, the permissions to set on the path.
uid: int, the owner ID to be set for the path.
gid: int, the group ID to be set for the path.
mkdir: bool, True if the directory needs to be created.
"""
if mkdir and (not os.path.exists(path)):
os.mkdir(path, mode or 511) # depends on [control=['if'], data=[]]
elif mode:
os.chmod(path, mode) # depends on [control=['if'], data=[]]
if uid and gid:
os.chown(path, uid, gid) # depends on [control=['if'], data=[]]
_SetSELinuxContext(path) |
def serialize_paragraph(ctx, document, par, root, embed=True):
    """Serializes paragraph element.

    This is the most important serializer of them all.

    Builds a ``<p>`` element (retagged as a header when header detection
    fires), serializes each child element of ``par`` into it, merges adjacent
    text runs that share styling, and attaches the result to ``root`` —
    either directly or through the currently open list.

    ``embed`` is not referenced in this body; presumably kept for interface
    compatibility — TODO confirm against callers. Returns the (possibly new)
    root element.
    """
    style = get_style(document, par)
    elem = etree.Element('p')

    # Optionally copy the paragraph's resolved CSS onto the element.
    if ctx.options['embed_styles']:
        _style = get_style_css(ctx, par)
        if _style != '':
            elem.set('style', _style)
    else:
        _style = ''
    if style:
        elem.set('class', get_css_classes(document, style))

    # Track the largest font size seen; used below for header detection.
    max_font_size = get_style_fontsize(par)
    if style:
        max_font_size = _get_font_size(document, style)

    for el in par.elements:
        _serializer = ctx.get_serializer(el)
        if _serializer:
            _serializer(ctx, document, el, elem)

        if isinstance(el, doc.Text):
            children = list(elem)
            _text_style = get_style_css(ctx, el)
            _text_class = el.rpr.get('style', '').lower()
            if _text_class == '':
                # Fall back to the paragraph style's name as the CSS class.
                __s = get_style(document, par)
                if __s is not None:
                    _text_class = get_style_name(__s).lower()
            if get_style_fontsize(el) > max_font_size:
                max_font_size = get_style_fontsize(el)

            # Wrap the run in a tag matching its run properties (rpr).
            if 'superscript' in el.rpr:
                new_element = etree.Element('sup')
                new_element.text = el.value()
            elif 'subscript' in el.rpr:
                new_element = etree.Element('sub')
                new_element.text = el.value()
            elif 'b' in el.rpr or 'i' in el.rpr or 'u' in el.rpr:
                new_element = None
                _element = None

                def _add_formatting(f, new_element, _element):
                    # Add one nesting level of <b>/<i>/<u> when flag ``f`` is
                    # present in el.rpr; returns the (outermost, innermost)
                    # element pair so subsequent tags nest inside.
                    if f in el.rpr:
                        _t = etree.Element(f)
                        if new_element is not None:
                            _element.append(_t)
                            _element = _t
                        else:
                            new_element = _t
                            _element = new_element
                    return new_element, _element

                new_element, _element = _add_formatting('b', new_element, _element)
                new_element, _element = _add_formatting('i', new_element, _element)
                new_element, _element = _add_formatting('u', new_element, _element)
                # Text goes on the innermost formatting element.
                _element.text = el.value()
                for comment_id in ctx.opened_comments:
                    document.comments[comment_id].text += ' ' + el.value()
            else:
                new_element = etree.Element('span')
                new_element.text = el.value()
                if ctx.options['embed_styles']:
                    try:
                        new_element.set('class', _text_class)
                    except:
                        pass
                for comment_id in ctx.opened_comments:
                    if comment_id in document.comments:
                        document.comments[comment_id].text += ' ' + el.value()

            if ctx.options['embed_styles']:
                if _text_style != '' and _style != _text_style:
                    new_element.set('style', _text_style)
                # This is for situations when style has options and
                # text is trying to unset them
                # else:
                #     new_element.set('class', 'noformat')

            # Try to merge this run into the previous sibling (or into the
            # paragraph's own text) when styling matches, instead of
            # appending a redundant adjacent element.
            was_inserted = False
            if len(children) > 0:
                _child_style = children[-1].get('style') or ''
                _child_class = children[-1].get('class', '')
                # Same tag and compatible class/style, and the sibling has no
                # trailing tail text: append directly to its .text.
                if new_element.tag == children[-1].tag and ((_text_class == _child_class or _child_class == '') and (_text_style == _child_style or _child_style == '')) and children[-1].tail is None:
                    txt = children[-1].text or ''
                    txt2 = new_element.text or ''
                    children[-1].text = u'{}{}'.format(txt, txt2)
                    was_inserted = True
                if not was_inserted:
                    # Otherwise a plain span can become tail text of the
                    # previous sibling.
                    if _style == _text_style and new_element.tag == 'span' and (_text_class == _child_class or _child_class == ''):
                        _e = children[-1]
                        txt = _e.tail or ''
                        _e.tail = u'{}{}'.format(txt, new_element.text)
                        was_inserted = True
                    if not was_inserted and new_element.tag == 'span' and (_text_class != _child_class):
                        _e = children[-1]
                        txt = _e.tail or ''
                        _e.tail = u'{}{}'.format(txt, new_element.text)
                        was_inserted = True
            if not was_inserted:
                _child_class = new_element.get('class', '')
                try:
                    _child_class = children[-1].get('class', '')
                except:
                    # No previous sibling (children may be empty).
                    _child_class = ''
                if _style == _text_style and new_element.tag == 'span' and (_text_class == _child_class):
                    # Plain span matching the paragraph style: merge into the
                    # paragraph's own leading text.
                    txt = elem.text or ''
                    elem.text = u'{}{}'.format(txt, new_element.text)
                else:
                    if new_element.text != u'':
                        elem.append(new_element)

    # Header detection: heading-like paragraphs are retagged and appended
    # immediately (closing any open list first), then we return early.
    if not par.is_dropcap() and par.ilvl == None:
        if style:
            if ctx.header.is_header(par, max_font_size, elem, style=style):
                elem.tag = ctx.header.get_header(par, style, elem)
                if par.ilvl == None:
                    root = close_list(ctx, root)
                    ctx.ilvl, ctx.numid = None, None
                if root is not None:
                    root.append(elem)
                fire_hooks(ctx, document, par, elem, ctx.get_hook('h'))
                return root
        else:
            # Commented part where we only checked for heading if font size
            # was bigger than default font size. In many cases this did not
            # work out well.
            # if max_font_size > ctx.header.default_font_size:
            if True:
                if ctx.header.is_header(par, max_font_size, elem, style=style):
                    if elem.text != '' and len(list(elem)) != 0:
                        elem.tag = ctx.header.get_header(par, max_font_size, elem)
                    if par.ilvl == None:
                        root = close_list(ctx, root)
                        ctx.ilvl, ctx.numid = None, None
                    if root is not None:
                        root.append(elem)
                    fire_hooks(ctx, document, par, elem, ctx.get_hook('h'))
                    return root

    # Completely empty paragraph: optionally keep it visible with &nbsp;.
    if len(list(elem)) == 0 and elem.text is None:
        if ctx.options['empty_paragraph_as_nbsp']:
            elem.append(etree.Entity('nbsp'))

    # Indentation is different. We are starting or closing list.
    if par.ilvl != None:
        root = open_list(ctx, document, par, root, elem)
        return root
    else:
        root = close_list(ctx, root)
        ctx.ilvl, ctx.numid = None, None

    # Add new elements to our root element.
    if root is not None:
        root.append(elem)

    fire_hooks(ctx, document, par, elem, ctx.get_hook('p'))
    return root
constant[Serializes paragraph element.
This is the most important serializer of them all.
]
variable[style] assign[=] call[name[get_style], parameter[name[document], name[par]]]
variable[elem] assign[=] call[name[etree].Element, parameter[constant[p]]]
if call[name[ctx].options][constant[embed_styles]] begin[:]
variable[_style] assign[=] call[name[get_style_css], parameter[name[ctx], name[par]]]
if compare[name[_style] not_equal[!=] constant[]] begin[:]
call[name[elem].set, parameter[constant[style], name[_style]]]
if name[style] begin[:]
call[name[elem].set, parameter[constant[class], call[name[get_css_classes], parameter[name[document], name[style]]]]]
variable[max_font_size] assign[=] call[name[get_style_fontsize], parameter[name[par]]]
if name[style] begin[:]
variable[max_font_size] assign[=] call[name[_get_font_size], parameter[name[document], name[style]]]
for taget[name[el]] in starred[name[par].elements] begin[:]
variable[_serializer] assign[=] call[name[ctx].get_serializer, parameter[name[el]]]
if name[_serializer] begin[:]
call[name[_serializer], parameter[name[ctx], name[document], name[el], name[elem]]]
if call[name[isinstance], parameter[name[el], name[doc].Text]] begin[:]
variable[children] assign[=] call[name[list], parameter[name[elem]]]
variable[_text_style] assign[=] call[name[get_style_css], parameter[name[ctx], name[el]]]
variable[_text_class] assign[=] call[call[name[el].rpr.get, parameter[constant[style], constant[]]].lower, parameter[]]
if compare[name[_text_class] equal[==] constant[]] begin[:]
variable[__s] assign[=] call[name[get_style], parameter[name[document], name[par]]]
if compare[name[__s] is_not constant[None]] begin[:]
variable[_text_class] assign[=] call[call[name[get_style_name], parameter[name[__s]]].lower, parameter[]]
if compare[call[name[get_style_fontsize], parameter[name[el]]] greater[>] name[max_font_size]] begin[:]
variable[max_font_size] assign[=] call[name[get_style_fontsize], parameter[name[el]]]
if compare[constant[superscript] in name[el].rpr] begin[:]
variable[new_element] assign[=] call[name[etree].Element, parameter[constant[sup]]]
name[new_element].text assign[=] call[name[el].value, parameter[]]
if call[name[ctx].options][constant[embed_styles]] begin[:]
if <ast.BoolOp object at 0x7da18dc9aef0> begin[:]
call[name[new_element].set, parameter[constant[style], name[_text_style]]]
variable[was_inserted] assign[=] constant[False]
if compare[call[name[len], parameter[name[children]]] greater[>] constant[0]] begin[:]
variable[_child_style] assign[=] <ast.BoolOp object at 0x7da18dc99810>
variable[_child_class] assign[=] call[call[name[children]][<ast.UnaryOp object at 0x7da18dc9b070>].get, parameter[constant[class], constant[]]]
if <ast.BoolOp object at 0x7da18dc9b6d0> begin[:]
variable[txt] assign[=] <ast.BoolOp object at 0x7da18dc99d50>
variable[txt2] assign[=] <ast.BoolOp object at 0x7da18dc9b250>
call[name[children]][<ast.UnaryOp object at 0x7da18dc984f0>].text assign[=] call[constant[{}{}].format, parameter[name[txt], name[txt2]]]
variable[was_inserted] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da18dc9ad10> begin[:]
if <ast.BoolOp object at 0x7da18dc9a830> begin[:]
variable[_e] assign[=] call[name[children]][<ast.UnaryOp object at 0x7da18dc99300>]
variable[txt] assign[=] <ast.BoolOp object at 0x7da18dc9bd00>
name[_e].tail assign[=] call[constant[{}{}].format, parameter[name[txt], name[new_element].text]]
variable[was_inserted] assign[=] constant[True]
if <ast.BoolOp object at 0x7da18dc9a2f0> begin[:]
variable[_e] assign[=] call[name[children]][<ast.UnaryOp object at 0x7da18dc9a500>]
variable[txt] assign[=] <ast.BoolOp object at 0x7da18dc9a440>
name[_e].tail assign[=] call[constant[{}{}].format, parameter[name[txt], name[new_element].text]]
variable[was_inserted] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da18dc9b0a0> begin[:]
variable[_child_class] assign[=] call[name[new_element].get, parameter[constant[class], constant[]]]
<ast.Try object at 0x7da18dc9b2e0>
if <ast.BoolOp object at 0x7da18dc99ba0> begin[:]
variable[txt] assign[=] <ast.BoolOp object at 0x7da20c7cbd30>
name[elem].text assign[=] call[constant[{}{}].format, parameter[name[txt], name[new_element].text]]
if <ast.BoolOp object at 0x7da20c7c9960> begin[:]
if name[style] begin[:]
if call[name[ctx].header.is_header, parameter[name[par], name[max_font_size], name[elem]]] begin[:]
name[elem].tag assign[=] call[name[ctx].header.get_header, parameter[name[par], name[style], name[elem]]]
if compare[name[par].ilvl equal[==] constant[None]] begin[:]
variable[root] assign[=] call[name[close_list], parameter[name[ctx], name[root]]]
<ast.Tuple object at 0x7da20c7c9f00> assign[=] tuple[[<ast.Constant object at 0x7da20c7cb880>, <ast.Constant object at 0x7da20c7c95d0>]]
if compare[name[root] is_not constant[None]] begin[:]
call[name[root].append, parameter[name[elem]]]
call[name[fire_hooks], parameter[name[ctx], name[document], name[par], name[elem], call[name[ctx].get_hook, parameter[constant[h]]]]]
return[name[root]]
if <ast.BoolOp object at 0x7da20c7c9990> begin[:]
if call[name[ctx].options][constant[empty_paragraph_as_nbsp]] begin[:]
call[name[elem].append, parameter[call[name[etree].Entity, parameter[constant[nbsp]]]]]
if compare[name[par].ilvl not_equal[!=] constant[None]] begin[:]
variable[root] assign[=] call[name[open_list], parameter[name[ctx], name[document], name[par], name[root], name[elem]]]
return[name[root]]
if compare[name[root] is_not constant[None]] begin[:]
call[name[root].append, parameter[name[elem]]]
call[name[fire_hooks], parameter[name[ctx], name[document], name[par], name[elem], call[name[ctx].get_hook, parameter[constant[p]]]]]
return[name[root]] | keyword[def] identifier[serialize_paragraph] ( identifier[ctx] , identifier[document] , identifier[par] , identifier[root] , identifier[embed] = keyword[True] ):
literal[string]
identifier[style] = identifier[get_style] ( identifier[document] , identifier[par] )
identifier[elem] = identifier[etree] . identifier[Element] ( literal[string] )
keyword[if] identifier[ctx] . identifier[options] [ literal[string] ]:
identifier[_style] = identifier[get_style_css] ( identifier[ctx] , identifier[par] )
keyword[if] identifier[_style] != literal[string] :
identifier[elem] . identifier[set] ( literal[string] , identifier[_style] )
keyword[else] :
identifier[_style] = literal[string]
keyword[if] identifier[style] :
identifier[elem] . identifier[set] ( literal[string] , identifier[get_css_classes] ( identifier[document] , identifier[style] ))
identifier[max_font_size] = identifier[get_style_fontsize] ( identifier[par] )
keyword[if] identifier[style] :
identifier[max_font_size] = identifier[_get_font_size] ( identifier[document] , identifier[style] )
keyword[for] identifier[el] keyword[in] identifier[par] . identifier[elements] :
identifier[_serializer] = identifier[ctx] . identifier[get_serializer] ( identifier[el] )
keyword[if] identifier[_serializer] :
identifier[_serializer] ( identifier[ctx] , identifier[document] , identifier[el] , identifier[elem] )
keyword[if] identifier[isinstance] ( identifier[el] , identifier[doc] . identifier[Text] ):
identifier[children] = identifier[list] ( identifier[elem] )
identifier[_text_style] = identifier[get_style_css] ( identifier[ctx] , identifier[el] )
identifier[_text_class] = identifier[el] . identifier[rpr] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] ()
keyword[if] identifier[_text_class] == literal[string] :
identifier[__s] = identifier[get_style] ( identifier[document] , identifier[par] )
keyword[if] identifier[__s] keyword[is] keyword[not] keyword[None] :
identifier[_text_class] = identifier[get_style_name] ( identifier[__s] ). identifier[lower] ()
keyword[if] identifier[get_style_fontsize] ( identifier[el] )> identifier[max_font_size] :
identifier[max_font_size] = identifier[get_style_fontsize] ( identifier[el] )
keyword[if] literal[string] keyword[in] identifier[el] . identifier[rpr] :
identifier[new_element] = identifier[etree] . identifier[Element] ( literal[string] )
identifier[new_element] . identifier[text] = identifier[el] . identifier[value] ()
keyword[elif] literal[string] keyword[in] identifier[el] . identifier[rpr] :
identifier[new_element] = identifier[etree] . identifier[Element] ( literal[string] )
identifier[new_element] . identifier[text] = identifier[el] . identifier[value] ()
keyword[elif] literal[string] keyword[in] identifier[el] . identifier[rpr] keyword[or] literal[string] keyword[in] identifier[el] . identifier[rpr] keyword[or] literal[string] keyword[in] identifier[el] . identifier[rpr] :
identifier[new_element] = keyword[None]
identifier[_element] = keyword[None]
keyword[def] identifier[_add_formatting] ( identifier[f] , identifier[new_element] , identifier[_element] ):
keyword[if] identifier[f] keyword[in] identifier[el] . identifier[rpr] :
identifier[_t] = identifier[etree] . identifier[Element] ( identifier[f] )
keyword[if] identifier[new_element] keyword[is] keyword[not] keyword[None] :
identifier[_element] . identifier[append] ( identifier[_t] )
identifier[_element] = identifier[_t]
keyword[else] :
identifier[new_element] = identifier[_t]
identifier[_element] = identifier[new_element]
keyword[return] identifier[new_element] , identifier[_element]
identifier[new_element] , identifier[_element] = identifier[_add_formatting] ( literal[string] , identifier[new_element] , identifier[_element] )
identifier[new_element] , identifier[_element] = identifier[_add_formatting] ( literal[string] , identifier[new_element] , identifier[_element] )
identifier[new_element] , identifier[_element] = identifier[_add_formatting] ( literal[string] , identifier[new_element] , identifier[_element] )
identifier[_element] . identifier[text] = identifier[el] . identifier[value] ()
keyword[for] identifier[comment_id] keyword[in] identifier[ctx] . identifier[opened_comments] :
identifier[document] . identifier[comments] [ identifier[comment_id] ]. identifier[text] += literal[string] + identifier[el] . identifier[value] ()
keyword[else] :
identifier[new_element] = identifier[etree] . identifier[Element] ( literal[string] )
identifier[new_element] . identifier[text] = identifier[el] . identifier[value] ()
keyword[if] identifier[ctx] . identifier[options] [ literal[string] ]:
keyword[try] :
identifier[new_element] . identifier[set] ( literal[string] , identifier[_text_class] )
keyword[except] :
keyword[pass]
keyword[for] identifier[comment_id] keyword[in] identifier[ctx] . identifier[opened_comments] :
keyword[if] identifier[comment_id] keyword[in] identifier[document] . identifier[comments] :
identifier[document] . identifier[comments] [ identifier[comment_id] ]. identifier[text] += literal[string] + identifier[el] . identifier[value] ()
keyword[if] identifier[ctx] . identifier[options] [ literal[string] ]:
keyword[if] identifier[_text_style] != literal[string] keyword[and] identifier[_style] != identifier[_text_style] :
identifier[new_element] . identifier[set] ( literal[string] , identifier[_text_style] )
identifier[was_inserted] = keyword[False]
keyword[if] identifier[len] ( identifier[children] )> literal[int] :
identifier[_child_style] = identifier[children] [- literal[int] ]. identifier[get] ( literal[string] ) keyword[or] literal[string]
identifier[_child_class] = identifier[children] [- literal[int] ]. identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[new_element] . identifier[tag] == identifier[children] [- literal[int] ]. identifier[tag] keyword[and] (( identifier[_text_class] == identifier[_child_class] keyword[or] identifier[_child_class] == literal[string] ) keyword[and] ( identifier[_text_style] == identifier[_child_style] keyword[or] identifier[_child_style] == literal[string] )) keyword[and] identifier[children] [- literal[int] ]. identifier[tail] keyword[is] keyword[None] :
identifier[txt] = identifier[children] [- literal[int] ]. identifier[text] keyword[or] literal[string]
identifier[txt2] = identifier[new_element] . identifier[text] keyword[or] literal[string]
identifier[children] [- literal[int] ]. identifier[text] = literal[string] . identifier[format] ( identifier[txt] , identifier[txt2] )
identifier[was_inserted] = keyword[True]
keyword[if] keyword[not] identifier[was_inserted] :
keyword[if] identifier[_style] == identifier[_text_style] keyword[and] identifier[new_element] . identifier[tag] == literal[string] keyword[and] ( identifier[_text_class] == identifier[_child_class] keyword[or] identifier[_child_class] == literal[string] ):
identifier[_e] = identifier[children] [- literal[int] ]
identifier[txt] = identifier[_e] . identifier[tail] keyword[or] literal[string]
identifier[_e] . identifier[tail] = literal[string] . identifier[format] ( identifier[txt] , identifier[new_element] . identifier[text] )
identifier[was_inserted] = keyword[True]
keyword[if] keyword[not] identifier[was_inserted] keyword[and] identifier[new_element] . identifier[tag] == literal[string] keyword[and] ( identifier[_text_class] != identifier[_child_class] ):
identifier[_e] = identifier[children] [- literal[int] ]
identifier[txt] = identifier[_e] . identifier[tail] keyword[or] literal[string]
identifier[_e] . identifier[tail] = literal[string] . identifier[format] ( identifier[txt] , identifier[new_element] . identifier[text] )
identifier[was_inserted] = keyword[True]
keyword[if] keyword[not] identifier[was_inserted] :
identifier[_child_class] = identifier[new_element] . identifier[get] ( literal[string] , literal[string] )
keyword[try] :
identifier[_child_class] = identifier[children] [- literal[int] ]. identifier[get] ( literal[string] , literal[string] )
keyword[except] :
identifier[_child_class] = literal[string]
keyword[if] identifier[_style] == identifier[_text_style] keyword[and] identifier[new_element] . identifier[tag] == literal[string] keyword[and] ( identifier[_text_class] == identifier[_child_class] ):
identifier[txt] = identifier[elem] . identifier[text] keyword[or] literal[string]
identifier[elem] . identifier[text] = literal[string] . identifier[format] ( identifier[txt] , identifier[new_element] . identifier[text] )
keyword[else] :
keyword[if] identifier[new_element] . identifier[text] != literal[string] :
identifier[elem] . identifier[append] ( identifier[new_element] )
keyword[if] keyword[not] identifier[par] . identifier[is_dropcap] () keyword[and] identifier[par] . identifier[ilvl] == keyword[None] :
keyword[if] identifier[style] :
keyword[if] identifier[ctx] . identifier[header] . identifier[is_header] ( identifier[par] , identifier[max_font_size] , identifier[elem] , identifier[style] = identifier[style] ):
identifier[elem] . identifier[tag] = identifier[ctx] . identifier[header] . identifier[get_header] ( identifier[par] , identifier[style] , identifier[elem] )
keyword[if] identifier[par] . identifier[ilvl] == keyword[None] :
identifier[root] = identifier[close_list] ( identifier[ctx] , identifier[root] )
identifier[ctx] . identifier[ilvl] , identifier[ctx] . identifier[numid] = keyword[None] , keyword[None]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
identifier[root] . identifier[append] ( identifier[elem] )
identifier[fire_hooks] ( identifier[ctx] , identifier[document] , identifier[par] , identifier[elem] , identifier[ctx] . identifier[get_hook] ( literal[string] ))
keyword[return] identifier[root]
keyword[else] :
keyword[if] keyword[True] :
keyword[if] identifier[ctx] . identifier[header] . identifier[is_header] ( identifier[par] , identifier[max_font_size] , identifier[elem] , identifier[style] = identifier[style] ):
keyword[if] identifier[elem] . identifier[text] != literal[string] keyword[and] identifier[len] ( identifier[list] ( identifier[elem] ))!= literal[int] :
identifier[elem] . identifier[tag] = identifier[ctx] . identifier[header] . identifier[get_header] ( identifier[par] , identifier[max_font_size] , identifier[elem] )
keyword[if] identifier[par] . identifier[ilvl] == keyword[None] :
identifier[root] = identifier[close_list] ( identifier[ctx] , identifier[root] )
identifier[ctx] . identifier[ilvl] , identifier[ctx] . identifier[numid] = keyword[None] , keyword[None]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
identifier[root] . identifier[append] ( identifier[elem] )
identifier[fire_hooks] ( identifier[ctx] , identifier[document] , identifier[par] , identifier[elem] , identifier[ctx] . identifier[get_hook] ( literal[string] ))
keyword[return] identifier[root]
keyword[if] identifier[len] ( identifier[list] ( identifier[elem] ))== literal[int] keyword[and] identifier[elem] . identifier[text] keyword[is] keyword[None] :
keyword[if] identifier[ctx] . identifier[options] [ literal[string] ]:
identifier[elem] . identifier[append] ( identifier[etree] . identifier[Entity] ( literal[string] ))
keyword[if] identifier[par] . identifier[ilvl] != keyword[None] :
identifier[root] = identifier[open_list] ( identifier[ctx] , identifier[document] , identifier[par] , identifier[root] , identifier[elem] )
keyword[return] identifier[root]
keyword[else] :
identifier[root] = identifier[close_list] ( identifier[ctx] , identifier[root] )
identifier[ctx] . identifier[ilvl] , identifier[ctx] . identifier[numid] = keyword[None] , keyword[None]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
identifier[root] . identifier[append] ( identifier[elem] )
identifier[fire_hooks] ( identifier[ctx] , identifier[document] , identifier[par] , identifier[elem] , identifier[ctx] . identifier[get_hook] ( literal[string] ))
keyword[return] identifier[root] | def serialize_paragraph(ctx, document, par, root, embed=True):
"""Serializes paragraph element.
This is the most important serializer of them all.
"""
style = get_style(document, par)
elem = etree.Element('p')
if ctx.options['embed_styles']:
_style = get_style_css(ctx, par)
if _style != '':
elem.set('style', _style) # depends on [control=['if'], data=['_style']] # depends on [control=['if'], data=[]]
else:
_style = ''
if style:
elem.set('class', get_css_classes(document, style)) # depends on [control=['if'], data=[]]
max_font_size = get_style_fontsize(par)
if style:
max_font_size = _get_font_size(document, style) # depends on [control=['if'], data=[]]
for el in par.elements:
_serializer = ctx.get_serializer(el)
if _serializer:
_serializer(ctx, document, el, elem) # depends on [control=['if'], data=[]]
if isinstance(el, doc.Text):
children = list(elem)
_text_style = get_style_css(ctx, el)
_text_class = el.rpr.get('style', '').lower()
if _text_class == '':
__s = get_style(document, par)
if __s is not None:
_text_class = get_style_name(__s).lower() # depends on [control=['if'], data=['__s']] # depends on [control=['if'], data=['_text_class']]
if get_style_fontsize(el) > max_font_size:
max_font_size = get_style_fontsize(el) # depends on [control=['if'], data=['max_font_size']]
if 'superscript' in el.rpr:
new_element = etree.Element('sup')
new_element.text = el.value() # depends on [control=['if'], data=[]]
elif 'subscript' in el.rpr:
new_element = etree.Element('sub')
new_element.text = el.value() # depends on [control=['if'], data=[]]
elif 'b' in el.rpr or 'i' in el.rpr or 'u' in el.rpr:
new_element = None
_element = None
def _add_formatting(f, new_element, _element):
if f in el.rpr:
_t = etree.Element(f)
if new_element is not None:
_element.append(_t)
_element = _t # depends on [control=['if'], data=[]]
else:
new_element = _t
_element = new_element # depends on [control=['if'], data=['f']]
return (new_element, _element)
(new_element, _element) = _add_formatting('b', new_element, _element)
(new_element, _element) = _add_formatting('i', new_element, _element)
(new_element, _element) = _add_formatting('u', new_element, _element)
_element.text = el.value()
for comment_id in ctx.opened_comments:
document.comments[comment_id].text += ' ' + el.value() # depends on [control=['for'], data=['comment_id']] # depends on [control=['if'], data=[]]
else:
new_element = etree.Element('span')
new_element.text = el.value()
if ctx.options['embed_styles']:
try:
new_element.set('class', _text_class) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
for comment_id in ctx.opened_comments:
if comment_id in document.comments:
document.comments[comment_id].text += ' ' + el.value() # depends on [control=['if'], data=['comment_id']] # depends on [control=['for'], data=['comment_id']]
if ctx.options['embed_styles']:
if _text_style != '' and _style != _text_style:
new_element.set('style', _text_style) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# This is for situations when style has options and
# text is trying to unset them
# else:
# new_element.set('class', 'noformat')
was_inserted = False
if len(children) > 0:
_child_style = children[-1].get('style') or ''
_child_class = children[-1].get('class', '')
if new_element.tag == children[-1].tag and ((_text_class == _child_class or _child_class == '') and (_text_style == _child_style or _child_style == '')) and (children[-1].tail is None):
txt = children[-1].text or ''
txt2 = new_element.text or ''
children[-1].text = u'{}{}'.format(txt, txt2)
was_inserted = True # depends on [control=['if'], data=[]]
if not was_inserted:
if _style == _text_style and new_element.tag == 'span' and (_text_class == _child_class or _child_class == ''):
_e = children[-1]
txt = _e.tail or ''
_e.tail = u'{}{}'.format(txt, new_element.text)
was_inserted = True # depends on [control=['if'], data=[]]
if not was_inserted and new_element.tag == 'span' and (_text_class != _child_class):
_e = children[-1]
txt = _e.tail or ''
_e.tail = u'{}{}'.format(txt, new_element.text)
was_inserted = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not was_inserted:
_child_class = new_element.get('class', '')
try:
_child_class = children[-1].get('class', '') # depends on [control=['try'], data=[]]
except:
_child_class = '' # depends on [control=['except'], data=[]]
if _style == _text_style and new_element.tag == 'span' and (_text_class == _child_class):
txt = elem.text or ''
elem.text = u'{}{}'.format(txt, new_element.text) # depends on [control=['if'], data=[]]
elif new_element.text != u'':
elem.append(new_element) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['el']]
if not par.is_dropcap() and par.ilvl == None:
if style:
if ctx.header.is_header(par, max_font_size, elem, style=style):
elem.tag = ctx.header.get_header(par, style, elem)
if par.ilvl == None:
root = close_list(ctx, root)
(ctx.ilvl, ctx.numid) = (None, None) # depends on [control=['if'], data=[]]
if root is not None:
root.append(elem) # depends on [control=['if'], data=['root']]
fire_hooks(ctx, document, par, elem, ctx.get_hook('h'))
return root # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Commented part where we only checked for heading if font size
# was bigger than default font size. In many cases this did not
# work out well.
# if max_font_size > ctx.header.default_font_size:
elif True:
if ctx.header.is_header(par, max_font_size, elem, style=style):
if elem.text != '' and len(list(elem)) != 0:
elem.tag = ctx.header.get_header(par, max_font_size, elem)
if par.ilvl == None:
root = close_list(ctx, root)
(ctx.ilvl, ctx.numid) = (None, None) # depends on [control=['if'], data=[]]
if root is not None:
root.append(elem) # depends on [control=['if'], data=['root']]
fire_hooks(ctx, document, par, elem, ctx.get_hook('h'))
return root # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if len(list(elem)) == 0 and elem.text is None:
if ctx.options['empty_paragraph_as_nbsp']:
elem.append(etree.Entity('nbsp')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Indentation is different. We are starting or closing list.
if par.ilvl != None:
root = open_list(ctx, document, par, root, elem)
return root # depends on [control=['if'], data=[]]
else:
root = close_list(ctx, root)
(ctx.ilvl, ctx.numid) = (None, None)
# Add new elements to our root element.
if root is not None:
root.append(elem) # depends on [control=['if'], data=['root']]
fire_hooks(ctx, document, par, elem, ctx.get_hook('p'))
return root |
def convert_join(value):
"""
Fix a Join ;)
"""
if not isinstance(value, list) or len(value) != 2:
# Cowardly refuse
return value
sep, parts = value[0], value[1]
if isinstance(parts, six.string_types):
return parts
if not isinstance(parts, list):
# This looks tricky, just return the join as it was
return {
"Fn::Join": value,
}
plain_string = True
args = ODict()
new_parts = []
for part in parts:
part = clean(part)
if isinstance(part, dict):
plain_string = False
if "Ref" in part:
new_parts.append("${{{}}}".format(part["Ref"]))
elif "Fn::GetAtt" in part:
params = part["Fn::GetAtt"]
new_parts.append("${{{}}}".format(".".join(params)))
else:
for key, val in args.items():
# we want to bail if a conditional can evaluate to AWS::NoValue
if isinstance(val, dict):
if "Fn::If" in val and "AWS::NoValue" in str(val["Fn::If"]):
return {
"Fn::Join": value,
}
if val == part:
param_name = key
break
else:
param_name = "Param{}".format(len(args) + 1)
args[param_name] = part
new_parts.append("${{{}}}".format(param_name))
elif isinstance(part, six.string_types):
new_parts.append(part.replace("${", "${!"))
else:
# Doing something weird; refuse
return {
"Fn::Join": value
}
source = sep.join(new_parts)
if plain_string:
return source
if args:
return ODict((
("Fn::Sub", [source, args]),
))
return ODict((
("Fn::Sub", source),
)) | def function[convert_join, parameter[value]]:
constant[
Fix a Join ;)
]
if <ast.BoolOp object at 0x7da1b16abc40> begin[:]
return[name[value]]
<ast.Tuple object at 0x7da1b16a8970> assign[=] tuple[[<ast.Subscript object at 0x7da1b16a9780>, <ast.Subscript object at 0x7da1b16a84c0>]]
if call[name[isinstance], parameter[name[parts], name[six].string_types]] begin[:]
return[name[parts]]
if <ast.UnaryOp object at 0x7da1b16ab8b0> begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b16a8190>], [<ast.Name object at 0x7da18bc71270>]]]
variable[plain_string] assign[=] constant[True]
variable[args] assign[=] call[name[ODict], parameter[]]
variable[new_parts] assign[=] list[[]]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[part] assign[=] call[name[clean], parameter[name[part]]]
if call[name[isinstance], parameter[name[part], name[dict]]] begin[:]
variable[plain_string] assign[=] constant[False]
if compare[constant[Ref] in name[part]] begin[:]
call[name[new_parts].append, parameter[call[constant[${{{}}}].format, parameter[call[name[part]][constant[Ref]]]]]]
variable[source] assign[=] call[name[sep].join, parameter[name[new_parts]]]
if name[plain_string] begin[:]
return[name[source]]
if name[args] begin[:]
return[call[name[ODict], parameter[tuple[[<ast.Tuple object at 0x7da18c4cf850>]]]]]
return[call[name[ODict], parameter[tuple[[<ast.Tuple object at 0x7da18c4cd810>]]]]] | keyword[def] identifier[convert_join] ( identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[list] ) keyword[or] identifier[len] ( identifier[value] )!= literal[int] :
keyword[return] identifier[value]
identifier[sep] , identifier[parts] = identifier[value] [ literal[int] ], identifier[value] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[parts] , identifier[six] . identifier[string_types] ):
keyword[return] identifier[parts]
keyword[if] keyword[not] identifier[isinstance] ( identifier[parts] , identifier[list] ):
keyword[return] {
literal[string] : identifier[value] ,
}
identifier[plain_string] = keyword[True]
identifier[args] = identifier[ODict] ()
identifier[new_parts] =[]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[part] = identifier[clean] ( identifier[part] )
keyword[if] identifier[isinstance] ( identifier[part] , identifier[dict] ):
identifier[plain_string] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[part] :
identifier[new_parts] . identifier[append] ( literal[string] . identifier[format] ( identifier[part] [ literal[string] ]))
keyword[elif] literal[string] keyword[in] identifier[part] :
identifier[params] = identifier[part] [ literal[string] ]
identifier[new_parts] . identifier[append] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[params] )))
keyword[else] :
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[args] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[val] , identifier[dict] ):
keyword[if] literal[string] keyword[in] identifier[val] keyword[and] literal[string] keyword[in] identifier[str] ( identifier[val] [ literal[string] ]):
keyword[return] {
literal[string] : identifier[value] ,
}
keyword[if] identifier[val] == identifier[part] :
identifier[param_name] = identifier[key]
keyword[break]
keyword[else] :
identifier[param_name] = literal[string] . identifier[format] ( identifier[len] ( identifier[args] )+ literal[int] )
identifier[args] [ identifier[param_name] ]= identifier[part]
identifier[new_parts] . identifier[append] ( literal[string] . identifier[format] ( identifier[param_name] ))
keyword[elif] identifier[isinstance] ( identifier[part] , identifier[six] . identifier[string_types] ):
identifier[new_parts] . identifier[append] ( identifier[part] . identifier[replace] ( literal[string] , literal[string] ))
keyword[else] :
keyword[return] {
literal[string] : identifier[value]
}
identifier[source] = identifier[sep] . identifier[join] ( identifier[new_parts] )
keyword[if] identifier[plain_string] :
keyword[return] identifier[source]
keyword[if] identifier[args] :
keyword[return] identifier[ODict] ((
( literal[string] ,[ identifier[source] , identifier[args] ]),
))
keyword[return] identifier[ODict] ((
( literal[string] , identifier[source] ),
)) | def convert_join(value):
"""
Fix a Join ;)
"""
if not isinstance(value, list) or len(value) != 2:
# Cowardly refuse
return value # depends on [control=['if'], data=[]]
(sep, parts) = (value[0], value[1])
if isinstance(parts, six.string_types):
return parts # depends on [control=['if'], data=[]]
if not isinstance(parts, list):
# This looks tricky, just return the join as it was
return {'Fn::Join': value} # depends on [control=['if'], data=[]]
plain_string = True
args = ODict()
new_parts = []
for part in parts:
part = clean(part)
if isinstance(part, dict):
plain_string = False
if 'Ref' in part:
new_parts.append('${{{}}}'.format(part['Ref'])) # depends on [control=['if'], data=['part']]
elif 'Fn::GetAtt' in part:
params = part['Fn::GetAtt']
new_parts.append('${{{}}}'.format('.'.join(params))) # depends on [control=['if'], data=['part']]
else:
for (key, val) in args.items():
# we want to bail if a conditional can evaluate to AWS::NoValue
if isinstance(val, dict):
if 'Fn::If' in val and 'AWS::NoValue' in str(val['Fn::If']):
return {'Fn::Join': value} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if val == part:
param_name = key
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
param_name = 'Param{}'.format(len(args) + 1)
args[param_name] = part
new_parts.append('${{{}}}'.format(param_name)) # depends on [control=['if'], data=[]]
elif isinstance(part, six.string_types):
new_parts.append(part.replace('${', '${!')) # depends on [control=['if'], data=[]]
else:
# Doing something weird; refuse
return {'Fn::Join': value} # depends on [control=['for'], data=['part']]
source = sep.join(new_parts)
if plain_string:
return source # depends on [control=['if'], data=[]]
if args:
return ODict((('Fn::Sub', [source, args]),)) # depends on [control=['if'], data=[]]
return ODict((('Fn::Sub', source),)) |
def write_string(self, string):
"""Write a string to this packet."""
self.write_uint16(len(string))
self.write_bytes(string, len(string)) | def function[write_string, parameter[self, string]]:
constant[Write a string to this packet.]
call[name[self].write_uint16, parameter[call[name[len], parameter[name[string]]]]]
call[name[self].write_bytes, parameter[name[string], call[name[len], parameter[name[string]]]]] | keyword[def] identifier[write_string] ( identifier[self] , identifier[string] ):
literal[string]
identifier[self] . identifier[write_uint16] ( identifier[len] ( identifier[string] ))
identifier[self] . identifier[write_bytes] ( identifier[string] , identifier[len] ( identifier[string] )) | def write_string(self, string):
"""Write a string to this packet."""
self.write_uint16(len(string))
self.write_bytes(string, len(string)) |
def collect_yarn_application_diagnostics(self, *application_ids):
"""
DEPRECATED: use create_yarn_application_diagnostics_bundle on the Yarn service. Deprecated since v10.
Collects the Diagnostics data for Yarn applications.
@param application_ids: An array of strings containing the ids of the
yarn applications.
@return: Reference to the submitted command.
@since: API v8
"""
args = dict(applicationIds = application_ids)
return self._cmd('yarnApplicationDiagnosticsCollection', api_version=8, data=args) | def function[collect_yarn_application_diagnostics, parameter[self]]:
constant[
DEPRECATED: use create_yarn_application_diagnostics_bundle on the Yarn service. Deprecated since v10.
Collects the Diagnostics data for Yarn applications.
@param application_ids: An array of strings containing the ids of the
yarn applications.
@return: Reference to the submitted command.
@since: API v8
]
variable[args] assign[=] call[name[dict], parameter[]]
return[call[name[self]._cmd, parameter[constant[yarnApplicationDiagnosticsCollection]]]] | keyword[def] identifier[collect_yarn_application_diagnostics] ( identifier[self] ,* identifier[application_ids] ):
literal[string]
identifier[args] = identifier[dict] ( identifier[applicationIds] = identifier[application_ids] )
keyword[return] identifier[self] . identifier[_cmd] ( literal[string] , identifier[api_version] = literal[int] , identifier[data] = identifier[args] ) | def collect_yarn_application_diagnostics(self, *application_ids):
"""
DEPRECATED: use create_yarn_application_diagnostics_bundle on the Yarn service. Deprecated since v10.
Collects the Diagnostics data for Yarn applications.
@param application_ids: An array of strings containing the ids of the
yarn applications.
@return: Reference to the submitted command.
@since: API v8
"""
args = dict(applicationIds=application_ids)
return self._cmd('yarnApplicationDiagnosticsCollection', api_version=8, data=args) |
def find_analyses(ar_or_sample):
    """Return the analyses of the Analysis Request matching *ar_or_sample*.

    The catalog is queried first by AR id and, if nothing matches, by
    client sample id.  The full analysis objects are returned only when
    exactly one AR matches; otherwise an empty list is returned.

    This function and the is_keyword function should probably live in
    resultsimport.py or somewhere central where other instrument
    interfaces can reuse them.
    """
    catalog = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
    brains = catalog(portal_type='AnalysisRequest', id=ar_or_sample)
    if not brains:
        # Fall back to the client sample id when no AR id matched.
        brains = catalog(portal_type='AnalysisRequest',
                         getClientSampleID=ar_or_sample)
    if len(brains) != 1:
        return []
    return brains[0].getObject().getAnalyses(full_objects=True)
return [] | def function[find_analyses, parameter[ar_or_sample]]:
constant[ This function is used to find keywords that are not on the analysis
but keywords that are on the interim fields.
This function and is is_keyword function should probably be in
resultsimport.py or somewhere central where it can be used by other
instrument interfaces.
]
variable[bc] assign[=] call[name[api].get_tool, parameter[name[CATALOG_ANALYSIS_REQUEST_LISTING]]]
variable[ar] assign[=] call[name[bc], parameter[]]
if compare[call[name[len], parameter[name[ar]]] equal[==] constant[0]] begin[:]
variable[ar] assign[=] call[name[bc], parameter[]]
if compare[call[name[len], parameter[name[ar]]] equal[==] constant[1]] begin[:]
variable[obj] assign[=] call[call[name[ar]][constant[0]].getObject, parameter[]]
variable[analyses] assign[=] call[name[obj].getAnalyses, parameter[]]
return[name[analyses]]
return[list[[]]] | keyword[def] identifier[find_analyses] ( identifier[ar_or_sample] ):
literal[string]
identifier[bc] = identifier[api] . identifier[get_tool] ( identifier[CATALOG_ANALYSIS_REQUEST_LISTING] )
identifier[ar] = identifier[bc] ( identifier[portal_type] = literal[string] , identifier[id] = identifier[ar_or_sample] )
keyword[if] identifier[len] ( identifier[ar] )== literal[int] :
identifier[ar] = identifier[bc] ( identifier[portal_type] = literal[string] , identifier[getClientSampleID] = identifier[ar_or_sample] )
keyword[if] identifier[len] ( identifier[ar] )== literal[int] :
identifier[obj] = identifier[ar] [ literal[int] ]. identifier[getObject] ()
identifier[analyses] = identifier[obj] . identifier[getAnalyses] ( identifier[full_objects] = keyword[True] )
keyword[return] identifier[analyses]
keyword[return] [] | def find_analyses(ar_or_sample):
""" This function is used to find keywords that are not on the analysis
but keywords that are on the interim fields.
This function and is is_keyword function should probably be in
resultsimport.py or somewhere central where it can be used by other
instrument interfaces.
"""
bc = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
ar = bc(portal_type='AnalysisRequest', id=ar_or_sample)
if len(ar) == 0:
ar = bc(portal_type='AnalysisRequest', getClientSampleID=ar_or_sample) # depends on [control=['if'], data=[]]
if len(ar) == 1:
obj = ar[0].getObject()
analyses = obj.getAnalyses(full_objects=True)
return analyses # depends on [control=['if'], data=[]]
return [] |
def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Add gaussian noise (aka white noise) to images.

    dtype support::

        See ``imgaug.augmenters.arithmetic.AddElementwise``.

    Parameters
    ----------
    loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Mean of the normal distribution that generates the noise.
        A number is used as-is; a tuple ``(a, b)`` samples a value per
        image from ``a <= x <= b``; a list samples one of its entries per
        image; a StochasticParameter is sampled per image.

    scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Standard deviation of the normal distribution that generates the
        noise.  Must be ``>= 0``; if 0 then only `loc` is used.  The same
        number/tuple/list/StochasticParameter semantics as for `loc` apply.

    per_channel : bool or float, optional
        Whether to use the same noise value per pixel for all channels
        (False) or to sample a new value for each channel (True).  A float
        ``p`` treats `per_channel` as True for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)

    adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.

    >>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))

    adds gaussian noise from ``N(0, s)``, with ``s`` sampled per image
    from ``0 <= s <= 0.1*255``.

    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)

    adds noise that differs per pixel *and* per channel.

    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)

    adds noise that is per-channel for 50 percent of all images and
    shared across channels for the rest.
    """
    # Normalize the user-facing arguments into stochastic parameters.
    loc_param = iap.handle_continuous_param(
        loc, "loc", value_range=None,
        tuple_to_uniform=True, list_to_choice=True)
    scale_param = iap.handle_continuous_param(
        scale, "scale", value_range=(0, None),
        tuple_to_uniform=True, list_to_choice=True)

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    noise = iap.Normal(loc=loc_param, scale=scale_param)
    return AddElementwise(noise, per_channel=per_channel, name=name,
                          deterministic=deterministic,
                          random_state=random_state)
constant[
Add gaussian noise (aka white noise) to images.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution that generates the noise.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>= 0``. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution ``N(0, s)`` to images,
where s is sampled per image from the range ``0 <= s <= 0.1*255``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
]
variable[loc2] assign[=] call[name[iap].handle_continuous_param, parameter[name[loc], constant[loc]]]
variable[scale2] assign[=] call[name[iap].handle_continuous_param, parameter[name[scale], constant[scale]]]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] binary_operation[constant[Unnamed%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b025eb90>]]]
return[call[name[AddElementwise], parameter[call[name[iap].Normal, parameter[]]]]] | keyword[def] identifier[AdditiveGaussianNoise] ( identifier[loc] = literal[int] , identifier[scale] = literal[int] , identifier[per_channel] = keyword[False] , identifier[name] = keyword[None] , identifier[deterministic] = keyword[False] , identifier[random_state] = keyword[None] ):
literal[string]
identifier[loc2] = identifier[iap] . identifier[handle_continuous_param] ( identifier[loc] , literal[string] , identifier[value_range] = keyword[None] , identifier[tuple_to_uniform] = keyword[True] , identifier[list_to_choice] = keyword[True] )
identifier[scale2] = identifier[iap] . identifier[handle_continuous_param] ( identifier[scale] , literal[string] , identifier[value_range] =( literal[int] , keyword[None] ), identifier[tuple_to_uniform] = keyword[True] ,
identifier[list_to_choice] = keyword[True] )
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = literal[string] %( identifier[ia] . identifier[caller_name] (),)
keyword[return] identifier[AddElementwise] ( identifier[iap] . identifier[Normal] ( identifier[loc] = identifier[loc2] , identifier[scale] = identifier[scale2] ), identifier[per_channel] = identifier[per_channel] , identifier[name] = identifier[name] ,
identifier[deterministic] = identifier[deterministic] , identifier[random_state] = identifier[random_state] ) | def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Add gaussian noise (aka white noise) to images.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution that generates the noise.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>= 0``. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution ``N(0, s)`` to images,
where s is sampled per image from the range ``0 <= s <= 0.1*255``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
"""
loc2 = iap.handle_continuous_param(loc, 'loc', value_range=None, tuple_to_uniform=True, list_to_choice=True)
scale2 = iap.handle_continuous_param(scale, 'scale', value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
if name is None:
name = 'Unnamed%s' % (ia.caller_name(),) # depends on [control=['if'], data=['name']]
return AddElementwise(iap.Normal(loc=loc2, scale=scale2), per_channel=per_channel, name=name, deterministic=deterministic, random_state=random_state) |
def signature_cert_chain_url(url):
    """Validate URL specified by SignatureCertChainUrl.

    Amazon requires the certificate chain URL to be an HTTPS URL on
    ``s3.amazonaws.com`` whose normalized path starts with ``/echo.api/``
    and which, when a port is given at all, uses port 443.

    See `validate.request` for additional info.

    Args:
        url: str. SignatureCertChainUrl header value sent by request.

    Returns:
        bool: True if valid, False otherwise.
    """
    r = urlparse(url)
    if r.scheme.lower() != 'https':
        warnings.warn('Certificate URL scheme is invalid.')
        return False
    # urlparse returns hostname=None when the URL has no network
    # location; guard against that before calling .lower(), otherwise a
    # malformed header crashes the validator instead of failing it.
    if r.hostname is None or r.hostname.lower() != 's3.amazonaws.com':
        warnings.warn('Certificate URL hostname is invalid.')
        return False
    # normpath collapses '..' segments so '/echo.api/../evil' is rejected.
    if not os.path.normpath(r.path).startswith('/echo.api/'):
        warnings.warn('Certificate URL path is invalid.')
        return False
    if r.port and r.port != 443:
        warnings.warn('Certificate URL port is invalid.')
        return False
    return True
constant[Validate URL specified by SignatureCertChainUrl.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
bool: True if valid, False otherwise.
]
variable[r] assign[=] call[name[urlparse], parameter[name[url]]]
if <ast.UnaryOp object at 0x7da18bc70070> begin[:]
call[name[warnings].warn, parameter[constant[Certificate URL scheme is invalid.]]]
return[constant[False]]
if <ast.UnaryOp object at 0x7da18bc73100> begin[:]
call[name[warnings].warn, parameter[constant[Certificate URL hostname is invalid.]]]
return[constant[False]]
if <ast.UnaryOp object at 0x7da18bc719c0> begin[:]
call[name[warnings].warn, parameter[constant[Certificate URL path is invalid.]]]
return[constant[False]]
if <ast.BoolOp object at 0x7da18c4cef20> begin[:]
call[name[warnings].warn, parameter[constant[Certificate URL port is invalid.]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[signature_cert_chain_url] ( identifier[url] ):
literal[string]
identifier[r] = identifier[urlparse] ( identifier[url] )
keyword[if] keyword[not] identifier[r] . identifier[scheme] . identifier[lower] ()== literal[string] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[r] . identifier[hostname] . identifier[lower] ()== literal[string] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[normpath] ( identifier[r] . identifier[path] ). identifier[startswith] ( literal[string] ):
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[r] . identifier[port] keyword[and] keyword[not] identifier[r] . identifier[port] == literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def signature_cert_chain_url(url):
"""Validate URL specified by SignatureCertChainUrl.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
bool: True if valid, False otherwise.
"""
r = urlparse(url)
if not r.scheme.lower() == 'https':
warnings.warn('Certificate URL scheme is invalid.')
return False # depends on [control=['if'], data=[]]
if not r.hostname.lower() == 's3.amazonaws.com':
warnings.warn('Certificate URL hostname is invalid.')
return False # depends on [control=['if'], data=[]]
if not os.path.normpath(r.path).startswith('/echo.api/'):
warnings.warn('Certificate URL path is invalid.')
return False # depends on [control=['if'], data=[]]
if r.port and (not r.port == 443):
warnings.warn('Certificate URL port is invalid.')
return False # depends on [control=['if'], data=[]]
return True |
def validate_payment_form(self):
    """Validate the DirectPayment form and process it when valid.

    On successful processing the user is redirected to ``success_url``;
    otherwise the form (plus the appropriate error message) is rendered
    again with ``payment_template``.
    """
    warn_untested()
    form = self.payment_form_cls(self.request.POST)
    if form.is_valid():
        if form.process(self.request, self.item):
            return HttpResponseRedirect(self.success_url)
        # Validation passed but processing failed.
        self.context['errors'] = self.errors['processing']
    self.context[self.form_context_name] = form
    # A form-validation failure has no error set yet; don't clobber a
    # processing error set above.
    self.context.setdefault("errors", self.errors['form'])
    return TemplateResponse(self.request, self.payment_template, self.context)
constant[Try to validate and then process the DirectPayment form.]
call[name[warn_untested], parameter[]]
variable[form] assign[=] call[name[self].payment_form_cls, parameter[name[self].request.POST]]
if call[name[form].is_valid, parameter[]] begin[:]
variable[success] assign[=] call[name[form].process, parameter[name[self].request, name[self].item]]
if name[success] begin[:]
return[call[name[HttpResponseRedirect], parameter[name[self].success_url]]]
call[name[self].context][name[self].form_context_name] assign[=] name[form]
call[name[self].context.setdefault, parameter[constant[errors], call[name[self].errors][constant[form]]]]
return[call[name[TemplateResponse], parameter[name[self].request, name[self].payment_template, name[self].context]]] | keyword[def] identifier[validate_payment_form] ( identifier[self] ):
literal[string]
identifier[warn_untested] ()
identifier[form] = identifier[self] . identifier[payment_form_cls] ( identifier[self] . identifier[request] . identifier[POST] )
keyword[if] identifier[form] . identifier[is_valid] ():
identifier[success] = identifier[form] . identifier[process] ( identifier[self] . identifier[request] , identifier[self] . identifier[item] )
keyword[if] identifier[success] :
keyword[return] identifier[HttpResponseRedirect] ( identifier[self] . identifier[success_url] )
keyword[else] :
identifier[self] . identifier[context] [ literal[string] ]= identifier[self] . identifier[errors] [ literal[string] ]
identifier[self] . identifier[context] [ identifier[self] . identifier[form_context_name] ]= identifier[form]
identifier[self] . identifier[context] . identifier[setdefault] ( literal[string] , identifier[self] . identifier[errors] [ literal[string] ])
keyword[return] identifier[TemplateResponse] ( identifier[self] . identifier[request] , identifier[self] . identifier[payment_template] , identifier[self] . identifier[context] ) | def validate_payment_form(self):
"""Try to validate and then process the DirectPayment form."""
warn_untested()
form = self.payment_form_cls(self.request.POST)
if form.is_valid():
success = form.process(self.request, self.item)
if success:
return HttpResponseRedirect(self.success_url) # depends on [control=['if'], data=[]]
else:
self.context['errors'] = self.errors['processing'] # depends on [control=['if'], data=[]]
self.context[self.form_context_name] = form
self.context.setdefault('errors', self.errors['form'])
return TemplateResponse(self.request, self.payment_template, self.context) |
def create_producer(self):
    """Context manager that yields an instance of ``Producer``."""
    with current_celery_app.pool.acquire(block=True) as connection:
        producer = Producer(
            connection,
            exchange=self.mq_exchange,
            routing_key=self.mq_routing_key,
            auto_declare=True,
        )
        yield producer
constant[Context manager that yields an instance of ``Producer``.]
with call[name[current_celery_app].pool.acquire, parameter[]] begin[:]
<ast.Yield object at 0x7da1b257f040> | keyword[def] identifier[create_producer] ( identifier[self] ):
literal[string]
keyword[with] identifier[current_celery_app] . identifier[pool] . identifier[acquire] ( identifier[block] = keyword[True] ) keyword[as] identifier[conn] :
keyword[yield] identifier[Producer] (
identifier[conn] ,
identifier[exchange] = identifier[self] . identifier[mq_exchange] ,
identifier[routing_key] = identifier[self] . identifier[mq_routing_key] ,
identifier[auto_declare] = keyword[True] ,
) | def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(conn, exchange=self.mq_exchange, routing_key=self.mq_routing_key, auto_declare=True) # depends on [control=['with'], data=['conn']] |
def create_readme(data):
    """Render README.md markdown for the given module data.

    ``data`` maps module names to lists of documentation fragments; the
    first line of each module's joined text is used as the short
    description in the link index at the top.
    """
    parts = ['<a name="top"></a>Modules\n========\n\n']
    # Link index: one line per module with its one-line description.
    for mod in sorted(data):
        summary = "".join(data[mod]).strip().split("\n")[0]
        parts.append(f"\n**[{mod}](#{mod})** — {summary}\n")
    # Detailed sections, one per module, in the same order.
    for mod in sorted(data):
        body = "".join(data[mod]).strip()
        parts.append(f'\n---\n\n### <a name="{mod}"></a>{mod}\n\n{body}\n')
    return "".join(parts)
constant[
Create README.md text for the given module data.
]
variable[out] assign[=] list[[<ast.Constant object at 0x7da204963190>]]
for taget[name[module]] in starred[call[name[sorted], parameter[call[name[data].keys, parameter[]]]]] begin[:]
variable[desc] assign[=] call[call[call[call[constant[].join, parameter[call[name[data]][name[module]]]].strip, parameter[]].split, parameter[constant[
]]]][constant[0]]
variable[format_str] assign[=] constant[
**[{name}](#{name})** — {desc}
]
call[name[out].append, parameter[call[name[format_str].format, parameter[]]]]
for taget[name[module]] in starred[call[name[sorted], parameter[call[name[data].keys, parameter[]]]]] begin[:]
call[name[out].append, parameter[call[constant[
---
### <a name="{name}"></a>{name}
{details}
].format, parameter[]]]]
return[call[constant[].join, parameter[name[out]]]] | keyword[def] identifier[create_readme] ( identifier[data] ):
literal[string]
identifier[out] =[ literal[string] ]
keyword[for] identifier[module] keyword[in] identifier[sorted] ( identifier[data] . identifier[keys] ()):
identifier[desc] = literal[string] . identifier[join] ( identifier[data] [ identifier[module] ]). identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ]
identifier[format_str] = literal[string]
identifier[out] . identifier[append] ( identifier[format_str] . identifier[format] ( identifier[name] = identifier[module] , identifier[desc] = identifier[desc] ))
keyword[for] identifier[module] keyword[in] identifier[sorted] ( identifier[data] . identifier[keys] ()):
identifier[out] . identifier[append] (
literal[string] . identifier[format] (
identifier[name] = identifier[module] , identifier[details] = literal[string] . identifier[join] ( identifier[data] [ identifier[module] ]). identifier[strip] ()
)
)
keyword[return] literal[string] . identifier[join] ( identifier[out] ) | def create_readme(data):
"""
Create README.md text for the given module data.
"""
out = ['<a name="top"></a>Modules\n========\n\n']
# Links
for module in sorted(data.keys()):
desc = ''.join(data[module]).strip().split('\n')[0]
format_str = '\n**[{name}](#{name})** — {desc}\n'
out.append(format_str.format(name=module, desc=desc)) # depends on [control=['for'], data=['module']]
# details
for module in sorted(data.keys()):
out.append('\n---\n\n### <a name="{name}"></a>{name}\n\n{details}\n'.format(name=module, details=''.join(data[module]).strip())) # depends on [control=['for'], data=['module']]
return ''.join(out) |
def auto_find_instance_path(self):
    """Determine the instance folder when none was given explicitly.

    Used when the instance path was not provided to the constructor of
    the application class: resolves to a folder named ``instance`` next
    to the main file or package, or to ``var/<name>-instance`` under the
    installation prefix.

    .. versionadded:: 0.8
    """
    prefix, package_path = find_package(self.import_name)
    if prefix is None:
        return os.path.join(package_path, 'instance')
    return os.path.join(prefix, 'var', '%s-instance' % self.name)
constant[Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
]
<ast.Tuple object at 0x7da18bc70580> assign[=] call[name[find_package], parameter[name[self].import_name]]
if compare[name[prefix] is constant[None]] begin[:]
return[call[name[os].path.join, parameter[name[package_path], constant[instance]]]]
return[call[name[os].path.join, parameter[name[prefix], constant[var], binary_operation[name[self].name + constant[-instance]]]]] | keyword[def] identifier[auto_find_instance_path] ( identifier[self] ):
literal[string]
identifier[prefix] , identifier[package_path] = identifier[find_package] ( identifier[self] . identifier[import_name] )
keyword[if] identifier[prefix] keyword[is] keyword[None] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[package_path] , literal[string] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] , identifier[self] . identifier[name] + literal[string] ) | def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
(prefix, package_path) = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance') # depends on [control=['if'], data=[]]
return os.path.join(prefix, 'var', self.name + '-instance') |
def effective_balance(self, address: Address, block_identifier: BlockSpecification) -> Balance:
    """ The user's balance with planned withdrawals deducted.

    Raises:
        RuntimeError: if the contract call returned no data.
    """
    # Direct attribute access instead of getattr with a constant name.
    fn = self.proxy.contract.functions.effectiveBalance
    balance = fn(address).call(block_identifier=block_identifier)
    # An empty bytes response means the call produced no data; surface
    # that as an explicit error instead of returning a bogus balance.
    if balance == b'':
        raise RuntimeError("Call to 'effectiveBalance' returned nothing")
    return balance
constant[ The user's balance with planned withdrawals deducted. ]
variable[fn] assign[=] call[name[getattr], parameter[name[self].proxy.contract.functions, constant[effectiveBalance]]]
variable[balance] assign[=] call[call[name[fn], parameter[name[address]]].call, parameter[]]
if compare[name[balance] equal[==] constant[b'']] begin[:]
<ast.Raise object at 0x7da1b194da20>
return[name[balance]] | keyword[def] identifier[effective_balance] ( identifier[self] , identifier[address] : identifier[Address] , identifier[block_identifier] : identifier[BlockSpecification] )-> identifier[Balance] :
literal[string]
identifier[fn] = identifier[getattr] ( identifier[self] . identifier[proxy] . identifier[contract] . identifier[functions] , literal[string] )
identifier[balance] = identifier[fn] ( identifier[address] ). identifier[call] ( identifier[block_identifier] = identifier[block_identifier] )
keyword[if] identifier[balance] == literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[balance] | def effective_balance(self, address: Address, block_identifier: BlockSpecification) -> Balance:
""" The user's balance with planned withdrawals deducted. """
fn = getattr(self.proxy.contract.functions, 'effectiveBalance')
balance = fn(address).call(block_identifier=block_identifier)
if balance == b'':
raise RuntimeError(f"Call to 'effectiveBalance' returned nothing") # depends on [control=['if'], data=[]]
return balance |
def _get_parsed_url(url):
    # type: (S) -> Url
    """Parse *url* while tolerating special characters in the auth part.

    This is a stand-in function for `urllib3.util.parse_url`: the
    original doesn't handle special characters very well, so the
    ``user:pass@`` section is split off first, the remainder is parsed,
    and the auth section is re-attached afterwards, bypassing validation.

    :return: The new, parsed URL object
    :rtype: :class:`~urllib3.util.url.Url`
    """
    try:
        return urllib3_parse(url)
    except ValueError:
        scheme, _, remainder = url.partition("://")
        auth, _, remainder = remainder.rpartition("@")
        rebuilt = "{scheme}://{url}".format(scheme=scheme, url=remainder)
        return urllib3_parse(rebuilt)._replace(auth=auth)
constant[
This is a stand-in function for `urllib3.util.parse_url`
The orignal function doesn't handle special characters very well, this simply splits
out the authentication section, creates the parsed url, then puts the authentication
section back in, bypassing validation.
:return: The new, parsed URL object
:rtype: :class:`~urllib3.util.url.Url`
]
<ast.Try object at 0x7da20e74b850>
return[name[parsed]] | keyword[def] identifier[_get_parsed_url] ( identifier[url] ):
literal[string]
keyword[try] :
identifier[parsed] = identifier[urllib3_parse] ( identifier[url] )
keyword[except] identifier[ValueError] :
identifier[scheme] , identifier[_] , identifier[url] = identifier[url] . identifier[partition] ( literal[string] )
identifier[auth] , identifier[_] , identifier[url] = identifier[url] . identifier[rpartition] ( literal[string] )
identifier[url] = literal[string] . identifier[format] ( identifier[scheme] = identifier[scheme] , identifier[url] = identifier[url] )
identifier[parsed] = identifier[urllib3_parse] ( identifier[url] ). identifier[_replace] ( identifier[auth] = identifier[auth] )
keyword[return] identifier[parsed] | def _get_parsed_url(url):
# type: (S) -> Url
"\n This is a stand-in function for `urllib3.util.parse_url`\n\n The orignal function doesn't handle special characters very well, this simply splits\n out the authentication section, creates the parsed url, then puts the authentication\n section back in, bypassing validation.\n\n :return: The new, parsed URL object\n :rtype: :class:`~urllib3.util.url.Url`\n "
try:
parsed = urllib3_parse(url) # depends on [control=['try'], data=[]]
except ValueError:
(scheme, _, url) = url.partition('://')
(auth, _, url) = url.rpartition('@')
url = '{scheme}://{url}'.format(scheme=scheme, url=url)
parsed = urllib3_parse(url)._replace(auth=auth) # depends on [control=['except'], data=[]]
return parsed |
def logs(self):
    """Fetch this collection's activity log.

    Yields the operations recorded in the collection log (answers
    inserted into / deleted from the collection, and the collection's
    creation) as a generator of :class:`CollectActivity`.

    :return: operations from the collection log, as a generator
    :rtype: CollectActivity.Iterable
    """
    import time
    from datetime import datetime
    from .answer import Answer
    from .question import Question
    from .acttype import CollectActType
    self._make_soup()
    # The endpoint returns 20 entries per page; a shorter page signals
    # the end of the log, so prime the counter to enter the loop.
    gotten_feed_num = 20
    offset = 0
    data = {
        'start': 0,
        '_xsrf': self.xsrf
    }
    api_url = self.url + 'log'
    while gotten_feed_num == 20:
        data['offset'] = offset
        res = self._session.post(url=api_url, data=data)
        # msg[0] is the number of entries in this page, msg[1] the HTML.
        gotten_feed_num = res.json()['msg'][0]
        soup = BeautifulSoup(res.json()['msg'][1])
        offset += gotten_feed_num
        zm_items = soup.find_all('div', class_='zm-item')
        for zm_item in zm_items:
            act_time = datetime.strptime(zm_item.find('time').text, "%Y-%m-%d %H:%M:%S")
            # <ins> marks an answer added to the collection, <del> one
            # removed; any other entry shape is skipped.
            if zm_item.find('ins'):
                link = zm_item.find('ins').a
                act_type = CollectActType.INSERT_ANSWER
            elif zm_item.find('del'):
                link = zm_item.find('del').a
                act_type = CollectActType.DELETE_ANSWER
            else:
                continue
            try:
                answer_url = Zhihu_URL + link['href']
                question_url = re_a2q.match(answer_url).group(1)
                question = Question(question_url, link.text)
                answer = Answer(
                    answer_url, question, session=self._session)
                yield CollectActivity(
                    act_type, act_time, self.owner, self, answer)
            except AttributeError:
                # re_a2q.match returned None (the link is not an answer
                # URL): treat this entry as the collection's creation.
                act_type = CollectActType.CREATE_COLLECTION
                yield CollectActivity(
                    act_type, act_time, self.owner, self)
        # Continue paging from the id of the last entry seen.
        # NOTE(review): assumes every page contains at least one
        # zm-item; an empty page would raise IndexError here — confirm.
        data['start'] = zm_items[-1]['id'][8:]
        time.sleep(0.5)  # throttle between page requests
constant[获取收藏夹日志
:return: 收藏夹日志中的操作,返回生成器
:rtype: CollectActivity.Iterable
]
import module[time]
from relative_module[datetime] import module[datetime]
from relative_module[answer] import module[Answer]
from relative_module[question] import module[Question]
from relative_module[acttype] import module[CollectActType]
call[name[self]._make_soup, parameter[]]
variable[gotten_feed_num] assign[=] constant[20]
variable[offset] assign[=] constant[0]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ae410>, <ast.Constant object at 0x7da1b26acb50>], [<ast.Constant object at 0x7da1b26ad090>, <ast.Attribute object at 0x7da1b26af520>]]
variable[api_url] assign[=] binary_operation[name[self].url + constant[log]]
while compare[name[gotten_feed_num] equal[==] constant[20]] begin[:]
call[name[data]][constant[offset]] assign[=] name[offset]
variable[res] assign[=] call[name[self]._session.post, parameter[]]
variable[gotten_feed_num] assign[=] call[call[call[name[res].json, parameter[]]][constant[msg]]][constant[0]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[call[call[call[name[res].json, parameter[]]][constant[msg]]][constant[1]]]]
<ast.AugAssign object at 0x7da1b26ae8c0>
variable[zm_items] assign[=] call[name[soup].find_all, parameter[constant[div]]]
for taget[name[zm_item]] in starred[name[zm_items]] begin[:]
variable[act_time] assign[=] call[name[datetime].strptime, parameter[call[name[zm_item].find, parameter[constant[time]]].text, constant[%Y-%m-%d %H:%M:%S]]]
if call[name[zm_item].find, parameter[constant[ins]]] begin[:]
variable[link] assign[=] call[name[zm_item].find, parameter[constant[ins]]].a
variable[act_type] assign[=] name[CollectActType].INSERT_ANSWER
<ast.Try object at 0x7da1b26ae2f0>
call[name[data]][constant[start]] assign[=] call[call[call[name[zm_items]][<ast.UnaryOp object at 0x7da1b26ac6a0>]][constant[id]]][<ast.Slice object at 0x7da1b26afb50>]
call[name[time].sleep, parameter[constant[0.5]]] | keyword[def] identifier[logs] ( identifier[self] ):
literal[string]
keyword[import] identifier[time]
keyword[from] identifier[datetime] keyword[import] identifier[datetime]
keyword[from] . identifier[answer] keyword[import] identifier[Answer]
keyword[from] . identifier[question] keyword[import] identifier[Question]
keyword[from] . identifier[acttype] keyword[import] identifier[CollectActType]
identifier[self] . identifier[_make_soup] ()
identifier[gotten_feed_num] = literal[int]
identifier[offset] = literal[int]
identifier[data] ={
literal[string] : literal[int] ,
literal[string] : identifier[self] . identifier[xsrf]
}
identifier[api_url] = identifier[self] . identifier[url] + literal[string]
keyword[while] identifier[gotten_feed_num] == literal[int] :
identifier[data] [ literal[string] ]= identifier[offset]
identifier[res] = identifier[self] . identifier[_session] . identifier[post] ( identifier[url] = identifier[api_url] , identifier[data] = identifier[data] )
identifier[gotten_feed_num] = identifier[res] . identifier[json] ()[ literal[string] ][ literal[int] ]
identifier[soup] = identifier[BeautifulSoup] ( identifier[res] . identifier[json] ()[ literal[string] ][ literal[int] ])
identifier[offset] += identifier[gotten_feed_num]
identifier[zm_items] = identifier[soup] . identifier[find_all] ( literal[string] , identifier[class_] = literal[string] )
keyword[for] identifier[zm_item] keyword[in] identifier[zm_items] :
identifier[act_time] = identifier[datetime] . identifier[strptime] ( identifier[zm_item] . identifier[find] ( literal[string] ). identifier[text] , literal[string] )
keyword[if] identifier[zm_item] . identifier[find] ( literal[string] ):
identifier[link] = identifier[zm_item] . identifier[find] ( literal[string] ). identifier[a]
identifier[act_type] = identifier[CollectActType] . identifier[INSERT_ANSWER]
keyword[elif] identifier[zm_item] . identifier[find] ( literal[string] ):
identifier[link] = identifier[zm_item] . identifier[find] ( literal[string] ). identifier[a]
identifier[act_type] = identifier[CollectActType] . identifier[DELETE_ANSWER]
keyword[else] :
keyword[continue]
keyword[try] :
identifier[answer_url] = identifier[Zhihu_URL] + identifier[link] [ literal[string] ]
identifier[question_url] = identifier[re_a2q] . identifier[match] ( identifier[answer_url] ). identifier[group] ( literal[int] )
identifier[question] = identifier[Question] ( identifier[question_url] , identifier[link] . identifier[text] )
identifier[answer] = identifier[Answer] (
identifier[answer_url] , identifier[question] , identifier[session] = identifier[self] . identifier[_session] )
keyword[yield] identifier[CollectActivity] (
identifier[act_type] , identifier[act_time] , identifier[self] . identifier[owner] , identifier[self] , identifier[answer] )
keyword[except] identifier[AttributeError] :
identifier[act_type] = identifier[CollectActType] . identifier[CREATE_COLLECTION]
keyword[yield] identifier[CollectActivity] (
identifier[act_type] , identifier[act_time] , identifier[self] . identifier[owner] , identifier[self] )
identifier[data] [ literal[string] ]= identifier[zm_items] [- literal[int] ][ literal[string] ][ literal[int] :]
identifier[time] . identifier[sleep] ( literal[int] ) | def logs(self):
"""获取收藏夹日志
:return: 收藏夹日志中的操作,返回生成器
:rtype: CollectActivity.Iterable
"""
import time
from datetime import datetime
from .answer import Answer
from .question import Question
from .acttype import CollectActType
self._make_soup()
gotten_feed_num = 20
offset = 0
data = {'start': 0, '_xsrf': self.xsrf}
api_url = self.url + 'log'
while gotten_feed_num == 20:
data['offset'] = offset
res = self._session.post(url=api_url, data=data)
gotten_feed_num = res.json()['msg'][0]
soup = BeautifulSoup(res.json()['msg'][1])
offset += gotten_feed_num
zm_items = soup.find_all('div', class_='zm-item')
for zm_item in zm_items:
act_time = datetime.strptime(zm_item.find('time').text, '%Y-%m-%d %H:%M:%S')
if zm_item.find('ins'):
link = zm_item.find('ins').a
act_type = CollectActType.INSERT_ANSWER # depends on [control=['if'], data=[]]
elif zm_item.find('del'):
link = zm_item.find('del').a
act_type = CollectActType.DELETE_ANSWER # depends on [control=['if'], data=[]]
else:
continue
try:
answer_url = Zhihu_URL + link['href']
question_url = re_a2q.match(answer_url).group(1)
question = Question(question_url, link.text)
answer = Answer(answer_url, question, session=self._session)
yield CollectActivity(act_type, act_time, self.owner, self, answer) # depends on [control=['try'], data=[]]
except AttributeError:
act_type = CollectActType.CREATE_COLLECTION
yield CollectActivity(act_type, act_time, self.owner, self) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['zm_item']]
data['start'] = zm_items[-1]['id'][8:]
time.sleep(0.5) # depends on [control=['while'], data=['gotten_feed_num']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.