code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def getSets(self, **kwargs):
    '''
    Search Brickset for LEGO sets matching a query.
    All parameters are optional, but you should probably use some (so that you get results)

    :param str query: The thing you're searching for.
    :param str theme: The theme of the set.
    :param str subtheme: The subtheme of the set.
    :param str setNumber: The LEGO set number.
    :param str year: The year in which the set came out.
    :param int owned: Whether or not you own the set. Only works when logged in with :meth:`login`. Set to `1` to make true.
    :param int wanted: Whether or not you want the set. Only works when logged in with :meth:`login`. Set to `1` to make true.
    :param str orderBy: How you want the set ordered. Accepts 'Number', 'YearFrom', 'Pieces', 'Minifigs', 'Rating', 'UKRetailPrice', 'USRetailPrice', 'CARetailPrice', 'EURetailPrice', 'Theme', 'Subtheme', 'Name', 'Random'. Add 'DESC' to the end to sort descending, e.g. NameDESC. Case insensitive. Defaults to 'Number'.
    :param int pageSize: How many results are on a page. Defaults to 20.
    :param int pageNumber: The number of the page you're looking at. Defaults to 1.
    :param str userName: The name of a user whose sets you want to search.
    :returns: A list of :class:`brickfront.build.Build` objects.
    :rtype: list
    '''
    # Query-string fields the API understands, paired with the value used
    # when the caller did not supply that keyword.
    field_defaults = [
        ('query', ''),
        ('theme', ''),
        ('subtheme', ''),
        ('setNumber', ''),
        ('year', ''),
        ('owned', ''),
        ('wanted', ''),
        ('orderBy', 'Number'),
        ('pageSize', '20'),
        ('pageNumber', '1'),
        ('userName', ''),
    ]
    params = {field: kwargs.get(field, fallback) for field, fallback in field_defaults}
    # Credentials always travel with the request.
    params['apiKey'] = self.apiKey
    params['userHash'] = self.userHash
    # Fire the request and make sure the API accepted it.
    response = get(Client.ENDPOINT.format('getSets'), params=params)
    self.checkResponse(response)
    # Every child of the XML root describes one LEGO set; wrap each in a Build.
    tree = ET.fromstring(response.text)
    return [Build(element, self) for element in tree]
constant[
A way to get different sets from a query.
All parameters are optional, but you should probably use some (so that you get results)
:param str query: The thing you're searching for.
:param str theme: The theme of the set.
:param str subtheme: The subtheme of the set.
:param str setNumber: The LEGO set number.
:param str year: The year in which the set came out.
:param int owned: Whether or not you own the set. Only works when logged in with :meth:`login`. Set to `1` to make true.
:param int wanted: Whether or not you want the set. Only works when logged in with :meth:`login`. Set to `1` to make true.
:param str orderBy: How you want the set ordered. Accepts 'Number', 'YearFrom', 'Pieces', 'Minifigs', 'Rating', 'UKRetailPrice', 'USRetailPrice', 'CARetailPrice', 'EURetailPrice', 'Theme', 'Subtheme', 'Name', 'Random'. Add 'DESC' to the end to sort descending, e.g. NameDESC. Case insensitive. Defaults to 'Number'.
:param int pageSize: How many results are on a page. Defaults to 20.
:param int pageNumber: The number of the page you're looking at. Defaults to 1.
:param str userName: The name of a user whose sets you want to search.
:returns: A list of :class:`brickfront.build.Build` objects.
:rtype: list
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f812140>, <ast.Constant object at 0x7da18f811030>, <ast.Constant object at 0x7da18f8117b0>, <ast.Constant object at 0x7da18f813820>, <ast.Constant object at 0x7da18f8131f0>, <ast.Constant object at 0x7da18f813340>, <ast.Constant object at 0x7da18f8107c0>, <ast.Constant object at 0x7da18f813400>, <ast.Constant object at 0x7da18f812a40>, <ast.Constant object at 0x7da18f813370>, <ast.Constant object at 0x7da18f8101f0>, <ast.Constant object at 0x7da18f811c30>, <ast.Constant object at 0x7da18f810d00>], [<ast.Attribute object at 0x7da18f810d30>, <ast.Attribute object at 0x7da18f8136d0>, <ast.Call object at 0x7da18f813190>, <ast.Call object at 0x7da20c7c9840>, <ast.Call object at 0x7da20c7c99c0>, <ast.Call object at 0x7da20c7c9270>, <ast.Call object at 0x7da20c7caad0>, <ast.Call object at 0x7da20c7c91b0>, <ast.Call object at 0x7da20c7ca800>, <ast.Call object at 0x7da1b14d16f0>, <ast.Call object at 0x7da1b14d0160>, <ast.Call object at 0x7da1b14d2f80>, <ast.Call object at 0x7da1b14d1300>]]
variable[url] assign[=] call[name[Client].ENDPOINT.format, parameter[constant[getSets]]]
variable[returned] assign[=] call[name[get], parameter[name[url]]]
call[name[self].checkResponse, parameter[name[returned]]]
variable[root] assign[=] call[name[ET].fromstring, parameter[name[returned].text]]
return[<ast.ListComp object at 0x7da1b14d1270>] | keyword[def] identifier[getSets] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] : identifier[self] . identifier[apiKey] ,
literal[string] : identifier[self] . identifier[userHash] ,
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
}
identifier[url] = identifier[Client] . identifier[ENDPOINT] . identifier[format] ( literal[string] )
identifier[returned] = identifier[get] ( identifier[url] , identifier[params] = identifier[params] )
identifier[self] . identifier[checkResponse] ( identifier[returned] )
identifier[root] = identifier[ET] . identifier[fromstring] ( identifier[returned] . identifier[text] )
keyword[return] [ identifier[Build] ( identifier[i] , identifier[self] ) keyword[for] identifier[i] keyword[in] identifier[root] ] | def getSets(self, **kwargs):
"""
A way to get different sets from a query.
All parameters are optional, but you should probably use some (so that you get results)
:param str query: The thing you're searching for.
:param str theme: The theme of the set.
:param str subtheme: The subtheme of the set.
:param str setNumber: The LEGO set number.
:param str year: The year in which the set came out.
:param int owned: Whether or not you own the set. Only works when logged in with :meth:`login`. Set to `1` to make true.
:param int wanted: Whether or not you want the set. Only works when logged in with :meth:`login`. Set to `1` to make true.
:param str orderBy: How you want the set ordered. Accepts 'Number', 'YearFrom', 'Pieces', 'Minifigs', 'Rating', 'UKRetailPrice', 'USRetailPrice', 'CARetailPrice', 'EURetailPrice', 'Theme', 'Subtheme', 'Name', 'Random'. Add 'DESC' to the end to sort descending, e.g. NameDESC. Case insensitive. Defaults to 'Number'.
:param int pageSize: How many results are on a page. Defaults to 20.
:param int pageNumber: The number of the page you're looking at. Defaults to 1.
:param str userName: The name of a user whose sets you want to search.
:returns: A list of :class:`brickfront.build.Build` objects.
:rtype: list
"""
# Generate a dictionary to send as parameters
params = {'apiKey': self.apiKey, 'userHash': self.userHash, 'query': kwargs.get('query', ''), 'theme': kwargs.get('theme', ''), 'subtheme': kwargs.get('subtheme', ''), 'setNumber': kwargs.get('setNumber', ''), 'year': kwargs.get('year', ''), 'owned': kwargs.get('owned', ''), 'wanted': kwargs.get('wanted', ''), 'orderBy': kwargs.get('orderBy', 'Number'), 'pageSize': kwargs.get('pageSize', '20'), 'pageNumber': kwargs.get('pageNumber', '1'), 'userName': kwargs.get('userName', '')}
url = Client.ENDPOINT.format('getSets')
returned = get(url, params=params)
self.checkResponse(returned)
# Construct the build objects and return them graciously
root = ET.fromstring(returned.text)
return [Build(i, self) for i in root] |
def stats_set_value(self, key, value=1):
    """Set the specified key/value in the per-message measurements

    .. versionadded:: 3.13.0

    .. note:: If this method is called when there is not a message being
        processed, a message will be logged at the ``warning`` level to
        indicate the value is being dropped. To suppress these warnings,
        set the :attr:`rejected.consumer.Consumer.IGNORE_OOB_STATS`
        attribute to :data:`True`.

    :param key: The key to set the value for
    :type key: :class:`str`
    :param value: The value
    :type value: :class:`int` or :class:`float`

    """
    measurement = self._measurement
    if measurement:
        # A message is being processed; record the value on its measurement.
        measurement.set_value(key, value)
        return
    # Out-of-band call: drop the value, warning unless suppressed.
    if not self.IGNORE_OOB_STATS:
        self.logger.warning(
            'stats_set_value invoked outside execution')
constant[Set the specified key/value in the per-message measurements
.. versionadded:: 3.13.0
.. note:: If this method is called when there is not a message being
processed, a message will be logged at the ``warning`` level to
indicate the value is being dropped. To suppress these warnings,
set the :attr:`rejected.consumer.Consumer.IGNORE_OOB_STATS`
attribute to :data:`True`.
:param key: The key to set the value for
:type key: :class:`str`
:param value: The value
:type value: :class:`int` or :class:`float`
]
if <ast.UnaryOp object at 0x7da18dc07f10> begin[:]
if <ast.UnaryOp object at 0x7da18dc060e0> begin[:]
call[name[self].logger.warning, parameter[constant[stats_set_value invoked outside execution]]]
return[None]
call[name[self]._measurement.set_value, parameter[name[key], name[value]]] | keyword[def] identifier[stats_set_value] ( identifier[self] , identifier[key] , identifier[value] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_measurement] :
keyword[if] keyword[not] identifier[self] . identifier[IGNORE_OOB_STATS] :
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] )
keyword[return]
identifier[self] . identifier[_measurement] . identifier[set_value] ( identifier[key] , identifier[value] ) | def stats_set_value(self, key, value=1):
"""Set the specified key/value in the per-message measurements
.. versionadded:: 3.13.0
.. note:: If this method is called when there is not a message being
processed, a message will be logged at the ``warning`` level to
indicate the value is being dropped. To suppress these warnings,
set the :attr:`rejected.consumer.Consumer.IGNORE_OOB_STATS`
attribute to :data:`True`.
:param key: The key to set the value for
:type key: :class:`str`
:param value: The value
:type value: :class:`int` or :class:`float`
"""
if not self._measurement:
if not self.IGNORE_OOB_STATS:
self.logger.warning('stats_set_value invoked outside execution') # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
self._measurement.set_value(key, value) |
def makeStickyEdataFile(Economy,ignore_periods,description='',filename=None,save_data=False,calc_micro_stats=True,meas_err_base=None):
    '''
    Makes descriptive statistics and macroeconomic data file. Behaves slightly
    differently for heterogeneous agents vs representative agent models.

    Parameters
    ----------
    Economy : Market or AgentType
        A representation of the model economy. For heterogeneous agents specifications,
        this will be an instance of a subclass of Market. For representative agent
        specifications, this will be an instance of an AgentType subclass.
    ignore_periods : int
        Number of periods at the start of the simulation to throw out.
    description : str
        Description of the economy that is prepended on the output string.
        NOTE(review): currently unused by the function body; kept for interface
        compatibility with callers.
    filename : str
        Name of the output log file, if any; .txt will be appended automatically.
    save_data : bool
        When True, save simulation data to filename + 'Data.txt' for use in Stata.
    calc_micro_stats : bool
        When True, calculate microeconomic statistics like in Table 2 of the
        paper draft.
    meas_err_base : float or None
        Base value of measurement error standard deviation, which will be adjusted.
        When None (default), value is calculated as stdev(DeltaLogC).

    Returns
    -------
    None
    '''
    # Extract time series data from the economy
    if hasattr(Economy,'agents'): # If this is a heterogeneous agent specification...
        if len(Economy.agents) > 1:
            pLvlAll_hist = np.concatenate([this_type.pLvlTrue_hist for this_type in Economy.agents],axis=1)
            aLvlAll_hist = np.concatenate([this_type.aLvlNow_hist for this_type in Economy.agents],axis=1)
            cLvlAll_hist = np.concatenate([this_type.cLvlNow_hist for this_type in Economy.agents],axis=1)
            yLvlAll_hist = np.concatenate([this_type.yLvlNow_hist for this_type in Economy.agents],axis=1)
        else: # Don't duplicate the data unless necessary (with one type, concatenating is useless)
            pLvlAll_hist = Economy.agents[0].pLvlTrue_hist
            aLvlAll_hist = Economy.agents[0].aLvlNow_hist
            cLvlAll_hist = Economy.agents[0].cLvlNow_hist
            yLvlAll_hist = Economy.agents[0].yLvlNow_hist
        # PermShkAggHist needs to be shifted one period forward
        PlvlAgg_hist = np.cumprod(np.concatenate(([1.0],Economy.PermShkAggHist[:-1]),axis=0))
        AlvlAgg_hist = np.mean(aLvlAll_hist,axis=1) # Level of aggregate assets
        AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate assets
        ClvlAgg_hist = np.mean(cLvlAll_hist,axis=1) # Level of aggregate consumption
        CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate consumption
        YlvlAgg_hist = np.mean(yLvlAll_hist,axis=1) # Level of aggregate income
        YnrmAgg_hist = YlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate income

        if calc_micro_stats: # Only calculate stats if requested. This is a memory hog with many simulated periods
            micro_stat_periods = int((Economy.agents[0].T_sim-ignore_periods)*0.1)
            # Mask out observations from freshly-born agents so growth rates
            # are not contaminated by the birth reset.
            not_newborns = (np.concatenate([this_type.t_age_hist[(ignore_periods+1):(ignore_periods+micro_stat_periods),:] for this_type in Economy.agents],axis=1) > 1).flatten()
            Logc = np.log(cLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
            DeltaLogc = (Logc[1:] - Logc[0:-1]).flatten()
            DeltaLogc_trimmed = DeltaLogc[not_newborns]
            Loga = np.log(aLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
            DeltaLoga = (Loga[1:] - Loga[0:-1]).flatten()
            DeltaLoga_trimmed = DeltaLoga[not_newborns]
            Logp = np.log(pLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
            DeltaLogp = (Logp[1:] - Logp[0:-1]).flatten()
            DeltaLogp_trimmed = DeltaLogp[not_newborns]
            Logy = np.log(yLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
            # NOTE(review): Logy_trimmed is an *alias* of Logy, so the nan
            # assignment below also mutates Logy; harmless here because Logy
            # is not used again, but confirm before reordering.
            Logy_trimmed = Logy
            Logy_trimmed[np.isinf(Logy)] = np.nan # zero income produces -inf; exclude from stats
            birth_events = np.concatenate([this_type.t_age_hist == 1 for this_type in Economy.agents],axis=1)
            vBirth = calcValueAtBirth(cLvlAll_hist[ignore_periods:,:],birth_events[ignore_periods:,:],PlvlAgg_hist[ignore_periods:],Economy.MrkvNow_hist[ignore_periods:],Economy.agents[0].DiscFac,Economy.agents[0].CRRA)

        BigTheta_hist = Economy.TranShkAggHist
        if hasattr(Economy,'MrkvNow'):
            Mrkv_hist = Economy.MrkvNow_hist
            # BUGFIX: the original used "~hasattr(...)".  ~ is *bitwise*
            # inversion: ~True == -2 and ~False == -1, both truthy, so the
            # branch ran even when Rfree existed.  "not" is the logical test.
            if not hasattr(Economy,'Rfree'): # If this is a markov DSGE specification...
                # Find the expected interest rate - approximate by assuming growth = expected growth
                ExpectedGrowth_hist = Economy.PermGroFacAgg[Mrkv_hist]
                ExpectedKLRatio_hist = AnrmAgg_hist/ExpectedGrowth_hist
                ExpectedR_hist = Economy.Rfunc(ExpectedKLRatio_hist)
    else: # If this is a representative agent specification...
        PlvlAgg_hist = Economy.pLvlTrue_hist.flatten()
        ClvlAgg_hist = Economy.cLvlNow_hist.flatten()
        CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist.flatten()
        YnrmAgg_hist = Economy.yNrmTrue_hist.flatten()
        YlvlAgg_hist = YnrmAgg_hist*PlvlAgg_hist.flatten()
        AlvlAgg_hist = Economy.aLvlNow_hist.flatten()
        AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist.flatten()
        BigTheta_hist = Economy.TranShkNow_hist.flatten()
        if hasattr(Economy,'MrkvNow'):
            Mrkv_hist = Economy.MrkvNow_hist

    # Process aggregate data into forms used by regressions
    LogC = np.log(ClvlAgg_hist[ignore_periods:])
    LogA = np.log(AlvlAgg_hist[ignore_periods:])
    LogY = np.log(YlvlAgg_hist[ignore_periods:])
    DeltaLogC = LogC[1:] - LogC[0:-1]
    DeltaLogA = LogA[1:] - LogA[0:-1]
    DeltaLogY = LogY[1:] - LogY[0:-1]
    A = AnrmAgg_hist[(ignore_periods+1):] # This is a relabeling for the regression code
    BigTheta = BigTheta_hist[(ignore_periods+1):]
    if hasattr(Economy,'MrkvNow'):
        Mrkv = Mrkv_hist[(ignore_periods+1):] # This is a relabeling for the regression code
        # BUGFIX: same ~hasattr -> not hasattr correction as above.
        if not hasattr(Economy,'Rfree') and hasattr(Economy,'agents'): # If this is a markov DSGE specification...
            R = ExpectedR_hist[(ignore_periods+1):]
    # Eight-period (two year) log differences for the long-horizon regressions
    Delta8LogC = (np.log(ClvlAgg_hist[8:]) - np.log(ClvlAgg_hist[:-8]))[(ignore_periods-7):]
    Delta8LogY = (np.log(YlvlAgg_hist[8:]) - np.log(YlvlAgg_hist[:-8]))[(ignore_periods-7):]

    # Add measurement error to LogC
    if meas_err_base is None:
        meas_err_base = np.std(DeltaLogC)
    sigma_meas_err = meas_err_base*0.375 # This approximately matches the change in IV vs OLS in U.S. empirical coefficients
    np.random.seed(10) # fixed seed so the measurement error is reproducible
    Measurement_Error = sigma_meas_err*np.random.normal(0.,1.,LogC.size)
    LogC_me = LogC + Measurement_Error
    DeltaLogC_me = LogC_me[1:] - LogC_me[0:-1]

    # Apply measurement error to long delta LogC
    LogC_long = np.log(ClvlAgg_hist)
    LogC_long_me = LogC_long + sigma_meas_err*np.random.normal(0.,1.,LogC_long.size)
    Delta8LogC_me = (LogC_long_me[8:] - LogC_long_me[:-8])[(ignore_periods-7):]

    # Make summary statistics for the results file
    csv_output_string = str(np.mean(AnrmAgg_hist[ignore_periods:])) +","+ str(np.mean(CnrmAgg_hist[ignore_periods:]))+ ","+str(np.std(np.log(AnrmAgg_hist[ignore_periods:])))+ ","+str(np.std(DeltaLogC))+ ","+str(np.std(DeltaLogY)) +","+ str(np.std(DeltaLogA))
    if hasattr(Economy,'agents') and calc_micro_stats: # This block only runs for heterogeneous agents specifications
        csv_output_string += ","+str(np.mean(np.std(Loga,axis=1)))+ ","+str(np.mean(np.std(Logc,axis=1))) + ","+str(np.mean(np.std(Logp,axis=1))) +","+ str(np.mean(np.nanstd(Logy_trimmed,axis=1))) +","+ str(np.std(DeltaLoga_trimmed))+","+ str(np.std(DeltaLogc_trimmed))+ ","+str(np.std(DeltaLogp_trimmed))

    # Save the results to a logfile if requested
    if filename is not None:
        # "with" closes the file on exit; explicit f.close() calls were redundant
        with open(results_dir + filename + 'Results.csv','w') as f:
            f.write(csv_output_string)
        if calc_micro_stats and hasattr(Economy,'agents'):
            with open(results_dir + filename + 'BirthValue.csv','w') as f:
                my_writer = csv.writer(f, delimiter = ',')
                my_writer.writerow(vBirth)
        if save_data:
            DataArray = (np.vstack((np.arange(DeltaLogC.size),DeltaLogC_me,DeltaLogC,DeltaLogY,A,BigTheta,Delta8LogC,Delta8LogY,Delta8LogC_me,Measurement_Error[1:]))).transpose()
            VarNames = ['time_period','DeltaLogC_me','DeltaLogC','DeltaLogY','A','BigTheta','Delta8LogC','Delta8LogY','Delta8LogC_me','Measurement_Error']
            if hasattr(Economy,'MrkvNow'):
                DataArray = np.hstack((DataArray,np.reshape(Mrkv,(Mrkv.size,1))))
                VarNames.append('MrkvState')
            # BUGFIX: original mixed bitwise "&" and "~" with booleans here;
            # with ordinary bools the ~hasattr term was always truthy.
            if hasattr(Economy,'MrkvNow') and not hasattr(Economy,'Rfree') and hasattr(Economy,'agents'):
                DataArray = np.hstack((DataArray,np.reshape(R,(R.size,1))))
                VarNames.append('R')
            with open(results_dir + filename + 'Data.txt','w') as f:
                my_writer = csv.writer(f, delimiter = '\t')
                my_writer.writerow(VarNames)
                for i in range(DataArray.shape[0]):
                    my_writer.writerow(DataArray[i,:])
constant[
Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None
]
if call[name[hasattr], parameter[name[Economy], constant[agents]]] begin[:]
if compare[call[name[len], parameter[name[Economy].agents]] greater[>] constant[1]] begin[:]
variable[pLvlAll_hist] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b26afdf0>]]
variable[aLvlAll_hist] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b26ac5e0>]]
variable[cLvlAll_hist] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b26af1c0>]]
variable[yLvlAll_hist] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b26ad2a0>]]
variable[PlvlAgg_hist] assign[=] call[name[np].cumprod, parameter[call[name[np].concatenate, parameter[tuple[[<ast.List object at 0x7da1b26ae230>, <ast.Subscript object at 0x7da1b26afb80>]]]]]]
variable[AlvlAgg_hist] assign[=] call[name[np].mean, parameter[name[aLvlAll_hist]]]
variable[AnrmAgg_hist] assign[=] binary_operation[name[AlvlAgg_hist] / name[PlvlAgg_hist]]
variable[ClvlAgg_hist] assign[=] call[name[np].mean, parameter[name[cLvlAll_hist]]]
variable[CnrmAgg_hist] assign[=] binary_operation[name[ClvlAgg_hist] / name[PlvlAgg_hist]]
variable[YlvlAgg_hist] assign[=] call[name[np].mean, parameter[name[yLvlAll_hist]]]
variable[YnrmAgg_hist] assign[=] binary_operation[name[YlvlAgg_hist] / name[PlvlAgg_hist]]
if name[calc_micro_stats] begin[:]
variable[micro_stat_periods] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[Economy].agents][constant[0]].T_sim - name[ignore_periods]] * constant[0.1]]]]
variable[not_newborns] assign[=] call[compare[call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18c4cc040>]] greater[>] constant[1]].flatten, parameter[]]
variable[Logc] assign[=] call[name[np].log, parameter[call[name[cLvlAll_hist]][tuple[[<ast.Slice object at 0x7da18c4cefb0>, <ast.Slice object at 0x7da18c4cec50>]]]]]
variable[DeltaLogc] assign[=] call[binary_operation[call[name[Logc]][<ast.Slice object at 0x7da18c4cdc60>] - call[name[Logc]][<ast.Slice object at 0x7da18c4cc2e0>]].flatten, parameter[]]
variable[DeltaLogc_trimmed] assign[=] call[name[DeltaLogc]][name[not_newborns]]
variable[Loga] assign[=] call[name[np].log, parameter[call[name[aLvlAll_hist]][tuple[[<ast.Slice object at 0x7da18ede6470>, <ast.Slice object at 0x7da18ede7190>]]]]]
variable[DeltaLoga] assign[=] call[binary_operation[call[name[Loga]][<ast.Slice object at 0x7da18ede5090>] - call[name[Loga]][<ast.Slice object at 0x7da18ede6650>]].flatten, parameter[]]
variable[DeltaLoga_trimmed] assign[=] call[name[DeltaLoga]][name[not_newborns]]
variable[Logp] assign[=] call[name[np].log, parameter[call[name[pLvlAll_hist]][tuple[[<ast.Slice object at 0x7da18ede6bf0>, <ast.Slice object at 0x7da18ede5a20>]]]]]
variable[DeltaLogp] assign[=] call[binary_operation[call[name[Logp]][<ast.Slice object at 0x7da18ede71c0>] - call[name[Logp]][<ast.Slice object at 0x7da18ede5000>]].flatten, parameter[]]
variable[DeltaLogp_trimmed] assign[=] call[name[DeltaLogp]][name[not_newborns]]
variable[Logy] assign[=] call[name[np].log, parameter[call[name[yLvlAll_hist]][tuple[[<ast.Slice object at 0x7da18ede5750>, <ast.Slice object at 0x7da18ede79d0>]]]]]
variable[Logy_trimmed] assign[=] name[Logy]
call[name[Logy_trimmed]][call[name[np].isinf, parameter[name[Logy]]]] assign[=] name[np].nan
variable[birth_events] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18ede4a00>]]
variable[vBirth] assign[=] call[name[calcValueAtBirth], parameter[call[name[cLvlAll_hist]][tuple[[<ast.Slice object at 0x7da18ede5d80>, <ast.Slice object at 0x7da18ede7280>]]], call[name[birth_events]][tuple[[<ast.Slice object at 0x7da18ede5330>, <ast.Slice object at 0x7da18ede7fd0>]]], call[name[PlvlAgg_hist]][<ast.Slice object at 0x7da18ede7d60>], call[name[Economy].MrkvNow_hist][<ast.Slice object at 0x7da18ede7eb0>], call[name[Economy].agents][constant[0]].DiscFac, call[name[Economy].agents][constant[0]].CRRA]]
variable[BigTheta_hist] assign[=] name[Economy].TranShkAggHist
if call[name[hasattr], parameter[name[Economy], constant[MrkvNow]]] begin[:]
variable[Mrkv_hist] assign[=] name[Economy].MrkvNow_hist
if <ast.UnaryOp object at 0x7da18ede6e90> begin[:]
variable[ExpectedGrowth_hist] assign[=] call[name[Economy].PermGroFacAgg][name[Mrkv_hist]]
variable[ExpectedKLRatio_hist] assign[=] binary_operation[name[AnrmAgg_hist] / name[ExpectedGrowth_hist]]
variable[ExpectedR_hist] assign[=] call[name[Economy].Rfunc, parameter[name[ExpectedKLRatio_hist]]]
variable[LogC] assign[=] call[name[np].log, parameter[call[name[ClvlAgg_hist]][<ast.Slice object at 0x7da20c991c90>]]]
variable[LogA] assign[=] call[name[np].log, parameter[call[name[AlvlAgg_hist]][<ast.Slice object at 0x7da20c992ad0>]]]
variable[LogY] assign[=] call[name[np].log, parameter[call[name[YlvlAgg_hist]][<ast.Slice object at 0x7da20c990850>]]]
variable[DeltaLogC] assign[=] binary_operation[call[name[LogC]][<ast.Slice object at 0x7da20c991090>] - call[name[LogC]][<ast.Slice object at 0x7da20c992500>]]
variable[DeltaLogA] assign[=] binary_operation[call[name[LogA]][<ast.Slice object at 0x7da20c9930a0>] - call[name[LogA]][<ast.Slice object at 0x7da20c990760>]]
variable[DeltaLogY] assign[=] binary_operation[call[name[LogY]][<ast.Slice object at 0x7da20c990e20>] - call[name[LogY]][<ast.Slice object at 0x7da20c991d50>]]
variable[A] assign[=] call[name[AnrmAgg_hist]][<ast.Slice object at 0x7da20c993460>]
variable[BigTheta] assign[=] call[name[BigTheta_hist]][<ast.Slice object at 0x7da20c992290>]
if call[name[hasattr], parameter[name[Economy], constant[MrkvNow]]] begin[:]
variable[Mrkv] assign[=] call[name[Mrkv_hist]][<ast.Slice object at 0x7da20c991ed0>]
if <ast.BoolOp object at 0x7da20c991de0> begin[:]
variable[R] assign[=] call[name[ExpectedR_hist]][<ast.Slice object at 0x7da20c9927a0>]
variable[Delta8LogC] assign[=] call[binary_operation[call[name[np].log, parameter[call[name[ClvlAgg_hist]][<ast.Slice object at 0x7da20c9938b0>]]] - call[name[np].log, parameter[call[name[ClvlAgg_hist]][<ast.Slice object at 0x7da20c992830>]]]]][<ast.Slice object at 0x7da20c991450>]
variable[Delta8LogY] assign[=] call[binary_operation[call[name[np].log, parameter[call[name[YlvlAgg_hist]][<ast.Slice object at 0x7da20c991f60>]]] - call[name[np].log, parameter[call[name[YlvlAgg_hist]][<ast.Slice object at 0x7da20c9926b0>]]]]][<ast.Slice object at 0x7da20c9900d0>]
if compare[name[meas_err_base] is constant[None]] begin[:]
variable[meas_err_base] assign[=] call[name[np].std, parameter[name[DeltaLogC]]]
variable[sigma_meas_err] assign[=] binary_operation[name[meas_err_base] * constant[0.375]]
call[name[np].random.seed, parameter[constant[10]]]
variable[Measurement_Error] assign[=] binary_operation[name[sigma_meas_err] * call[name[np].random.normal, parameter[constant[0.0], constant[1.0], name[LogC].size]]]
variable[LogC_me] assign[=] binary_operation[name[LogC] + name[Measurement_Error]]
variable[DeltaLogC_me] assign[=] binary_operation[call[name[LogC_me]][<ast.Slice object at 0x7da2054a7bb0>] - call[name[LogC_me]][<ast.Slice object at 0x7da2054a49a0>]]
variable[LogC_long] assign[=] call[name[np].log, parameter[name[ClvlAgg_hist]]]
variable[LogC_long_me] assign[=] binary_operation[name[LogC_long] + binary_operation[name[sigma_meas_err] * call[name[np].random.normal, parameter[constant[0.0], constant[1.0], name[LogC_long].size]]]]
variable[Delta8LogC_me] assign[=] call[binary_operation[call[name[LogC_long_me]][<ast.Slice object at 0x7da2054a5000>] - call[name[LogC_long_me]][<ast.Slice object at 0x7da2054a6650>]]][<ast.Slice object at 0x7da2054a6740>]
variable[csv_output_string] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[str], parameter[call[name[np].mean, parameter[call[name[AnrmAgg_hist]][<ast.Slice object at 0x7da2054a6800>]]]]] + constant[,]] + call[name[str], parameter[call[name[np].mean, parameter[call[name[CnrmAgg_hist]][<ast.Slice object at 0x7da2054a47f0>]]]]]] + constant[,]] + call[name[str], parameter[call[name[np].std, parameter[call[name[np].log, parameter[call[name[AnrmAgg_hist]][<ast.Slice object at 0x7da2054a5330>]]]]]]]] + constant[,]] + call[name[str], parameter[call[name[np].std, parameter[name[DeltaLogC]]]]]] + constant[,]] + call[name[str], parameter[call[name[np].std, parameter[name[DeltaLogY]]]]]] + constant[,]] + call[name[str], parameter[call[name[np].std, parameter[name[DeltaLogA]]]]]]
if <ast.BoolOp object at 0x7da2054a4df0> begin[:]
<ast.AugAssign object at 0x7da2054a63e0>
if compare[name[filename] is_not constant[None]] begin[:]
with call[name[open], parameter[binary_operation[binary_operation[name[results_dir] + name[filename]] + constant[Results.csv]], constant[w]]] begin[:]
call[name[f].write, parameter[name[csv_output_string]]]
call[name[f].close, parameter[]]
if <ast.BoolOp object at 0x7da2041da140> begin[:]
with call[name[open], parameter[binary_operation[binary_operation[name[results_dir] + name[filename]] + constant[BirthValue.csv]], constant[w]]] begin[:]
variable[my_writer] assign[=] call[name[csv].writer, parameter[name[f]]]
call[name[my_writer].writerow, parameter[name[vBirth]]]
call[name[f].close, parameter[]]
if name[save_data] begin[:]
variable[DataArray] assign[=] call[call[name[np].vstack, parameter[tuple[[<ast.Call object at 0x7da2041da7a0>, <ast.Name object at 0x7da2041d93f0>, <ast.Name object at 0x7da2041db310>, <ast.Name object at 0x7da2041d98d0>, <ast.Name object at 0x7da2041da2f0>, <ast.Name object at 0x7da2041da9e0>, <ast.Name object at 0x7da2041da890>, <ast.Name object at 0x7da2041dbdc0>, <ast.Name object at 0x7da2041dbfa0>, <ast.Subscript object at 0x7da2041da4d0>]]]].transpose, parameter[]]
variable[VarNames] assign[=] list[[<ast.Constant object at 0x7da2041da9b0>, <ast.Constant object at 0x7da2041db3d0>, <ast.Constant object at 0x7da2041d8eb0>, <ast.Constant object at 0x7da2041d87c0>, <ast.Constant object at 0x7da2041d8c10>, <ast.Constant object at 0x7da2041db520>, <ast.Constant object at 0x7da2041d9e70>, <ast.Constant object at 0x7da2041dadd0>, <ast.Constant object at 0x7da2041dbc70>, <ast.Constant object at 0x7da2041d8af0>]]
if call[name[hasattr], parameter[name[Economy], constant[MrkvNow]]] begin[:]
variable[DataArray] assign[=] call[name[np].hstack, parameter[tuple[[<ast.Name object at 0x7da2041dae90>, <ast.Call object at 0x7da2041d9c30>]]]]
call[name[VarNames].append, parameter[constant[MrkvState]]]
if <ast.BoolOp object at 0x7da2041d9030> begin[:]
variable[DataArray] assign[=] call[name[np].hstack, parameter[tuple[[<ast.Name object at 0x7da2041d9ba0>, <ast.Call object at 0x7da2041db3a0>]]]]
call[name[VarNames].append, parameter[constant[R]]]
with call[name[open], parameter[binary_operation[binary_operation[name[results_dir] + name[filename]] + constant[Data.txt]], constant[w]]] begin[:]
variable[my_writer] assign[=] call[name[csv].writer, parameter[name[f]]]
call[name[my_writer].writerow, parameter[name[VarNames]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[DataArray].shape][constant[0]]]]] begin[:]
call[name[my_writer].writerow, parameter[call[name[DataArray]][tuple[[<ast.Name object at 0x7da204567130>, <ast.Slice object at 0x7da204567fd0>]]]]]
call[name[f].close, parameter[]] | keyword[def] identifier[makeStickyEdataFile] ( identifier[Economy] , identifier[ignore_periods] , identifier[description] = literal[string] , identifier[filename] = keyword[None] , identifier[save_data] = keyword[False] , identifier[calc_micro_stats] = keyword[True] , identifier[meas_err_base] = keyword[None] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[Economy] , literal[string] ):
keyword[if] identifier[len] ( identifier[Economy] . identifier[agents] )> literal[int] :
identifier[pLvlAll_hist] = identifier[np] . identifier[concatenate] ([ identifier[this_type] . identifier[pLvlTrue_hist] keyword[for] identifier[this_type] keyword[in] identifier[Economy] . identifier[agents] ], identifier[axis] = literal[int] )
identifier[aLvlAll_hist] = identifier[np] . identifier[concatenate] ([ identifier[this_type] . identifier[aLvlNow_hist] keyword[for] identifier[this_type] keyword[in] identifier[Economy] . identifier[agents] ], identifier[axis] = literal[int] )
identifier[cLvlAll_hist] = identifier[np] . identifier[concatenate] ([ identifier[this_type] . identifier[cLvlNow_hist] keyword[for] identifier[this_type] keyword[in] identifier[Economy] . identifier[agents] ], identifier[axis] = literal[int] )
identifier[yLvlAll_hist] = identifier[np] . identifier[concatenate] ([ identifier[this_type] . identifier[yLvlNow_hist] keyword[for] identifier[this_type] keyword[in] identifier[Economy] . identifier[agents] ], identifier[axis] = literal[int] )
keyword[else] :
identifier[pLvlAll_hist] = identifier[Economy] . identifier[agents] [ literal[int] ]. identifier[pLvlTrue_hist]
identifier[aLvlAll_hist] = identifier[Economy] . identifier[agents] [ literal[int] ]. identifier[aLvlNow_hist]
identifier[cLvlAll_hist] = identifier[Economy] . identifier[agents] [ literal[int] ]. identifier[cLvlNow_hist]
identifier[yLvlAll_hist] = identifier[Economy] . identifier[agents] [ literal[int] ]. identifier[yLvlNow_hist]
identifier[PlvlAgg_hist] = identifier[np] . identifier[cumprod] ( identifier[np] . identifier[concatenate] (([ literal[int] ], identifier[Economy] . identifier[PermShkAggHist] [:- literal[int] ]), identifier[axis] = literal[int] ))
identifier[AlvlAgg_hist] = identifier[np] . identifier[mean] ( identifier[aLvlAll_hist] , identifier[axis] = literal[int] )
identifier[AnrmAgg_hist] = identifier[AlvlAgg_hist] / identifier[PlvlAgg_hist]
identifier[ClvlAgg_hist] = identifier[np] . identifier[mean] ( identifier[cLvlAll_hist] , identifier[axis] = literal[int] )
identifier[CnrmAgg_hist] = identifier[ClvlAgg_hist] / identifier[PlvlAgg_hist]
identifier[YlvlAgg_hist] = identifier[np] . identifier[mean] ( identifier[yLvlAll_hist] , identifier[axis] = literal[int] )
identifier[YnrmAgg_hist] = identifier[YlvlAgg_hist] / identifier[PlvlAgg_hist]
keyword[if] identifier[calc_micro_stats] :
identifier[micro_stat_periods] = identifier[int] (( identifier[Economy] . identifier[agents] [ literal[int] ]. identifier[T_sim] - identifier[ignore_periods] )* literal[int] )
identifier[not_newborns] =( identifier[np] . identifier[concatenate] ([ identifier[this_type] . identifier[t_age_hist] [( identifier[ignore_periods] + literal[int] ):( identifier[ignore_periods] + identifier[micro_stat_periods] ),:] keyword[for] identifier[this_type] keyword[in] identifier[Economy] . identifier[agents] ], identifier[axis] = literal[int] )> literal[int] ). identifier[flatten] ()
identifier[Logc] = identifier[np] . identifier[log] ( identifier[cLvlAll_hist] [ identifier[ignore_periods] :( identifier[ignore_periods] + identifier[micro_stat_periods] ),:])
identifier[DeltaLogc] =( identifier[Logc] [ literal[int] :]- identifier[Logc] [ literal[int] :- literal[int] ]). identifier[flatten] ()
identifier[DeltaLogc_trimmed] = identifier[DeltaLogc] [ identifier[not_newborns] ]
identifier[Loga] = identifier[np] . identifier[log] ( identifier[aLvlAll_hist] [ identifier[ignore_periods] :( identifier[ignore_periods] + identifier[micro_stat_periods] ),:])
identifier[DeltaLoga] =( identifier[Loga] [ literal[int] :]- identifier[Loga] [ literal[int] :- literal[int] ]). identifier[flatten] ()
identifier[DeltaLoga_trimmed] = identifier[DeltaLoga] [ identifier[not_newborns] ]
identifier[Logp] = identifier[np] . identifier[log] ( identifier[pLvlAll_hist] [ identifier[ignore_periods] :( identifier[ignore_periods] + identifier[micro_stat_periods] ),:])
identifier[DeltaLogp] =( identifier[Logp] [ literal[int] :]- identifier[Logp] [ literal[int] :- literal[int] ]). identifier[flatten] ()
identifier[DeltaLogp_trimmed] = identifier[DeltaLogp] [ identifier[not_newborns] ]
identifier[Logy] = identifier[np] . identifier[log] ( identifier[yLvlAll_hist] [ identifier[ignore_periods] :( identifier[ignore_periods] + identifier[micro_stat_periods] ),:])
identifier[Logy_trimmed] = identifier[Logy]
identifier[Logy_trimmed] [ identifier[np] . identifier[isinf] ( identifier[Logy] )]= identifier[np] . identifier[nan]
identifier[birth_events] = identifier[np] . identifier[concatenate] ([ identifier[this_type] . identifier[t_age_hist] == literal[int] keyword[for] identifier[this_type] keyword[in] identifier[Economy] . identifier[agents] ], identifier[axis] = literal[int] )
identifier[vBirth] = identifier[calcValueAtBirth] ( identifier[cLvlAll_hist] [ identifier[ignore_periods] :,:], identifier[birth_events] [ identifier[ignore_periods] :,:], identifier[PlvlAgg_hist] [ identifier[ignore_periods] :], identifier[Economy] . identifier[MrkvNow_hist] [ identifier[ignore_periods] :], identifier[Economy] . identifier[agents] [ literal[int] ]. identifier[DiscFac] , identifier[Economy] . identifier[agents] [ literal[int] ]. identifier[CRRA] )
identifier[BigTheta_hist] = identifier[Economy] . identifier[TranShkAggHist]
keyword[if] identifier[hasattr] ( identifier[Economy] , literal[string] ):
identifier[Mrkv_hist] = identifier[Economy] . identifier[MrkvNow_hist]
keyword[if] ~ identifier[hasattr] ( identifier[Economy] , literal[string] ):
identifier[ExpectedGrowth_hist] = identifier[Economy] . identifier[PermGroFacAgg] [ identifier[Mrkv_hist] ]
identifier[ExpectedKLRatio_hist] = identifier[AnrmAgg_hist] / identifier[ExpectedGrowth_hist]
identifier[ExpectedR_hist] = identifier[Economy] . identifier[Rfunc] ( identifier[ExpectedKLRatio_hist] )
keyword[else] :
identifier[PlvlAgg_hist] = identifier[Economy] . identifier[pLvlTrue_hist] . identifier[flatten] ()
identifier[ClvlAgg_hist] = identifier[Economy] . identifier[cLvlNow_hist] . identifier[flatten] ()
identifier[CnrmAgg_hist] = identifier[ClvlAgg_hist] / identifier[PlvlAgg_hist] . identifier[flatten] ()
identifier[YnrmAgg_hist] = identifier[Economy] . identifier[yNrmTrue_hist] . identifier[flatten] ()
identifier[YlvlAgg_hist] = identifier[YnrmAgg_hist] * identifier[PlvlAgg_hist] . identifier[flatten] ()
identifier[AlvlAgg_hist] = identifier[Economy] . identifier[aLvlNow_hist] . identifier[flatten] ()
identifier[AnrmAgg_hist] = identifier[AlvlAgg_hist] / identifier[PlvlAgg_hist] . identifier[flatten] ()
identifier[BigTheta_hist] = identifier[Economy] . identifier[TranShkNow_hist] . identifier[flatten] ()
keyword[if] identifier[hasattr] ( identifier[Economy] , literal[string] ):
identifier[Mrkv_hist] = identifier[Economy] . identifier[MrkvNow_hist]
identifier[LogC] = identifier[np] . identifier[log] ( identifier[ClvlAgg_hist] [ identifier[ignore_periods] :])
identifier[LogA] = identifier[np] . identifier[log] ( identifier[AlvlAgg_hist] [ identifier[ignore_periods] :])
identifier[LogY] = identifier[np] . identifier[log] ( identifier[YlvlAgg_hist] [ identifier[ignore_periods] :])
identifier[DeltaLogC] = identifier[LogC] [ literal[int] :]- identifier[LogC] [ literal[int] :- literal[int] ]
identifier[DeltaLogA] = identifier[LogA] [ literal[int] :]- identifier[LogA] [ literal[int] :- literal[int] ]
identifier[DeltaLogY] = identifier[LogY] [ literal[int] :]- identifier[LogY] [ literal[int] :- literal[int] ]
identifier[A] = identifier[AnrmAgg_hist] [( identifier[ignore_periods] + literal[int] ):]
identifier[BigTheta] = identifier[BigTheta_hist] [( identifier[ignore_periods] + literal[int] ):]
keyword[if] identifier[hasattr] ( identifier[Economy] , literal[string] ):
identifier[Mrkv] = identifier[Mrkv_hist] [( identifier[ignore_periods] + literal[int] ):]
keyword[if] ~ identifier[hasattr] ( identifier[Economy] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[Economy] , literal[string] ):
identifier[R] = identifier[ExpectedR_hist] [( identifier[ignore_periods] + literal[int] ):]
identifier[Delta8LogC] =( identifier[np] . identifier[log] ( identifier[ClvlAgg_hist] [ literal[int] :])- identifier[np] . identifier[log] ( identifier[ClvlAgg_hist] [:- literal[int] ]))[( identifier[ignore_periods] - literal[int] ):]
identifier[Delta8LogY] =( identifier[np] . identifier[log] ( identifier[YlvlAgg_hist] [ literal[int] :])- identifier[np] . identifier[log] ( identifier[YlvlAgg_hist] [:- literal[int] ]))[( identifier[ignore_periods] - literal[int] ):]
keyword[if] identifier[meas_err_base] keyword[is] keyword[None] :
identifier[meas_err_base] = identifier[np] . identifier[std] ( identifier[DeltaLogC] )
identifier[sigma_meas_err] = identifier[meas_err_base] * literal[int]
identifier[np] . identifier[random] . identifier[seed] ( literal[int] )
identifier[Measurement_Error] = identifier[sigma_meas_err] * identifier[np] . identifier[random] . identifier[normal] ( literal[int] , literal[int] , identifier[LogC] . identifier[size] )
identifier[LogC_me] = identifier[LogC] + identifier[Measurement_Error]
identifier[DeltaLogC_me] = identifier[LogC_me] [ literal[int] :]- identifier[LogC_me] [ literal[int] :- literal[int] ]
identifier[LogC_long] = identifier[np] . identifier[log] ( identifier[ClvlAgg_hist] )
identifier[LogC_long_me] = identifier[LogC_long] + identifier[sigma_meas_err] * identifier[np] . identifier[random] . identifier[normal] ( literal[int] , literal[int] , identifier[LogC_long] . identifier[size] )
identifier[Delta8LogC_me] =( identifier[LogC_long_me] [ literal[int] :]- identifier[LogC_long_me] [:- literal[int] ])[( identifier[ignore_periods] - literal[int] ):]
identifier[csv_output_string] = identifier[str] ( identifier[np] . identifier[mean] ( identifier[AnrmAgg_hist] [ identifier[ignore_periods] :]))+ literal[string] + identifier[str] ( identifier[np] . identifier[mean] ( identifier[CnrmAgg_hist] [ identifier[ignore_periods] :]))+ literal[string] + identifier[str] ( identifier[np] . identifier[std] ( identifier[np] . identifier[log] ( identifier[AnrmAgg_hist] [ identifier[ignore_periods] :])))+ literal[string] + identifier[str] ( identifier[np] . identifier[std] ( identifier[DeltaLogC] ))+ literal[string] + identifier[str] ( identifier[np] . identifier[std] ( identifier[DeltaLogY] ))+ literal[string] + identifier[str] ( identifier[np] . identifier[std] ( identifier[DeltaLogA] ))
keyword[if] identifier[hasattr] ( identifier[Economy] , literal[string] ) keyword[and] identifier[calc_micro_stats] :
identifier[csv_output_string] += literal[string] + identifier[str] ( identifier[np] . identifier[mean] ( identifier[np] . identifier[std] ( identifier[Loga] , identifier[axis] = literal[int] )))+ literal[string] + identifier[str] ( identifier[np] . identifier[mean] ( identifier[np] . identifier[std] ( identifier[Logc] , identifier[axis] = literal[int] )))+ literal[string] + identifier[str] ( identifier[np] . identifier[mean] ( identifier[np] . identifier[std] ( identifier[Logp] , identifier[axis] = literal[int] )))+ literal[string] + identifier[str] ( identifier[np] . identifier[mean] ( identifier[np] . identifier[nanstd] ( identifier[Logy_trimmed] , identifier[axis] = literal[int] )))+ literal[string] + identifier[str] ( identifier[np] . identifier[std] ( identifier[DeltaLoga_trimmed] ))+ literal[string] + identifier[str] ( identifier[np] . identifier[std] ( identifier[DeltaLogc_trimmed] ))+ literal[string] + identifier[str] ( identifier[np] . identifier[std] ( identifier[DeltaLogp_trimmed] ))
keyword[if] identifier[filename] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[open] ( identifier[results_dir] + identifier[filename] + literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[csv_output_string] )
identifier[f] . identifier[close] ()
keyword[if] identifier[calc_micro_stats] keyword[and] identifier[hasattr] ( identifier[Economy] , literal[string] ):
keyword[with] identifier[open] ( identifier[results_dir] + identifier[filename] + literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[my_writer] = identifier[csv] . identifier[writer] ( identifier[f] , identifier[delimiter] = literal[string] )
identifier[my_writer] . identifier[writerow] ( identifier[vBirth] )
identifier[f] . identifier[close] ()
keyword[if] identifier[save_data] :
identifier[DataArray] =( identifier[np] . identifier[vstack] (( identifier[np] . identifier[arange] ( identifier[DeltaLogC] . identifier[size] ), identifier[DeltaLogC_me] , identifier[DeltaLogC] , identifier[DeltaLogY] , identifier[A] , identifier[BigTheta] , identifier[Delta8LogC] , identifier[Delta8LogY] , identifier[Delta8LogC_me] , identifier[Measurement_Error] [ literal[int] :]))). identifier[transpose] ()
identifier[VarNames] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[hasattr] ( identifier[Economy] , literal[string] ):
identifier[DataArray] = identifier[np] . identifier[hstack] (( identifier[DataArray] , identifier[np] . identifier[reshape] ( identifier[Mrkv] ,( identifier[Mrkv] . identifier[size] , literal[int] ))))
identifier[VarNames] . identifier[append] ( literal[string] )
keyword[if] identifier[hasattr] ( identifier[Economy] , literal[string] )&~ identifier[hasattr] ( identifier[Economy] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[Economy] , literal[string] ):
identifier[DataArray] = identifier[np] . identifier[hstack] (( identifier[DataArray] , identifier[np] . identifier[reshape] ( identifier[R] ,( identifier[R] . identifier[size] , literal[int] ))))
identifier[VarNames] . identifier[append] ( literal[string] )
keyword[with] identifier[open] ( identifier[results_dir] + identifier[filename] + literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[my_writer] = identifier[csv] . identifier[writer] ( identifier[f] , identifier[delimiter] = literal[string] )
identifier[my_writer] . identifier[writerow] ( identifier[VarNames] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[DataArray] . identifier[shape] [ literal[int] ]):
identifier[my_writer] . identifier[writerow] ( identifier[DataArray] [ identifier[i] ,:])
identifier[f] . identifier[close] () | def makeStickyEdataFile(Economy, ignore_periods, description='', filename=None, save_data=False, calc_micro_stats=True, meas_err_base=None):
"""
Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None
"""
# Extract time series data from the economy
if hasattr(Economy, 'agents'): # If this is a heterogeneous agent specification...
if len(Economy.agents) > 1:
pLvlAll_hist = np.concatenate([this_type.pLvlTrue_hist for this_type in Economy.agents], axis=1)
aLvlAll_hist = np.concatenate([this_type.aLvlNow_hist for this_type in Economy.agents], axis=1)
cLvlAll_hist = np.concatenate([this_type.cLvlNow_hist for this_type in Economy.agents], axis=1)
yLvlAll_hist = np.concatenate([this_type.yLvlNow_hist for this_type in Economy.agents], axis=1) # depends on [control=['if'], data=[]]
else: # Don't duplicate the data unless necessary (with one type, concatenating is useless)
pLvlAll_hist = Economy.agents[0].pLvlTrue_hist
aLvlAll_hist = Economy.agents[0].aLvlNow_hist
cLvlAll_hist = Economy.agents[0].cLvlNow_hist
yLvlAll_hist = Economy.agents[0].yLvlNow_hist
# PermShkAggHist needs to be shifted one period forward
PlvlAgg_hist = np.cumprod(np.concatenate(([1.0], Economy.PermShkAggHist[:-1]), axis=0))
AlvlAgg_hist = np.mean(aLvlAll_hist, axis=1) # Level of aggregate assets
AnrmAgg_hist = AlvlAgg_hist / PlvlAgg_hist # Normalized level of aggregate assets
ClvlAgg_hist = np.mean(cLvlAll_hist, axis=1) # Level of aggregate consumption
CnrmAgg_hist = ClvlAgg_hist / PlvlAgg_hist # Normalized level of aggregate consumption
YlvlAgg_hist = np.mean(yLvlAll_hist, axis=1) # Level of aggregate income
YnrmAgg_hist = YlvlAgg_hist / PlvlAgg_hist # Normalized level of aggregate income
if calc_micro_stats: # Only calculate stats if requested. This is a memory hog with many simulated periods
micro_stat_periods = int((Economy.agents[0].T_sim - ignore_periods) * 0.1)
not_newborns = (np.concatenate([this_type.t_age_hist[ignore_periods + 1:ignore_periods + micro_stat_periods, :] for this_type in Economy.agents], axis=1) > 1).flatten()
Logc = np.log(cLvlAll_hist[ignore_periods:ignore_periods + micro_stat_periods, :])
DeltaLogc = (Logc[1:] - Logc[0:-1]).flatten()
DeltaLogc_trimmed = DeltaLogc[not_newborns]
Loga = np.log(aLvlAll_hist[ignore_periods:ignore_periods + micro_stat_periods, :])
DeltaLoga = (Loga[1:] - Loga[0:-1]).flatten()
DeltaLoga_trimmed = DeltaLoga[not_newborns]
Logp = np.log(pLvlAll_hist[ignore_periods:ignore_periods + micro_stat_periods, :])
DeltaLogp = (Logp[1:] - Logp[0:-1]).flatten()
DeltaLogp_trimmed = DeltaLogp[not_newborns]
Logy = np.log(yLvlAll_hist[ignore_periods:ignore_periods + micro_stat_periods, :])
Logy_trimmed = Logy
Logy_trimmed[np.isinf(Logy)] = np.nan
birth_events = np.concatenate([this_type.t_age_hist == 1 for this_type in Economy.agents], axis=1)
vBirth = calcValueAtBirth(cLvlAll_hist[ignore_periods:, :], birth_events[ignore_periods:, :], PlvlAgg_hist[ignore_periods:], Economy.MrkvNow_hist[ignore_periods:], Economy.agents[0].DiscFac, Economy.agents[0].CRRA) # depends on [control=['if'], data=[]]
BigTheta_hist = Economy.TranShkAggHist
if hasattr(Economy, 'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist
if ~hasattr(Economy, 'Rfree'): # If this is a markov DSGE specification...
# Find the expected interest rate - approximate by assuming growth = expected growth
ExpectedGrowth_hist = Economy.PermGroFacAgg[Mrkv_hist]
ExpectedKLRatio_hist = AnrmAgg_hist / ExpectedGrowth_hist
ExpectedR_hist = Economy.Rfunc(ExpectedKLRatio_hist) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else: # If this is a representative agent specification...
PlvlAgg_hist = Economy.pLvlTrue_hist.flatten()
ClvlAgg_hist = Economy.cLvlNow_hist.flatten()
CnrmAgg_hist = ClvlAgg_hist / PlvlAgg_hist.flatten()
YnrmAgg_hist = Economy.yNrmTrue_hist.flatten()
YlvlAgg_hist = YnrmAgg_hist * PlvlAgg_hist.flatten()
AlvlAgg_hist = Economy.aLvlNow_hist.flatten()
AnrmAgg_hist = AlvlAgg_hist / PlvlAgg_hist.flatten()
BigTheta_hist = Economy.TranShkNow_hist.flatten()
if hasattr(Economy, 'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist # depends on [control=['if'], data=[]]
# Process aggregate data into forms used by regressions
LogC = np.log(ClvlAgg_hist[ignore_periods:])
LogA = np.log(AlvlAgg_hist[ignore_periods:])
LogY = np.log(YlvlAgg_hist[ignore_periods:])
DeltaLogC = LogC[1:] - LogC[0:-1]
DeltaLogA = LogA[1:] - LogA[0:-1]
DeltaLogY = LogY[1:] - LogY[0:-1]
A = AnrmAgg_hist[ignore_periods + 1:] # This is a relabeling for the regression code
BigTheta = BigTheta_hist[ignore_periods + 1:]
if hasattr(Economy, 'MrkvNow'):
Mrkv = Mrkv_hist[ignore_periods + 1:] # This is a relabeling for the regression code
if ~hasattr(Economy, 'Rfree') and hasattr(Economy, 'agents'): # If this is a markov DSGE specification...
R = ExpectedR_hist[ignore_periods + 1:] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
Delta8LogC = (np.log(ClvlAgg_hist[8:]) - np.log(ClvlAgg_hist[:-8]))[ignore_periods - 7:]
Delta8LogY = (np.log(YlvlAgg_hist[8:]) - np.log(YlvlAgg_hist[:-8]))[ignore_periods - 7:]
# Add measurement error to LogC
if meas_err_base is None:
meas_err_base = np.std(DeltaLogC) # depends on [control=['if'], data=['meas_err_base']]
sigma_meas_err = meas_err_base * 0.375 # This approximately matches the change in IV vs OLS in U.S. empirical coefficients
np.random.seed(10)
Measurement_Error = sigma_meas_err * np.random.normal(0.0, 1.0, LogC.size)
LogC_me = LogC + Measurement_Error
DeltaLogC_me = LogC_me[1:] - LogC_me[0:-1]
# Apply measurement error to long delta LogC
LogC_long = np.log(ClvlAgg_hist)
LogC_long_me = LogC_long + sigma_meas_err * np.random.normal(0.0, 1.0, LogC_long.size)
Delta8LogC_me = (LogC_long_me[8:] - LogC_long_me[:-8])[ignore_periods - 7:]
# Make summary statistics for the results file
csv_output_string = str(np.mean(AnrmAgg_hist[ignore_periods:])) + ',' + str(np.mean(CnrmAgg_hist[ignore_periods:])) + ',' + str(np.std(np.log(AnrmAgg_hist[ignore_periods:]))) + ',' + str(np.std(DeltaLogC)) + ',' + str(np.std(DeltaLogY)) + ',' + str(np.std(DeltaLogA))
if hasattr(Economy, 'agents') and calc_micro_stats: # This block only runs for heterogeneous agents specifications
csv_output_string += ',' + str(np.mean(np.std(Loga, axis=1))) + ',' + str(np.mean(np.std(Logc, axis=1))) + ',' + str(np.mean(np.std(Logp, axis=1))) + ',' + str(np.mean(np.nanstd(Logy_trimmed, axis=1))) + ',' + str(np.std(DeltaLoga_trimmed)) + ',' + str(np.std(DeltaLogc_trimmed)) + ',' + str(np.std(DeltaLogp_trimmed)) # depends on [control=['if'], data=[]]
# Save the results to a logfile if requested
if filename is not None:
with open(results_dir + filename + 'Results.csv', 'w') as f:
f.write(csv_output_string)
f.close() # depends on [control=['with'], data=['f']]
if calc_micro_stats and hasattr(Economy, 'agents'):
with open(results_dir + filename + 'BirthValue.csv', 'w') as f:
my_writer = csv.writer(f, delimiter=',')
my_writer.writerow(vBirth)
f.close() # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
if save_data:
DataArray = np.vstack((np.arange(DeltaLogC.size), DeltaLogC_me, DeltaLogC, DeltaLogY, A, BigTheta, Delta8LogC, Delta8LogY, Delta8LogC_me, Measurement_Error[1:])).transpose()
VarNames = ['time_period', 'DeltaLogC_me', 'DeltaLogC', 'DeltaLogY', 'A', 'BigTheta', 'Delta8LogC', 'Delta8LogY', 'Delta8LogC_me', 'Measurement_Error']
if hasattr(Economy, 'MrkvNow'):
DataArray = np.hstack((DataArray, np.reshape(Mrkv, (Mrkv.size, 1))))
VarNames.append('MrkvState') # depends on [control=['if'], data=[]]
if hasattr(Economy, 'MrkvNow') & ~hasattr(Economy, 'Rfree') and hasattr(Economy, 'agents'):
DataArray = np.hstack((DataArray, np.reshape(R, (R.size, 1))))
VarNames.append('R') # depends on [control=['if'], data=[]]
with open(results_dir + filename + 'Data.txt', 'w') as f:
my_writer = csv.writer(f, delimiter='\t')
my_writer.writerow(VarNames)
for i in range(DataArray.shape[0]):
my_writer.writerow(DataArray[i, :]) # depends on [control=['for'], data=['i']]
f.close() # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['filename']] |
def stop(self):
"""
Stops the backend process.
"""
if self._process is None:
return
if self._shared:
BackendManager.SHARE_COUNT -= 1
if BackendManager.SHARE_COUNT:
return
comm('stopping backend process')
# close all sockets
for s in self._sockets:
s._callback = None
s.close()
self._sockets[:] = []
# prevent crash logs from being written if we are busy killing
# the process
self._process._prevent_logs = True
while self._process.state() != self._process.NotRunning:
self._process.waitForFinished(1)
if sys.platform == 'win32':
# Console applications on Windows that do not run an event
# loop, or whose event loop does not handle the WM_CLOSE
# message, can only be terminated by calling kill().
self._process.kill()
else:
self._process.terminate()
self._process._prevent_logs = False
self._heartbeat_timer.stop()
comm('backend process terminated') | def function[stop, parameter[self]]:
constant[
Stops the backend process.
]
if compare[name[self]._process is constant[None]] begin[:]
return[None]
if name[self]._shared begin[:]
<ast.AugAssign object at 0x7da18f09fa90>
if name[BackendManager].SHARE_COUNT begin[:]
return[None]
call[name[comm], parameter[constant[stopping backend process]]]
for taget[name[s]] in starred[name[self]._sockets] begin[:]
name[s]._callback assign[=] constant[None]
call[name[s].close, parameter[]]
call[name[self]._sockets][<ast.Slice object at 0x7da20c76ef20>] assign[=] list[[]]
name[self]._process._prevent_logs assign[=] constant[True]
while compare[call[name[self]._process.state, parameter[]] not_equal[!=] name[self]._process.NotRunning] begin[:]
call[name[self]._process.waitForFinished, parameter[constant[1]]]
if compare[name[sys].platform equal[==] constant[win32]] begin[:]
call[name[self]._process.kill, parameter[]]
name[self]._process._prevent_logs assign[=] constant[False]
call[name[self]._heartbeat_timer.stop, parameter[]]
call[name[comm], parameter[constant[backend process terminated]]] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_process] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[self] . identifier[_shared] :
identifier[BackendManager] . identifier[SHARE_COUNT] -= literal[int]
keyword[if] identifier[BackendManager] . identifier[SHARE_COUNT] :
keyword[return]
identifier[comm] ( literal[string] )
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[_sockets] :
identifier[s] . identifier[_callback] = keyword[None]
identifier[s] . identifier[close] ()
identifier[self] . identifier[_sockets] [:]=[]
identifier[self] . identifier[_process] . identifier[_prevent_logs] = keyword[True]
keyword[while] identifier[self] . identifier[_process] . identifier[state] ()!= identifier[self] . identifier[_process] . identifier[NotRunning] :
identifier[self] . identifier[_process] . identifier[waitForFinished] ( literal[int] )
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[self] . identifier[_process] . identifier[kill] ()
keyword[else] :
identifier[self] . identifier[_process] . identifier[terminate] ()
identifier[self] . identifier[_process] . identifier[_prevent_logs] = keyword[False]
identifier[self] . identifier[_heartbeat_timer] . identifier[stop] ()
identifier[comm] ( literal[string] ) | def stop(self):
"""
Stops the backend process.
"""
if self._process is None:
return # depends on [control=['if'], data=[]]
if self._shared:
BackendManager.SHARE_COUNT -= 1
if BackendManager.SHARE_COUNT:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
comm('stopping backend process')
# close all sockets
for s in self._sockets:
s._callback = None
s.close() # depends on [control=['for'], data=['s']]
self._sockets[:] = []
# prevent crash logs from being written if we are busy killing
# the process
self._process._prevent_logs = True
while self._process.state() != self._process.NotRunning:
self._process.waitForFinished(1)
if sys.platform == 'win32':
# Console applications on Windows that do not run an event
# loop, or whose event loop does not handle the WM_CLOSE
# message, can only be terminated by calling kill().
self._process.kill() # depends on [control=['if'], data=[]]
else:
self._process.terminate() # depends on [control=['while'], data=[]]
self._process._prevent_logs = False
self._heartbeat_timer.stop()
comm('backend process terminated') |
def runpf_consumer_loop(
        in_queue_url,
        workdir,
        lc_altexts=('',),
        wait_time_seconds=5,
        shutdown_check_timer_seconds=60.0,
        sqs_client=None,
        s3_client=None
):
    """This runs period-finding in a loop until interrupted.

    Consumes work task items from an input queue set up by `runpf_producer_loop`
    above.

    Parameters
    ----------
    in_queue_url : str
        The SQS URL of the input queue to listen to for work assignment
        messages. The task orders will include the input and output S3 bucket
        names, as well as the URL of the output queue to where this function
        will report its work-complete or work-failed status.

    workdir : str
        The directory on the local machine where this worker loop will download
        the input light curves, process them, and produce its output
        periodfinding result pickles. These will then be uploaded to the
        specified S3 output bucket, and then deleted from the local disk.

    lc_altexts : sequence of str
        If not None, this is a sequence of alternate extensions to try for the
        input light curve file other than the one provided in the input task
        order. For example, to get anything that's an .sqlite where .sqlite.gz
        is expected, use altexts=[''] to strip the .gz.

    wait_time_seconds : int
        The amount of time to wait in the input SQS queue for an input task
        order. If this timeout expires and no task has been received, this
        function goes back to the top of the work loop.

    shutdown_check_timer_seconds : float
        The amount of time to wait before checking for a pending EC2 shutdown
        message for the instance this worker loop is operating on. If a shutdown
        is noticed, the worker loop is cancelled in preparation for instance
        shutdown.

    sqs_client : boto3.Client or None
        If None, this function will instantiate a new `boto3.Client` object to
        use in its SQS operations. Alternatively, pass in an existing
        `boto3.Client` instance to re-use it here.

    s3_client : boto3.Client or None
        If None, this function will instantiate a new `boto3.Client` object to
        use in its S3 operations. Alternatively, pass in an existing
        `boto3.Client` instance to re-use it here.

    Returns
    -------
    Nothing.

    """

    # NOTE(review): `workdir` and `wait_time_seconds` are accepted and
    # documented above but never referenced in this loop body — presumably
    # `awsutils.sqs_get_item` applies the queue's own receive-wait setting and
    # downloads land in the process CWD. TODO: confirm and either wire these
    # parameters through or trim the docs.

    if not sqs_client:
        sqs_client = boto3.client('sqs')
    if not s3_client:
        s3_client = boto3.client('s3')

    # listen to the kill and term signals and raise KeyboardInterrupt when
    # called
    signal.signal(signal.SIGINT, kill_handler)
    signal.signal(signal.SIGTERM, kill_handler)

    shutdown_last_time = time.monotonic()

    while True:

        curr_time = time.monotonic()

        # every shutdown_check_timer_seconds, poll for a pending instance
        # shutdown and break out early so any in-flight message becomes
        # visible to other workers again after its visibility timeout
        if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds:
            shutdown_check = shutdown_check_handler()
            if shutdown_check:
                LOGWARNING('instance will die soon, breaking loop')
                break
            shutdown_last_time = time.monotonic()

        try:

            # receive a single message from the inqueue
            work = awsutils.sqs_get_item(in_queue_url,
                                         client=sqs_client,
                                         raiseonfail=True)

            # JSON deserialize the work item
            if work is not None and len(work) > 0:

                recv = work[0]

                # skip any messages that don't tell us to runpf
                # NOTE(review): skipped messages are not deleted here, so
                # they presumably reappear after the visibility timeout —
                # confirm that is the intended behavior.
                action = recv['item']['action']
                if action != 'runpf':
                    continue

                target = recv['item']['target']
                args = recv['item']['args']
                kwargs = recv['item']['kwargs']
                outbucket = recv['item']['outbucket']

                if 'outqueue' in recv['item']:
                    out_queue_url = recv['item']['outqueue']
                else:
                    out_queue_url = None

                # the receipt handle is required later to delete (i.e.
                # acknowledge) this message from the input queue
                receipt = recv['receipt_handle']

                # download the target from S3 to a file in the work directory
                try:

                    lc_filename = awsutils.s3_get_url(
                        target,
                        altexts=lc_altexts,
                        client=s3_client
                    )

                    runpf_args = (lc_filename, args[0])

                    # now runpf
                    pfresult = runpf(
                        *runpf_args,
                        **kwargs
                    )

                    if pfresult and os.path.exists(pfresult):

                        LOGINFO('runpf OK for LC: %s -> %s' %
                                (lc_filename, pfresult))

                        # check if the file exists already because it's been
                        # processed somewhere else
                        resp = s3_client.list_objects_v2(
                            Bucket=outbucket,
                            MaxKeys=1,
                            Prefix=pfresult
                        )
                        outbucket_list = resp.get('Contents',[])

                        if outbucket_list and len(outbucket_list) > 0:

                            LOGWARNING(
                                'not uploading pfresult for %s because '
                                'it exists in the output bucket already'
                                % target
                            )
                            awsutils.sqs_delete_item(in_queue_url, receipt)
                            continue

                        put_url = awsutils.s3_put_file(pfresult,
                                                       outbucket,
                                                       client=s3_client)

                        if put_url is not None:

                            LOGINFO('result uploaded to %s' % put_url)

                            # put the S3 URL of the output into the output
                            # queue if requested
                            if out_queue_url is not None:

                                awsutils.sqs_put_item(
                                    out_queue_url,
                                    {'pfresult':put_url,
                                     'target': target,
                                     'lc_filename':lc_filename,
                                     'kwargs':kwargs},
                                    raiseonfail=True
                                )

                            # delete the result from the local directory
                            os.remove(pfresult)

                        # if the upload fails, don't acknowledge the
                        # message. might be a temporary S3 failure, so
                        # another worker might succeed later.
                        # FIXME: add SNS bits to warn us of failures
                        else:
                            LOGERROR('failed to upload %s to S3' % pfresult)
                            os.remove(pfresult)

                        # delete the input item from the input queue to
                        # acknowledge its receipt and indicate that
                        # processing is done and successful
                        # NOTE(review): this delete runs even on the upload
                        # failure branch above, which contradicts the "don't
                        # acknowledge" comment — confirm intended semantics.
                        awsutils.sqs_delete_item(in_queue_url, receipt)

                        # delete the light curve file when we're done with it
                        if ( (lc_filename is not None) and
                             (os.path.exists(lc_filename)) ):
                            os.remove(lc_filename)

                    # if runcp failed outright, don't requeue. instead, write a
                    # ('failed-checkplot-%s.pkl' % lc_filename) file to the
                    # output S3 bucket.
                    else:

                        LOGWARNING('runpf failed for LC: %s' %
                                   (lc_filename,))

                        # NOTE(review): lc_filename may be a full path here,
                        # so the failure pickle's name can embed directory
                        # separators — confirm this is intended.
                        with open('failed-periodfinding-%s.pkl' %
                                  lc_filename, 'wb') as outfd:
                            pickle.dump(
                                {'in_queue_url':in_queue_url,
                                 'target':target,
                                 'lc_filename':lc_filename,
                                 'kwargs':kwargs,
                                 'outbucket':outbucket,
                                 'out_queue_url':out_queue_url},
                                outfd, pickle.HIGHEST_PROTOCOL
                            )

                        put_url = awsutils.s3_put_file(
                            'failed-periodfinding-%s.pkl' % lc_filename,
                            outbucket,
                            client=s3_client
                        )

                        # put the S3 URL of the output into the output
                        # queue if requested
                        if out_queue_url is not None:

                            awsutils.sqs_put_item(
                                out_queue_url,
                                {'pfresult':put_url,
                                 'lc_filename':lc_filename,
                                 'kwargs':kwargs},
                                raiseonfail=True
                            )

                        # delete the input item from the input queue to
                        # acknowledge its receipt and indicate that
                        # processing is done
                        awsutils.sqs_delete_item(in_queue_url,
                                                 receipt,
                                                 raiseonfail=True)

                        # delete the light curve file when we're done with it
                        if ( (lc_filename is not None) and
                             (os.path.exists(lc_filename)) ):
                            os.remove(lc_filename)

                # if the queues disappear mid-task, the producer is gone;
                # stop this worker as well
                except ClientError as e:

                    LOGWARNING('queues have disappeared. stopping worker loop')
                    break

                # if there's any other exception, put a failed response into the
                # output bucket and queue
                except Exception as e:

                    LOGEXCEPTION('could not process input from queue')

                    # lc_filename only exists if the S3 download succeeded,
                    # hence the locals() check before writing the failure
                    # pickle
                    if 'lc_filename' in locals():

                        with open('failed-periodfinding-%s.pkl' %
                                  lc_filename,'wb') as outfd:
                            pickle.dump(
                                {'in_queue_url':in_queue_url,
                                 'target':target,
                                 'lc_filename':lc_filename,
                                 'kwargs':kwargs,
                                 'outbucket':outbucket,
                                 'out_queue_url':out_queue_url},
                                outfd, pickle.HIGHEST_PROTOCOL
                            )

                        put_url = awsutils.s3_put_file(
                            'failed-periodfinding-%s.pkl' % lc_filename,
                            outbucket,
                            client=s3_client
                        )

                        # put the S3 URL of the output into the output
                        # queue if requested
                        if out_queue_url is not None:

                            awsutils.sqs_put_item(
                                out_queue_url,
                                {'pfresult':put_url,
                                 'lc_filename':lc_filename,
                                 'kwargs':kwargs},
                                raiseonfail=True
                            )

                        # delete the light curve file when we're done with it
                        if ( (lc_filename is not None) and
                             (os.path.exists(lc_filename)) ):
                            os.remove(lc_filename)

                    # delete the input item from the input queue to
                    # acknowledge its receipt and indicate that
                    # processing is done
                    awsutils.sqs_delete_item(in_queue_url,
                                             receipt,
                                             raiseonfail=True)

        # a keyboard interrupt kills the loop
        except KeyboardInterrupt:

            LOGWARNING('breaking out of the processing loop.')
            break

        # if the queues disappear, then the producer loop is done and we should
        # exit
        except ClientError as e:

            LOGWARNING('queues have disappeared. stopping worker loop')
            break

        # any other exception continues the loop we'll write the output file to
        # the output S3 bucket (and any optional output queue), but add a
        # failed-* prefix to it to indicate that processing failed. FIXME: could
        # use a dead-letter queue for this instead
        except Exception as e:

            LOGEXCEPTION('could not process input from queue')

            if 'lc_filename' in locals():

                with open('failed-periodfinding-%s.pkl' %
                          lc_filename,'wb') as outfd:
                    pickle.dump(
                        {'in_queue_url':in_queue_url,
                         'target':target,
                         'kwargs':kwargs,
                         'outbucket':outbucket,
                         'out_queue_url':out_queue_url},
                        outfd, pickle.HIGHEST_PROTOCOL
                    )

                put_url = awsutils.s3_put_file(
                    'failed-periodfinding-%s.pkl' % lc_filename,
                    outbucket,
                    client=s3_client
                )

                # put the S3 URL of the output into the output
                # queue if requested
                # NOTE(review): this handler reports the failure under the
                # key 'cpf' while the inner handlers use 'pfresult' —
                # consumers of the output queue should be checked for which
                # key they expect.
                if out_queue_url is not None:

                    awsutils.sqs_put_item(
                        out_queue_url,
                        {'cpf':put_url,
                         'kwargs':kwargs},
                        raiseonfail=True
                    )

                if ( (lc_filename is not None) and
                     (os.path.exists(lc_filename)) ):
                    os.remove(lc_filename)

            # delete the input item from the input queue to
            # acknowledge its receipt and indicate that
            # processing is done
awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) | def function[runpf_consumer_loop, parameter[in_queue_url, workdir, lc_altexts, wait_time_seconds, shutdown_check_timer_seconds, sqs_client, s3_client]]:
constant[This runs period-finding in a loop until interrupted.
Consumes work task items from an input queue set up by `runpf_producer_loop`
above.
Parameters
----------
in_queue_url : str
The SQS URL of the input queue to listen to for work assignment
messages. The task orders will include the input and output S3 bucket
names, as well as the URL of the output queue to where this function
will report its work-complete or work-failed status.
workdir : str
The directory on the local machine where this worker loop will download
the input light curves, process them, and produce its output
periodfinding result pickles. These will then be uploaded to the
specified S3 output bucket, and then deleted from the local disk.
lc_altexts : sequence of str
If not None, this is a sequence of alternate extensions to try for the
input light curve file other than the one provided in the input task
order. For example, to get anything that's an .sqlite where .sqlite.gz
is expected, use altexts=[''] to strip the .gz.
wait_time_seconds : int
The amount of time to wait in the input SQS queue for an input task
order. If this timeout expires and no task has been received, this
function goes back to the top of the work loop.
shutdown_check_timer_seconds : float
The amount of time to wait before checking for a pending EC2 shutdown
message for the instance this worker loop is operating on. If a shutdown
is noticed, the worker loop is cancelled in preparation for instance
shutdown.
sqs_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its SQS operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
s3_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its S3 operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
Returns
-------
Nothing.
]
if <ast.UnaryOp object at 0x7da1b0064430> begin[:]
variable[sqs_client] assign[=] call[name[boto3].client, parameter[constant[sqs]]]
if <ast.UnaryOp object at 0x7da1b00645e0> begin[:]
variable[s3_client] assign[=] call[name[boto3].client, parameter[constant[s3]]]
call[name[signal].signal, parameter[name[signal].SIGINT, name[kill_handler]]]
call[name[signal].signal, parameter[name[signal].SIGTERM, name[kill_handler]]]
variable[shutdown_last_time] assign[=] call[name[time].monotonic, parameter[]]
while constant[True] begin[:]
variable[curr_time] assign[=] call[name[time].monotonic, parameter[]]
if compare[binary_operation[name[curr_time] - name[shutdown_last_time]] greater[>] name[shutdown_check_timer_seconds]] begin[:]
variable[shutdown_check] assign[=] call[name[shutdown_check_handler], parameter[]]
if name[shutdown_check] begin[:]
call[name[LOGWARNING], parameter[constant[instance will die soon, breaking loop]]]
break
variable[shutdown_last_time] assign[=] call[name[time].monotonic, parameter[]]
<ast.Try object at 0x7da1b0065120> | keyword[def] identifier[runpf_consumer_loop] (
identifier[in_queue_url] ,
identifier[workdir] ,
identifier[lc_altexts] =( literal[string] ,),
identifier[wait_time_seconds] = literal[int] ,
identifier[shutdown_check_timer_seconds] = literal[int] ,
identifier[sqs_client] = keyword[None] ,
identifier[s3_client] = keyword[None]
):
literal[string]
keyword[if] keyword[not] identifier[sqs_client] :
identifier[sqs_client] = identifier[boto3] . identifier[client] ( literal[string] )
keyword[if] keyword[not] identifier[s3_client] :
identifier[s3_client] = identifier[boto3] . identifier[client] ( literal[string] )
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[kill_handler] )
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGTERM] , identifier[kill_handler] )
identifier[shutdown_last_time] = identifier[time] . identifier[monotonic] ()
keyword[while] keyword[True] :
identifier[curr_time] = identifier[time] . identifier[monotonic] ()
keyword[if] ( identifier[curr_time] - identifier[shutdown_last_time] )> identifier[shutdown_check_timer_seconds] :
identifier[shutdown_check] = identifier[shutdown_check_handler] ()
keyword[if] identifier[shutdown_check] :
identifier[LOGWARNING] ( literal[string] )
keyword[break]
identifier[shutdown_last_time] = identifier[time] . identifier[monotonic] ()
keyword[try] :
identifier[work] = identifier[awsutils] . identifier[sqs_get_item] ( identifier[in_queue_url] ,
identifier[client] = identifier[sqs_client] ,
identifier[raiseonfail] = keyword[True] )
keyword[if] identifier[work] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[work] )> literal[int] :
identifier[recv] = identifier[work] [ literal[int] ]
identifier[action] = identifier[recv] [ literal[string] ][ literal[string] ]
keyword[if] identifier[action] != literal[string] :
keyword[continue]
identifier[target] = identifier[recv] [ literal[string] ][ literal[string] ]
identifier[args] = identifier[recv] [ literal[string] ][ literal[string] ]
identifier[kwargs] = identifier[recv] [ literal[string] ][ literal[string] ]
identifier[outbucket] = identifier[recv] [ literal[string] ][ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[recv] [ literal[string] ]:
identifier[out_queue_url] = identifier[recv] [ literal[string] ][ literal[string] ]
keyword[else] :
identifier[out_queue_url] = keyword[None]
identifier[receipt] = identifier[recv] [ literal[string] ]
keyword[try] :
identifier[lc_filename] = identifier[awsutils] . identifier[s3_get_url] (
identifier[target] ,
identifier[altexts] = identifier[lc_altexts] ,
identifier[client] = identifier[s3_client]
)
identifier[runpf_args] =( identifier[lc_filename] , identifier[args] [ literal[int] ])
identifier[pfresult] = identifier[runpf] (
* identifier[runpf_args] ,
** identifier[kwargs]
)
keyword[if] identifier[pfresult] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[pfresult] ):
identifier[LOGINFO] ( literal[string] %
( identifier[lc_filename] , identifier[pfresult] ))
identifier[resp] = identifier[s3_client] . identifier[list_objects_v2] (
identifier[Bucket] = identifier[outbucket] ,
identifier[MaxKeys] = literal[int] ,
identifier[Prefix] = identifier[pfresult]
)
identifier[outbucket_list] = identifier[resp] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[outbucket_list] keyword[and] identifier[len] ( identifier[outbucket_list] )> literal[int] :
identifier[LOGWARNING] (
literal[string]
literal[string]
% identifier[target]
)
identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] )
keyword[continue]
identifier[put_url] = identifier[awsutils] . identifier[s3_put_file] ( identifier[pfresult] ,
identifier[outbucket] ,
identifier[client] = identifier[s3_client] )
keyword[if] identifier[put_url] keyword[is] keyword[not] keyword[None] :
identifier[LOGINFO] ( literal[string] % identifier[put_url] )
keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] :
identifier[awsutils] . identifier[sqs_put_item] (
identifier[out_queue_url] ,
{ literal[string] : identifier[put_url] ,
literal[string] : identifier[target] ,
literal[string] : identifier[lc_filename] ,
literal[string] : identifier[kwargs] },
identifier[raiseonfail] = keyword[True]
)
identifier[os] . identifier[remove] ( identifier[pfresult] )
keyword[else] :
identifier[LOGERROR] ( literal[string] % identifier[pfresult] )
identifier[os] . identifier[remove] ( identifier[pfresult] )
identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] )
keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and]
( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))):
identifier[os] . identifier[remove] ( identifier[lc_filename] )
keyword[else] :
identifier[LOGWARNING] ( literal[string] %
( identifier[lc_filename] ,))
keyword[with] identifier[open] ( literal[string] %
identifier[lc_filename] , literal[string] ) keyword[as] identifier[outfd] :
identifier[pickle] . identifier[dump] (
{ literal[string] : identifier[in_queue_url] ,
literal[string] : identifier[target] ,
literal[string] : identifier[lc_filename] ,
literal[string] : identifier[kwargs] ,
literal[string] : identifier[outbucket] ,
literal[string] : identifier[out_queue_url] },
identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL]
)
identifier[put_url] = identifier[awsutils] . identifier[s3_put_file] (
literal[string] % identifier[lc_filename] ,
identifier[outbucket] ,
identifier[client] = identifier[s3_client]
)
keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] :
identifier[awsutils] . identifier[sqs_put_item] (
identifier[out_queue_url] ,
{ literal[string] : identifier[put_url] ,
literal[string] : identifier[lc_filename] ,
literal[string] : identifier[kwargs] },
identifier[raiseonfail] = keyword[True]
)
identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] ,
identifier[receipt] ,
identifier[raiseonfail] = keyword[True] )
keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and]
( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))):
identifier[os] . identifier[remove] ( identifier[lc_filename] )
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
identifier[LOGWARNING] ( literal[string] )
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGEXCEPTION] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[locals] ():
keyword[with] identifier[open] ( literal[string] %
identifier[lc_filename] , literal[string] ) keyword[as] identifier[outfd] :
identifier[pickle] . identifier[dump] (
{ literal[string] : identifier[in_queue_url] ,
literal[string] : identifier[target] ,
literal[string] : identifier[lc_filename] ,
literal[string] : identifier[kwargs] ,
literal[string] : identifier[outbucket] ,
literal[string] : identifier[out_queue_url] },
identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL]
)
identifier[put_url] = identifier[awsutils] . identifier[s3_put_file] (
literal[string] % identifier[lc_filename] ,
identifier[outbucket] ,
identifier[client] = identifier[s3_client]
)
keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] :
identifier[awsutils] . identifier[sqs_put_item] (
identifier[out_queue_url] ,
{ literal[string] : identifier[put_url] ,
literal[string] : identifier[lc_filename] ,
literal[string] : identifier[kwargs] },
identifier[raiseonfail] = keyword[True]
)
keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and]
( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))):
identifier[os] . identifier[remove] ( identifier[lc_filename] )
identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] ,
identifier[receipt] ,
identifier[raiseonfail] = keyword[True] )
keyword[except] identifier[KeyboardInterrupt] :
identifier[LOGWARNING] ( literal[string] )
keyword[break]
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
identifier[LOGWARNING] ( literal[string] )
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGEXCEPTION] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[locals] ():
keyword[with] identifier[open] ( literal[string] %
identifier[lc_filename] , literal[string] ) keyword[as] identifier[outfd] :
identifier[pickle] . identifier[dump] (
{ literal[string] : identifier[in_queue_url] ,
literal[string] : identifier[target] ,
literal[string] : identifier[kwargs] ,
literal[string] : identifier[outbucket] ,
literal[string] : identifier[out_queue_url] },
identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL]
)
identifier[put_url] = identifier[awsutils] . identifier[s3_put_file] (
literal[string] % identifier[lc_filename] ,
identifier[outbucket] ,
identifier[client] = identifier[s3_client]
)
keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] :
identifier[awsutils] . identifier[sqs_put_item] (
identifier[out_queue_url] ,
{ literal[string] : identifier[put_url] ,
literal[string] : identifier[kwargs] },
identifier[raiseonfail] = keyword[True]
)
keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and]
( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))):
identifier[os] . identifier[remove] ( identifier[lc_filename] )
identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] , identifier[raiseonfail] = keyword[True] ) | def runpf_consumer_loop(in_queue_url, workdir, lc_altexts=('',), wait_time_seconds=5, shutdown_check_timer_seconds=60.0, sqs_client=None, s3_client=None):
"""This runs period-finding in a loop until interrupted.
Consumes work task items from an input queue set up by `runpf_producer_loop`
above.
Parameters
----------
in_queue_url : str
The SQS URL of the input queue to listen to for work assignment
messages. The task orders will include the input and output S3 bucket
names, as well as the URL of the output queue to where this function
will report its work-complete or work-failed status.
workdir : str
The directory on the local machine where this worker loop will download
the input light curves, process them, and produce its output
periodfinding result pickles. These will then be uploaded to the
specified S3 output bucket, and then deleted from the local disk.
lc_altexts : sequence of str
If not None, this is a sequence of alternate extensions to try for the
input light curve file other than the one provided in the input task
order. For example, to get anything that's an .sqlite where .sqlite.gz
is expected, use altexts=[''] to strip the .gz.
wait_time_seconds : int
The amount of time to wait in the input SQS queue for an input task
order. If this timeout expires and no task has been received, this
function goes back to the top of the work loop.
shutdown_check_timer_seconds : float
The amount of time to wait before checking for a pending EC2 shutdown
message for the instance this worker loop is operating on. If a shutdown
is noticed, the worker loop is cancelled in preparation for instance
shutdown.
sqs_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its SQS operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
s3_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its S3 operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
Returns
-------
Nothing.
"""
if not sqs_client:
sqs_client = boto3.client('sqs') # depends on [control=['if'], data=[]]
if not s3_client:
s3_client = boto3.client('s3') # depends on [control=['if'], data=[]]
# listen to the kill and term signals and raise KeyboardInterrupt when
# called
signal.signal(signal.SIGINT, kill_handler)
signal.signal(signal.SIGTERM, kill_handler)
shutdown_last_time = time.monotonic()
while True:
curr_time = time.monotonic()
if curr_time - shutdown_last_time > shutdown_check_timer_seconds:
shutdown_check = shutdown_check_handler()
if shutdown_check:
LOGWARNING('instance will die soon, breaking loop')
break # depends on [control=['if'], data=[]]
shutdown_last_time = time.monotonic() # depends on [control=['if'], data=[]]
try:
# receive a single message from the inqueue
work = awsutils.sqs_get_item(in_queue_url, client=sqs_client, raiseonfail=True)
# JSON deserialize the work item
if work is not None and len(work) > 0:
recv = work[0]
# skip any messages that don't tell us to runpf
action = recv['item']['action']
if action != 'runpf':
continue # depends on [control=['if'], data=[]]
target = recv['item']['target']
args = recv['item']['args']
kwargs = recv['item']['kwargs']
outbucket = recv['item']['outbucket']
if 'outqueue' in recv['item']:
out_queue_url = recv['item']['outqueue'] # depends on [control=['if'], data=[]]
else:
out_queue_url = None
receipt = recv['receipt_handle']
# download the target from S3 to a file in the work directory
try:
lc_filename = awsutils.s3_get_url(target, altexts=lc_altexts, client=s3_client)
runpf_args = (lc_filename, args[0])
# now runpf
pfresult = runpf(*runpf_args, **kwargs)
if pfresult and os.path.exists(pfresult):
LOGINFO('runpf OK for LC: %s -> %s' % (lc_filename, pfresult))
# check if the file exists already because it's been
# processed somewhere else
resp = s3_client.list_objects_v2(Bucket=outbucket, MaxKeys=1, Prefix=pfresult)
outbucket_list = resp.get('Contents', [])
if outbucket_list and len(outbucket_list) > 0:
LOGWARNING('not uploading pfresult for %s because it exists in the output bucket already' % target)
awsutils.sqs_delete_item(in_queue_url, receipt)
continue # depends on [control=['if'], data=[]]
put_url = awsutils.s3_put_file(pfresult, outbucket, client=s3_client)
if put_url is not None:
LOGINFO('result uploaded to %s' % put_url)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(out_queue_url, {'pfresult': put_url, 'target': target, 'lc_filename': lc_filename, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']]
# delete the result from the local directory
os.remove(pfresult) # depends on [control=['if'], data=['put_url']]
else:
# if the upload fails, don't acknowledge the
# message. might be a temporary S3 failure, so
# another worker might succeed later.
# FIXME: add SNS bits to warn us of failures
LOGERROR('failed to upload %s to S3' % pfresult)
os.remove(pfresult)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done and successful
awsutils.sqs_delete_item(in_queue_url, receipt)
# delete the light curve file when we're done with it
if lc_filename is not None and os.path.exists(lc_filename):
os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# if runcp failed outright, don't requeue. instead, write a
# ('failed-checkplot-%s.pkl' % lc_filename) file to the
# output S3 bucket.
LOGWARNING('runpf failed for LC: %s' % (lc_filename,))
with open('failed-periodfinding-%s.pkl' % lc_filename, 'wb') as outfd:
pickle.dump({'in_queue_url': in_queue_url, 'target': target, 'lc_filename': lc_filename, 'kwargs': kwargs, 'outbucket': outbucket, 'out_queue_url': out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']]
put_url = awsutils.s3_put_file('failed-periodfinding-%s.pkl' % lc_filename, outbucket, client=s3_client)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(out_queue_url, {'pfresult': put_url, 'lc_filename': lc_filename, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']]
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)
# delete the light curve file when we're done with it
if lc_filename is not None and os.path.exists(lc_filename):
os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ClientError as e:
LOGWARNING('queues have disappeared. stopping worker loop')
break # depends on [control=['except'], data=[]]
# if there's any other exception, put a failed response into the
# output bucket and queue
except Exception as e:
LOGEXCEPTION('could not process input from queue')
if 'lc_filename' in locals():
with open('failed-periodfinding-%s.pkl' % lc_filename, 'wb') as outfd:
pickle.dump({'in_queue_url': in_queue_url, 'target': target, 'lc_filename': lc_filename, 'kwargs': kwargs, 'outbucket': outbucket, 'out_queue_url': out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']]
put_url = awsutils.s3_put_file('failed-periodfinding-%s.pkl' % lc_filename, outbucket, client=s3_client)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(out_queue_url, {'pfresult': put_url, 'lc_filename': lc_filename, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']]
# delete the light curve file when we're done with it
if lc_filename is not None and os.path.exists(lc_filename):
os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
# a keyboard interrupt kills the loop
except KeyboardInterrupt:
LOGWARNING('breaking out of the processing loop.')
break # depends on [control=['except'], data=[]]
# if the queues disappear, then the producer loop is done and we should
# exit
except ClientError as e:
LOGWARNING('queues have disappeared. stopping worker loop')
break # depends on [control=['except'], data=[]]
# any other exception continues the loop we'll write the output file to
# the output S3 bucket (and any optional output queue), but add a
# failed-* prefix to it to indicate that processing failed. FIXME: could
# use a dead-letter queue for this instead
except Exception as e:
LOGEXCEPTION('could not process input from queue')
if 'lc_filename' in locals():
with open('failed-periodfinding-%s.pkl' % lc_filename, 'wb') as outfd:
pickle.dump({'in_queue_url': in_queue_url, 'target': target, 'kwargs': kwargs, 'outbucket': outbucket, 'out_queue_url': out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']]
put_url = awsutils.s3_put_file('failed-periodfinding-%s.pkl' % lc_filename, outbucket, client=s3_client)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(out_queue_url, {'cpf': put_url, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']]
if lc_filename is not None and os.path.exists(lc_filename):
os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def update(self, n: int = 1) -> None:
    """Increment current value.

    Parameters
    ----------
    n : int
        The amount to add to the wrapped progress bar's counter
        (defaults to 1).
    """
    # hold the lock so concurrent callers cannot interleave their
    # increments of the shared progress bar
    with self._lock:
        # NOTE(review): assumes self._pbar is a tqdm-like object whose
        # update() advances the counter by n — confirm against the
        # enclosing class (not visible in this excerpt).
        self._pbar.update(n)
self.refresh() | def function[update, parameter[self, n]]:
constant[Increment current value.]
with name[self]._lock begin[:]
call[name[self]._pbar.update, parameter[name[n]]]
call[name[self].refresh, parameter[]] | keyword[def] identifier[update] ( identifier[self] , identifier[n] = literal[int] ):
literal[string]
keyword[with] identifier[self] . identifier[_lock] :
identifier[self] . identifier[_pbar] . identifier[update] ( identifier[n] )
identifier[self] . identifier[refresh] () | def update(self, n=1):
"""Increment current value."""
with self._lock:
self._pbar.update(n)
self.refresh() # depends on [control=['with'], data=[]] |
def get_all(cls):
    ''' Gets all available instances of this model from the database '''
    # NOTE(review): takes `cls`, so this is presumably decorated with
    # @classmethod — the decorator is outside this excerpt; confirm.
    redis = cls.get_redis()
    # cls.members_key() names the Redis set that holds every stored id for
    # this model; SMEMBERS returns those ids as bytes, so each one is
    # decoded via debyte_string before being hydrated with cls.get(id).
    # NOTE(review): the lambda parameter `id` shadows the builtin of the
    # same name (harmless here, but worth renaming in a later cleanup).
    return list(map(
        lambda id: cls.get(id),
        map(
            debyte_string,
            redis.smembers(cls.members_key())
        )
)) | def function[get_all, parameter[cls]]:
constant[ Gets all available instances of this model from the database ]
variable[redis] assign[=] call[name[cls].get_redis, parameter[]]
return[call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da18dc9bf70>, call[name[map], parameter[name[debyte_string], call[name[redis].smembers, parameter[call[name[cls].members_key, parameter[]]]]]]]]]]] | keyword[def] identifier[get_all] ( identifier[cls] ):
literal[string]
identifier[redis] = identifier[cls] . identifier[get_redis] ()
keyword[return] identifier[list] ( identifier[map] (
keyword[lambda] identifier[id] : identifier[cls] . identifier[get] ( identifier[id] ),
identifier[map] (
identifier[debyte_string] ,
identifier[redis] . identifier[smembers] ( identifier[cls] . identifier[members_key] ())
)
)) | def get_all(cls):
""" Gets all available instances of this model from the database """
redis = cls.get_redis()
return list(map(lambda id: cls.get(id), map(debyte_string, redis.smembers(cls.members_key())))) |
def _step99(self, in_row, tmp_row, out_row):
"""
Validates all mandatory fields are in the output row and are filled.
:param dict in_row: The input row.
:param dict tmp_row: Not used.
:param dict out_row: The output row.
"""
park_info = ''
for field in self._mandatory_fields:
if field not in out_row or not out_row[field]:
if park_info:
park_info += ' '
park_info += field
return park_info, None | def function[_step99, parameter[self, in_row, tmp_row, out_row]]:
constant[
Validates all mandatory fields are in the output row and are filled.
:param dict in_row: The input row.
:param dict tmp_row: Not used.
:param dict out_row: The output row.
]
variable[park_info] assign[=] constant[]
for taget[name[field]] in starred[name[self]._mandatory_fields] begin[:]
if <ast.BoolOp object at 0x7da18dc9a1d0> begin[:]
if name[park_info] begin[:]
<ast.AugAssign object at 0x7da18dc98eb0>
<ast.AugAssign object at 0x7da18dc98a60>
return[tuple[[<ast.Name object at 0x7da18dc99de0>, <ast.Constant object at 0x7da18dc9a800>]]] | keyword[def] identifier[_step99] ( identifier[self] , identifier[in_row] , identifier[tmp_row] , identifier[out_row] ):
literal[string]
identifier[park_info] = literal[string]
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[_mandatory_fields] :
keyword[if] identifier[field] keyword[not] keyword[in] identifier[out_row] keyword[or] keyword[not] identifier[out_row] [ identifier[field] ]:
keyword[if] identifier[park_info] :
identifier[park_info] += literal[string]
identifier[park_info] += identifier[field]
keyword[return] identifier[park_info] , keyword[None] | def _step99(self, in_row, tmp_row, out_row):
"""
Validates all mandatory fields are in the output row and are filled.
:param dict in_row: The input row.
:param dict tmp_row: Not used.
:param dict out_row: The output row.
"""
park_info = ''
for field in self._mandatory_fields:
if field not in out_row or not out_row[field]:
if park_info:
park_info += ' ' # depends on [control=['if'], data=[]]
park_info += field # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
return (park_info, None) |
def _check_job_status(self, job, desc, status_key_name):
    """Check to see if the job completed successfully and, if not, construct and
    raise a ValueError.

    Args:
        job (str): The name of the job to check.
        desc (dict[str, str]): The result of ``describe_training_job()``.
        status_key_name (str): Status key name to check for.

    Raises:
        ValueError: If the training job fails.
    """
    raw_status = desc[status_key_name]
    # Normalize capital-case statuses (e.g. COMPLETED) to Camel case.
    status = _STATUS_CODE_TABLE.get(raw_status, raw_status)
    if status not in ('Completed', 'Stopped'):
        failure_reason = desc.get('FailureReason', '(No reason provided)')
        job_type = status_key_name.replace('JobStatus', ' job')
        raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, failure_reason))
constant[Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails.
]
variable[status] assign[=] call[name[desc]][name[status_key_name]]
variable[status] assign[=] call[name[_STATUS_CODE_TABLE].get, parameter[name[status], name[status]]]
if <ast.BoolOp object at 0x7da1b1c499c0> begin[:]
variable[reason] assign[=] call[name[desc].get, parameter[constant[FailureReason], constant[(No reason provided)]]]
variable[job_type] assign[=] call[name[status_key_name].replace, parameter[constant[JobStatus], constant[ job]]]
<ast.Raise object at 0x7da1b1c4a0b0> | keyword[def] identifier[_check_job_status] ( identifier[self] , identifier[job] , identifier[desc] , identifier[status_key_name] ):
literal[string]
identifier[status] = identifier[desc] [ identifier[status_key_name] ]
identifier[status] = identifier[_STATUS_CODE_TABLE] . identifier[get] ( identifier[status] , identifier[status] )
keyword[if] identifier[status] != literal[string] keyword[and] identifier[status] != literal[string] :
identifier[reason] = identifier[desc] . identifier[get] ( literal[string] , literal[string] )
identifier[job_type] = identifier[status_key_name] . identifier[replace] ( literal[string] , literal[string] )
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[job_type] , identifier[job] , identifier[status] , identifier[reason] )) | def _check_job_status(self, job, desc, status_key_name):
"""Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails.
"""
status = desc[status_key_name]
# If the status is capital case, then convert it to Camel case
status = _STATUS_CODE_TABLE.get(status, status)
if status != 'Completed' and status != 'Stopped':
reason = desc.get('FailureReason', '(No reason provided)')
job_type = status_key_name.replace('JobStatus', ' job')
raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason)) # depends on [control=['if'], data=[]] |
def get_parent_folder(self):
    """ Get the parent folder from attribute self.parent or
    getting it from the cloud

    :return: Parent Folder
    :rtype: mailbox.Folder or None
    """
    # The root folder has no parent by definition.
    if self.root:
        return None
    # Lazily resolve and cache the parent when only its id is known.
    if not self.parent and self.parent_id:
        self.parent = self.get_folder(folder_id=self.parent_id)
    return self.parent
constant[ Get the parent folder from attribute self.parent or
getting it from the cloud
:return: Parent Folder
:rtype: mailbox.Folder or None
]
if name[self].root begin[:]
return[constant[None]]
if name[self].parent begin[:]
return[name[self].parent]
if name[self].parent_id begin[:]
name[self].parent assign[=] call[name[self].get_folder, parameter[]]
return[name[self].parent] | keyword[def] identifier[get_parent_folder] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[root] :
keyword[return] keyword[None]
keyword[if] identifier[self] . identifier[parent] :
keyword[return] identifier[self] . identifier[parent]
keyword[if] identifier[self] . identifier[parent_id] :
identifier[self] . identifier[parent] = identifier[self] . identifier[get_folder] ( identifier[folder_id] = identifier[self] . identifier[parent_id] )
keyword[return] identifier[self] . identifier[parent] | def get_parent_folder(self):
""" Get the parent folder from attribute self.parent or
getting it from the cloud
:return: Parent Folder
:rtype: mailbox.Folder or None
"""
if self.root:
return None # depends on [control=['if'], data=[]]
if self.parent:
return self.parent # depends on [control=['if'], data=[]]
if self.parent_id:
self.parent = self.get_folder(folder_id=self.parent_id) # depends on [control=['if'], data=[]]
return self.parent |
def map_sid2sub(self, sid, sub):
    """
    Store the connection between a Session ID and a subject ID.

    :param sid: Session ID
    :param sub: subject ID
    """
    # Maintain both lookup directions so either identifier resolves the other.
    for table, key, value in (('sid2sub', sid, sub), ('sub2sid', sub, sid)):
        self.set(table, key, value)
constant[
Store the connection between a Session ID and a subject ID.
:param sid: Session ID
:param sub: subject ID
]
call[name[self].set, parameter[constant[sid2sub], name[sid], name[sub]]]
call[name[self].set, parameter[constant[sub2sid], name[sub], name[sid]]] | keyword[def] identifier[map_sid2sub] ( identifier[self] , identifier[sid] , identifier[sub] ):
literal[string]
identifier[self] . identifier[set] ( literal[string] , identifier[sid] , identifier[sub] )
identifier[self] . identifier[set] ( literal[string] , identifier[sub] , identifier[sid] ) | def map_sid2sub(self, sid, sub):
"""
Store the connection between a Session ID and a subject ID.
:param sid: Session ID
:param sub: subject ID
"""
self.set('sid2sub', sid, sub)
self.set('sub2sid', sub, sid) |
def distance_to_contact(D, alpha=1):
    """Compute contact matrix from input distance matrix.

    Distance values of zero are given the largest contact count otherwise
    inferred from the non-zero distance values.

    :param D: distance matrix (numpy array).
    :param alpha: either a callable mapping distances to contact counts, or
        a non-zero number ``a`` used as ``1 / d ** (1 / a)``.
    :returns: contact matrix of the same shape as ``D``.
    :raises ValueError: if ``alpha`` is zero.
    """
    if callable(alpha):
        distance_function = alpha
    else:
        try:
            a = np.float64(alpha)
        except TypeError:
            print("Alpha parameter must be callable or an array-like")
            raise
        # Bug fix: the old ``except ZeroDivisionError`` was dead code --
        # ``1 / a`` on a numpy float yields inf (with a warning) instead of
        # raising, and the division only ran later inside
        # ``distance_function`` anyway.  Guard explicitly up front.
        if a == 0:
            raise ValueError("Alpha parameter must be non-zero")

        def distance_function(x):
            return 1 / (x ** (1 / a))

    nonzero = D != 0
    # Zero distances (e.g. the diagonal) receive the largest inferred contact.
    m = np.max(distance_function(D[nonzero]))
    M = np.zeros(D.shape)
    M[nonzero] = distance_function(D[nonzero])
    M[~nonzero] = m
    return M
return M | def function[distance_to_contact, parameter[D, alpha]]:
constant[Compute contact matrix from input distance matrix. Distance values of
zeroes are given the largest contact count otherwise inferred non-zero
distance values.
]
if call[name[callable], parameter[name[alpha]]] begin[:]
variable[distance_function] assign[=] name[alpha]
variable[m] assign[=] call[name[np].max, parameter[call[name[distance_function], parameter[call[name[D]][compare[name[D] not_equal[!=] constant[0]]]]]]]
variable[M] assign[=] call[name[np].zeros, parameter[name[D].shape]]
call[name[M]][compare[name[D] not_equal[!=] constant[0]]] assign[=] call[name[distance_function], parameter[call[name[D]][compare[name[D] not_equal[!=] constant[0]]]]]
call[name[M]][compare[name[D] equal[==] constant[0]]] assign[=] name[m]
return[name[M]] | keyword[def] identifier[distance_to_contact] ( identifier[D] , identifier[alpha] = literal[int] ):
literal[string]
keyword[if] identifier[callable] ( identifier[alpha] ):
identifier[distance_function] = identifier[alpha]
keyword[else] :
keyword[try] :
identifier[a] = identifier[np] . identifier[float64] ( identifier[alpha] )
keyword[def] identifier[distance_function] ( identifier[x] ):
keyword[return] literal[int] /( identifier[x] **( literal[int] / identifier[a] ))
keyword[except] identifier[TypeError] :
identifier[print] ( literal[string] )
keyword[raise]
keyword[except] identifier[ZeroDivisionError] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[m] = identifier[np] . identifier[max] ( identifier[distance_function] ( identifier[D] [ identifier[D] != literal[int] ]))
identifier[M] = identifier[np] . identifier[zeros] ( identifier[D] . identifier[shape] )
identifier[M] [ identifier[D] != literal[int] ]= identifier[distance_function] ( identifier[D] [ identifier[D] != literal[int] ])
identifier[M] [ identifier[D] == literal[int] ]= identifier[m]
keyword[return] identifier[M] | def distance_to_contact(D, alpha=1):
"""Compute contact matrix from input distance matrix. Distance values of
zeroes are given the largest contact count otherwise inferred non-zero
distance values.
"""
if callable(alpha):
distance_function = alpha # depends on [control=['if'], data=[]]
else:
try:
a = np.float64(alpha)
def distance_function(x):
return 1 / x ** (1 / a) # depends on [control=['try'], data=[]]
except TypeError:
print('Alpha parameter must be callable or an array-like')
raise # depends on [control=['except'], data=[]]
except ZeroDivisionError:
raise ValueError('Alpha parameter must be non-zero') # depends on [control=['except'], data=[]]
m = np.max(distance_function(D[D != 0]))
M = np.zeros(D.shape)
M[D != 0] = distance_function(D[D != 0])
M[D == 0] = m
return M |
def drawQuad(self, img=None, quad=None, thickness=30):
    '''
    Draw the quad into given img
    '''
    if img is None:
        img = self.img
    if quad is None:
        quad = self.quad
    corners = np.int32(quad)
    # Use the image's brightest value so the outline is always visible.
    color = int(img.max())
    # Connect each corner to the next, wrapping back around to the first.
    for i in range(4):
        start = tuple(corners[i])
        end = tuple(corners[(i + 1) % 4])
        cv2.line(img, start, end, color, thickness)
    return img
constant[
Draw the quad into given img
]
if compare[name[img] is constant[None]] begin[:]
variable[img] assign[=] name[self].img
if compare[name[quad] is constant[None]] begin[:]
variable[quad] assign[=] name[self].quad
variable[q] assign[=] call[name[np].int32, parameter[name[quad]]]
variable[c] assign[=] call[name[int], parameter[call[name[img].max, parameter[]]]]
call[name[cv2].line, parameter[name[img], call[name[tuple], parameter[call[name[q]][constant[0]]]], call[name[tuple], parameter[call[name[q]][constant[1]]]], name[c], name[thickness]]]
call[name[cv2].line, parameter[name[img], call[name[tuple], parameter[call[name[q]][constant[1]]]], call[name[tuple], parameter[call[name[q]][constant[2]]]], name[c], name[thickness]]]
call[name[cv2].line, parameter[name[img], call[name[tuple], parameter[call[name[q]][constant[2]]]], call[name[tuple], parameter[call[name[q]][constant[3]]]], name[c], name[thickness]]]
call[name[cv2].line, parameter[name[img], call[name[tuple], parameter[call[name[q]][constant[3]]]], call[name[tuple], parameter[call[name[q]][constant[0]]]], name[c], name[thickness]]]
return[name[img]] | keyword[def] identifier[drawQuad] ( identifier[self] , identifier[img] = keyword[None] , identifier[quad] = keyword[None] , identifier[thickness] = literal[int] ):
literal[string]
keyword[if] identifier[img] keyword[is] keyword[None] :
identifier[img] = identifier[self] . identifier[img]
keyword[if] identifier[quad] keyword[is] keyword[None] :
identifier[quad] = identifier[self] . identifier[quad]
identifier[q] = identifier[np] . identifier[int32] ( identifier[quad] )
identifier[c] = identifier[int] ( identifier[img] . identifier[max] ())
identifier[cv2] . identifier[line] ( identifier[img] , identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[c] , identifier[thickness] )
identifier[cv2] . identifier[line] ( identifier[img] , identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[c] , identifier[thickness] )
identifier[cv2] . identifier[line] ( identifier[img] , identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[c] , identifier[thickness] )
identifier[cv2] . identifier[line] ( identifier[img] , identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[tuple] ( identifier[q] [ literal[int] ]), identifier[c] , identifier[thickness] )
keyword[return] identifier[img] | def drawQuad(self, img=None, quad=None, thickness=30):
"""
Draw the quad into given img
"""
if img is None:
img = self.img # depends on [control=['if'], data=['img']]
if quad is None:
quad = self.quad # depends on [control=['if'], data=['quad']]
q = np.int32(quad)
c = int(img.max())
cv2.line(img, tuple(q[0]), tuple(q[1]), c, thickness)
cv2.line(img, tuple(q[1]), tuple(q[2]), c, thickness)
cv2.line(img, tuple(q[2]), tuple(q[3]), c, thickness)
cv2.line(img, tuple(q[3]), tuple(q[0]), c, thickness)
return img |
def _ScheduleGenericHunt(hunt_obj):
  """Adds foreman rules for a generic hunt."""
  # TODO: Migrate foreman conditions to use relation expiration
  # durations instead of absolute timestamps.
  rule = foreman_rules.ForemanCondition(
      creation_time=rdfvalue.RDFDatetime.Now(),
      expiration_time=hunt_obj.init_start_time + hunt_obj.duration,
      description="Hunt %s %s" % (hunt_obj.hunt_id, hunt_obj.args.hunt_type),
      client_rule_set=hunt_obj.client_rule_set,
      hunt_id=hunt_obj.hunt_id)

  # Fail fast on a nonsensical rule before it is persisted.
  rule.Validate()
  data_store.REL_DB.WriteForemanRule(rule)
constant[Adds foreman rules for a generic hunt.]
variable[foreman_condition] assign[=] call[name[foreman_rules].ForemanCondition, parameter[]]
call[name[foreman_condition].Validate, parameter[]]
call[name[data_store].REL_DB.WriteForemanRule, parameter[name[foreman_condition]]] | keyword[def] identifier[_ScheduleGenericHunt] ( identifier[hunt_obj] ):
literal[string]
identifier[foreman_condition] = identifier[foreman_rules] . identifier[ForemanCondition] (
identifier[creation_time] = identifier[rdfvalue] . identifier[RDFDatetime] . identifier[Now] (),
identifier[expiration_time] = identifier[hunt_obj] . identifier[init_start_time] + identifier[hunt_obj] . identifier[duration] ,
identifier[description] = literal[string] %( identifier[hunt_obj] . identifier[hunt_id] , identifier[hunt_obj] . identifier[args] . identifier[hunt_type] ),
identifier[client_rule_set] = identifier[hunt_obj] . identifier[client_rule_set] ,
identifier[hunt_id] = identifier[hunt_obj] . identifier[hunt_id] )
identifier[foreman_condition] . identifier[Validate] ()
identifier[data_store] . identifier[REL_DB] . identifier[WriteForemanRule] ( identifier[foreman_condition] ) | def _ScheduleGenericHunt(hunt_obj):
"""Adds foreman rules for a generic hunt."""
# TODO: Migrate foreman conditions to use relation expiration
# durations instead of absolute timestamps.
foreman_condition = foreman_rules.ForemanCondition(creation_time=rdfvalue.RDFDatetime.Now(), expiration_time=hunt_obj.init_start_time + hunt_obj.duration, description='Hunt %s %s' % (hunt_obj.hunt_id, hunt_obj.args.hunt_type), client_rule_set=hunt_obj.client_rule_set, hunt_id=hunt_obj.hunt_id)
# Make sure the rule makes sense.
foreman_condition.Validate()
data_store.REL_DB.WriteForemanRule(foreman_condition) |
def _populate(self):
"""
**Purpose**: Populate the ResourceManager class with the validated
resource description
"""
if self._validated:
self._prof.prof('populating rmgr', uid=self._uid)
self._logger.debug('Populating resource manager object')
self._resource = self._resource_desc['resource']
self._walltime = self._resource_desc['walltime']
self._cpus = self._resource_desc['cpus']
self._gpus = self._resource_desc.get('gpus', 0)
self._project = self._resource_desc.get('project', None)
self._access_schema = self._resource_desc.get('access_schema', None)
self._queue = self._resource_desc.get('queue', None)
self._logger.debug('Resource manager population successful')
self._prof.prof('rmgr populated', uid=self._uid)
else:
raise EnTKError('Resource description not validated') | def function[_populate, parameter[self]]:
constant[
**Purpose**: Populate the ResourceManager class with the validated
resource description
]
if name[self]._validated begin[:]
call[name[self]._prof.prof, parameter[constant[populating rmgr]]]
call[name[self]._logger.debug, parameter[constant[Populating resource manager object]]]
name[self]._resource assign[=] call[name[self]._resource_desc][constant[resource]]
name[self]._walltime assign[=] call[name[self]._resource_desc][constant[walltime]]
name[self]._cpus assign[=] call[name[self]._resource_desc][constant[cpus]]
name[self]._gpus assign[=] call[name[self]._resource_desc.get, parameter[constant[gpus], constant[0]]]
name[self]._project assign[=] call[name[self]._resource_desc.get, parameter[constant[project], constant[None]]]
name[self]._access_schema assign[=] call[name[self]._resource_desc.get, parameter[constant[access_schema], constant[None]]]
name[self]._queue assign[=] call[name[self]._resource_desc.get, parameter[constant[queue], constant[None]]]
call[name[self]._logger.debug, parameter[constant[Resource manager population successful]]]
call[name[self]._prof.prof, parameter[constant[rmgr populated]]] | keyword[def] identifier[_populate] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_validated] :
identifier[self] . identifier[_prof] . identifier[prof] ( literal[string] , identifier[uid] = identifier[self] . identifier[_uid] )
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_resource] = identifier[self] . identifier[_resource_desc] [ literal[string] ]
identifier[self] . identifier[_walltime] = identifier[self] . identifier[_resource_desc] [ literal[string] ]
identifier[self] . identifier[_cpus] = identifier[self] . identifier[_resource_desc] [ literal[string] ]
identifier[self] . identifier[_gpus] = identifier[self] . identifier[_resource_desc] . identifier[get] ( literal[string] , literal[int] )
identifier[self] . identifier[_project] = identifier[self] . identifier[_resource_desc] . identifier[get] ( literal[string] , keyword[None] )
identifier[self] . identifier[_access_schema] = identifier[self] . identifier[_resource_desc] . identifier[get] ( literal[string] , keyword[None] )
identifier[self] . identifier[_queue] = identifier[self] . identifier[_resource_desc] . identifier[get] ( literal[string] , keyword[None] )
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_prof] . identifier[prof] ( literal[string] , identifier[uid] = identifier[self] . identifier[_uid] )
keyword[else] :
keyword[raise] identifier[EnTKError] ( literal[string] ) | def _populate(self):
"""
**Purpose**: Populate the ResourceManager class with the validated
resource description
"""
if self._validated:
self._prof.prof('populating rmgr', uid=self._uid)
self._logger.debug('Populating resource manager object')
self._resource = self._resource_desc['resource']
self._walltime = self._resource_desc['walltime']
self._cpus = self._resource_desc['cpus']
self._gpus = self._resource_desc.get('gpus', 0)
self._project = self._resource_desc.get('project', None)
self._access_schema = self._resource_desc.get('access_schema', None)
self._queue = self._resource_desc.get('queue', None)
self._logger.debug('Resource manager population successful')
self._prof.prof('rmgr populated', uid=self._uid) # depends on [control=['if'], data=[]]
else:
raise EnTKError('Resource description not validated') |
def decode(model_path_prefix: Union[str, Path],
           input_paths: Sequence[Path],
           label_set: Set[str],
           *,
           feature_type: str = "fbank", #TODO Make this None and infer feature_type from dimension of NN input layer.
           batch_size: int = 64,
           feat_dir: Optional[Path]=None,
           batch_x_name: str="batch_x:0",
           batch_x_lens_name: str="batch_x_lens:0",
           output_name: str="hyp_dense_decoded:0") -> List[List[str]]:
    """Use an existing tensorflow model that exists on disk to decode
    WAV files.

    Args:
        model_path_prefix: The path to the saved tensorflow model.
            This is the full prefix to the ".ckpt" file.
        input_paths: A sequence of `pathlib.Path`s to WAV files to put through
            the model provided.
        label_set: The set of all the labels this model uses.
        feature_type: The type of features this model uses.
            Note that this MUST match the type of features that the
            model was trained on initially.
        batch_size: How many utterances to feed through the model per batch.
        feat_dir: Any files that require preprocessing will be
            saved to the path specified by this.
        batch_x_name: The name of the tensorflow input for batch_x
        batch_x_lens_name: The name of the tensorflow input for batch_x_lens
        output_name: The name of the tensorflow output

    Returns:
        One list of label strings per input WAV.

    Raises:
        PersephoneException: If there are no input paths or one of them
            does not exist on disk.
    """
    if not input_paths:
        raise PersephoneException("No untranscribed WAVs to transcribe.")
    model_path_prefix = str(model_path_prefix)
    # Fail early, before any feature extraction, if an input is missing.
    for p in input_paths:
        if not p.exists():
            raise PersephoneException(
                "The WAV file path {} does not exist".format(p)
            )
    preprocessed_file_paths = []
    for p in input_paths:
        prefix = p.stem
        # Check the "feat" directory as per the filesystem conventions of a Corpus
        feature_file_ext = ".{}.npy".format(feature_type)
        conventional_npy_location = p.parent.parent / "feat" / (Path(prefix + feature_file_ext))
        if conventional_npy_location.exists():
            # don't need to preprocess it
            preprocessed_file_paths.append(conventional_npy_location)
        else:
            # NOTE(review): feat_dir assigned here persists for subsequent
            # iterations and enables the batch extraction below --
            # presumably intentional; confirm.
            if not feat_dir:
                feat_dir = p.parent.parent / "feat"
            if not feat_dir.is_dir():
                os.makedirs(str(feat_dir))
            mono16k_wav_path = feat_dir / "{}.wav".format(prefix)
            feat_path = feat_dir / "{}.{}.npy".format(prefix, feature_type)
            feat_extract.convert_wav(p, mono16k_wav_path)
            preprocessed_file_paths.append(feat_path)
    # preprocess the file that weren't found in the features directory
    # as per the filesystem conventions
    if feat_dir:
        feat_extract.from_dir(feat_dir, feature_type)
    fn_batches = utils.make_batches(preprocessed_file_paths, batch_size)
    # Load the model and perform decoding.
    metagraph = load_metagraph(model_path_prefix)
    with tf.Session() as sess:
        metagraph.restore(sess, model_path_prefix)
        for fn_batch in fn_batches:
            batch_x, batch_x_lens = utils.load_batch_x(fn_batch)
            # TODO These placeholder names should be a backup if names from a newer
            # naming scheme aren't present. Otherwise this won't generalize to
            # different architectures.
            feed_dict = {batch_x_name: batch_x,
                         batch_x_lens_name: batch_x_lens}
            dense_decoded = sess.run(output_name, feed_dict=feed_dict)
    # Create a human-readable representation of the decoded.
    indices_to_labels = labels.make_indices_to_labels(label_set)
    human_readable = dense_to_human_readable(dense_decoded, indices_to_labels)
    return human_readable
constant[Use an existing tensorflow model that exists on disk to decode
WAV files.
Args:
model_path_prefix: The path to the saved tensorflow model.
This is the full prefix to the ".ckpt" file.
input_paths: A sequence of `pathlib.Path`s to WAV files to put through
the model provided.
label_set: The set of all the labels this model uses.
feature_type: The type of features this model uses.
Note that this MUST match the type of features that the
model was trained on initially.
feat_dir: Any files that require preprocessing will be
saved to the path specified by this.
batch_x_name: The name of the tensorflow input for batch_x
batch_x_lens_name: The name of the tensorflow input for batch_x_lens
output_name: The name of the tensorflow output
]
if <ast.UnaryOp object at 0x7da1b11faf20> begin[:]
<ast.Raise object at 0x7da1b11faf50>
variable[model_path_prefix] assign[=] call[name[str], parameter[name[model_path_prefix]]]
for taget[name[p]] in starred[name[input_paths]] begin[:]
if <ast.UnaryOp object at 0x7da1b11fad10> begin[:]
<ast.Raise object at 0x7da1b11facb0>
variable[preprocessed_file_paths] assign[=] list[[]]
for taget[name[p]] in starred[name[input_paths]] begin[:]
variable[prefix] assign[=] name[p].stem
variable[feature_file_ext] assign[=] call[constant[.{}.npy].format, parameter[name[feature_type]]]
variable[conventional_npy_location] assign[=] binary_operation[binary_operation[name[p].parent.parent / constant[feat]] / call[name[Path], parameter[binary_operation[name[prefix] + name[feature_file_ext]]]]]
if call[name[conventional_npy_location].exists, parameter[]] begin[:]
call[name[preprocessed_file_paths].append, parameter[name[conventional_npy_location]]]
if name[feat_dir] begin[:]
call[name[feat_extract].from_dir, parameter[name[feat_dir], name[feature_type]]]
variable[fn_batches] assign[=] call[name[utils].make_batches, parameter[name[preprocessed_file_paths], name[batch_size]]]
variable[metagraph] assign[=] call[name[load_metagraph], parameter[name[model_path_prefix]]]
with call[name[tf].Session, parameter[]] begin[:]
call[name[metagraph].restore, parameter[name[sess], name[model_path_prefix]]]
for taget[name[fn_batch]] in starred[name[fn_batches]] begin[:]
<ast.Tuple object at 0x7da1b11fb7c0> assign[=] call[name[utils].load_batch_x, parameter[name[fn_batch]]]
variable[feed_dict] assign[=] dictionary[[<ast.Name object at 0x7da1b11fb2b0>, <ast.Name object at 0x7da1b11fb3d0>], [<ast.Name object at 0x7da1b11fb340>, <ast.Name object at 0x7da1b11fb310>]]
variable[dense_decoded] assign[=] call[name[sess].run, parameter[name[output_name]]]
variable[indices_to_labels] assign[=] call[name[labels].make_indices_to_labels, parameter[name[label_set]]]
variable[human_readable] assign[=] call[name[dense_to_human_readable], parameter[name[dense_decoded], name[indices_to_labels]]]
return[name[human_readable]] | keyword[def] identifier[decode] ( identifier[model_path_prefix] : identifier[Union] [ identifier[str] , identifier[Path] ],
identifier[input_paths] : identifier[Sequence] [ identifier[Path] ],
identifier[label_set] : identifier[Set] [ identifier[str] ],
*,
identifier[feature_type] : identifier[str] = literal[string] ,
identifier[batch_size] : identifier[int] = literal[int] ,
identifier[feat_dir] : identifier[Optional] [ identifier[Path] ]= keyword[None] ,
identifier[batch_x_name] : identifier[str] = literal[string] ,
identifier[batch_x_lens_name] : identifier[str] = literal[string] ,
identifier[output_name] : identifier[str] = literal[string] )-> identifier[List] [ identifier[List] [ identifier[str] ]]:
literal[string]
keyword[if] keyword[not] identifier[input_paths] :
keyword[raise] identifier[PersephoneException] ( literal[string] )
identifier[model_path_prefix] = identifier[str] ( identifier[model_path_prefix] )
keyword[for] identifier[p] keyword[in] identifier[input_paths] :
keyword[if] keyword[not] identifier[p] . identifier[exists] ():
keyword[raise] identifier[PersephoneException] (
literal[string] . identifier[format] ( identifier[p] )
)
identifier[preprocessed_file_paths] =[]
keyword[for] identifier[p] keyword[in] identifier[input_paths] :
identifier[prefix] = identifier[p] . identifier[stem]
identifier[feature_file_ext] = literal[string] . identifier[format] ( identifier[feature_type] )
identifier[conventional_npy_location] = identifier[p] . identifier[parent] . identifier[parent] / literal[string] /( identifier[Path] ( identifier[prefix] + identifier[feature_file_ext] ))
keyword[if] identifier[conventional_npy_location] . identifier[exists] ():
identifier[preprocessed_file_paths] . identifier[append] ( identifier[conventional_npy_location] )
keyword[else] :
keyword[if] keyword[not] identifier[feat_dir] :
identifier[feat_dir] = identifier[p] . identifier[parent] . identifier[parent] / literal[string]
keyword[if] keyword[not] identifier[feat_dir] . identifier[is_dir] ():
identifier[os] . identifier[makedirs] ( identifier[str] ( identifier[feat_dir] ))
identifier[mono16k_wav_path] = identifier[feat_dir] / literal[string] . identifier[format] ( identifier[prefix] )
identifier[feat_path] = identifier[feat_dir] / literal[string] . identifier[format] ( identifier[prefix] , identifier[feature_type] )
identifier[feat_extract] . identifier[convert_wav] ( identifier[p] , identifier[mono16k_wav_path] )
identifier[preprocessed_file_paths] . identifier[append] ( identifier[feat_path] )
keyword[if] identifier[feat_dir] :
identifier[feat_extract] . identifier[from_dir] ( identifier[feat_dir] , identifier[feature_type] )
identifier[fn_batches] = identifier[utils] . identifier[make_batches] ( identifier[preprocessed_file_paths] , identifier[batch_size] )
identifier[metagraph] = identifier[load_metagraph] ( identifier[model_path_prefix] )
keyword[with] identifier[tf] . identifier[Session] () keyword[as] identifier[sess] :
identifier[metagraph] . identifier[restore] ( identifier[sess] , identifier[model_path_prefix] )
keyword[for] identifier[fn_batch] keyword[in] identifier[fn_batches] :
identifier[batch_x] , identifier[batch_x_lens] = identifier[utils] . identifier[load_batch_x] ( identifier[fn_batch] )
identifier[feed_dict] ={ identifier[batch_x_name] : identifier[batch_x] ,
identifier[batch_x_lens_name] : identifier[batch_x_lens] }
identifier[dense_decoded] = identifier[sess] . identifier[run] ( identifier[output_name] , identifier[feed_dict] = identifier[feed_dict] )
identifier[indices_to_labels] = identifier[labels] . identifier[make_indices_to_labels] ( identifier[label_set] )
identifier[human_readable] = identifier[dense_to_human_readable] ( identifier[dense_decoded] , identifier[indices_to_labels] )
keyword[return] identifier[human_readable] | def decode(model_path_prefix: Union[str, Path], input_paths: Sequence[Path], label_set: Set[str], *, feature_type: str='fbank', batch_size: int=64, feat_dir: Optional[Path]=None, batch_x_name: str='batch_x:0', batch_x_lens_name: str='batch_x_lens:0', output_name: str='hyp_dense_decoded:0') -> List[List[str]]: #TODO Make this None and infer feature_type from dimension of NN input layer.
'Use an existing tensorflow model that exists on disk to decode\n WAV files.\n\n Args:\n model_path_prefix: The path to the saved tensorflow model.\n This is the full prefix to the ".ckpt" file.\n input_paths: A sequence of `pathlib.Path`s to WAV files to put through\n the model provided.\n label_set: The set of all the labels this model uses.\n feature_type: The type of features this model uses.\n Note that this MUST match the type of features that the\n model was trained on initially.\n feat_dir: Any files that require preprocessing will be\n saved to the path specified by this.\n batch_x_name: The name of the tensorflow input for batch_x\n batch_x_lens_name: The name of the tensorflow input for batch_x_lens\n output_name: The name of the tensorflow output\n '
if not input_paths:
raise PersephoneException('No untranscribed WAVs to transcribe.') # depends on [control=['if'], data=[]]
model_path_prefix = str(model_path_prefix)
for p in input_paths:
if not p.exists():
raise PersephoneException('The WAV file path {} does not exist'.format(p)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
preprocessed_file_paths = []
for p in input_paths:
prefix = p.stem
# Check the "feat" directory as per the filesystem conventions of a Corpus
feature_file_ext = '.{}.npy'.format(feature_type)
conventional_npy_location = p.parent.parent / 'feat' / Path(prefix + feature_file_ext)
if conventional_npy_location.exists():
# don't need to preprocess it
preprocessed_file_paths.append(conventional_npy_location) # depends on [control=['if'], data=[]]
else:
if not feat_dir:
feat_dir = p.parent.parent / 'feat' # depends on [control=['if'], data=[]]
if not feat_dir.is_dir():
os.makedirs(str(feat_dir)) # depends on [control=['if'], data=[]]
mono16k_wav_path = feat_dir / '{}.wav'.format(prefix)
feat_path = feat_dir / '{}.{}.npy'.format(prefix, feature_type)
feat_extract.convert_wav(p, mono16k_wav_path)
preprocessed_file_paths.append(feat_path) # depends on [control=['for'], data=['p']]
# preprocess the file that weren't found in the features directory
# as per the filesystem conventions
if feat_dir:
feat_extract.from_dir(feat_dir, feature_type) # depends on [control=['if'], data=[]]
fn_batches = utils.make_batches(preprocessed_file_paths, batch_size)
# Load the model and perform decoding.
metagraph = load_metagraph(model_path_prefix)
with tf.Session() as sess:
metagraph.restore(sess, model_path_prefix)
for fn_batch in fn_batches:
(batch_x, batch_x_lens) = utils.load_batch_x(fn_batch) # depends on [control=['for'], data=['fn_batch']]
# TODO These placeholder names should be a backup if names from a newer
# naming scheme aren't present. Otherwise this won't generalize to
# different architectures.
feed_dict = {batch_x_name: batch_x, batch_x_lens_name: batch_x_lens}
dense_decoded = sess.run(output_name, feed_dict=feed_dict) # depends on [control=['with'], data=['sess']]
# Create a human-readable representation of the decoded.
indices_to_labels = labels.make_indices_to_labels(label_set)
human_readable = dense_to_human_readable(dense_decoded, indices_to_labels)
return human_readable |
def get_primary_key(self, table):
"""Retrieve the column which is the primary key for a table."""
for column in self.get_schema(table):
if len(column) > 3 and 'pri' in column[3].lower():
return column[0] | def function[get_primary_key, parameter[self, table]]:
constant[Retrieve the column which is the primary key for a table.]
for taget[name[column]] in starred[call[name[self].get_schema, parameter[name[table]]]] begin[:]
if <ast.BoolOp object at 0x7da1b0bd23e0> begin[:]
return[call[name[column]][constant[0]]] | keyword[def] identifier[get_primary_key] ( identifier[self] , identifier[table] ):
literal[string]
keyword[for] identifier[column] keyword[in] identifier[self] . identifier[get_schema] ( identifier[table] ):
keyword[if] identifier[len] ( identifier[column] )> literal[int] keyword[and] literal[string] keyword[in] identifier[column] [ literal[int] ]. identifier[lower] ():
keyword[return] identifier[column] [ literal[int] ] | def get_primary_key(self, table):
"""Retrieve the column which is the primary key for a table."""
for column in self.get_schema(table):
if len(column) > 3 and 'pri' in column[3].lower():
return column[0] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']] |
def group(ctx, project, group): # pylint:disable=redefined-outer-name
"""Commands for experiment groups."""
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['group'] = group | def function[group, parameter[ctx, project, group]]:
constant[Commands for experiment groups.]
name[ctx].obj assign[=] <ast.BoolOp object at 0x7da1aff1e4d0>
call[name[ctx].obj][constant[project]] assign[=] name[project]
call[name[ctx].obj][constant[group]] assign[=] name[group] | keyword[def] identifier[group] ( identifier[ctx] , identifier[project] , identifier[group] ):
literal[string]
identifier[ctx] . identifier[obj] = identifier[ctx] . identifier[obj] keyword[or] {}
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[project]
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[group] | def group(ctx, project, group): # pylint:disable=redefined-outer-name
'Commands for experiment groups.'
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['group'] = group |
def del_all_svc_comments(self, service):
"""Delete all service comments
Format of the line that triggers function call::
DEL_ALL_SVC_COMMENTS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
comments = list(service.comments.keys())
for uuid in comments:
service.del_comment(uuid)
self.send_an_element(service.get_update_status_brok()) | def function[del_all_svc_comments, parameter[self, service]]:
constant[Delete all service comments
Format of the line that triggers function call::
DEL_ALL_SVC_COMMENTS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
]
variable[comments] assign[=] call[name[list], parameter[call[name[service].comments.keys, parameter[]]]]
for taget[name[uuid]] in starred[name[comments]] begin[:]
call[name[service].del_comment, parameter[name[uuid]]]
call[name[self].send_an_element, parameter[call[name[service].get_update_status_brok, parameter[]]]] | keyword[def] identifier[del_all_svc_comments] ( identifier[self] , identifier[service] ):
literal[string]
identifier[comments] = identifier[list] ( identifier[service] . identifier[comments] . identifier[keys] ())
keyword[for] identifier[uuid] keyword[in] identifier[comments] :
identifier[service] . identifier[del_comment] ( identifier[uuid] )
identifier[self] . identifier[send_an_element] ( identifier[service] . identifier[get_update_status_brok] ()) | def del_all_svc_comments(self, service):
"""Delete all service comments
Format of the line that triggers function call::
DEL_ALL_SVC_COMMENTS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
comments = list(service.comments.keys())
for uuid in comments:
service.del_comment(uuid) # depends on [control=['for'], data=['uuid']]
self.send_an_element(service.get_update_status_brok()) |
def declare_selfvars(self):
"""
A block to declare self variables
"""
self._dictErr = {
'inputDirFail' : {
'action' : 'trying to check on the input directory, ',
'error' : 'directory not found. This is a *required* input',
'exitCode' : 1},
'inputReadCallback' : {
'action' : 'checking on the status of the inputReadCallback return, ',
'error' : 'no boolean "status" was found. This is a *required* return key',
'exitCode' : 2},
'analysisCallback' : {
'action' : 'checking on the status of the analysisCallback return, ',
'error' : 'no boolean "status" was found. This is a *required* return key',
'exitCode' : 3},
'outputWriteCallback' : {
'action' : 'checking on the status of the outputWriteCallback return, ',
'error' : 'no boolean "status" was found. This is a *required* return key',
'exitCode' : 4}
}
#
# Object desc block
#
self.str_desc = ''
self.__name__ = "pftree"
self.str_version = "2.0.0"
# Object containing this class
self.within = None
# Thread number
self.numThreads = 1
# Directory and filenames
self.str_inputDir = ''
self.str_inputFile = ''
self.str_outputDir = ''
self.d_inputTree = {}
self.d_inputTreeCallback = {}
self.d_outputTree = {}
self.str_outputLeafDir = ''
self.maxdepth = -1
# Flags
self.b_persistAnalysisResults = False
self.b_relativeDir = False
self.b_stats = False
self.b_statsReverse = False
self.b_jsonStats = False
self.b_json = False
self.b_test = False
self.b_followLinks = False
self.str_sleepLength = ''
self.f_sleepLength = 0.0
self.testType = 0
self.dp = None
self.log = None
self.tic_start = 0.0
self.pp = pprint.PrettyPrinter(indent=4)
self.verbosityLevel = 1 | def function[declare_selfvars, parameter[self]]:
constant[
A block to declare self variables
]
name[self]._dictErr assign[=] dictionary[[<ast.Constant object at 0x7da2054a4d90>, <ast.Constant object at 0x7da2054a61a0>, <ast.Constant object at 0x7da2054a5ea0>, <ast.Constant object at 0x7da2054a4070>], [<ast.Dict object at 0x7da2054a40d0>, <ast.Dict object at 0x7da2054a4760>, <ast.Dict object at 0x7da2054a7d90>, <ast.Dict object at 0x7da2054a6770>]]
name[self].str_desc assign[=] constant[]
name[self].__name__ assign[=] constant[pftree]
name[self].str_version assign[=] constant[2.0.0]
name[self].within assign[=] constant[None]
name[self].numThreads assign[=] constant[1]
name[self].str_inputDir assign[=] constant[]
name[self].str_inputFile assign[=] constant[]
name[self].str_outputDir assign[=] constant[]
name[self].d_inputTree assign[=] dictionary[[], []]
name[self].d_inputTreeCallback assign[=] dictionary[[], []]
name[self].d_outputTree assign[=] dictionary[[], []]
name[self].str_outputLeafDir assign[=] constant[]
name[self].maxdepth assign[=] <ast.UnaryOp object at 0x7da20cabedd0>
name[self].b_persistAnalysisResults assign[=] constant[False]
name[self].b_relativeDir assign[=] constant[False]
name[self].b_stats assign[=] constant[False]
name[self].b_statsReverse assign[=] constant[False]
name[self].b_jsonStats assign[=] constant[False]
name[self].b_json assign[=] constant[False]
name[self].b_test assign[=] constant[False]
name[self].b_followLinks assign[=] constant[False]
name[self].str_sleepLength assign[=] constant[]
name[self].f_sleepLength assign[=] constant[0.0]
name[self].testType assign[=] constant[0]
name[self].dp assign[=] constant[None]
name[self].log assign[=] constant[None]
name[self].tic_start assign[=] constant[0.0]
name[self].pp assign[=] call[name[pprint].PrettyPrinter, parameter[]]
name[self].verbosityLevel assign[=] constant[1] | keyword[def] identifier[declare_selfvars] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_dictErr] ={
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] },
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] },
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] },
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] }
}
identifier[self] . identifier[str_desc] = literal[string]
identifier[self] . identifier[__name__] = literal[string]
identifier[self] . identifier[str_version] = literal[string]
identifier[self] . identifier[within] = keyword[None]
identifier[self] . identifier[numThreads] = literal[int]
identifier[self] . identifier[str_inputDir] = literal[string]
identifier[self] . identifier[str_inputFile] = literal[string]
identifier[self] . identifier[str_outputDir] = literal[string]
identifier[self] . identifier[d_inputTree] ={}
identifier[self] . identifier[d_inputTreeCallback] ={}
identifier[self] . identifier[d_outputTree] ={}
identifier[self] . identifier[str_outputLeafDir] = literal[string]
identifier[self] . identifier[maxdepth] =- literal[int]
identifier[self] . identifier[b_persistAnalysisResults] = keyword[False]
identifier[self] . identifier[b_relativeDir] = keyword[False]
identifier[self] . identifier[b_stats] = keyword[False]
identifier[self] . identifier[b_statsReverse] = keyword[False]
identifier[self] . identifier[b_jsonStats] = keyword[False]
identifier[self] . identifier[b_json] = keyword[False]
identifier[self] . identifier[b_test] = keyword[False]
identifier[self] . identifier[b_followLinks] = keyword[False]
identifier[self] . identifier[str_sleepLength] = literal[string]
identifier[self] . identifier[f_sleepLength] = literal[int]
identifier[self] . identifier[testType] = literal[int]
identifier[self] . identifier[dp] = keyword[None]
identifier[self] . identifier[log] = keyword[None]
identifier[self] . identifier[tic_start] = literal[int]
identifier[self] . identifier[pp] = identifier[pprint] . identifier[PrettyPrinter] ( identifier[indent] = literal[int] )
identifier[self] . identifier[verbosityLevel] = literal[int] | def declare_selfvars(self):
"""
A block to declare self variables
"""
self._dictErr = {'inputDirFail': {'action': 'trying to check on the input directory, ', 'error': 'directory not found. This is a *required* input', 'exitCode': 1}, 'inputReadCallback': {'action': 'checking on the status of the inputReadCallback return, ', 'error': 'no boolean "status" was found. This is a *required* return key', 'exitCode': 2}, 'analysisCallback': {'action': 'checking on the status of the analysisCallback return, ', 'error': 'no boolean "status" was found. This is a *required* return key', 'exitCode': 3}, 'outputWriteCallback': {'action': 'checking on the status of the outputWriteCallback return, ', 'error': 'no boolean "status" was found. This is a *required* return key', 'exitCode': 4}}
#
# Object desc block
#
self.str_desc = ''
self.__name__ = 'pftree'
self.str_version = '2.0.0'
# Object containing this class
self.within = None
# Thread number
self.numThreads = 1
# Directory and filenames
self.str_inputDir = ''
self.str_inputFile = ''
self.str_outputDir = ''
self.d_inputTree = {}
self.d_inputTreeCallback = {}
self.d_outputTree = {}
self.str_outputLeafDir = ''
self.maxdepth = -1
# Flags
self.b_persistAnalysisResults = False
self.b_relativeDir = False
self.b_stats = False
self.b_statsReverse = False
self.b_jsonStats = False
self.b_json = False
self.b_test = False
self.b_followLinks = False
self.str_sleepLength = ''
self.f_sleepLength = 0.0
self.testType = 0
self.dp = None
self.log = None
self.tic_start = 0.0
self.pp = pprint.PrettyPrinter(indent=4)
self.verbosityLevel = 1 |
def get_audits():
"""Get OS hardening security limits audits.
:returns: dictionary of audits
"""
audits = []
settings = utils.get_settings('os')
# Ensure that the /etc/security/limits.d directory is only writable
# by the root user, but others can execute and read.
audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
user='root', group='root',
mode=0o755))
# If core dumps are not enabled, then don't allow core dumps to be
# created as they may contain sensitive information.
if not settings['security']['kernel_enable_core_dump']:
audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
SecurityLimitsContext(),
template_dir=TEMPLATES_DIR,
user='root', group='root', mode=0o0440))
return audits | def function[get_audits, parameter[]]:
constant[Get OS hardening security limits audits.
:returns: dictionary of audits
]
variable[audits] assign[=] list[[]]
variable[settings] assign[=] call[name[utils].get_settings, parameter[constant[os]]]
call[name[audits].append, parameter[call[name[DirectoryPermissionAudit], parameter[constant[/etc/security/limits.d]]]]]
if <ast.UnaryOp object at 0x7da18bc73a60> begin[:]
call[name[audits].append, parameter[call[name[TemplatedFile], parameter[constant[/etc/security/limits.d/10.hardcore.conf], call[name[SecurityLimitsContext], parameter[]]]]]]
return[name[audits]] | keyword[def] identifier[get_audits] ():
literal[string]
identifier[audits] =[]
identifier[settings] = identifier[utils] . identifier[get_settings] ( literal[string] )
identifier[audits] . identifier[append] ( identifier[DirectoryPermissionAudit] ( literal[string] ,
identifier[user] = literal[string] , identifier[group] = literal[string] ,
identifier[mode] = literal[int] ))
keyword[if] keyword[not] identifier[settings] [ literal[string] ][ literal[string] ]:
identifier[audits] . identifier[append] ( identifier[TemplatedFile] ( literal[string] ,
identifier[SecurityLimitsContext] (),
identifier[template_dir] = identifier[TEMPLATES_DIR] ,
identifier[user] = literal[string] , identifier[group] = literal[string] , identifier[mode] = literal[int] ))
keyword[return] identifier[audits] | def get_audits():
"""Get OS hardening security limits audits.
:returns: dictionary of audits
"""
audits = []
settings = utils.get_settings('os')
# Ensure that the /etc/security/limits.d directory is only writable
# by the root user, but others can execute and read.
audits.append(DirectoryPermissionAudit('/etc/security/limits.d', user='root', group='root', mode=493))
# If core dumps are not enabled, then don't allow core dumps to be
# created as they may contain sensitive information.
if not settings['security']['kernel_enable_core_dump']:
audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', SecurityLimitsContext(), template_dir=TEMPLATES_DIR, user='root', group='root', mode=288)) # depends on [control=['if'], data=[]]
return audits |
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut | def function[columns, parameter[self]]:
constant[Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
]
variable[fut] assign[=] call[name[self]._run_operation, parameter[name[self]._impl.columns]]
return[name[fut]] | keyword[def] identifier[columns] ( identifier[self] ,** identifier[kw] ):
literal[string]
identifier[fut] = identifier[self] . identifier[_run_operation] ( identifier[self] . identifier[_impl] . identifier[columns] ,** identifier[kw] )
keyword[return] identifier[fut] | def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut |
def get_description(self, description_type='Abstract'):
"""Get DataCite description."""
if 'descriptions' in self.xml:
if isinstance(self.xml['descriptions']['description'], list):
for description in self.xml['descriptions']['description']:
if description_type in description:
return description[description_type]
elif isinstance(self.xml['descriptions']['description'], dict):
description = self.xml['descriptions']['description']
if description_type in description:
return description[description_type]
elif len(description) == 1:
# return the only description
return description.values()[0]
return None | def function[get_description, parameter[self, description_type]]:
constant[Get DataCite description.]
if compare[constant[descriptions] in name[self].xml] begin[:]
if call[name[isinstance], parameter[call[call[name[self].xml][constant[descriptions]]][constant[description]], name[list]]] begin[:]
for taget[name[description]] in starred[call[call[name[self].xml][constant[descriptions]]][constant[description]]] begin[:]
if compare[name[description_type] in name[description]] begin[:]
return[call[name[description]][name[description_type]]]
return[constant[None]] | keyword[def] identifier[get_description] ( identifier[self] , identifier[description_type] = literal[string] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[xml] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[xml] [ literal[string] ][ literal[string] ], identifier[list] ):
keyword[for] identifier[description] keyword[in] identifier[self] . identifier[xml] [ literal[string] ][ literal[string] ]:
keyword[if] identifier[description_type] keyword[in] identifier[description] :
keyword[return] identifier[description] [ identifier[description_type] ]
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[xml] [ literal[string] ][ literal[string] ], identifier[dict] ):
identifier[description] = identifier[self] . identifier[xml] [ literal[string] ][ literal[string] ]
keyword[if] identifier[description_type] keyword[in] identifier[description] :
keyword[return] identifier[description] [ identifier[description_type] ]
keyword[elif] identifier[len] ( identifier[description] )== literal[int] :
keyword[return] identifier[description] . identifier[values] ()[ literal[int] ]
keyword[return] keyword[None] | def get_description(self, description_type='Abstract'):
"""Get DataCite description."""
if 'descriptions' in self.xml:
if isinstance(self.xml['descriptions']['description'], list):
for description in self.xml['descriptions']['description']:
if description_type in description:
return description[description_type] # depends on [control=['if'], data=['description_type', 'description']] # depends on [control=['for'], data=['description']] # depends on [control=['if'], data=[]]
elif isinstance(self.xml['descriptions']['description'], dict):
description = self.xml['descriptions']['description']
if description_type in description:
return description[description_type] # depends on [control=['if'], data=['description_type', 'description']]
elif len(description) == 1:
# return the only description
return description.values()[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return None |
def tornado_combiner(configs, use_gevent=False, start=True, monkey_patch=None,
Container=None, Server=None, threadpool=None): # pragma: no cover
"""Combine servers in one tornado event loop process
:param configs: [
{
'app': Microservice Application or another wsgi application, required
'port': int, default: 5000
'address': str, default: ""
},
{ ... }
]
:param use_gevent: if True, app.wsgi will be run in gevent.spawn
:param start: if True, will be call utils.tornado_start()
:param Container: your class, bases on tornado.wsgi.WSGIContainer, default: tornado.wsgi.WSGIContainer
:param Server: your class, bases on tornado.httpserver.HTTPServer, default: tornado.httpserver.HTTPServer
:param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: use_gevent
:return: list of tornado servers
"""
servers = []
if monkey_patch is None:
monkey_patch = use_gevent
if use_gevent:
if monkey_patch:
from gevent import monkey
monkey.patch_all()
if threadpool is not None:
from multiprocessing.pool import ThreadPool
if not isinstance(threadpool, ThreadPool):
threadpool = ThreadPool(threadpool)
for config in configs:
app = config['app']
port = config.get('port', 5000)
address = config.get('address', '')
server = tornado_run(app, use_gevent=use_gevent, port=port,
monkey_patch=False, address=address, start=False,
Container=Container,
Server=Server, threadpool=threadpool)
servers.append(server)
if start:
tornado_start()
return servers | def function[tornado_combiner, parameter[configs, use_gevent, start, monkey_patch, Container, Server, threadpool]]:
constant[Combine servers in one tornado event loop process
:param configs: [
{
'app': Microservice Application or another wsgi application, required
'port': int, default: 5000
'address': str, default: ""
},
{ ... }
]
:param use_gevent: if True, app.wsgi will be run in gevent.spawn
:param start: if True, will be call utils.tornado_start()
:param Container: your class, bases on tornado.wsgi.WSGIContainer, default: tornado.wsgi.WSGIContainer
:param Server: your class, bases on tornado.httpserver.HTTPServer, default: tornado.httpserver.HTTPServer
:param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: use_gevent
:return: list of tornado servers
]
variable[servers] assign[=] list[[]]
if compare[name[monkey_patch] is constant[None]] begin[:]
variable[monkey_patch] assign[=] name[use_gevent]
if name[use_gevent] begin[:]
if name[monkey_patch] begin[:]
from relative_module[gevent] import module[monkey]
call[name[monkey].patch_all, parameter[]]
if compare[name[threadpool] is_not constant[None]] begin[:]
from relative_module[multiprocessing.pool] import module[ThreadPool]
if <ast.UnaryOp object at 0x7da18eb55420> begin[:]
variable[threadpool] assign[=] call[name[ThreadPool], parameter[name[threadpool]]]
for taget[name[config]] in starred[name[configs]] begin[:]
variable[app] assign[=] call[name[config]][constant[app]]
variable[port] assign[=] call[name[config].get, parameter[constant[port], constant[5000]]]
variable[address] assign[=] call[name[config].get, parameter[constant[address], constant[]]]
variable[server] assign[=] call[name[tornado_run], parameter[name[app]]]
call[name[servers].append, parameter[name[server]]]
if name[start] begin[:]
call[name[tornado_start], parameter[]]
return[name[servers]] | keyword[def] identifier[tornado_combiner] ( identifier[configs] , identifier[use_gevent] = keyword[False] , identifier[start] = keyword[True] , identifier[monkey_patch] = keyword[None] ,
identifier[Container] = keyword[None] , identifier[Server] = keyword[None] , identifier[threadpool] = keyword[None] ):
literal[string]
identifier[servers] =[]
keyword[if] identifier[monkey_patch] keyword[is] keyword[None] :
identifier[monkey_patch] = identifier[use_gevent]
keyword[if] identifier[use_gevent] :
keyword[if] identifier[monkey_patch] :
keyword[from] identifier[gevent] keyword[import] identifier[monkey]
identifier[monkey] . identifier[patch_all] ()
keyword[if] identifier[threadpool] keyword[is] keyword[not] keyword[None] :
keyword[from] identifier[multiprocessing] . identifier[pool] keyword[import] identifier[ThreadPool]
keyword[if] keyword[not] identifier[isinstance] ( identifier[threadpool] , identifier[ThreadPool] ):
identifier[threadpool] = identifier[ThreadPool] ( identifier[threadpool] )
keyword[for] identifier[config] keyword[in] identifier[configs] :
identifier[app] = identifier[config] [ literal[string] ]
identifier[port] = identifier[config] . identifier[get] ( literal[string] , literal[int] )
identifier[address] = identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[server] = identifier[tornado_run] ( identifier[app] , identifier[use_gevent] = identifier[use_gevent] , identifier[port] = identifier[port] ,
identifier[monkey_patch] = keyword[False] , identifier[address] = identifier[address] , identifier[start] = keyword[False] ,
identifier[Container] = identifier[Container] ,
identifier[Server] = identifier[Server] , identifier[threadpool] = identifier[threadpool] )
identifier[servers] . identifier[append] ( identifier[server] )
keyword[if] identifier[start] :
identifier[tornado_start] ()
keyword[return] identifier[servers] | def tornado_combiner(configs, use_gevent=False, start=True, monkey_patch=None, Container=None, Server=None, threadpool=None): # pragma: no cover
'Combine servers in one tornado event loop process\n\n :param configs: [\n {\n \'app\': Microservice Application or another wsgi application, required\n \'port\': int, default: 5000\n \'address\': str, default: ""\n },\n { ... }\n ]\n :param use_gevent: if True, app.wsgi will be run in gevent.spawn\n :param start: if True, will be call utils.tornado_start()\n :param Container: your class, bases on tornado.wsgi.WSGIContainer, default: tornado.wsgi.WSGIContainer\n :param Server: your class, bases on tornado.httpserver.HTTPServer, default: tornado.httpserver.HTTPServer\n :param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: use_gevent\n :return: list of tornado servers\n '
servers = []
if monkey_patch is None:
monkey_patch = use_gevent # depends on [control=['if'], data=['monkey_patch']]
if use_gevent:
if monkey_patch:
from gevent import monkey
monkey.patch_all() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if threadpool is not None:
from multiprocessing.pool import ThreadPool
if not isinstance(threadpool, ThreadPool):
threadpool = ThreadPool(threadpool) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['threadpool']]
for config in configs:
app = config['app']
port = config.get('port', 5000)
address = config.get('address', '')
server = tornado_run(app, use_gevent=use_gevent, port=port, monkey_patch=False, address=address, start=False, Container=Container, Server=Server, threadpool=threadpool)
servers.append(server) # depends on [control=['for'], data=['config']]
if start:
tornado_start() # depends on [control=['if'], data=[]]
return servers |
def check_cache(self, *args):
"""Call linecache.checkcache() safely protecting our cached values.
"""
# First call the orignal checkcache as intended
linecache._checkcache_ori(*args)
# Then, update back the cache with our data, so that tracebacks related
# to our compiled codes can be produced.
linecache.cache.update(linecache._ipython_cache) | def function[check_cache, parameter[self]]:
constant[Call linecache.checkcache() safely protecting our cached values.
]
call[name[linecache]._checkcache_ori, parameter[<ast.Starred object at 0x7da18ede5870>]]
call[name[linecache].cache.update, parameter[name[linecache]._ipython_cache]] | keyword[def] identifier[check_cache] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[linecache] . identifier[_checkcache_ori] (* identifier[args] )
identifier[linecache] . identifier[cache] . identifier[update] ( identifier[linecache] . identifier[_ipython_cache] ) | def check_cache(self, *args):
"""Call linecache.checkcache() safely protecting our cached values.
"""
# First call the orignal checkcache as intended
linecache._checkcache_ori(*args)
# Then, update back the cache with our data, so that tracebacks related
# to our compiled codes can be produced.
linecache.cache.update(linecache._ipython_cache) |
def get_edgestore_handle(
    client: arango.client.ArangoClient,
    username=None,
    password=None,
    edgestore_db_name: str = edgestore_db_name,
    edgestore_edges_name: str = edgestore_edges_name,
    edgestore_nodes_name: str = edgestore_nodes_name,
    edgestore_pipeline_name: str = edgestore_pipeline_name,
    edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,
    edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,
) -> arango.database.StandardDatabase:
    """Get Edgestore arangodb database handle.

    Creates the database, its collections and their indexes on first use;
    when creation fails (typically because they already exist) the existing
    objects are opened/kept instead (best effort).

    Args:
        client: Low-level ArangoDB client.
        username: Optional username; resolved via ``get_user_creds`` when
            not given.
        password: Optional password; resolved via ``get_user_creds`` when
            not given.
        edgestore_db_name: Name of the edgestore database.
        edgestore_edges_name: Name of the edge collection.
        edgestore_nodes_name: Name of the node collection.
        edgestore_pipeline_name: Name of the pipeline collection.
        edgestore_pipeline_stats_name: Name of the pipeline stats collection.
        edgestore_pipeline_errors_name: Name of the pipeline errors collection.

    Returns:
        arango.database.StandardDatabase: Handle to the edgestore database.
    """
    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)
    # Create a new database named "edgestore"; if it already exists, open it.
    try:
        if username and password:
            edgestore_db = sys_db.create_database(
                name=edgestore_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            edgestore_db = sys_db.create_database(name=edgestore_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            edgestore_db = client.db(
                edgestore_db_name, username=username, password=password
            )
        else:
            edgestore_db = client.db(edgestore_db_name)
    # TODO - add a skiplist index for _from? or _key? to be able to do paging?
    # ``has_collection`` doesn't seem to be working, so collection creation is
    # attempted unconditionally and failures (e.g. "already exists") ignored.
    try:
        nodes = edgestore_db.create_collection(
            edgestore_nodes_name, index_bucket_count=64
        )
        nodes.add_hash_index(fields=["name"], unique=False)
        # Subject/object components are stored as node properties.
        nodes.add_hash_index(fields=["components"], unique=False)
    except Exception:
        pass
    try:
        edges = edgestore_db.create_collection(
            edgestore_edges_name, edge=True, index_bucket_count=64
        )
        edges.add_hash_index(fields=["relation"], unique=False)
        edges.add_hash_index(fields=["edge_types"], unique=False)
        edges.add_hash_index(fields=["nanopub_id"], unique=False)
        edges.add_hash_index(fields=["metadata.project"], unique=False)
        edges.add_hash_index(fields=["annotations[*].id"], unique=False)
    except Exception:
        pass
    # Plain (index-free) bookkeeping collections, also created best effort.
    # NOTE: the stats collection previously caught only CollectionCreateError
    # (with an unused ``as e``); unified with its siblings for consistency.
    for collection_name in (
        edgestore_pipeline_name,
        edgestore_pipeline_errors_name,
        edgestore_pipeline_stats_name,
    ):
        try:
            edgestore_db.create_collection(collection_name)
        except Exception:
            pass
    return edgestore_db
constant[Get Edgestore arangodb database handle
Args:
client (arango.client.ArangoClient): Description
username (None, optional): Description
password (None, optional): Description
edgestore_db_name (str, optional): Description
edgestore_edges_name (str, optional): Description
edgestore_nodes_name (str, optional): Description
Returns:
arango.database.StandardDatabase: Description
]
<ast.Tuple object at 0x7da18f00e740> assign[=] call[name[get_user_creds], parameter[name[username], name[password]]]
variable[sys_db] assign[=] call[name[client].db, parameter[constant[_system]]]
<ast.Try object at 0x7da18f00eb30>
<ast.Try object at 0x7da18f00ffd0>
<ast.Try object at 0x7da18f00e200>
<ast.Try object at 0x7da18f00e7a0>
<ast.Try object at 0x7da18f00ea10>
<ast.Try object at 0x7da18f00e380>
return[name[edgestore_db]] | keyword[def] identifier[get_edgestore_handle] (
identifier[client] : identifier[arango] . identifier[client] . identifier[ArangoClient] ,
identifier[username] = keyword[None] ,
identifier[password] = keyword[None] ,
identifier[edgestore_db_name] : identifier[str] = identifier[edgestore_db_name] ,
identifier[edgestore_edges_name] : identifier[str] = identifier[edgestore_edges_name] ,
identifier[edgestore_nodes_name] : identifier[str] = identifier[edgestore_nodes_name] ,
identifier[edgestore_pipeline_name] : identifier[str] = identifier[edgestore_pipeline_name] ,
identifier[edgestore_pipeline_stats_name] : identifier[str] = identifier[edgestore_pipeline_stats_name] ,
identifier[edgestore_pipeline_errors_name] : identifier[str] = identifier[edgestore_pipeline_errors_name] ,
)-> identifier[arango] . identifier[database] . identifier[StandardDatabase] :
literal[string]
( identifier[username] , identifier[password] )= identifier[get_user_creds] ( identifier[username] , identifier[password] )
identifier[sys_db] = identifier[client] . identifier[db] ( literal[string] , identifier[username] = identifier[username] , identifier[password] = identifier[password] )
keyword[try] :
keyword[if] identifier[username] keyword[and] identifier[password] :
identifier[edgestore_db] = identifier[sys_db] . identifier[create_database] (
identifier[name] = identifier[edgestore_db_name] ,
identifier[users] =[{ literal[string] : identifier[username] , literal[string] : identifier[password] , literal[string] : keyword[True] }],
)
keyword[else] :
identifier[edgestore_db] = identifier[sys_db] . identifier[create_database] ( identifier[name] = identifier[edgestore_db_name] )
keyword[except] identifier[arango] . identifier[exceptions] . identifier[DatabaseCreateError] :
keyword[if] identifier[username] keyword[and] identifier[password] :
identifier[edgestore_db] = identifier[client] . identifier[db] (
identifier[edgestore_db_name] , identifier[username] = identifier[username] , identifier[password] = identifier[password]
)
keyword[else] :
identifier[edgestore_db] = identifier[client] . identifier[db] ( identifier[edgestore_db_name] )
keyword[try] :
identifier[nodes] = identifier[edgestore_db] . identifier[create_collection] (
identifier[edgestore_nodes_name] , identifier[index_bucket_count] = literal[int]
)
identifier[nodes] . identifier[add_hash_index] ( identifier[fields] =[ literal[string] ], identifier[unique] = keyword[False] )
identifier[nodes] . identifier[add_hash_index] (
identifier[fields] =[ literal[string] ], identifier[unique] = keyword[False]
)
keyword[except] identifier[Exception] :
keyword[pass]
keyword[try] :
identifier[edges] = identifier[edgestore_db] . identifier[create_collection] (
identifier[edgestore_edges_name] , identifier[edge] = keyword[True] , identifier[index_bucket_count] = literal[int]
)
identifier[edges] . identifier[add_hash_index] ( identifier[fields] =[ literal[string] ], identifier[unique] = keyword[False] )
identifier[edges] . identifier[add_hash_index] ( identifier[fields] =[ literal[string] ], identifier[unique] = keyword[False] )
identifier[edges] . identifier[add_hash_index] ( identifier[fields] =[ literal[string] ], identifier[unique] = keyword[False] )
identifier[edges] . identifier[add_hash_index] ( identifier[fields] =[ literal[string] ], identifier[unique] = keyword[False] )
identifier[edges] . identifier[add_hash_index] ( identifier[fields] =[ literal[string] ], identifier[unique] = keyword[False] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[try] :
identifier[edgestore_db] . identifier[create_collection] ( identifier[edgestore_pipeline_name] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[try] :
identifier[edgestore_db] . identifier[create_collection] ( identifier[edgestore_pipeline_errors_name] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[try] :
identifier[edgestore_db] . identifier[create_collection] ( identifier[edgestore_pipeline_stats_name] )
keyword[except] identifier[arango] . identifier[exceptions] . identifier[CollectionCreateError] keyword[as] identifier[e] :
keyword[pass]
keyword[return] identifier[edgestore_db] | def get_edgestore_handle(client: arango.client.ArangoClient, username=None, password=None, edgestore_db_name: str=edgestore_db_name, edgestore_edges_name: str=edgestore_edges_name, edgestore_nodes_name: str=edgestore_nodes_name, edgestore_pipeline_name: str=edgestore_pipeline_name, edgestore_pipeline_stats_name: str=edgestore_pipeline_stats_name, edgestore_pipeline_errors_name: str=edgestore_pipeline_errors_name) -> arango.database.StandardDatabase:
"""Get Edgestore arangodb database handle
Args:
client (arango.client.ArangoClient): Description
username (None, optional): Description
password (None, optional): Description
edgestore_db_name (str, optional): Description
edgestore_edges_name (str, optional): Description
edgestore_nodes_name (str, optional): Description
Returns:
arango.database.StandardDatabase: Description
"""
(username, password) = get_user_creds(username, password)
sys_db = client.db('_system', username=username, password=password)
# Create a new database named "edgestore"
try:
if username and password:
edgestore_db = sys_db.create_database(name=edgestore_db_name, users=[{'username': username, 'password': password, 'active': True}]) # depends on [control=['if'], data=[]]
else:
edgestore_db = sys_db.create_database(name=edgestore_db_name) # depends on [control=['try'], data=[]]
except arango.exceptions.DatabaseCreateError:
if username and password:
edgestore_db = client.db(edgestore_db_name, username=username, password=password) # depends on [control=['if'], data=[]]
else:
edgestore_db = client.db(edgestore_db_name) # depends on [control=['except'], data=[]]
# TODO - add a skiplist index for _from? or _key? to be able to do paging?
# has_collection function doesn't seem to be working
# if not edgestore_db.has_collection(edgestore_nodes_name):
try:
nodes = edgestore_db.create_collection(edgestore_nodes_name, index_bucket_count=64)
nodes.add_hash_index(fields=['name'], unique=False)
nodes.add_hash_index(fields=['components'], unique=False) # add subject/object components as node properties # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
# if not edgestore_db.has_collection(edgestore_edges_name):
try:
edges = edgestore_db.create_collection(edgestore_edges_name, edge=True, index_bucket_count=64)
edges.add_hash_index(fields=['relation'], unique=False)
edges.add_hash_index(fields=['edge_types'], unique=False)
edges.add_hash_index(fields=['nanopub_id'], unique=False)
edges.add_hash_index(fields=['metadata.project'], unique=False)
edges.add_hash_index(fields=['annotations[*].id'], unique=False) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
# if not edgestore_db.has_collection(edgestore_pipeline_name):
try:
edgestore_db.create_collection(edgestore_pipeline_name) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
try:
edgestore_db.create_collection(edgestore_pipeline_errors_name) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
try:
edgestore_db.create_collection(edgestore_pipeline_stats_name) # depends on [control=['try'], data=[]]
except arango.exceptions.CollectionCreateError as e:
pass # depends on [control=['except'], data=[]]
return edgestore_db |
def data(self, data, part=False, dataset=''):
        """Parse data and update links.

        Parameters
        ----------
        data
            Data to parse.
        part : `bool`, optional
            True if data is partial (default: `False`).
        dataset : `str`, optional
            Dataset key prefix (default: '').
        """
        # Tokenize first, then parse, then persist the resulting links.
        scanned = self.scanner(data, part)
        new_links = self.parser(scanned, part, dataset)
        self.storage.add_links(new_links)
constant[Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: '').
]
variable[links] assign[=] call[name[self].parser, parameter[call[name[self].scanner, parameter[name[data], name[part]]], name[part], name[dataset]]]
call[name[self].storage.add_links, parameter[name[links]]] | keyword[def] identifier[data] ( identifier[self] , identifier[data] , identifier[part] = keyword[False] , identifier[dataset] = literal[string] ):
literal[string]
identifier[links] = identifier[self] . identifier[parser] ( identifier[self] . identifier[scanner] ( identifier[data] , identifier[part] ), identifier[part] , identifier[dataset] )
identifier[self] . identifier[storage] . identifier[add_links] ( identifier[links] ) | def data(self, data, part=False, dataset=''):
"""Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: '').
"""
links = self.parser(self.scanner(data, part), part, dataset)
self.storage.add_links(links) |
def _joinedQnames(self, _list):
"""util for returning a string joinin names of entities *used only in info command*"""
try:
s = "; ".join([p.qname for p in _list])
except:
s = "; ".join([p for p in _list])
return s | def function[_joinedQnames, parameter[self, _list]]:
constant[util for returning a string joinin names of entities *used only in info command*]
<ast.Try object at 0x7da1b10139a0>
return[name[s]] | keyword[def] identifier[_joinedQnames] ( identifier[self] , identifier[_list] ):
literal[string]
keyword[try] :
identifier[s] = literal[string] . identifier[join] ([ identifier[p] . identifier[qname] keyword[for] identifier[p] keyword[in] identifier[_list] ])
keyword[except] :
identifier[s] = literal[string] . identifier[join] ([ identifier[p] keyword[for] identifier[p] keyword[in] identifier[_list] ])
keyword[return] identifier[s] | def _joinedQnames(self, _list):
"""util for returning a string joinin names of entities *used only in info command*"""
try:
s = '; '.join([p.qname for p in _list]) # depends on [control=['try'], data=[]]
except:
s = '; '.join([p for p in _list]) # depends on [control=['except'], data=[]]
return s |
def report(self, msg, do_reset=False, file=sys.stdout):
        """Write ``msg`` followed by the elapsed seconds since ``self.start``.

        Parameters
        ----------
        msg : str
            Message to print.
        do_reset : bool, optional
            When True, reset ``self.start`` to the current time afterwards.
        file : file-like, optional
            Stream to write to (default: ``sys.stdout``).
        """
        # BUG FIX: the original used the Python-2-only ``print >> file``
        # statement, which is a SyntaxError on Python 3.  ``write`` with an
        # explicit trailing newline reproduces the same output on both.
        file.write("%s (%s s)\n" % (msg, time.time() - self.start))
        if do_reset:
            self.start = time.time()
constant[Print to stdout msg followed by the runtime.
When true, do_reset will result in a reset of start time.
]
tuple[[<ast.BinOp object at 0x7da207f996f0>, <ast.BinOp object at 0x7da207f99e10>]]
if name[do_reset] begin[:]
name[self].start assign[=] call[name[time].time, parameter[]] | keyword[def] identifier[report] ( identifier[self] , identifier[msg] , identifier[do_reset] = keyword[False] , identifier[file] = identifier[sys] . identifier[stdout] ):
literal[string]
identifier[print] >> identifier[file] , literal[string] %( identifier[msg] , identifier[time] . identifier[time] ()- identifier[self] . identifier[start] )
keyword[if] identifier[do_reset] :
identifier[self] . identifier[start] = identifier[time] . identifier[time] () | def report(self, msg, do_reset=False, file=sys.stdout):
"""Print to stdout msg followed by the runtime.
When true, do_reset will result in a reset of start time.
"""
(print >> file, '%s (%s s)' % (msg, time.time() - self.start))
if do_reset:
self.start = time.time() # depends on [control=['if'], data=[]] |
def list_team_repos(team_name, profile="github", ignore_cache=False):
    '''
    Gets the repo details for a given team as a dict from repo_name to repo details.
    Note that repo names are always in lower case.
    team_name
        The name of the team from which to list repos.
    profile
        The name of the profile configuration to use. Defaults to ``github``.
    ignore_cache
        Bypasses the use of cached team repos.
    CLI Example:
    .. code-block:: bash
        salt myminion github.list_team_repos 'team_name'
    .. versionadded:: 2016.11.0
    '''
    cached_team = get_team(team_name, profile=profile)
    if not cached_team:
        log.error('Team %s does not exist.', team_name)
        return False
    # Return from cache if available
    if cached_team.get('repos') and not ignore_cache:
        return cached_team.get('repos')
    try:
        client = _get_client(profile)
        organization = client.get_organization(
            _get_config_value(profile, 'org_name')
        )
        team = organization.get_team(cached_team['id'])
    except UnknownObjectException:
        log.exception('Resource not found: %s', cached_team['id'])
        # BUG FIX: the original fell through here with ``team`` unbound,
        # raising NameError at ``team.get_repos()`` below.  Bail out the
        # same way as for a missing team.
        return False
    try:
        repos = {}
        for repo in team.get_repos():
            # Report the highest permission the team has on the repo.
            permission = 'pull'
            if repo.permissions.admin:
                permission = 'admin'
            elif repo.permissions.push:
                permission = 'push'
            repos[repo.name.lower()] = {
                'permission': permission
            }
        # Refresh the cache entry for subsequent calls.
        cached_team['repos'] = repos
        return repos
    except UnknownObjectException:
        log.exception('Resource not found: %s', cached_team['id'])
        return []
constant[
Gets the repo details for a given team as a dict from repo_name to repo details.
Note that repo names are always in lower case.
team_name
The name of the team from which to list repos.
profile
The name of the profile configuration to use. Defaults to ``github``.
ignore_cache
Bypasses the use of cached team repos.
CLI Example:
.. code-block:: bash
salt myminion github.list_team_repos 'team_name'
.. versionadded:: 2016.11.0
]
variable[cached_team] assign[=] call[name[get_team], parameter[name[team_name]]]
if <ast.UnaryOp object at 0x7da1b2113d90> begin[:]
call[name[log].error, parameter[constant[Team %s does not exist.], name[team_name]]]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b2111de0> begin[:]
return[call[name[cached_team].get, parameter[constant[repos]]]]
<ast.Try object at 0x7da1b21108e0>
<ast.Try object at 0x7da1b21121a0> | keyword[def] identifier[list_team_repos] ( identifier[team_name] , identifier[profile] = literal[string] , identifier[ignore_cache] = keyword[False] ):
literal[string]
identifier[cached_team] = identifier[get_team] ( identifier[team_name] , identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[cached_team] :
identifier[log] . identifier[error] ( literal[string] , identifier[team_name] )
keyword[return] keyword[False]
keyword[if] identifier[cached_team] . identifier[get] ( literal[string] ) keyword[and] keyword[not] identifier[ignore_cache] :
keyword[return] identifier[cached_team] . identifier[get] ( literal[string] )
keyword[try] :
identifier[client] = identifier[_get_client] ( identifier[profile] )
identifier[organization] = identifier[client] . identifier[get_organization] (
identifier[_get_config_value] ( identifier[profile] , literal[string] )
)
identifier[team] = identifier[organization] . identifier[get_team] ( identifier[cached_team] [ literal[string] ])
keyword[except] identifier[UnknownObjectException] :
identifier[log] . identifier[exception] ( literal[string] , identifier[cached_team] [ literal[string] ])
keyword[try] :
identifier[repos] ={}
keyword[for] identifier[repo] keyword[in] identifier[team] . identifier[get_repos] ():
identifier[permission] = literal[string]
keyword[if] identifier[repo] . identifier[permissions] . identifier[admin] :
identifier[permission] = literal[string]
keyword[elif] identifier[repo] . identifier[permissions] . identifier[push] :
identifier[permission] = literal[string]
identifier[repos] [ identifier[repo] . identifier[name] . identifier[lower] ()]={
literal[string] : identifier[permission]
}
identifier[cached_team] [ literal[string] ]= identifier[repos]
keyword[return] identifier[repos]
keyword[except] identifier[UnknownObjectException] :
identifier[log] . identifier[exception] ( literal[string] , identifier[cached_team] [ literal[string] ])
keyword[return] [] | def list_team_repos(team_name, profile='github', ignore_cache=False):
"""
Gets the repo details for a given team as a dict from repo_name to repo details.
Note that repo names are always in lower case.
team_name
The name of the team from which to list repos.
profile
The name of the profile configuration to use. Defaults to ``github``.
ignore_cache
Bypasses the use of cached team repos.
CLI Example:
.. code-block:: bash
salt myminion github.list_team_repos 'team_name'
.. versionadded:: 2016.11.0
"""
cached_team = get_team(team_name, profile=profile)
if not cached_team:
log.error('Team %s does not exist.', team_name)
return False # depends on [control=['if'], data=[]]
# Return from cache if available
if cached_team.get('repos') and (not ignore_cache):
return cached_team.get('repos') # depends on [control=['if'], data=[]]
try:
client = _get_client(profile)
organization = client.get_organization(_get_config_value(profile, 'org_name'))
team = organization.get_team(cached_team['id']) # depends on [control=['try'], data=[]]
except UnknownObjectException:
log.exception('Resource not found: %s', cached_team['id']) # depends on [control=['except'], data=[]]
try:
repos = {}
for repo in team.get_repos():
permission = 'pull'
if repo.permissions.admin:
permission = 'admin' # depends on [control=['if'], data=[]]
elif repo.permissions.push:
permission = 'push' # depends on [control=['if'], data=[]]
repos[repo.name.lower()] = {'permission': permission} # depends on [control=['for'], data=['repo']]
cached_team['repos'] = repos
return repos # depends on [control=['try'], data=[]]
except UnknownObjectException:
log.exception('Resource not found: %s', cached_team['id'])
return [] # depends on [control=['except'], data=[]] |
def job(self, name):
"""
Method for searching specific job by it's name.
:param name: name of the job to search.
:return: found job or None.
:rtype: yagocd.resources.job.JobInstance
"""
for job in self.jobs():
if job.data.name == name:
return job | def function[job, parameter[self, name]]:
constant[
Method for searching specific job by it's name.
:param name: name of the job to search.
:return: found job or None.
:rtype: yagocd.resources.job.JobInstance
]
for taget[name[job]] in starred[call[name[self].jobs, parameter[]]] begin[:]
if compare[name[job].data.name equal[==] name[name]] begin[:]
return[name[job]] | keyword[def] identifier[job] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[job] keyword[in] identifier[self] . identifier[jobs] ():
keyword[if] identifier[job] . identifier[data] . identifier[name] == identifier[name] :
keyword[return] identifier[job] | def job(self, name):
"""
Method for searching specific job by it's name.
:param name: name of the job to search.
:return: found job or None.
:rtype: yagocd.resources.job.JobInstance
"""
for job in self.jobs():
if job.data.name == name:
return job # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['job']] |
def read(self, path):
        """Read EPW weather data from path.

        Header lines (an uppercase object name followed by a comma) refresh
        the matching entry in ``self._data``; every other line is parsed as
        a plain weather-data record.

        Args:
            path (str): path to read weather data from
        """
        header_pattern = re.compile(r"^([A-Z][A-Z/ \d]+),")
        with open(path, "r") as source:
            for raw_line in source:
                raw_line = raw_line.strip()
                header = header_pattern.search(raw_line)
                if header is None:
                    # Regular data row: delegate to a fresh WeatherData record.
                    record = WeatherData()
                    record.read(raw_line.split(','))
                    self.add_weatherdata(record)
                    continue
                object_name = header.group(1)
                if object_name in self._data:
                    # Known object: rebuild its data dict from the values
                    # that follow the name and the separating comma.
                    self._data[object_name] = self._create_datadict(object_name)
                    values = raw_line[len(object_name) + 1:].strip().split(',')
                    self._data[object_name].read(values)
constant[Read EPW weather data from path.
Args:
path (str): path to read weather data from
]
with call[name[open], parameter[name[path], constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
variable[match_obj_name] assign[=] call[name[re].search, parameter[constant[^([A-Z][A-Z/ \d]+),], name[line]]]
if compare[name[match_obj_name] is_not constant[None]] begin[:]
variable[internal_name] assign[=] call[name[match_obj_name].group, parameter[constant[1]]]
if compare[name[internal_name] in name[self]._data] begin[:]
call[name[self]._data][name[internal_name]] assign[=] call[name[self]._create_datadict, parameter[name[internal_name]]]
variable[data_line] assign[=] call[name[line]][<ast.Slice object at 0x7da18eb54100>]
variable[vals] assign[=] call[call[name[data_line].strip, parameter[]].split, parameter[constant[,]]]
call[call[name[self]._data][name[internal_name]].read, parameter[name[vals]]] | keyword[def] identifier[read] ( identifier[self] , identifier[path] ):
literal[string]
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[line] = identifier[line] . identifier[strip] ()
identifier[match_obj_name] = identifier[re] . identifier[search] ( literal[string] , identifier[line] )
keyword[if] identifier[match_obj_name] keyword[is] keyword[not] keyword[None] :
identifier[internal_name] = identifier[match_obj_name] . identifier[group] ( literal[int] )
keyword[if] identifier[internal_name] keyword[in] identifier[self] . identifier[_data] :
identifier[self] . identifier[_data] [ identifier[internal_name] ]= identifier[self] . identifier[_create_datadict] (
identifier[internal_name] )
identifier[data_line] = identifier[line] [ identifier[len] ( identifier[internal_name] )+ literal[int] :]
identifier[vals] = identifier[data_line] . identifier[strip] (). identifier[split] ( literal[string] )
identifier[self] . identifier[_data] [ identifier[internal_name] ]. identifier[read] ( identifier[vals] )
keyword[else] :
identifier[wd] = identifier[WeatherData] ()
identifier[wd] . identifier[read] ( identifier[line] . identifier[strip] (). identifier[split] ( literal[string] ))
identifier[self] . identifier[add_weatherdata] ( identifier[wd] ) | def read(self, path):
"""Read EPW weather data from path.
Args:
path (str): path to read weather data from
"""
with open(path, 'r') as f:
for line in f:
line = line.strip()
match_obj_name = re.search('^([A-Z][A-Z/ \\d]+),', line)
if match_obj_name is not None:
internal_name = match_obj_name.group(1)
if internal_name in self._data:
self._data[internal_name] = self._create_datadict(internal_name)
data_line = line[len(internal_name) + 1:]
vals = data_line.strip().split(',')
self._data[internal_name].read(vals) # depends on [control=['if'], data=['internal_name']] # depends on [control=['if'], data=['match_obj_name']]
else:
wd = WeatherData()
wd.read(line.strip().split(','))
self.add_weatherdata(wd) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] |
def train_fn(data_dir=None, output_dir=None,
             model_class=gin.REQUIRED, dataset=gin.REQUIRED,
             input_names=None, target_names=None,
             train_steps=1000, eval_steps=1, eval_frequency=100):
  """Train the given model on the given dataset.
  Args:
    data_dir: Directory where the data is located.
    output_dir: Directory where to put the logs and checkpoints.
    model_class: The model class to train.
    dataset: The name of the dataset to train on.
    input_names: List of strings with the names of the features on input.
    target_names: List of strings with the names of the target features.
    train_steps: for how many steps to train.
    eval_steps: for how many steps to do evaluation.
    eval_frequency: how often (every this many steps) to run evaluation.
  """
  train_data, eval_data, features_info, keys = train_and_eval_dataset(
      dataset, data_dir)
  # Default input/target feature names to what the dataset reports.
  if input_names is None:
    input_names = keys[0]
  if target_names is None:
    target_names = keys[1]
  # TODO(lukaszkaiser): The use of distribution strategy below fails like this:
  # .../keras/models.py", line 93, in _clone_functional_model
  #   for layer in model._input_layers:
  # AttributeError: 'BasicFcRelu' object has no attribute '_input_layers'
  # strategy = tf.distribute.MirroredStrategy()
  # with strategy.scope():
  model = model_class(features_info=features_info,
                      input_names=input_names, target_names=target_names)
  # Compiles/attaches the optimizer to the model (gin-configured).
  optimize_fn(model)
  train_batches = shuffle_and_batch_data(
      train_data, target_names, features_info, training=True)
  eval_batches = shuffle_and_batch_data(
      eval_data, target_names, features_info, training=False)
  # Need to run one training step just to get optimizer variables to load.
  model.fit(train_batches, epochs=1, steps_per_epoch=1)
  # Training loop.
  callbacks = []
  callbacks.append(tf.keras.callbacks.History())
  callbacks.append(tf.keras.callbacks.BaseLogger())
  last_epoch = 0
  if output_dir is not None:
    # Log to TensorBoard and checkpoint weights once per epoch.
    callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir))
    output_format = os.path.join(output_dir, "model-{epoch:05d}")
    callbacks.append(tf.keras.callbacks.ModelCheckpoint(
        filepath=output_format, save_weights_only=True))
    # Resume from the newest checkpoint in output_dir, if any.
    checkpoints = tf.gfile.Glob(os.path.join(output_dir, "model-*"))
    # Take basenames and strip the "model-" prefix.
    checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints]
    # Get epoch numbers from the filenames and sort to obtain last epoch.
    epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4]
    epoch_numbers.sort()
    if epoch_numbers:
      last_epoch = epoch_numbers[-1]
      saved_path = os.path.join(output_dir, "model-%05d" % last_epoch)
      # Loading requires the optimizer variables created by the warm-up
      # ``fit`` call above.
      model.load_weights(saved_path)
  # One Keras "epoch" corresponds to ``eval_frequency`` training steps,
  # with evaluation run between epochs.
  model.fit(train_batches,
            epochs=train_steps // eval_frequency,
            steps_per_epoch=eval_frequency,
            validation_data=eval_batches,
            validation_steps=eval_steps,
            initial_epoch=last_epoch,
            callbacks=callbacks)
constant[Train the given model on the given dataset.
Args:
data_dir: Directory where the data is located.
output_dir: Directory where to put the logs and checkpoints.
model_class: The model class to train.
dataset: The name of the dataset to train on.
input_names: List of strings with the names of the features on input.
target_names: List of strings with the names of the target features.
train_steps: for how many steps to train.
eval_steps: for how many steps to do evaluation.
eval_frequency: how often (every this many steps) to run evaluation.
]
<ast.Tuple object at 0x7da204567640> assign[=] call[name[train_and_eval_dataset], parameter[name[dataset], name[data_dir]]]
if compare[name[input_names] is constant[None]] begin[:]
variable[input_names] assign[=] call[name[keys]][constant[0]]
if compare[name[target_names] is constant[None]] begin[:]
variable[target_names] assign[=] call[name[keys]][constant[1]]
variable[model] assign[=] call[name[model_class], parameter[]]
call[name[optimize_fn], parameter[name[model]]]
variable[train_batches] assign[=] call[name[shuffle_and_batch_data], parameter[name[train_data], name[target_names], name[features_info]]]
variable[eval_batches] assign[=] call[name[shuffle_and_batch_data], parameter[name[eval_data], name[target_names], name[features_info]]]
call[name[model].fit, parameter[name[train_batches]]]
variable[callbacks] assign[=] list[[]]
call[name[callbacks].append, parameter[call[name[tf].keras.callbacks.History, parameter[]]]]
call[name[callbacks].append, parameter[call[name[tf].keras.callbacks.BaseLogger, parameter[]]]]
variable[last_epoch] assign[=] constant[0]
if compare[name[output_dir] is_not constant[None]] begin[:]
call[name[callbacks].append, parameter[call[name[tf].keras.callbacks.TensorBoard, parameter[]]]]
variable[output_format] assign[=] call[name[os].path.join, parameter[name[output_dir], constant[model-{epoch:05d}]]]
call[name[callbacks].append, parameter[call[name[tf].keras.callbacks.ModelCheckpoint, parameter[]]]]
variable[checkpoints] assign[=] call[name[tf].gfile.Glob, parameter[call[name[os].path.join, parameter[name[output_dir], constant[model-*]]]]]
variable[checkpoints] assign[=] <ast.ListComp object at 0x7da2045656c0>
variable[epoch_numbers] assign[=] <ast.ListComp object at 0x7da204566800>
call[name[epoch_numbers].sort, parameter[]]
if name[epoch_numbers] begin[:]
variable[last_epoch] assign[=] call[name[epoch_numbers]][<ast.UnaryOp object at 0x7da1b205afb0>]
variable[saved_path] assign[=] call[name[os].path.join, parameter[name[output_dir], binary_operation[constant[model-%05d] <ast.Mod object at 0x7da2590d6920> name[last_epoch]]]]
call[name[model].load_weights, parameter[name[saved_path]]]
call[name[model].fit, parameter[name[train_batches]]] | keyword[def] identifier[train_fn] ( identifier[data_dir] = keyword[None] , identifier[output_dir] = keyword[None] ,
identifier[model_class] = identifier[gin] . identifier[REQUIRED] , identifier[dataset] = identifier[gin] . identifier[REQUIRED] ,
identifier[input_names] = keyword[None] , identifier[target_names] = keyword[None] ,
identifier[train_steps] = literal[int] , identifier[eval_steps] = literal[int] , identifier[eval_frequency] = literal[int] ):
literal[string]
identifier[train_data] , identifier[eval_data] , identifier[features_info] , identifier[keys] = identifier[train_and_eval_dataset] (
identifier[dataset] , identifier[data_dir] )
keyword[if] identifier[input_names] keyword[is] keyword[None] :
identifier[input_names] = identifier[keys] [ literal[int] ]
keyword[if] identifier[target_names] keyword[is] keyword[None] :
identifier[target_names] = identifier[keys] [ literal[int] ]
identifier[model] = identifier[model_class] ( identifier[features_info] = identifier[features_info] ,
identifier[input_names] = identifier[input_names] , identifier[target_names] = identifier[target_names] )
identifier[optimize_fn] ( identifier[model] )
identifier[train_batches] = identifier[shuffle_and_batch_data] (
identifier[train_data] , identifier[target_names] , identifier[features_info] , identifier[training] = keyword[True] )
identifier[eval_batches] = identifier[shuffle_and_batch_data] (
identifier[eval_data] , identifier[target_names] , identifier[features_info] , identifier[training] = keyword[False] )
identifier[model] . identifier[fit] ( identifier[train_batches] , identifier[epochs] = literal[int] , identifier[steps_per_epoch] = literal[int] )
identifier[callbacks] =[]
identifier[callbacks] . identifier[append] ( identifier[tf] . identifier[keras] . identifier[callbacks] . identifier[History] ())
identifier[callbacks] . identifier[append] ( identifier[tf] . identifier[keras] . identifier[callbacks] . identifier[BaseLogger] ())
identifier[last_epoch] = literal[int]
keyword[if] identifier[output_dir] keyword[is] keyword[not] keyword[None] :
identifier[callbacks] . identifier[append] ( identifier[tf] . identifier[keras] . identifier[callbacks] . identifier[TensorBoard] ( identifier[log_dir] = identifier[output_dir] ))
identifier[output_format] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , literal[string] )
identifier[callbacks] . identifier[append] ( identifier[tf] . identifier[keras] . identifier[callbacks] . identifier[ModelCheckpoint] (
identifier[filepath] = identifier[output_format] , identifier[save_weights_only] = keyword[True] ))
identifier[checkpoints] = identifier[tf] . identifier[gfile] . identifier[Glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , literal[string] ))
identifier[checkpoints] =[ identifier[os] . identifier[path] . identifier[basename] ( identifier[ckpt] )[ literal[int] :] keyword[for] identifier[ckpt] keyword[in] identifier[checkpoints] ]
identifier[epoch_numbers] =[ identifier[int] ( identifier[ckpt] [: literal[int] ]) keyword[for] identifier[ckpt] keyword[in] identifier[checkpoints] keyword[if] identifier[len] ( identifier[ckpt] )> literal[int] ]
identifier[epoch_numbers] . identifier[sort] ()
keyword[if] identifier[epoch_numbers] :
identifier[last_epoch] = identifier[epoch_numbers] [- literal[int] ]
identifier[saved_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , literal[string] % identifier[last_epoch] )
identifier[model] . identifier[load_weights] ( identifier[saved_path] )
identifier[model] . identifier[fit] ( identifier[train_batches] ,
identifier[epochs] = identifier[train_steps] // identifier[eval_frequency] ,
identifier[steps_per_epoch] = identifier[eval_frequency] ,
identifier[validation_data] = identifier[eval_batches] ,
identifier[validation_steps] = identifier[eval_steps] ,
identifier[initial_epoch] = identifier[last_epoch] ,
identifier[callbacks] = identifier[callbacks] ) | def train_fn(data_dir=None, output_dir=None, model_class=gin.REQUIRED, dataset=gin.REQUIRED, input_names=None, target_names=None, train_steps=1000, eval_steps=1, eval_frequency=100):
"""Train the given model on the given dataset.
Args:
data_dir: Directory where the data is located.
output_dir: Directory where to put the logs and checkpoints.
model_class: The model class to train.
dataset: The name of the dataset to train on.
input_names: List of strings with the names of the features on input.
target_names: List of strings with the names of the target features.
train_steps: for how many steps to train.
eval_steps: for how many steps to do evaluation.
eval_frequency: how often (every this many steps) to run evaluation.
"""
(train_data, eval_data, features_info, keys) = train_and_eval_dataset(dataset, data_dir)
if input_names is None:
input_names = keys[0] # depends on [control=['if'], data=['input_names']]
if target_names is None:
target_names = keys[1] # depends on [control=['if'], data=['target_names']]
# TODO(lukaszkaiser): The use of distribution strategy below fails like this:
# .../keras/models.py", line 93, in _clone_functional_model
# for layer in model._input_layers:
# AttributeError: 'BasicFcRelu' object has no attribute '_input_layers'
# strategy = tf.distribute.MirroredStrategy()
# with strategy.scope():
model = model_class(features_info=features_info, input_names=input_names, target_names=target_names)
optimize_fn(model)
train_batches = shuffle_and_batch_data(train_data, target_names, features_info, training=True)
eval_batches = shuffle_and_batch_data(eval_data, target_names, features_info, training=False)
# Need to run one training step just to get optimizer variables to load.
model.fit(train_batches, epochs=1, steps_per_epoch=1)
# Training loop.
callbacks = []
callbacks.append(tf.keras.callbacks.History())
callbacks.append(tf.keras.callbacks.BaseLogger())
last_epoch = 0
if output_dir is not None:
callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir))
output_format = os.path.join(output_dir, 'model-{epoch:05d}')
callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=output_format, save_weights_only=True))
checkpoints = tf.gfile.Glob(os.path.join(output_dir, 'model-*'))
# Take basenames and strip the "model-" prefix.
checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints]
# Get epoch numbers from the filenames and sort to obtain last epoch.
epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4]
epoch_numbers.sort()
if epoch_numbers:
last_epoch = epoch_numbers[-1]
saved_path = os.path.join(output_dir, 'model-%05d' % last_epoch)
model.load_weights(saved_path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['output_dir']]
model.fit(train_batches, epochs=train_steps // eval_frequency, steps_per_epoch=eval_frequency, validation_data=eval_batches, validation_steps=eval_steps, initial_epoch=last_epoch, callbacks=callbacks) |
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set) | def function[as_set, parameter[self, preserve_casing]]:
constant[Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
]
if name[preserve_casing] begin[:]
return[call[name[set], parameter[name[self]._headers]]]
return[call[name[set], parameter[name[self]._set]]] | keyword[def] identifier[as_set] ( identifier[self] , identifier[preserve_casing] = keyword[False] ):
literal[string]
keyword[if] identifier[preserve_casing] :
keyword[return] identifier[set] ( identifier[self] . identifier[_headers] )
keyword[return] identifier[set] ( identifier[self] . identifier[_set] ) | def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers) # depends on [control=['if'], data=[]]
return set(self._set) |
def _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
"""
Encrypts a value using an RSA public key via CNG
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
flags = BcryptConst.BCRYPT_PAD_PKCS1
if rsa_oaep_padding is True:
flags = BcryptConst.BCRYPT_PAD_OAEP
padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_OAEP_PADDING_INFO')
padding_info_struct = unwrap(padding_info_struct_pointer)
# This has to be assigned to a variable to prevent cffi from gc'ing it
hash_buffer = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM)
padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
padding_info_struct.pbLabel = null()
padding_info_struct.cbLabel = 0
padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer)
else:
padding_info = null()
out_len = new(bcrypt, 'ULONG *')
res = bcrypt.BCryptEncrypt(
certificate_or_public_key.key_handle,
data,
len(data),
padding_info,
null(),
0,
null(),
0,
out_len,
flags
)
handle_error(res)
buffer_len = deref(out_len)
buffer = buffer_from_bytes(buffer_len)
res = bcrypt.BCryptEncrypt(
certificate_or_public_key.key_handle,
data,
len(data),
padding_info,
null(),
0,
buffer,
buffer_len,
out_len,
flags
)
handle_error(res)
return bytes_from_buffer(buffer, deref(out_len)) | def function[_bcrypt_encrypt, parameter[certificate_or_public_key, data, rsa_oaep_padding]]:
constant[
Encrypts a value using an RSA public key via CNG
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
]
variable[flags] assign[=] name[BcryptConst].BCRYPT_PAD_PKCS1
if compare[name[rsa_oaep_padding] is constant[True]] begin[:]
variable[flags] assign[=] name[BcryptConst].BCRYPT_PAD_OAEP
variable[padding_info_struct_pointer] assign[=] call[name[struct], parameter[name[bcrypt], constant[BCRYPT_OAEP_PADDING_INFO]]]
variable[padding_info_struct] assign[=] call[name[unwrap], parameter[name[padding_info_struct_pointer]]]
variable[hash_buffer] assign[=] call[name[buffer_from_unicode], parameter[name[BcryptConst].BCRYPT_SHA1_ALGORITHM]]
name[padding_info_struct].pszAlgId assign[=] call[name[cast], parameter[name[bcrypt], constant[wchar_t *], name[hash_buffer]]]
name[padding_info_struct].pbLabel assign[=] call[name[null], parameter[]]
name[padding_info_struct].cbLabel assign[=] constant[0]
variable[padding_info] assign[=] call[name[cast], parameter[name[bcrypt], constant[void *], name[padding_info_struct_pointer]]]
variable[out_len] assign[=] call[name[new], parameter[name[bcrypt], constant[ULONG *]]]
variable[res] assign[=] call[name[bcrypt].BCryptEncrypt, parameter[name[certificate_or_public_key].key_handle, name[data], call[name[len], parameter[name[data]]], name[padding_info], call[name[null], parameter[]], constant[0], call[name[null], parameter[]], constant[0], name[out_len], name[flags]]]
call[name[handle_error], parameter[name[res]]]
variable[buffer_len] assign[=] call[name[deref], parameter[name[out_len]]]
variable[buffer] assign[=] call[name[buffer_from_bytes], parameter[name[buffer_len]]]
variable[res] assign[=] call[name[bcrypt].BCryptEncrypt, parameter[name[certificate_or_public_key].key_handle, name[data], call[name[len], parameter[name[data]]], name[padding_info], call[name[null], parameter[]], constant[0], name[buffer], name[buffer_len], name[out_len], name[flags]]]
call[name[handle_error], parameter[name[res]]]
return[call[name[bytes_from_buffer], parameter[name[buffer], call[name[deref], parameter[name[out_len]]]]]] | keyword[def] identifier[_bcrypt_encrypt] ( identifier[certificate_or_public_key] , identifier[data] , identifier[rsa_oaep_padding] = keyword[False] ):
literal[string]
identifier[flags] = identifier[BcryptConst] . identifier[BCRYPT_PAD_PKCS1]
keyword[if] identifier[rsa_oaep_padding] keyword[is] keyword[True] :
identifier[flags] = identifier[BcryptConst] . identifier[BCRYPT_PAD_OAEP]
identifier[padding_info_struct_pointer] = identifier[struct] ( identifier[bcrypt] , literal[string] )
identifier[padding_info_struct] = identifier[unwrap] ( identifier[padding_info_struct_pointer] )
identifier[hash_buffer] = identifier[buffer_from_unicode] ( identifier[BcryptConst] . identifier[BCRYPT_SHA1_ALGORITHM] )
identifier[padding_info_struct] . identifier[pszAlgId] = identifier[cast] ( identifier[bcrypt] , literal[string] , identifier[hash_buffer] )
identifier[padding_info_struct] . identifier[pbLabel] = identifier[null] ()
identifier[padding_info_struct] . identifier[cbLabel] = literal[int]
identifier[padding_info] = identifier[cast] ( identifier[bcrypt] , literal[string] , identifier[padding_info_struct_pointer] )
keyword[else] :
identifier[padding_info] = identifier[null] ()
identifier[out_len] = identifier[new] ( identifier[bcrypt] , literal[string] )
identifier[res] = identifier[bcrypt] . identifier[BCryptEncrypt] (
identifier[certificate_or_public_key] . identifier[key_handle] ,
identifier[data] ,
identifier[len] ( identifier[data] ),
identifier[padding_info] ,
identifier[null] (),
literal[int] ,
identifier[null] (),
literal[int] ,
identifier[out_len] ,
identifier[flags]
)
identifier[handle_error] ( identifier[res] )
identifier[buffer_len] = identifier[deref] ( identifier[out_len] )
identifier[buffer] = identifier[buffer_from_bytes] ( identifier[buffer_len] )
identifier[res] = identifier[bcrypt] . identifier[BCryptEncrypt] (
identifier[certificate_or_public_key] . identifier[key_handle] ,
identifier[data] ,
identifier[len] ( identifier[data] ),
identifier[padding_info] ,
identifier[null] (),
literal[int] ,
identifier[buffer] ,
identifier[buffer_len] ,
identifier[out_len] ,
identifier[flags]
)
identifier[handle_error] ( identifier[res] )
keyword[return] identifier[bytes_from_buffer] ( identifier[buffer] , identifier[deref] ( identifier[out_len] )) | def _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
"""
Encrypts a value using an RSA public key via CNG
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
flags = BcryptConst.BCRYPT_PAD_PKCS1
if rsa_oaep_padding is True:
flags = BcryptConst.BCRYPT_PAD_OAEP
padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_OAEP_PADDING_INFO')
padding_info_struct = unwrap(padding_info_struct_pointer)
# This has to be assigned to a variable to prevent cffi from gc'ing it
hash_buffer = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM)
padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
padding_info_struct.pbLabel = null()
padding_info_struct.cbLabel = 0
padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer) # depends on [control=['if'], data=[]]
else:
padding_info = null()
out_len = new(bcrypt, 'ULONG *')
res = bcrypt.BCryptEncrypt(certificate_or_public_key.key_handle, data, len(data), padding_info, null(), 0, null(), 0, out_len, flags)
handle_error(res)
buffer_len = deref(out_len)
buffer = buffer_from_bytes(buffer_len)
res = bcrypt.BCryptEncrypt(certificate_or_public_key.key_handle, data, len(data), padding_info, null(), 0, buffer, buffer_len, out_len, flags)
handle_error(res)
return bytes_from_buffer(buffer, deref(out_len)) |
def _build_lv_grid_dict(network):
"""Creates dict of LV grids
LV grid ids are used as keys, LV grid references as values.
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Returns
-------
:obj:`dict`
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
"""
lv_grid_dict = {}
for lv_grid in network.mv_grid.lv_grids:
lv_grid_dict[lv_grid.id] = lv_grid
return lv_grid_dict | def function[_build_lv_grid_dict, parameter[network]]:
constant[Creates dict of LV grids
LV grid ids are used as keys, LV grid references as values.
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Returns
-------
:obj:`dict`
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
]
variable[lv_grid_dict] assign[=] dictionary[[], []]
for taget[name[lv_grid]] in starred[name[network].mv_grid.lv_grids] begin[:]
call[name[lv_grid_dict]][name[lv_grid].id] assign[=] name[lv_grid]
return[name[lv_grid_dict]] | keyword[def] identifier[_build_lv_grid_dict] ( identifier[network] ):
literal[string]
identifier[lv_grid_dict] ={}
keyword[for] identifier[lv_grid] keyword[in] identifier[network] . identifier[mv_grid] . identifier[lv_grids] :
identifier[lv_grid_dict] [ identifier[lv_grid] . identifier[id] ]= identifier[lv_grid]
keyword[return] identifier[lv_grid_dict] | def _build_lv_grid_dict(network):
"""Creates dict of LV grids
LV grid ids are used as keys, LV grid references as values.
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Returns
-------
:obj:`dict`
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
"""
lv_grid_dict = {}
for lv_grid in network.mv_grid.lv_grids:
lv_grid_dict[lv_grid.id] = lv_grid # depends on [control=['for'], data=['lv_grid']]
return lv_grid_dict |
def apply_rf_classifier(classifier,
varfeaturesdir,
outpickle,
maxobjects=None):
'''This applys an RF classifier trained using `train_rf_classifier`
to varfeatures pickles in `varfeaturesdir`.
Parameters
----------
classifier : dict or str
This is the output dict or pickle created by `get_rf_classifier`. This
will contain a `features_name` key that will be used to collect the same
features used to train the classifier from the varfeatures pickles in
varfeaturesdir.
varfeaturesdir : str
The directory containing the varfeatures pickles for objects that will
be classified by the trained `classifier`.
outpickle : str
This is a filename for the pickle that will be written containing the
result dict from this function.
maxobjects : int
This sets the number of objects to process in `varfeaturesdir`.
Returns
-------
dict
The classification results after running the trained `classifier` as
returned as a dict. This contains predicted labels and their prediction
probabilities.
'''
if isinstance(classifier,str) and os.path.exists(classifier):
with open(classifier,'rb') as infd:
clfdict = pickle.load(infd)
elif isinstance(classifier, dict):
clfdict = classifier
else:
LOGERROR("can't figure out the input classifier arg")
return None
# get the features to extract from clfdict
if 'feature_names' not in clfdict:
LOGERROR("feature_names not present in classifier input, "
"can't figure out which ones to extract from "
"varfeature pickles in %s" % varfeaturesdir)
return None
# get the feature labeltype, pklglob, and maxobjects from classifier's
# collect_kwargs elem.
featurestouse = clfdict['feature_names']
pklglob = clfdict['collect_kwargs']['pklglob']
magcol = clfdict['magcol']
# extract the features used by the classifier from the varfeatures pickles
# in varfeaturesdir using the pklglob provided
featfile = os.path.join(
os.path.dirname(outpickle),
'actual-collected-features.pkl'
)
features = collect_nonperiodic_features(
varfeaturesdir,
magcol,
featfile,
pklglob=pklglob,
featurestouse=featurestouse,
maxobjects=maxobjects
)
# now use the trained classifier on these features
bestclf = clfdict['best_classifier']
predicted_labels = bestclf.predict(features['features_array'])
# FIXME: do we need to use the probability calibration curves to fix these
# probabilities? probably. figure out how to do this.
predicted_label_probs = bestclf.predict_proba(
features['features_array']
)
outdict = {
'features':features,
'featfile':featfile,
'classifier':clfdict,
'predicted_labels':predicted_labels,
'predicted_label_probs':predicted_label_probs,
}
with open(outpickle,'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict | def function[apply_rf_classifier, parameter[classifier, varfeaturesdir, outpickle, maxobjects]]:
constant[This applys an RF classifier trained using `train_rf_classifier`
to varfeatures pickles in `varfeaturesdir`.
Parameters
----------
classifier : dict or str
This is the output dict or pickle created by `get_rf_classifier`. This
will contain a `features_name` key that will be used to collect the same
features used to train the classifier from the varfeatures pickles in
varfeaturesdir.
varfeaturesdir : str
The directory containing the varfeatures pickles for objects that will
be classified by the trained `classifier`.
outpickle : str
This is a filename for the pickle that will be written containing the
result dict from this function.
maxobjects : int
This sets the number of objects to process in `varfeaturesdir`.
Returns
-------
dict
The classification results after running the trained `classifier` as
returned as a dict. This contains predicted labels and their prediction
probabilities.
]
if <ast.BoolOp object at 0x7da2041d9f60> begin[:]
with call[name[open], parameter[name[classifier], constant[rb]]] begin[:]
variable[clfdict] assign[=] call[name[pickle].load, parameter[name[infd]]]
if compare[constant[feature_names] <ast.NotIn object at 0x7da2590d7190> name[clfdict]] begin[:]
call[name[LOGERROR], parameter[binary_operation[constant[feature_names not present in classifier input, can't figure out which ones to extract from varfeature pickles in %s] <ast.Mod object at 0x7da2590d6920> name[varfeaturesdir]]]]
return[constant[None]]
variable[featurestouse] assign[=] call[name[clfdict]][constant[feature_names]]
variable[pklglob] assign[=] call[call[name[clfdict]][constant[collect_kwargs]]][constant[pklglob]]
variable[magcol] assign[=] call[name[clfdict]][constant[magcol]]
variable[featfile] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[outpickle]]], constant[actual-collected-features.pkl]]]
variable[features] assign[=] call[name[collect_nonperiodic_features], parameter[name[varfeaturesdir], name[magcol], name[featfile]]]
variable[bestclf] assign[=] call[name[clfdict]][constant[best_classifier]]
variable[predicted_labels] assign[=] call[name[bestclf].predict, parameter[call[name[features]][constant[features_array]]]]
variable[predicted_label_probs] assign[=] call[name[bestclf].predict_proba, parameter[call[name[features]][constant[features_array]]]]
variable[outdict] assign[=] dictionary[[<ast.Constant object at 0x7da20c6ab5b0>, <ast.Constant object at 0x7da20c6aa6e0>, <ast.Constant object at 0x7da20c6ab640>, <ast.Constant object at 0x7da20c6abf70>, <ast.Constant object at 0x7da20c6aabf0>], [<ast.Name object at 0x7da20c6a8880>, <ast.Name object at 0x7da20c6a8910>, <ast.Name object at 0x7da20c6ab340>, <ast.Name object at 0x7da20c6aadd0>, <ast.Name object at 0x7da20c6aa3b0>]]
with call[name[open], parameter[name[outpickle], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[outdict], name[outfd], name[pickle].HIGHEST_PROTOCOL]]
return[name[outdict]] | keyword[def] identifier[apply_rf_classifier] ( identifier[classifier] ,
identifier[varfeaturesdir] ,
identifier[outpickle] ,
identifier[maxobjects] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[classifier] , identifier[str] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[classifier] ):
keyword[with] identifier[open] ( identifier[classifier] , literal[string] ) keyword[as] identifier[infd] :
identifier[clfdict] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[elif] identifier[isinstance] ( identifier[classifier] , identifier[dict] ):
identifier[clfdict] = identifier[classifier]
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] keyword[None]
keyword[if] literal[string] keyword[not] keyword[in] identifier[clfdict] :
identifier[LOGERROR] ( literal[string]
literal[string]
literal[string] % identifier[varfeaturesdir] )
keyword[return] keyword[None]
identifier[featurestouse] = identifier[clfdict] [ literal[string] ]
identifier[pklglob] = identifier[clfdict] [ literal[string] ][ literal[string] ]
identifier[magcol] = identifier[clfdict] [ literal[string] ]
identifier[featfile] = identifier[os] . identifier[path] . identifier[join] (
identifier[os] . identifier[path] . identifier[dirname] ( identifier[outpickle] ),
literal[string]
)
identifier[features] = identifier[collect_nonperiodic_features] (
identifier[varfeaturesdir] ,
identifier[magcol] ,
identifier[featfile] ,
identifier[pklglob] = identifier[pklglob] ,
identifier[featurestouse] = identifier[featurestouse] ,
identifier[maxobjects] = identifier[maxobjects]
)
identifier[bestclf] = identifier[clfdict] [ literal[string] ]
identifier[predicted_labels] = identifier[bestclf] . identifier[predict] ( identifier[features] [ literal[string] ])
identifier[predicted_label_probs] = identifier[bestclf] . identifier[predict_proba] (
identifier[features] [ literal[string] ]
)
identifier[outdict] ={
literal[string] : identifier[features] ,
literal[string] : identifier[featfile] ,
literal[string] : identifier[clfdict] ,
literal[string] : identifier[predicted_labels] ,
literal[string] : identifier[predicted_label_probs] ,
}
keyword[with] identifier[open] ( identifier[outpickle] , literal[string] ) keyword[as] identifier[outfd] :
identifier[pickle] . identifier[dump] ( identifier[outdict] , identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] )
keyword[return] identifier[outdict] | def apply_rf_classifier(classifier, varfeaturesdir, outpickle, maxobjects=None):
"""This applys an RF classifier trained using `train_rf_classifier`
to varfeatures pickles in `varfeaturesdir`.
Parameters
----------
classifier : dict or str
This is the output dict or pickle created by `get_rf_classifier`. This
will contain a `features_name` key that will be used to collect the same
features used to train the classifier from the varfeatures pickles in
varfeaturesdir.
varfeaturesdir : str
The directory containing the varfeatures pickles for objects that will
be classified by the trained `classifier`.
outpickle : str
This is a filename for the pickle that will be written containing the
result dict from this function.
maxobjects : int
This sets the number of objects to process in `varfeaturesdir`.
Returns
-------
dict
The classification results after running the trained `classifier` as
returned as a dict. This contains predicted labels and their prediction
probabilities.
"""
if isinstance(classifier, str) and os.path.exists(classifier):
with open(classifier, 'rb') as infd:
clfdict = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['if'], data=[]]
elif isinstance(classifier, dict):
clfdict = classifier # depends on [control=['if'], data=[]]
else:
LOGERROR("can't figure out the input classifier arg")
return None
# get the features to extract from clfdict
if 'feature_names' not in clfdict:
LOGERROR("feature_names not present in classifier input, can't figure out which ones to extract from varfeature pickles in %s" % varfeaturesdir)
return None # depends on [control=['if'], data=[]]
# get the feature labeltype, pklglob, and maxobjects from classifier's
# collect_kwargs elem.
featurestouse = clfdict['feature_names']
pklglob = clfdict['collect_kwargs']['pklglob']
magcol = clfdict['magcol']
# extract the features used by the classifier from the varfeatures pickles
# in varfeaturesdir using the pklglob provided
featfile = os.path.join(os.path.dirname(outpickle), 'actual-collected-features.pkl')
features = collect_nonperiodic_features(varfeaturesdir, magcol, featfile, pklglob=pklglob, featurestouse=featurestouse, maxobjects=maxobjects)
# now use the trained classifier on these features
bestclf = clfdict['best_classifier']
predicted_labels = bestclf.predict(features['features_array'])
# FIXME: do we need to use the probability calibration curves to fix these
# probabilities? probably. figure out how to do this.
predicted_label_probs = bestclf.predict_proba(features['features_array'])
outdict = {'features': features, 'featfile': featfile, 'classifier': clfdict, 'predicted_labels': predicted_labels, 'predicted_label_probs': predicted_label_probs}
with open(outpickle, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']]
return outdict |
def _chk_qualifier(self, qualifiers, flds, lnum):
"""Check that qualifiers are expected values."""
# http://geneontology.org/page/go-annotation-conventions#qual
for qual in qualifiers:
if qual not in AnnoReaderBase.exp_qualifiers:
errname = 'UNEXPECTED QUALIFIER({QUAL})'.format(QUAL=qual)
self.illegal_lines[errname].append((lnum, "\t".join(flds))) | def function[_chk_qualifier, parameter[self, qualifiers, flds, lnum]]:
constant[Check that qualifiers are expected values.]
for taget[name[qual]] in starred[name[qualifiers]] begin[:]
if compare[name[qual] <ast.NotIn object at 0x7da2590d7190> name[AnnoReaderBase].exp_qualifiers] begin[:]
variable[errname] assign[=] call[constant[UNEXPECTED QUALIFIER({QUAL})].format, parameter[]]
call[call[name[self].illegal_lines][name[errname]].append, parameter[tuple[[<ast.Name object at 0x7da20e9557b0>, <ast.Call object at 0x7da20e956dd0>]]]] | keyword[def] identifier[_chk_qualifier] ( identifier[self] , identifier[qualifiers] , identifier[flds] , identifier[lnum] ):
literal[string]
keyword[for] identifier[qual] keyword[in] identifier[qualifiers] :
keyword[if] identifier[qual] keyword[not] keyword[in] identifier[AnnoReaderBase] . identifier[exp_qualifiers] :
identifier[errname] = literal[string] . identifier[format] ( identifier[QUAL] = identifier[qual] )
identifier[self] . identifier[illegal_lines] [ identifier[errname] ]. identifier[append] (( identifier[lnum] , literal[string] . identifier[join] ( identifier[flds] ))) | def _chk_qualifier(self, qualifiers, flds, lnum):
"""Check that qualifiers are expected values."""
# http://geneontology.org/page/go-annotation-conventions#qual
for qual in qualifiers:
if qual not in AnnoReaderBase.exp_qualifiers:
errname = 'UNEXPECTED QUALIFIER({QUAL})'.format(QUAL=qual)
self.illegal_lines[errname].append((lnum, '\t'.join(flds))) # depends on [control=['if'], data=['qual']] # depends on [control=['for'], data=['qual']] |
def add_t(self, text):
    """
    Return a newly added ``<w:t>`` element containing *text*.

    When *text* has leading or trailing whitespace, the element is marked
    with ``xml:space="preserve"`` so the whitespace survives XML
    serialization round-trips.
    """
    t = self._add_t(text=text)
    # strip() shortening the string means it carries edge whitespace.
    if len(text.strip()) < len(text):
        t.set(qn('xml:space'), 'preserve')
    return t | def function[add_t, parameter[self, text]]:
constant[
Return a newly added ``<w:t>`` element containing *text*.
]
variable[t] assign[=] call[name[self]._add_t, parameter[]]
if compare[call[name[len], parameter[call[name[text].strip, parameter[]]]] less[<] call[name[len], parameter[name[text]]]] begin[:]
call[name[t].set, parameter[call[name[qn], parameter[constant[xml:space]]], constant[preserve]]]
return[name[t]] | keyword[def] identifier[add_t] ( identifier[self] , identifier[text] ):
literal[string]
identifier[t] = identifier[self] . identifier[_add_t] ( identifier[text] = identifier[text] )
keyword[if] identifier[len] ( identifier[text] . identifier[strip] ())< identifier[len] ( identifier[text] ):
identifier[t] . identifier[set] ( identifier[qn] ( literal[string] ), literal[string] )
keyword[return] identifier[t] | def add_t(self, text):
"""
Return a newly added ``<w:t>`` element containing *text*.
"""
t = self._add_t(text=text)
if len(text.strip()) < len(text):
t.set(qn('xml:space'), 'preserve') # depends on [control=['if'], data=[]]
return t |
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalization with l2 norm.

  Normalizes `x` over its last axis by the sum of squared deviations from
  the mean, then applies learned per-channel scale and bias variables
  ("l2_norm_scale" / "l2_norm_bias").

  Args:
    x: a Tensor, normalized over its last dimension.
    filters: size of the last dimension; inferred from `x` when None.
    epsilon: small constant added before the reciprocal square root.
    name: variable-scope name (defaults to "l2_norm").
    reuse: whether to reuse variables inside the scope.

  Returns:
    A Tensor with the same shape as `x`.
  """
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "l2_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "l2_norm_bias", [filters], initializer=tf.zeros_initializer())
    # Cast constants/variables to x's dtype (e.g. for bfloat16 inputs).
    epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    l2norm = tf.reduce_sum(
        tf.squared_difference(x, mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
    return norm_x * scale + bias | def function[l2_norm, parameter[x, filters, epsilon, name, reuse]]:
constant[Layer normalization with l2 norm.]
if compare[name[filters] is constant[None]] begin[:]
variable[filters] assign[=] call[call[name[shape_list], parameter[name[x]]]][<ast.UnaryOp object at 0x7da18dc04a60>]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
variable[scale] assign[=] call[name[tf].get_variable, parameter[constant[l2_norm_scale], list[[<ast.Name object at 0x7da18dc05f90>]]]]
variable[bias] assign[=] call[name[tf].get_variable, parameter[constant[l2_norm_bias], list[[<ast.Name object at 0x7da18dc06920>]]]]
<ast.Tuple object at 0x7da18dc07700> assign[=] <ast.ListComp object at 0x7da18dc04340>
variable[mean] assign[=] call[name[tf].reduce_mean, parameter[name[x]]]
variable[l2norm] assign[=] call[name[tf].reduce_sum, parameter[call[name[tf].squared_difference, parameter[name[x], name[mean]]]]]
variable[norm_x] assign[=] binary_operation[binary_operation[name[x] - name[mean]] * call[name[tf].rsqrt, parameter[binary_operation[name[l2norm] + name[epsilon]]]]]
return[binary_operation[binary_operation[name[norm_x] * name[scale]] + name[bias]]] | keyword[def] identifier[l2_norm] ( identifier[x] , identifier[filters] = keyword[None] , identifier[epsilon] = literal[int] , identifier[name] = keyword[None] , identifier[reuse] = keyword[None] ):
literal[string]
keyword[if] identifier[filters] keyword[is] keyword[None] :
identifier[filters] = identifier[shape_list] ( identifier[x] )[- literal[int] ]
keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] , identifier[default_name] = literal[string] , identifier[values] =[ identifier[x] ], identifier[reuse] = identifier[reuse] ):
identifier[scale] = identifier[tf] . identifier[get_variable] (
literal[string] ,[ identifier[filters] ], identifier[initializer] = identifier[tf] . identifier[ones_initializer] ())
identifier[bias] = identifier[tf] . identifier[get_variable] (
literal[string] ,[ identifier[filters] ], identifier[initializer] = identifier[tf] . identifier[zeros_initializer] ())
identifier[epsilon] , identifier[scale] , identifier[bias] =[ identifier[cast_like] ( identifier[t] , identifier[x] ) keyword[for] identifier[t] keyword[in] [ identifier[epsilon] , identifier[scale] , identifier[bias] ]]
identifier[mean] = identifier[tf] . identifier[reduce_mean] ( identifier[x] , identifier[axis] =[- literal[int] ], identifier[keepdims] = keyword[True] )
identifier[l2norm] = identifier[tf] . identifier[reduce_sum] (
identifier[tf] . identifier[squared_difference] ( identifier[x] , identifier[mean] ), identifier[axis] =[- literal[int] ], identifier[keepdims] = keyword[True] )
identifier[norm_x] =( identifier[x] - identifier[mean] )* identifier[tf] . identifier[rsqrt] ( identifier[l2norm] + identifier[epsilon] )
keyword[return] identifier[norm_x] * identifier[scale] + identifier[bias] | def l2_norm(x, filters=None, epsilon=1e-06, name=None, reuse=None):
"""Layer normalization with l2 norm."""
if filters is None:
filters = shape_list(x)[-1] # depends on [control=['if'], data=['filters']]
with tf.variable_scope(name, default_name='l2_norm', values=[x], reuse=reuse):
scale = tf.get_variable('l2_norm_scale', [filters], initializer=tf.ones_initializer())
bias = tf.get_variable('l2_norm_bias', [filters], initializer=tf.zeros_initializer())
(epsilon, scale, bias) = [cast_like(t, x) for t in [epsilon, scale, bias]]
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
l2norm = tf.reduce_sum(tf.squared_difference(x, mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
return norm_x * scale + bias # depends on [control=['with'], data=[]] |
def _subtract(start, stop, intervals):
    """
    Subtract intervals from a spanning interval.

    Yields the gaps of ``[start, stop)`` that are not covered by
    ``intervals`` (as normalized by ``_collapse``).
    """
    remainder_start = start
    sub_stop = None
    for sub_start, sub_stop in _collapse(intervals):
        # Emit the gap between the uncovered cursor and the next interval.
        if remainder_start < sub_start:
            yield _Interval(remainder_start, sub_start)
        remainder_start = sub_stop
    # Trailing gap after the last subtracted interval.
    # NOTE(review): when ``intervals`` is empty, sub_stop stays None and
    # the full [start, stop) span is never yielded -- confirm intended.
    if sub_stop is not None and sub_stop < stop:
        yield _Interval(sub_stop, stop) | def function[_subtract, parameter[start, stop, intervals]]:
constant[
Subtract intervals from a spanning interval.
]
variable[remainder_start] assign[=] name[start]
variable[sub_stop] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b08e7970>, <ast.Name object at 0x7da1b08e5180>]]] in starred[call[name[_collapse], parameter[name[intervals]]]] begin[:]
if compare[name[remainder_start] less[<] name[sub_start]] begin[:]
<ast.Yield object at 0x7da1b08e57b0>
variable[remainder_start] assign[=] name[sub_stop]
if <ast.BoolOp object at 0x7da1b08e6470> begin[:]
<ast.Yield object at 0x7da1b08e6020> | keyword[def] identifier[_subtract] ( identifier[start] , identifier[stop] , identifier[intervals] ):
literal[string]
identifier[remainder_start] = identifier[start]
identifier[sub_stop] = keyword[None]
keyword[for] identifier[sub_start] , identifier[sub_stop] keyword[in] identifier[_collapse] ( identifier[intervals] ):
keyword[if] identifier[remainder_start] < identifier[sub_start] :
keyword[yield] identifier[_Interval] ( identifier[remainder_start] , identifier[sub_start] )
identifier[remainder_start] = identifier[sub_stop]
keyword[if] identifier[sub_stop] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sub_stop] < identifier[stop] :
keyword[yield] identifier[_Interval] ( identifier[sub_stop] , identifier[stop] ) | def _subtract(start, stop, intervals):
"""
Subtract intervals from a spanning interval.
"""
remainder_start = start
sub_stop = None
for (sub_start, sub_stop) in _collapse(intervals):
if remainder_start < sub_start:
yield _Interval(remainder_start, sub_start) # depends on [control=['if'], data=['remainder_start', 'sub_start']]
remainder_start = sub_stop # depends on [control=['for'], data=[]]
if sub_stop is not None and sub_stop < stop:
yield _Interval(sub_stop, stop) # depends on [control=['if'], data=[]] |
def encrypt_and_hash(self, plaintext: bytes) -> bytes:
    """
    Sets ciphertext = EncryptWithAd(h, plaintext), calls MixHash(ciphertext), and returns ciphertext. Note that if
    k is empty, the EncryptWithAd() call will set ciphertext equal to plaintext.

    :param plaintext: bytes sequence
    :return: ciphertext bytes sequence
    """
    # Encrypt using the running handshake hash ``h`` as associated data,
    # then fold the ciphertext back into the hash state.
    ciphertext = self.cipher_state.encrypt_with_ad(self.h, plaintext)
    self.mix_hash(ciphertext)
    return ciphertext | def function[encrypt_and_hash, parameter[self, plaintext]]:
constant[
Sets ciphertext = EncryptWithAd(h, plaintext), calls MixHash(ciphertext), and returns ciphertext. Note that if
k is empty, the EncryptWithAd() call will set ciphertext equal to plaintext.
:param plaintext: bytes sequence
:return: ciphertext bytes sequence
]
variable[ciphertext] assign[=] call[name[self].cipher_state.encrypt_with_ad, parameter[name[self].h, name[plaintext]]]
call[name[self].mix_hash, parameter[name[ciphertext]]]
return[name[ciphertext]] | keyword[def] identifier[encrypt_and_hash] ( identifier[self] , identifier[plaintext] : identifier[bytes] )-> identifier[bytes] :
literal[string]
identifier[ciphertext] = identifier[self] . identifier[cipher_state] . identifier[encrypt_with_ad] ( identifier[self] . identifier[h] , identifier[plaintext] )
identifier[self] . identifier[mix_hash] ( identifier[ciphertext] )
keyword[return] identifier[ciphertext] | def encrypt_and_hash(self, plaintext: bytes) -> bytes:
"""
Sets ciphertext = EncryptWithAd(h, plaintext), calls MixHash(ciphertext), and returns ciphertext. Note that if
k is empty, the EncryptWithAd() call will set ciphertext equal to plaintext.
:param plaintext: bytes sequence
:return: ciphertext bytes sequence
"""
ciphertext = self.cipher_state.encrypt_with_ad(self.h, plaintext)
self.mix_hash(ciphertext)
return ciphertext |
def adjoint(self):
    """Adjoint operator represented by the adjoint matrix.

    The adjoint of a matrix operator is its conjugate transpose, mapping
    from this operator's range back to its domain.

    Returns
    -------
    adjoint : `MatrixOperator`
    """
    # Conjugate transpose; domain and range are swapped accordingly.
    return MatrixOperator(self.matrix.conj().T,
                          domain=self.range, range=self.domain,
                          axis=self.axis) | def function[adjoint, parameter[self]]:
constant[Adjoint operator represented by the adjoint matrix.
Returns
-------
adjoint : `MatrixOperator`
]
return[call[name[MatrixOperator], parameter[call[name[self].matrix.conj, parameter[]].T]]] | keyword[def] identifier[adjoint] ( identifier[self] ):
literal[string]
keyword[return] identifier[MatrixOperator] ( identifier[self] . identifier[matrix] . identifier[conj] (). identifier[T] ,
identifier[domain] = identifier[self] . identifier[range] , identifier[range] = identifier[self] . identifier[domain] ,
identifier[axis] = identifier[self] . identifier[axis] ) | def adjoint(self):
"""Adjoint operator represented by the adjoint matrix.
Returns
-------
adjoint : `MatrixOperator`
"""
return MatrixOperator(self.matrix.conj().T, domain=self.range, range=self.domain, axis=self.axis) |
def send(self, packet_buffer):
    """
    send a buffer as a packet to the network interface

    :param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
    :raises DeviceIsNotOpen: if the capture handle has not been opened
    """
    if self._handle is None:
        raise self.DeviceIsNotOpen()
    buffer_length = len(packet_buffer)
    # Copy the Python bytes into a C string buffer and reinterpret it as
    # an unsigned-char pointer, as expected by pcap_sendpacket.
    buf_send = ctypes.cast(ctypes.create_string_buffer(packet_buffer, buffer_length),
                           ctypes.POINTER(ctypes.c_ubyte))
    wtypes.pcap_sendpacket(self._handle, buf_send, buffer_length) | def function[send, parameter[self, packet_buffer]]:
constant[
send a buffer as a packet to the network interface
:param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
]
if compare[name[self]._handle is constant[None]] begin[:]
<ast.Raise object at 0x7da1b10cdb70>
variable[buffer_length] assign[=] call[name[len], parameter[name[packet_buffer]]]
variable[buf_send] assign[=] call[name[ctypes].cast, parameter[call[name[ctypes].create_string_buffer, parameter[name[packet_buffer], name[buffer_length]]], call[name[ctypes].POINTER, parameter[name[ctypes].c_ubyte]]]]
call[name[wtypes].pcap_sendpacket, parameter[name[self]._handle, name[buf_send], name[buffer_length]]] | keyword[def] identifier[send] ( identifier[self] , identifier[packet_buffer] ):
literal[string]
keyword[if] identifier[self] . identifier[_handle] keyword[is] keyword[None] :
keyword[raise] identifier[self] . identifier[DeviceIsNotOpen] ()
identifier[buffer_length] = identifier[len] ( identifier[packet_buffer] )
identifier[buf_send] = identifier[ctypes] . identifier[cast] ( identifier[ctypes] . identifier[create_string_buffer] ( identifier[packet_buffer] , identifier[buffer_length] ),
identifier[ctypes] . identifier[POINTER] ( identifier[ctypes] . identifier[c_ubyte] ))
identifier[wtypes] . identifier[pcap_sendpacket] ( identifier[self] . identifier[_handle] , identifier[buf_send] , identifier[buffer_length] ) | def send(self, packet_buffer):
"""
send a buffer as a packet to the network interface
:param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
"""
if self._handle is None:
raise self.DeviceIsNotOpen() # depends on [control=['if'], data=[]]
buffer_length = len(packet_buffer)
buf_send = ctypes.cast(ctypes.create_string_buffer(packet_buffer, buffer_length), ctypes.POINTER(ctypes.c_ubyte))
wtypes.pcap_sendpacket(self._handle, buf_send, buffer_length) |
def _regularize(x, y, ties):
    """Regularize the values, make them ordered and remove duplicates.

    If the ``ties`` parameter is explicitly set to 'ordered' then order
    is already assumed. Otherwise, the removal process will happen.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        The x vector.

    y : array-like, shape=(n_samples,)
        The y vector.

    ties : str
        One of {'ordered', 'mean'}, handles the ties.

    Returns
    -------
    x, y : np.ndarray
        Validated (and, unless ``ties='ordered'``, sorted and
        de-duplicated) copies of the inputs.
    """
    # Validate both inputs as 1-d arrays of the expected dtype.
    x, y = [
        column_or_1d(check_array(arr, ensure_2d=False,
                                 force_all_finite=False,
                                 dtype=DTYPE))
        for arr in (x, y)
    ]
    nx = x.shape[0]
    if nx != y.shape[0]:
        raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0]))
    # manipulate x if needed. if ties is 'ordered' we assume that x is
    # already ordered and everything has been handled already...
    if ties != 'ordered':
        o = np.argsort(x)
        # keep ordered with one another
        x = x[o]
        y = y[o]
        # what if any are the same?
        ux = np.unique(x)
        if ux.shape[0] < nx:
            # Do we want to warn for this?
            # warnings.warn('collapsing to unique "x" values')
            # vectorize this function to apply to each "cell" in the array
            def tie_apply(f, u_val):
                vals = y[x == u_val]  # mask y where x == the unique value
                return f(vals)
            # replace the duplicates in the y array with the "tie" func
            func = VALID_TIES.get(ties, _identity)
            # maybe expensive to vectorize on the fly? Not sure; would need
            # to do some benchmarking. However, we need to in order to keep y
            # and x in scope...
            y = np.vectorize(tie_apply)(func, ux)
            # does ux need ordering? hmm..
            x = ux
    return x, y | def function[_regularize, parameter[x, y, ties]]:
constant[Regularize the values, make them ordered and remove duplicates.
If the ``ties`` parameter is explicitly set to 'ordered' then order
is already assumed. Otherwise, the removal process will happen.
Parameters
----------
x : array-like, shape=(n_samples,)
The x vector.
y : array-like, shape=(n_samples,)
The y vector.
ties : str
One of {'ordered', 'mean'}, handles the ties.
]
<ast.Tuple object at 0x7da1b1eb5e10> assign[=] <ast.ListComp object at 0x7da1b1eb55d0>
variable[nx] assign[=] call[name[x].shape][constant[0]]
if compare[name[nx] not_equal[!=] call[name[y].shape][constant[0]]] begin[:]
<ast.Raise object at 0x7da1b1d38eb0>
if compare[name[ties] not_equal[!=] constant[ordered]] begin[:]
variable[o] assign[=] call[name[np].argsort, parameter[name[x]]]
variable[x] assign[=] call[name[x]][name[o]]
variable[y] assign[=] call[name[y]][name[o]]
variable[ux] assign[=] call[name[np].unique, parameter[name[x]]]
if compare[call[name[ux].shape][constant[0]] less[<] name[nx]] begin[:]
def function[tie_apply, parameter[f, u_val]]:
variable[vals] assign[=] call[name[y]][compare[name[x] equal[==] name[u_val]]]
return[call[name[f], parameter[name[vals]]]]
variable[func] assign[=] call[name[VALID_TIES].get, parameter[name[ties], name[_identity]]]
variable[y] assign[=] call[call[name[np].vectorize, parameter[name[tie_apply]]], parameter[name[func], name[ux]]]
variable[x] assign[=] name[ux]
return[tuple[[<ast.Name object at 0x7da1b1edbeb0>, <ast.Name object at 0x7da1b1edb010>]]] | keyword[def] identifier[_regularize] ( identifier[x] , identifier[y] , identifier[ties] ):
literal[string]
identifier[x] , identifier[y] =[
identifier[column_or_1d] ( identifier[check_array] ( identifier[arr] , identifier[ensure_2d] = keyword[False] ,
identifier[force_all_finite] = keyword[False] ,
identifier[dtype] = identifier[DTYPE] ))
keyword[for] identifier[arr] keyword[in] ( identifier[x] , identifier[y] )
]
identifier[nx] = identifier[x] . identifier[shape] [ literal[int] ]
keyword[if] identifier[nx] != identifier[y] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[nx] , identifier[y] . identifier[shape] [ literal[int] ]))
keyword[if] identifier[ties] != literal[string] :
identifier[o] = identifier[np] . identifier[argsort] ( identifier[x] )
identifier[x] = identifier[x] [ identifier[o] ]
identifier[y] = identifier[y] [ identifier[o] ]
identifier[ux] = identifier[np] . identifier[unique] ( identifier[x] )
keyword[if] identifier[ux] . identifier[shape] [ literal[int] ]< identifier[nx] :
keyword[def] identifier[tie_apply] ( identifier[f] , identifier[u_val] ):
identifier[vals] = identifier[y] [ identifier[x] == identifier[u_val] ]
keyword[return] identifier[f] ( identifier[vals] )
identifier[func] = identifier[VALID_TIES] . identifier[get] ( identifier[ties] , identifier[_identity] )
identifier[y] = identifier[np] . identifier[vectorize] ( identifier[tie_apply] )( identifier[func] , identifier[ux] )
identifier[x] = identifier[ux]
keyword[return] identifier[x] , identifier[y] | def _regularize(x, y, ties):
"""Regularize the values, make them ordered and remove duplicates.
If the ``ties`` parameter is explicitly set to 'ordered' then order
is already assumed. Otherwise, the removal process will happen.
Parameters
----------
x : array-like, shape=(n_samples,)
The x vector.
y : array-like, shape=(n_samples,)
The y vector.
ties : str
One of {'ordered', 'mean'}, handles the ties.
"""
(x, y) = [column_or_1d(check_array(arr, ensure_2d=False, force_all_finite=False, dtype=DTYPE)) for arr in (x, y)]
nx = x.shape[0]
if nx != y.shape[0]:
raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0])) # depends on [control=['if'], data=['nx']]
# manipulate x if needed. if ties is 'ordered' we assume that x is
# already ordered and everything has been handled already...
if ties != 'ordered':
o = np.argsort(x)
# keep ordered with one another
x = x[o]
y = y[o]
# what if any are the same?
ux = np.unique(x)
if ux.shape[0] < nx:
# Do we want to warn for this?
# warnings.warn('collapsing to unique "x" values')
# vectorize this function to apply to each "cell" in the array
def tie_apply(f, u_val):
vals = y[x == u_val] # mask y where x == the unique value
return f(vals)
# replace the duplicates in the y array with the "tie" func
func = VALID_TIES.get(ties, _identity)
# maybe expensive to vectorize on the fly? Not sure; would need
# to do some benchmarking. However, we need to in order to keep y
# and x in scope...
y = np.vectorize(tie_apply)(func, ux)
# does ux need ordering? hmm..
x = ux # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['ties']]
return (x, y) |
def deserialize_header(stream):
    # type: (IO) -> MessageHeader
    """Deserializes the header from a source stream

    :param stream: Source data stream
    :type stream: io.BytesIO
    :returns: Deserialized MessageHeader object and the raw header bytes
    :rtype: :class:`aws_encryption_sdk.structures.MessageHeader` and bytes
    :raises NotSupportedError: if unsupported data types are found
    :raises UnknownIdentityError: if unknown data types are found
    :raises SerializationError: if IV length does not match algorithm
    """
    _LOGGER.debug("Starting header deserialization")
    # Tee every byte read so the raw serialized header can be returned
    # alongside the parsed fields.
    tee = io.BytesIO()
    tee_stream = TeeStream(stream, tee)
    version_id, message_type_id = unpack_values(">BB", tee_stream)
    header = dict()
    header["version"] = _verified_version_from_id(version_id)
    header["type"] = _verified_message_type_from_id(message_type_id)
    algorithm_id, message_id, ser_encryption_context_length = unpack_values(">H16sH", tee_stream)
    header["algorithm"] = _verified_algorithm_from_id(algorithm_id)
    header["message_id"] = message_id
    header["encryption_context"] = deserialize_encryption_context(tee_stream.read(ser_encryption_context_length))
    header["encrypted_data_keys"] = _deserialize_encrypted_data_keys(tee_stream)
    (content_type_id,) = unpack_values(">B", tee_stream)
    header["content_type"] = _verified_content_type_from_id(content_type_id)
    (content_aad_length,) = unpack_values(">I", tee_stream)
    header["content_aad_length"] = _verified_content_aad_length(content_aad_length)
    (iv_length,) = unpack_values(">B", tee_stream)
    # IV length must agree with the algorithm parsed above.
    header["header_iv_length"] = _verified_iv_length(iv_length, header["algorithm"])
    (frame_length,) = unpack_values(">I", tee_stream)
    # Frame length validity depends on the content type (framed or not).
    header["frame_length"] = _verified_frame_length(frame_length, header["content_type"])
    return MessageHeader(**header), tee.getvalue() | def function[deserialize_header, parameter[stream]]:
constant[Deserializes the header from a source stream
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Deserialized MessageHeader object
:rtype: :class:`aws_encryption_sdk.structures.MessageHeader` and bytes
:raises NotSupportedError: if unsupported data types are found
:raises UnknownIdentityError: if unknown data types are found
:raises SerializationError: if IV length does not match algorithm
]
call[name[_LOGGER].debug, parameter[constant[Starting header deserialization]]]
variable[tee] assign[=] call[name[io].BytesIO, parameter[]]
variable[tee_stream] assign[=] call[name[TeeStream], parameter[name[stream], name[tee]]]
<ast.Tuple object at 0x7da20e9618a0> assign[=] call[name[unpack_values], parameter[constant[>BB], name[tee_stream]]]
variable[header] assign[=] call[name[dict], parameter[]]
call[name[header]][constant[version]] assign[=] call[name[_verified_version_from_id], parameter[name[version_id]]]
call[name[header]][constant[type]] assign[=] call[name[_verified_message_type_from_id], parameter[name[message_type_id]]]
<ast.Tuple object at 0x7da18f810af0> assign[=] call[name[unpack_values], parameter[constant[>H16sH], name[tee_stream]]]
call[name[header]][constant[algorithm]] assign[=] call[name[_verified_algorithm_from_id], parameter[name[algorithm_id]]]
call[name[header]][constant[message_id]] assign[=] name[message_id]
call[name[header]][constant[encryption_context]] assign[=] call[name[deserialize_encryption_context], parameter[call[name[tee_stream].read, parameter[name[ser_encryption_context_length]]]]]
call[name[header]][constant[encrypted_data_keys]] assign[=] call[name[_deserialize_encrypted_data_keys], parameter[name[tee_stream]]]
<ast.Tuple object at 0x7da18f8120b0> assign[=] call[name[unpack_values], parameter[constant[>B], name[tee_stream]]]
call[name[header]][constant[content_type]] assign[=] call[name[_verified_content_type_from_id], parameter[name[content_type_id]]]
<ast.Tuple object at 0x7da204565900> assign[=] call[name[unpack_values], parameter[constant[>I], name[tee_stream]]]
call[name[header]][constant[content_aad_length]] assign[=] call[name[_verified_content_aad_length], parameter[name[content_aad_length]]]
<ast.Tuple object at 0x7da2045679d0> assign[=] call[name[unpack_values], parameter[constant[>B], name[tee_stream]]]
call[name[header]][constant[header_iv_length]] assign[=] call[name[_verified_iv_length], parameter[name[iv_length], call[name[header]][constant[algorithm]]]]
<ast.Tuple object at 0x7da204566c20> assign[=] call[name[unpack_values], parameter[constant[>I], name[tee_stream]]]
call[name[header]][constant[frame_length]] assign[=] call[name[_verified_frame_length], parameter[name[frame_length], call[name[header]][constant[content_type]]]]
return[tuple[[<ast.Call object at 0x7da204564bb0>, <ast.Call object at 0x7da204566f50>]]] | keyword[def] identifier[deserialize_header] ( identifier[stream] ):
literal[string]
identifier[_LOGGER] . identifier[debug] ( literal[string] )
identifier[tee] = identifier[io] . identifier[BytesIO] ()
identifier[tee_stream] = identifier[TeeStream] ( identifier[stream] , identifier[tee] )
identifier[version_id] , identifier[message_type_id] = identifier[unpack_values] ( literal[string] , identifier[tee_stream] )
identifier[header] = identifier[dict] ()
identifier[header] [ literal[string] ]= identifier[_verified_version_from_id] ( identifier[version_id] )
identifier[header] [ literal[string] ]= identifier[_verified_message_type_from_id] ( identifier[message_type_id] )
identifier[algorithm_id] , identifier[message_id] , identifier[ser_encryption_context_length] = identifier[unpack_values] ( literal[string] , identifier[tee_stream] )
identifier[header] [ literal[string] ]= identifier[_verified_algorithm_from_id] ( identifier[algorithm_id] )
identifier[header] [ literal[string] ]= identifier[message_id]
identifier[header] [ literal[string] ]= identifier[deserialize_encryption_context] ( identifier[tee_stream] . identifier[read] ( identifier[ser_encryption_context_length] ))
identifier[header] [ literal[string] ]= identifier[_deserialize_encrypted_data_keys] ( identifier[tee_stream] )
( identifier[content_type_id] ,)= identifier[unpack_values] ( literal[string] , identifier[tee_stream] )
identifier[header] [ literal[string] ]= identifier[_verified_content_type_from_id] ( identifier[content_type_id] )
( identifier[content_aad_length] ,)= identifier[unpack_values] ( literal[string] , identifier[tee_stream] )
identifier[header] [ literal[string] ]= identifier[_verified_content_aad_length] ( identifier[content_aad_length] )
( identifier[iv_length] ,)= identifier[unpack_values] ( literal[string] , identifier[tee_stream] )
identifier[header] [ literal[string] ]= identifier[_verified_iv_length] ( identifier[iv_length] , identifier[header] [ literal[string] ])
( identifier[frame_length] ,)= identifier[unpack_values] ( literal[string] , identifier[tee_stream] )
identifier[header] [ literal[string] ]= identifier[_verified_frame_length] ( identifier[frame_length] , identifier[header] [ literal[string] ])
keyword[return] identifier[MessageHeader] (** identifier[header] ), identifier[tee] . identifier[getvalue] () | def deserialize_header(stream):
# type: (IO) -> MessageHeader
'Deserializes the header from a source stream\n\n :param stream: Source data stream\n :type stream: io.BytesIO\n :returns: Deserialized MessageHeader object\n :rtype: :class:`aws_encryption_sdk.structures.MessageHeader` and bytes\n :raises NotSupportedError: if unsupported data types are found\n :raises UnknownIdentityError: if unknown data types are found\n :raises SerializationError: if IV length does not match algorithm\n '
_LOGGER.debug('Starting header deserialization')
tee = io.BytesIO()
tee_stream = TeeStream(stream, tee)
(version_id, message_type_id) = unpack_values('>BB', tee_stream)
header = dict()
header['version'] = _verified_version_from_id(version_id)
header['type'] = _verified_message_type_from_id(message_type_id)
(algorithm_id, message_id, ser_encryption_context_length) = unpack_values('>H16sH', tee_stream)
header['algorithm'] = _verified_algorithm_from_id(algorithm_id)
header['message_id'] = message_id
header['encryption_context'] = deserialize_encryption_context(tee_stream.read(ser_encryption_context_length))
header['encrypted_data_keys'] = _deserialize_encrypted_data_keys(tee_stream)
(content_type_id,) = unpack_values('>B', tee_stream)
header['content_type'] = _verified_content_type_from_id(content_type_id)
(content_aad_length,) = unpack_values('>I', tee_stream)
header['content_aad_length'] = _verified_content_aad_length(content_aad_length)
(iv_length,) = unpack_values('>B', tee_stream)
header['header_iv_length'] = _verified_iv_length(iv_length, header['algorithm'])
(frame_length,) = unpack_values('>I', tee_stream)
header['frame_length'] = _verified_frame_length(frame_length, header['content_type'])
return (MessageHeader(**header), tee.getvalue()) |
def cli_progress(addr, offset, size):
    """Prints a progress report suitable for use on the command line."""
    width = 25  # characters inside the progress bar
    done = offset * width // size
    # Leading carriage return redraws the same terminal line each call.
    print("\r0x{:08x} {:7d} [{}{}] {:3d}% "
          .format(addr, size, '=' * done, ' ' * (width - done),
                  offset * 100 // size), end="")
    try:
        sys.stdout.flush()
    except OSError:
        pass  # Ignore Windows CLI "WinError 87" on Python 3.6
    # Finish the line once the transfer is complete.
    if offset == size:
        print("") | def function[cli_progress, parameter[addr, offset, size]]:
constant[Prints a progress report suitable for use on the command line.]
variable[width] assign[=] constant[25]
variable[done] assign[=] binary_operation[binary_operation[name[offset] * name[width]] <ast.FloorDiv object at 0x7da2590d6bc0> name[size]]
call[name[print], parameter[call[constant[
0x{:08x} {:7d} [{}{}] {:3d}% ].format, parameter[name[addr], name[size], binary_operation[constant[=] * name[done]], binary_operation[constant[ ] * binary_operation[name[width] - name[done]]], binary_operation[binary_operation[name[offset] * constant[100]] <ast.FloorDiv object at 0x7da2590d6bc0> name[size]]]]]]
<ast.Try object at 0x7da20c993970>
if compare[name[offset] equal[==] name[size]] begin[:]
call[name[print], parameter[constant[]]] | keyword[def] identifier[cli_progress] ( identifier[addr] , identifier[offset] , identifier[size] ):
literal[string]
identifier[width] = literal[int]
identifier[done] = identifier[offset] * identifier[width] // identifier[size]
identifier[print] ( literal[string]
. identifier[format] ( identifier[addr] , identifier[size] , literal[string] * identifier[done] , literal[string] *( identifier[width] - identifier[done] ),
identifier[offset] * literal[int] // identifier[size] ), identifier[end] = literal[string] )
keyword[try] :
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[except] identifier[OSError] :
keyword[pass]
keyword[if] identifier[offset] == identifier[size] :
identifier[print] ( literal[string] ) | def cli_progress(addr, offset, size):
"""Prints a progress report suitable for use on the command line."""
width = 25
done = offset * width // size
print('\r0x{:08x} {:7d} [{}{}] {:3d}% '.format(addr, size, '=' * done, ' ' * (width - done), offset * 100 // size), end='')
try:
sys.stdout.flush() # depends on [control=['try'], data=[]]
except OSError:
pass # Ignore Windows CLI "WinError 87" on Python 3.6 # depends on [control=['except'], data=[]]
if offset == size:
print('') # depends on [control=['if'], data=[]] |
def _sslobj(sock):
    """Returns the underlying PySLLSocket object with which the C extension
    functions interface.
    """
    # NOTE(review): this ``pass`` is dead code and could be removed.
    pass
    # NOTE(review): looks like a version shim -- some Python builds expose
    # _ssl._SSLSocket directly on sock._sslobj while others wrap it one
    # level deeper; confirm the targeted interpreter versions.
    if isinstance(sock._sslobj, _ssl._SSLSocket):
        return sock._sslobj
    else:
        return sock._sslobj._sslobj | def function[_sslobj, parameter[sock]]:
constant[Returns the underlying PySLLSocket object with which the C extension
functions interface.
]
pass
if call[name[isinstance], parameter[name[sock]._sslobj, name[_ssl]._SSLSocket]] begin[:]
return[name[sock]._sslobj] | keyword[def] identifier[_sslobj] ( identifier[sock] ):
literal[string]
keyword[pass]
keyword[if] identifier[isinstance] ( identifier[sock] . identifier[_sslobj] , identifier[_ssl] . identifier[_SSLSocket] ):
keyword[return] identifier[sock] . identifier[_sslobj]
keyword[else] :
keyword[return] identifier[sock] . identifier[_sslobj] . identifier[_sslobj] | def _sslobj(sock):
"""Returns the underlying PySLLSocket object with which the C extension
functions interface.
"""
pass
if isinstance(sock._sslobj, _ssl._SSLSocket):
return sock._sslobj # depends on [control=['if'], data=[]]
else:
return sock._sslobj._sslobj |
def __update_count(self):
    """ Update internal counters """
    # Refresh the cached counts from the corresponding count_* helpers.
    self._ntypes = self.count_types()
    self._nvars = self.count_vars()
    self._nfuns = self.count_funs() | def function[__update_count, parameter[self]]:
constant[ Update internal counters ]
name[self]._ntypes assign[=] call[name[self].count_types, parameter[]]
name[self]._nvars assign[=] call[name[self].count_vars, parameter[]]
name[self]._nfuns assign[=] call[name[self].count_funs, parameter[]] | keyword[def] identifier[__update_count] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_ntypes] = identifier[self] . identifier[count_types] ()
identifier[self] . identifier[_nvars] = identifier[self] . identifier[count_vars] ()
identifier[self] . identifier[_nfuns] = identifier[self] . identifier[count_funs] () | def __update_count(self):
""" Update internal counters """
self._ntypes = self.count_types()
self._nvars = self.count_vars()
self._nfuns = self.count_funs() |
def send_url(amount, redirect_url, url, api):
    '''
    Return the payment gateway URL to redirect the user to for payment.

    :param amount: payment amount (gateway rejects values below 1000 Rials)
    :param redirect_url: URL the gateway redirects back to after payment
    :param url: unused; kept for backward compatibility with existing callers
    :param api: Payline API key
    :returns: gateway URL string on success, ``None`` on any error
    '''
    values = {'api': api, 'amount': amount, 'redirect': redirect_url}
    send_request = requests.post(SEND_URL_FINAL, data=values)
    id_get = send_request.text
    print(id_get)
    # Guard: the gateway may answer with a non-numeric body (empty response,
    # HTML error page, timeout text); the original int() call would raise
    # ValueError here instead of reaching the error branches below.
    try:
        id_num = int(id_get)
    except ValueError:
        print("some other error(s) occurred.")
        return None
    if id_num > 0:
        print(".معتبر است id_get")
        payment_gateway_url = '%s%s' % (GATEWAY_URL_FINAL, id_get)
        return payment_gateway_url
    elif id_get == "-1":
        print(
            " apiارسالی با نوع apiتعریف شده در paylineسازگار نیست.")
    elif id_get == "-2":
        print(
            "مقدار amountداده عددي نمی باشد و یا کمتر از 1000 ریال است.")
    elif id_get == "-3":
        print("مقدار redirectرشته nullاست.")
    elif id_get == "-4":
        print(
            "درگاهی با اطلاعات ارسالی یافت نشده و یا در حالت انتظار می باشد")
    else:
        print("some other error(s) occurred.")
    return None
constant[
return payment gateway url to redirect user
to it for payment.
]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da18dc07a90>, <ast.Constant object at 0x7da18dc04e20>, <ast.Constant object at 0x7da18dc05300>], [<ast.Name object at 0x7da18dc07670>, <ast.Name object at 0x7da18dc045b0>, <ast.Name object at 0x7da18dc06da0>]]
variable[send_request] assign[=] call[name[requests].post, parameter[name[SEND_URL_FINAL]]]
variable[id_get] assign[=] name[send_request].text
call[name[print], parameter[name[id_get]]]
if compare[call[name[int], parameter[name[id_get]]] greater[>] constant[0]] begin[:]
call[name[print], parameter[constant[.معتبر است id_get]]]
variable[payment_gateway_url] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc9baf0>, <ast.Name object at 0x7da18dc99990>]]]
return[name[payment_gateway_url]] | keyword[def] identifier[send_url] ( identifier[amount] , identifier[redirect_url] , identifier[url] , identifier[api] ):
literal[string]
identifier[values] ={ literal[string] : identifier[api] , literal[string] : identifier[amount] , literal[string] : identifier[redirect_url] }
identifier[send_request] = identifier[requests] . identifier[post] ( identifier[SEND_URL_FINAL] , identifier[data] = identifier[values] )
identifier[id_get] = identifier[send_request] . identifier[text]
identifier[print] ( identifier[id_get] )
keyword[if] identifier[int] ( identifier[id_get] )> literal[int] :
identifier[print] ( literal[string] )
identifier[payment_gateway_url] = literal[string] %( identifier[GATEWAY_URL_FINAL] , identifier[id_get] )
keyword[return] identifier[payment_gateway_url]
keyword[elif] identifier[id_get] == literal[string] :
identifier[print] (
literal[string] )
keyword[elif] identifier[id_get] == literal[string] :
identifier[print] (
literal[string] )
keyword[elif] identifier[id_get] == literal[string] :
identifier[print] ( literal[string] )
keyword[elif] identifier[id_get] == literal[string] :
identifier[print] (
literal[string] )
keyword[else] :
identifier[print] ( literal[string] ) | def send_url(amount, redirect_url, url, api):
"""
return payment gateway url to redirect user
to it for payment.
"""
values = {'api': api, 'amount': amount, 'redirect': redirect_url}
send_request = requests.post(SEND_URL_FINAL, data=values)
id_get = send_request.text
print(id_get)
if int(id_get) > 0:
print('.معتبر است id_get')
payment_gateway_url = '%s%s' % (GATEWAY_URL_FINAL, id_get)
return payment_gateway_url # depends on [control=['if'], data=[]]
elif id_get == '-1':
print('\u202b\u202a api\u202cارسالی با نوع \u202a api\u202cتعریف شده در \u202a payline\u202cسازگار نیست.\u202c') # depends on [control=['if'], data=[]]
elif id_get == '-2':
print('\u202bمقدار \u202a amount\u202cداده عددي نمی باشد و یا کمتر از 1000 ریال است.\u202c') # depends on [control=['if'], data=[]]
elif id_get == '-3':
print('\u202bمقدار \u202a redirect\u202cرشته \u202a null\u202cاست.\u202c') # depends on [control=['if'], data=[]]
elif id_get == '-4':
print('\u202bدرگاهی با اطلاعات ارسالی یافت نشده و یا در حالت انتظار می باشد\u202c') # depends on [control=['if'], data=[]]
else:
print('some other error(s) occurred.') |
def find(self, *strings, **kwargs):
    """
    Search lines of the editor for occurrences of the given strings.

    Args:
        strings (str): Any number of substrings to look for
        keys_only (bool): If True, collect only line numbers
        start (int): Optional first line (of the search window) to scan
        stop (int): Optional line to stop scanning at (exclusive)

    Returns:
        A list of matches when a single string is searched; otherwise a
        dict mapping each search string to its list of matches. Each match
        is ``(line_number, line)`` — or just ``line_number`` with
        ``keys_only`` — where the number is relative to ``start``.
    """
    start = kwargs.pop("start", 0)
    stop = kwargs.pop("stop", None)
    keys_only = kwargs.pop("keys_only", False)
    if stop is None:
        stop = len(self)
    results = {needle: [] for needle in strings}
    for lineno, text in enumerate(self[start:stop]):
        for needle in strings:
            if needle not in text:
                continue
            results[needle].append(lineno if keys_only else (lineno, text))
    # Single-string searches unwrap to the bare match list.
    return results[strings[0]] if len(strings) == 1 else results
constant[
Search the entire editor for lines that match the string.
.. code-block:: Python
string = '''word one
word two
three'''
ed = Editor(string)
ed.find('word') # [(0, "word one"), (1, "word two")]
ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]}
Args:
strings (str): Any number of strings to search for
keys_only (bool): Only return keys
start (int): Optional line to start searching on
stop (int): Optional line to stop searching on
Returns:
results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)
]
variable[start] assign[=] call[name[kwargs].pop, parameter[constant[start], constant[0]]]
variable[stop] assign[=] call[name[kwargs].pop, parameter[constant[stop], constant[None]]]
variable[keys_only] assign[=] call[name[kwargs].pop, parameter[constant[keys_only], constant[False]]]
variable[results] assign[=] <ast.DictComp object at 0x7da18c4cfa90>
variable[stop] assign[=] <ast.IfExp object at 0x7da18c4cce50>
for taget[tuple[[<ast.Name object at 0x7da18c4cc070>, <ast.Name object at 0x7da18c4ce9e0>]]] in starred[call[name[enumerate], parameter[call[name[self]][<ast.Slice object at 0x7da18c4cdff0>]]]] begin[:]
for taget[name[string]] in starred[name[strings]] begin[:]
if compare[name[string] in name[line]] begin[:]
if name[keys_only] begin[:]
call[call[name[results]][name[string]].append, parameter[name[i]]]
if compare[call[name[len], parameter[name[strings]]] equal[==] constant[1]] begin[:]
return[call[name[results]][call[name[strings]][constant[0]]]]
return[name[results]] | keyword[def] identifier[find] ( identifier[self] ,* identifier[strings] ,** identifier[kwargs] ):
literal[string]
identifier[start] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] )
identifier[stop] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[keys_only] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[results] ={ identifier[string] :[] keyword[for] identifier[string] keyword[in] identifier[strings] }
identifier[stop] = identifier[len] ( identifier[self] ) keyword[if] identifier[stop] keyword[is] keyword[None] keyword[else] identifier[stop]
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[self] [ identifier[start] : identifier[stop] ]):
keyword[for] identifier[string] keyword[in] identifier[strings] :
keyword[if] identifier[string] keyword[in] identifier[line] :
keyword[if] identifier[keys_only] :
identifier[results] [ identifier[string] ]. identifier[append] ( identifier[i] )
keyword[else] :
identifier[results] [ identifier[string] ]. identifier[append] (( identifier[i] , identifier[line] ))
keyword[if] identifier[len] ( identifier[strings] )== literal[int] :
keyword[return] identifier[results] [ identifier[strings] [ literal[int] ]]
keyword[return] identifier[results] | def find(self, *strings, **kwargs):
"""
Search the entire editor for lines that match the string.
.. code-block:: Python
string = '''word one
word two
three'''
ed = Editor(string)
ed.find('word') # [(0, "word one"), (1, "word two")]
ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]}
Args:
strings (str): Any number of strings to search for
keys_only (bool): Only return keys
start (int): Optional line to start searching on
stop (int): Optional line to stop searching on
Returns:
results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)
"""
start = kwargs.pop('start', 0)
stop = kwargs.pop('stop', None)
keys_only = kwargs.pop('keys_only', False)
results = {string: [] for string in strings}
stop = len(self) if stop is None else stop
for (i, line) in enumerate(self[start:stop]):
for string in strings:
if string in line:
if keys_only:
results[string].append(i) # depends on [control=['if'], data=[]]
else:
results[string].append((i, line)) # depends on [control=['if'], data=['string', 'line']] # depends on [control=['for'], data=['string']] # depends on [control=['for'], data=[]]
if len(strings) == 1:
return results[strings[0]] # depends on [control=['if'], data=[]]
return results |
def _is_charge_balanced(struct):
    """Return True if the oxidation states of all sites sum to zero.

    :param struct: structure object exposing ``sites``, where each site has
        ``specie.oxi_state``
    :returns: bool -- whether the total charge of the structure is 0
    """
    # Generator avoids building a throwaway list; comparison already yields
    # the bool, so no if/else-return-True/False boilerplate is needed.
    return sum(site.specie.oxi_state for site in struct.sites) == 0.0
constant[
checks if the structure object is charge balanced
]
if compare[call[name[sum], parameter[<ast.ListComp object at 0x7da1b1c92440>]] equal[==] constant[0.0]] begin[:]
return[constant[True]] | keyword[def] identifier[_is_charge_balanced] ( identifier[struct] ):
literal[string]
keyword[if] identifier[sum] ([ identifier[s] . identifier[specie] . identifier[oxi_state] keyword[for] identifier[s] keyword[in] identifier[struct] . identifier[sites] ])== literal[int] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def _is_charge_balanced(struct):
"""
checks if the structure object is charge balanced
"""
if sum([s.specie.oxi_state for s in struct.sites]) == 0.0:
return True # depends on [control=['if'], data=[]]
else:
return False |
def get_debug_info():
    """Return an ordered mapping of backend version and session debug info.
    """
    from . import __version__
    info = OrderedDict()
    info['Version'] = '%s' % __version__
    for key, session_cls in PyVisaLibrary.get_session_classes().items():
        # Keys look like (interface_type, resource_class).
        label = '%s %s' % (key[0].name.upper(), key[1])
        try:
            info[label] = session_cls.session_issue.split('\n')
        except AttributeError:
            # No recorded issue: the session type is usable.
            info[label] = 'Available ' + session_cls.get_low_level_info()
    return info
constant[Return a list of lines with backend info.
]
from relative_module[None] import module[__version__]
variable[d] assign[=] call[name[OrderedDict], parameter[]]
call[name[d]][constant[Version]] assign[=] binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[__version__]]
for taget[tuple[[<ast.Name object at 0x7da20e9b00d0>, <ast.Name object at 0x7da20e9b1900>]]] in starred[call[call[name[PyVisaLibrary].get_session_classes, parameter[]].items, parameter[]]] begin[:]
variable[key_name] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20e9b2860>, <ast.Subscript object at 0x7da20e9b3460>]]]
<ast.Try object at 0x7da20e9b0fa0>
return[name[d]] | keyword[def] identifier[get_debug_info] ():
literal[string]
keyword[from] . keyword[import] identifier[__version__]
identifier[d] = identifier[OrderedDict] ()
identifier[d] [ literal[string] ]= literal[string] % identifier[__version__]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[PyVisaLibrary] . identifier[get_session_classes] (). identifier[items] ():
identifier[key_name] = literal[string] %( identifier[key] [ literal[int] ]. identifier[name] . identifier[upper] (), identifier[key] [ literal[int] ])
keyword[try] :
identifier[d] [ identifier[key_name] ]= identifier[getattr] ( identifier[val] , literal[string] ). identifier[split] ( literal[string] )
keyword[except] identifier[AttributeError] :
identifier[d] [ identifier[key_name] ]= literal[string] + identifier[val] . identifier[get_low_level_info] ()
keyword[return] identifier[d] | def get_debug_info():
"""Return a list of lines with backend info.
"""
from . import __version__
d = OrderedDict()
d['Version'] = '%s' % __version__
for (key, val) in PyVisaLibrary.get_session_classes().items():
key_name = '%s %s' % (key[0].name.upper(), key[1])
try:
d[key_name] = getattr(val, 'session_issue').split('\n') # depends on [control=['try'], data=[]]
except AttributeError:
d[key_name] = 'Available ' + val.get_low_level_info() # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return d |
def area(poly):
    """Calculation of zone area, projected onto the XY plane.

    :param poly: sequence of vertex tuples; only the first two coordinates
        of each vertex are used, with z forced to 0
    :returns: the polygon area as computed by ``surface.area``
    """
    # Build a fresh vertex list instead of mutating the caller's polygon
    # in place (the original clobbered poly[i] and also built an unused
    # poly_xy list).
    flattened = [vertex[0:2] + (0,) for vertex in poly]
    return surface.area(flattened)
constant[Calculation of zone area]
variable[poly_xy] assign[=] list[[]]
variable[num] assign[=] call[name[len], parameter[name[poly]]]
for taget[name[i]] in starred[call[name[range], parameter[name[num]]]] begin[:]
call[name[poly]][name[i]] assign[=] binary_operation[call[call[name[poly]][name[i]]][<ast.Slice object at 0x7da20c6ab7f0>] + tuple[[<ast.Constant object at 0x7da20c6a9ff0>]]]
call[name[poly_xy].append, parameter[call[name[poly]][name[i]]]]
return[call[name[surface].area, parameter[name[poly]]]] | keyword[def] identifier[area] ( identifier[poly] ):
literal[string]
identifier[poly_xy] =[]
identifier[num] = identifier[len] ( identifier[poly] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num] ):
identifier[poly] [ identifier[i] ]= identifier[poly] [ identifier[i] ][ literal[int] : literal[int] ]+( literal[int] ,)
identifier[poly_xy] . identifier[append] ( identifier[poly] [ identifier[i] ])
keyword[return] identifier[surface] . identifier[area] ( identifier[poly] ) | def area(poly):
"""Calculation of zone area"""
poly_xy = []
num = len(poly)
for i in range(num):
poly[i] = poly[i][0:2] + (0,)
poly_xy.append(poly[i]) # depends on [control=['for'], data=['i']]
return surface.area(poly) |
def get_parent_dir(name):
    """Get the parent directory of a filename.

    Strips two path components (the file and its containing directory);
    falls back to the absolute current directory when nothing remains.
    """
    parent = os.path.dirname(os.path.dirname(name))
    return parent if parent else os.path.abspath('.')
constant[Get the parent directory of a filename.]
variable[parent_dir] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[name[name]]]]]
if name[parent_dir] begin[:]
return[name[parent_dir]]
return[call[name[os].path.abspath, parameter[constant[.]]]] | keyword[def] identifier[get_parent_dir] ( identifier[name] ):
literal[string]
identifier[parent_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[name] ))
keyword[if] identifier[parent_dir] :
keyword[return] identifier[parent_dir]
keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( literal[string] ) | def get_parent_dir(name):
"""Get the parent directory of a filename."""
parent_dir = os.path.dirname(os.path.dirname(name))
if parent_dir:
return parent_dir # depends on [control=['if'], data=[]]
return os.path.abspath('.') |
def global_optimum(self):
    """Return a randomly chosen globally optimal (Pareto-front) solution.

    The first ``objectives - 1`` position variables are drawn uniformly
    from [0, 1]; the remaining distance variables are fixed at 0.5, which
    places the solution on the Pareto front. Since many solutions are
    Pareto-optimal, each call picks one at random.
    """
    n_position = self.objectives - 1
    solution = [random.uniform(0, 1) for _ in range(n_position)]
    solution += [0.5] * (self.dimensions - n_position)
    return solution
variable[x] assign[=] <ast.ListComp object at 0x7da1b1390d90>
call[name[x].extend, parameter[<ast.ListComp object at 0x7da1b1392e60>]]
return[name[x]] | keyword[def] identifier[global_optimum] ( identifier[self] ):
literal[string]
identifier[x] =[ identifier[random] . identifier[uniform] ( literal[int] , literal[int] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[objectives] - literal[int] )]
identifier[x] . identifier[extend] ([ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[dimensions] - identifier[self] . identifier[objectives] + literal[int] )])
keyword[return] identifier[x] | def global_optimum(self):
"""Return a globally optimal solution to this problem.
This function returns a globally optimal solution (i.e., a
solution that lives on the Pareto front). Since there are many
solutions that are Pareto-optimal, this function randomly
chooses one to return.
"""
x = [random.uniform(0, 1) for _ in range(self.objectives - 1)]
x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)])
return x |
def count(self, val):
    """Return the number of occurrences of *val* in the list."""
    # pylint: disable=arguments-differ
    # _maxes[i] is the largest element of sublist _lists[i]; an empty
    # _maxes means the whole sorted list is empty.
    _maxes = self._maxes
    if not _maxes:
        return 0
    # Leftmost sublist that could contain val.
    pos_left = bisect_left(_maxes, val)
    if pos_left == len(_maxes):
        # val is greater than every stored element.
        return 0
    _lists = self._lists
    # Index of the first occurrence of val (or its insertion point)
    # within that leftmost sublist.
    idx_left = bisect_left(_lists[pos_left], val)
    # Rightmost sublist that could contain val.
    pos_right = bisect_right(_maxes, val)
    if pos_right == len(_maxes):
        # Occurrences run to the end of the list: total length minus the
        # flat position (via _loc) of the first occurrence.
        return self._len - self._loc(pos_left, idx_left)
    # One past the last occurrence within the rightmost sublist.
    idx_right = bisect_right(_lists[pos_right], val)
    if pos_left == pos_right:
        # All occurrences live in a single sublist.
        return idx_right - idx_left
    # Occurrences span multiple sublists: difference of flat positions.
    right = self._loc(pos_right, idx_right)
    left = self._loc(pos_left, idx_left)
    return right - left
constant[Return the number of occurrences of *val* in the list.]
variable[_maxes] assign[=] name[self]._maxes
if <ast.UnaryOp object at 0x7da18bc704f0> begin[:]
return[constant[0]]
variable[pos_left] assign[=] call[name[bisect_left], parameter[name[_maxes], name[val]]]
if compare[name[pos_left] equal[==] call[name[len], parameter[name[_maxes]]]] begin[:]
return[constant[0]]
variable[_lists] assign[=] name[self]._lists
variable[idx_left] assign[=] call[name[bisect_left], parameter[call[name[_lists]][name[pos_left]], name[val]]]
variable[pos_right] assign[=] call[name[bisect_right], parameter[name[_maxes], name[val]]]
if compare[name[pos_right] equal[==] call[name[len], parameter[name[_maxes]]]] begin[:]
return[binary_operation[name[self]._len - call[name[self]._loc, parameter[name[pos_left], name[idx_left]]]]]
variable[idx_right] assign[=] call[name[bisect_right], parameter[call[name[_lists]][name[pos_right]], name[val]]]
if compare[name[pos_left] equal[==] name[pos_right]] begin[:]
return[binary_operation[name[idx_right] - name[idx_left]]]
variable[right] assign[=] call[name[self]._loc, parameter[name[pos_right], name[idx_right]]]
variable[left] assign[=] call[name[self]._loc, parameter[name[pos_left], name[idx_left]]]
return[binary_operation[name[right] - name[left]]] | keyword[def] identifier[count] ( identifier[self] , identifier[val] ):
literal[string]
identifier[_maxes] = identifier[self] . identifier[_maxes]
keyword[if] keyword[not] identifier[_maxes] :
keyword[return] literal[int]
identifier[pos_left] = identifier[bisect_left] ( identifier[_maxes] , identifier[val] )
keyword[if] identifier[pos_left] == identifier[len] ( identifier[_maxes] ):
keyword[return] literal[int]
identifier[_lists] = identifier[self] . identifier[_lists]
identifier[idx_left] = identifier[bisect_left] ( identifier[_lists] [ identifier[pos_left] ], identifier[val] )
identifier[pos_right] = identifier[bisect_right] ( identifier[_maxes] , identifier[val] )
keyword[if] identifier[pos_right] == identifier[len] ( identifier[_maxes] ):
keyword[return] identifier[self] . identifier[_len] - identifier[self] . identifier[_loc] ( identifier[pos_left] , identifier[idx_left] )
identifier[idx_right] = identifier[bisect_right] ( identifier[_lists] [ identifier[pos_right] ], identifier[val] )
keyword[if] identifier[pos_left] == identifier[pos_right] :
keyword[return] identifier[idx_right] - identifier[idx_left]
identifier[right] = identifier[self] . identifier[_loc] ( identifier[pos_right] , identifier[idx_right] )
identifier[left] = identifier[self] . identifier[_loc] ( identifier[pos_left] , identifier[idx_left] )
keyword[return] identifier[right] - identifier[left] | def count(self, val):
"""Return the number of occurrences of *val* in the list."""
# pylint: disable=arguments-differ
_maxes = self._maxes
if not _maxes:
return 0 # depends on [control=['if'], data=[]]
pos_left = bisect_left(_maxes, val)
if pos_left == len(_maxes):
return 0 # depends on [control=['if'], data=[]]
_lists = self._lists
idx_left = bisect_left(_lists[pos_left], val)
pos_right = bisect_right(_maxes, val)
if pos_right == len(_maxes):
return self._len - self._loc(pos_left, idx_left) # depends on [control=['if'], data=[]]
idx_right = bisect_right(_lists[pos_right], val)
if pos_left == pos_right:
return idx_right - idx_left # depends on [control=['if'], data=[]]
right = self._loc(pos_right, idx_right)
left = self._loc(pos_left, idx_left)
return right - left |
def get_match_history(self, account_id=None, **kwargs):
    """Return a dictionary containing a list of the most recent Dota matches.

    :param account_id: (int, optional)
    :param hero_id: (int, optional)
    :param game_mode: (int, optional) see ``ref/modes.json``
    :param skill: (int, optional) see ``ref/skill.json``
    :param min_players: (int, optional) minimum amount of players required
    :param league_id: (int, optional) for ids use ``get_league_listing()``
    :param start_at_match_id: (int, optional) start at matches equal to or
        older than this match id
    :param matches_requested: (int, optional) defaults to ``100``
    :param tournament_games_only: (str, optional) limit results to
        tournament matches only
    :return: dictionary of matches, see :doc:`responses </responses>`;
        ``None`` if the HTTP status indicates an error
    """
    # An explicit account_id keyword wins over the positional argument.
    kwargs.setdefault('account_id', account_id)
    url = self.__build_url(urls.GET_MATCH_HISTORY, **kwargs)
    req = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    if self.__check_http_err(req.status_code):
        return None
    return response.build(req, url, self.raw_mode)
constant[Returns a dictionary containing a list of the most recent Dota matches
:param account_id: (int, optional)
:param hero_id: (int, optional)
:param game_mode: (int, optional) see ``ref/modes.json``
:param skill: (int, optional) see ``ref/skill.json``
:param min_players: (int, optional) only return matches with minimum
amount of players
:param league_id: (int, optional) for ids use ``get_league_listing()``
:param start_at_match_id: (int, optional) start at matches equal to or
older than this match id
:param matches_requested: (int, optional) defaults to ``100``
:param tournament_games_only: (str, optional) limit results to
tournament matches only
:return: dictionary of matches, see :doc:`responses </responses>`
]
if compare[constant[account_id] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[account_id]] assign[=] name[account_id]
variable[url] assign[=] call[name[self].__build_url, parameter[name[urls].GET_MATCH_HISTORY]]
variable[req] assign[=] call[name[self].executor, parameter[name[url]]]
if name[self].logger begin[:]
call[name[self].logger.info, parameter[call[constant[URL: {0}].format, parameter[name[url]]]]]
if <ast.UnaryOp object at 0x7da1b11d5180> begin[:]
return[call[name[response].build, parameter[name[req], name[url], name[self].raw_mode]]] | keyword[def] identifier[get_match_history] ( identifier[self] , identifier[account_id] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[account_id]
identifier[url] = identifier[self] . identifier[__build_url] ( identifier[urls] . identifier[GET_MATCH_HISTORY] ,** identifier[kwargs] )
identifier[req] = identifier[self] . identifier[executor] ( identifier[url] )
keyword[if] identifier[self] . identifier[logger] :
identifier[self] . identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[url] ))
keyword[if] keyword[not] identifier[self] . identifier[__check_http_err] ( identifier[req] . identifier[status_code] ):
keyword[return] identifier[response] . identifier[build] ( identifier[req] , identifier[url] , identifier[self] . identifier[raw_mode] ) | def get_match_history(self, account_id=None, **kwargs):
"""Returns a dictionary containing a list of the most recent Dota matches
:param account_id: (int, optional)
:param hero_id: (int, optional)
:param game_mode: (int, optional) see ``ref/modes.json``
:param skill: (int, optional) see ``ref/skill.json``
:param min_players: (int, optional) only return matches with minimum
amount of players
:param league_id: (int, optional) for ids use ``get_league_listing()``
:param start_at_match_id: (int, optional) start at matches equal to or
older than this match id
:param matches_requested: (int, optional) defaults to ``100``
:param tournament_games_only: (str, optional) limit results to
tournament matches only
:return: dictionary of matches, see :doc:`responses </responses>`
"""
if 'account_id' not in kwargs:
kwargs['account_id'] = account_id # depends on [control=['if'], data=['kwargs']]
url = self.__build_url(urls.GET_MATCH_HISTORY, **kwargs)
req = self.executor(url)
if self.logger:
self.logger.info('URL: {0}'.format(url)) # depends on [control=['if'], data=[]]
if not self.__check_http_err(req.status_code):
return response.build(req, url, self.raw_mode) # depends on [control=['if'], data=[]] |
def create_checkout(self, **params):
    """https://developers.coinbase.com/api/v2#create-checkout"""
    # Validate required fields before hitting the API; report the first
    # missing one (checked in a fixed order).
    missing = [key for key in ('amount', 'currency', 'name')
               if key not in params]
    if missing:
        raise ValueError("Missing required parameter: %s" % missing[0])
    response = self._post('v2', 'checkouts', data=params)
    return self._make_api_object(response, Checkout)
constant[https://developers.coinbase.com/api/v2#create-checkout]
for taget[name[required]] in starred[list[[<ast.Constant object at 0x7da2047ea1d0>, <ast.Constant object at 0x7da2047e9a20>, <ast.Constant object at 0x7da2047e8550>]]] begin[:]
if compare[name[required] <ast.NotIn object at 0x7da2590d7190> name[params]] begin[:]
<ast.Raise object at 0x7da18dc04eb0>
variable[response] assign[=] call[name[self]._post, parameter[constant[v2], constant[checkouts]]]
return[call[name[self]._make_api_object, parameter[name[response], name[Checkout]]]] | keyword[def] identifier[create_checkout] ( identifier[self] ,** identifier[params] ):
literal[string]
keyword[for] identifier[required] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[required] keyword[not] keyword[in] identifier[params] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[required] )
identifier[response] = identifier[self] . identifier[_post] ( literal[string] , literal[string] , identifier[data] = identifier[params] )
keyword[return] identifier[self] . identifier[_make_api_object] ( identifier[response] , identifier[Checkout] ) | def create_checkout(self, **params):
"""https://developers.coinbase.com/api/v2#create-checkout"""
for required in ['amount', 'currency', 'name']:
if required not in params:
raise ValueError('Missing required parameter: %s' % required) # depends on [control=['if'], data=['required']] # depends on [control=['for'], data=['required']]
response = self._post('v2', 'checkouts', data=params)
return self._make_api_object(response, Checkout) |
def enable_external_loaders(obj):
    """Enable external service loaders like `VAULT_` and `REDIS_`
    by looking for env variables like `REDIS_ENABLED_FOR_DYNACONF`.
    """
    for name, loader in ct.EXTERNAL_LOADERS.items():
        flag_attr = "{}_ENABLED_FOR_DYNACONF".format(name.upper())
        enabled = getattr(obj, flag_attr, False)
        # Skip loaders that are disabled, explicitly false, or registered.
        if not enabled or enabled in false_values:
            continue
        if loader in obj.LOADERS_FOR_DYNACONF:
            continue
        obj.logger.debug("loaders: Enabling %s", loader)
        # External loaders take precedence, so insert at the front.
        obj.LOADERS_FOR_DYNACONF.insert(0, loader)
constant[Enable external service loaders like `VAULT_` and `REDIS_`
looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`
]
for taget[tuple[[<ast.Name object at 0x7da1b180dba0>, <ast.Name object at 0x7da1b180cd90>]]] in starred[call[name[ct].EXTERNAL_LOADERS.items, parameter[]]] begin[:]
variable[enabled] assign[=] call[name[getattr], parameter[name[obj], call[constant[{}_ENABLED_FOR_DYNACONF].format, parameter[call[name[name].upper, parameter[]]]], constant[False]]]
if <ast.BoolOp object at 0x7da1b180da50> begin[:]
call[name[obj].logger.debug, parameter[constant[loaders: Enabling %s], name[loader]]]
call[name[obj].LOADERS_FOR_DYNACONF.insert, parameter[constant[0], name[loader]]] | keyword[def] identifier[enable_external_loaders] ( identifier[obj] ):
literal[string]
keyword[for] identifier[name] , identifier[loader] keyword[in] identifier[ct] . identifier[EXTERNAL_LOADERS] . identifier[items] ():
identifier[enabled] = identifier[getattr] (
identifier[obj] , literal[string] . identifier[format] ( identifier[name] . identifier[upper] ()), keyword[False]
)
keyword[if] (
identifier[enabled]
keyword[and] identifier[enabled] keyword[not] keyword[in] identifier[false_values]
keyword[and] identifier[loader] keyword[not] keyword[in] identifier[obj] . identifier[LOADERS_FOR_DYNACONF]
):
identifier[obj] . identifier[logger] . identifier[debug] ( literal[string] , identifier[loader] )
identifier[obj] . identifier[LOADERS_FOR_DYNACONF] . identifier[insert] ( literal[int] , identifier[loader] ) | def enable_external_loaders(obj):
"""Enable external service loaders like `VAULT_` and `REDIS_`
looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`
"""
for (name, loader) in ct.EXTERNAL_LOADERS.items():
enabled = getattr(obj, '{}_ENABLED_FOR_DYNACONF'.format(name.upper()), False)
if enabled and enabled not in false_values and (loader not in obj.LOADERS_FOR_DYNACONF): # noqa
obj.logger.debug('loaders: Enabling %s', loader)
obj.LOADERS_FOR_DYNACONF.insert(0, loader) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def fix_fasta(fasta):
    """
    Yield fasta records with pesky characters stripped from each header,
    skipping records whose sequence is empty.
    """
    for record in parse_fasta(fasta):
        record[0] = remove_char(record[0])
        # Drop header-only entries with no sequence data.
        if record[1]:
            yield record
yield seq | def function[fix_fasta, parameter[fasta]]:
constant[
remove pesky characters from fasta file header
]
for taget[name[seq]] in starred[call[name[parse_fasta], parameter[name[fasta]]]] begin[:]
call[name[seq]][constant[0]] assign[=] call[name[remove_char], parameter[call[name[seq]][constant[0]]]]
if compare[call[name[len], parameter[call[name[seq]][constant[1]]]] greater[>] constant[0]] begin[:]
<ast.Yield object at 0x7da18bc71b70> | keyword[def] identifier[fix_fasta] ( identifier[fasta] ):
literal[string]
keyword[for] identifier[seq] keyword[in] identifier[parse_fasta] ( identifier[fasta] ):
identifier[seq] [ literal[int] ]= identifier[remove_char] ( identifier[seq] [ literal[int] ])
keyword[if] identifier[len] ( identifier[seq] [ literal[int] ])> literal[int] :
keyword[yield] identifier[seq] | def fix_fasta(fasta):
"""
remove pesky characters from fasta file header
"""
for seq in parse_fasta(fasta):
seq[0] = remove_char(seq[0])
if len(seq[1]) > 0:
yield seq # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['seq']] |
def str_to_time(self):
    """
    Convert the XCCDF dateTime string stored in ``self.time`` to a
    datetime object.

    :returns: datetime object.
    :rtype: datetime.datetime
    """
    # Split the ISO-like string on the date/time separators and feed the
    # integer fields straight into the datetime constructor.
    fields = [int(part) for part in re.split(r'[-:T]', self.time)]
    return datetime(*fields)
constant[
Formats a XCCDF dateTime string to a datetime object.
:returns: datetime object.
:rtype: datetime.datetime
]
return[call[name[datetime], parameter[<ast.Starred object at 0x7da1b0aa6860>]]] | keyword[def] identifier[str_to_time] ( identifier[self] ):
literal[string]
keyword[return] identifier[datetime] (* identifier[list] ( identifier[map] ( identifier[int] , identifier[re] . identifier[split] ( literal[string] , identifier[self] . identifier[time] )))) | def str_to_time(self):
"""
Formats a XCCDF dateTime string to a datetime object.
:returns: datetime object.
:rtype: datetime.datetime
"""
return datetime(*list(map(int, re.split('-|:|T', self.time)))) |
def bundles_list(self, io_handler, name=None):
    """
    Lists the bundles in the framework and their state. Possibility to
    filter on the bundle name.
    """
    # Table header
    headers = ("ID", "Name", "State", "Version")

    # The framework itself is not returned by get_bundles(), so prepend it
    all_bundles = [self._context.get_framework()]
    all_bundles.extend(self._context.get_bundles())

    if name is not None:
        # Keep only the bundles whose symbolic name contains the filter
        all_bundles = [
            bnd for bnd in all_bundles
            if name in bnd.get_symbolic_name()
        ]

    # One table row per bundle
    rows = []
    for bnd in all_bundles:
        rows.append([
            str(bnd.get_bundle_id()),
            str(bnd.get_symbolic_name()),
            str(self._utils.bundlestate_to_str(bnd.get_state())),
            str(bnd.get_version()),
        ])

    # Render the table, then a summary line
    io_handler.write(self._utils.make_table(headers, rows))
    if name is None:
        io_handler.write_line("{0} bundles installed", len(rows))
    else:
        io_handler.write_line("{0} filtered bundles", len(rows))
constant[
Lists the bundles in the framework and their state. Possibility to
filter on the bundle name.
]
variable[headers] assign[=] tuple[[<ast.Constant object at 0x7da20c6e4190>, <ast.Constant object at 0x7da20c6e6e60>, <ast.Constant object at 0x7da20c6e4820>, <ast.Constant object at 0x7da20c6e7dc0>]]
variable[bundles] assign[=] call[name[self]._context.get_bundles, parameter[]]
call[name[bundles].insert, parameter[constant[0], call[name[self]._context.get_framework, parameter[]]]]
if compare[name[name] is_not constant[None]] begin[:]
variable[bundles] assign[=] <ast.ListComp object at 0x7da20c6e5690>
variable[lines] assign[=] <ast.ListComp object at 0x7da20c6e7520>
call[name[io_handler].write, parameter[call[name[self]._utils.make_table, parameter[name[headers], name[lines]]]]]
if compare[name[name] is constant[None]] begin[:]
call[name[io_handler].write_line, parameter[constant[{0} bundles installed], call[name[len], parameter[name[lines]]]]] | keyword[def] identifier[bundles_list] ( identifier[self] , identifier[io_handler] , identifier[name] = keyword[None] ):
literal[string]
identifier[headers] =( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[bundles] = identifier[self] . identifier[_context] . identifier[get_bundles] ()
identifier[bundles] . identifier[insert] ( literal[int] , identifier[self] . identifier[_context] . identifier[get_framework] ())
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[bundles] =[
identifier[bundle]
keyword[for] identifier[bundle] keyword[in] identifier[bundles]
keyword[if] identifier[name] keyword[in] identifier[bundle] . identifier[get_symbolic_name] ()
]
identifier[lines] =[
[
identifier[str] ( identifier[entry] )
keyword[for] identifier[entry] keyword[in] (
identifier[bundle] . identifier[get_bundle_id] (),
identifier[bundle] . identifier[get_symbolic_name] (),
identifier[self] . identifier[_utils] . identifier[bundlestate_to_str] ( identifier[bundle] . identifier[get_state] ()),
identifier[bundle] . identifier[get_version] (),
)
]
keyword[for] identifier[bundle] keyword[in] identifier[bundles]
]
identifier[io_handler] . identifier[write] ( identifier[self] . identifier[_utils] . identifier[make_table] ( identifier[headers] , identifier[lines] ))
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[io_handler] . identifier[write_line] ( literal[string] , identifier[len] ( identifier[lines] ))
keyword[else] :
identifier[io_handler] . identifier[write_line] ( literal[string] , identifier[len] ( identifier[lines] )) | def bundles_list(self, io_handler, name=None):
"""
Lists the bundles in the framework and their state. Possibility to
filter on the bundle name.
"""
# Head of the table
headers = ('ID', 'Name', 'State', 'Version')
# Get the bundles
bundles = self._context.get_bundles()
# The framework is not in the result of get_bundles()
bundles.insert(0, self._context.get_framework())
if name is not None:
# Filter the list
bundles = [bundle for bundle in bundles if name in bundle.get_symbolic_name()] # depends on [control=['if'], data=['name']]
# Make the entries
lines = [[str(entry) for entry in (bundle.get_bundle_id(), bundle.get_symbolic_name(), self._utils.bundlestate_to_str(bundle.get_state()), bundle.get_version())] for bundle in bundles]
# Print'em all
io_handler.write(self._utils.make_table(headers, lines))
if name is None:
io_handler.write_line('{0} bundles installed', len(lines)) # depends on [control=['if'], data=[]]
else:
io_handler.write_line('{0} filtered bundles', len(lines)) |
def Laliberte_density_w(T):
    r'''Density of pure water from the correlation given by [1]_.
    No parameters are needed, just a temperature. Units are Kelvin and kg/m^3.

    .. math::
        \rho_w = \frac{\left\{\left([(-2.8054253\times 10^{-10}\cdot t +
        1.0556302\times 10^{-7})t - 4.6170461\times 10^{-5}]t
        -0.0079870401\right)t + 16.945176 \right\}t + 999.83952}
        {1 + 0.01687985\cdot t}

    Parameters
    ----------
    T : float
        Temperature of fluid [K]

    Returns
    -------
    rho_w : float
        Water density, [kg/m^3]

    Notes
    -----
    Original source not cited; no temperature range is specified.

    Examples
    --------
    >>> Laliberte_density_w(298.15)
    997.0448954179155
    >>> Laliberte_density_w(273.15 + 50)
    988.0362916114763

    References
    ----------
    .. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
       Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
       Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
       doi:10.1021/je8008123
    '''
    t = T - 273.15
    # Numerator polynomial in Celsius temperature, evaluated by Horner's
    # scheme in the same floating-point order as the published correlation.
    numerator = (999.83952
                 + t*(16.945176
                      + t*(-0.0079870401
                           + t*(-4.6170461E-5
                                + t*(1.0556302E-7
                                     + t*-2.8054253E-10)))))
    return numerator/(1 + 0.01687985*t)
constant[Calculate the density of water using the form proposed by [1]_.
No parameters are needed, just a temperature. Units are Kelvin and kg/m^3h.
.. math::
\rho_w = \frac{\left\{\left([(-2.8054253\times 10^{-10}\cdot t +
1.0556302\times 10^{-7})t - 4.6170461\times 10^{-5}]t
-0.0079870401\right)t + 16.945176 \right\}t + 999.83952}
{1 + 0.01687985\cdot t}
Parameters
----------
T : float
Temperature of fluid [K]
Returns
-------
rho_w : float
Water density, [kg/m^3]
Notes
-----
Original source not cited
No temperature range is used.
Examples
--------
>>> Laliberte_density_w(298.15)
997.0448954179155
>>> Laliberte_density_w(273.15 + 50)
988.0362916114763
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
]
variable[t] assign[=] binary_operation[name[T] - constant[273.15]]
variable[rho_w] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b26ac310> * name[t]] + constant[1.0556302e-07]] * name[t]] - constant[4.6170461e-05]] * name[t]] - constant[0.0079870401]] * name[t]] + constant[16.945176]] * name[t]] + constant[999.83952]] / binary_operation[constant[1] + binary_operation[constant[0.01687985] * name[t]]]]
return[name[rho_w]] | keyword[def] identifier[Laliberte_density_w] ( identifier[T] ):
literal[string]
identifier[t] = identifier[T] - literal[int]
identifier[rho_w] =(((((- literal[int] * identifier[t] + literal[int] )* identifier[t] - literal[int] )* identifier[t] - literal[int] )* identifier[t] + literal[int] )* identifier[t] + literal[int] )/( literal[int] + literal[int] * identifier[t] )
keyword[return] identifier[rho_w] | def Laliberte_density_w(T):
"""Calculate the density of water using the form proposed by [1]_.
No parameters are needed, just a temperature. Units are Kelvin and kg/m^3h.
.. math::
\\rho_w = \\frac{\\left\\{\\left([(-2.8054253\\times 10^{-10}\\cdot t +
1.0556302\\times 10^{-7})t - 4.6170461\\times 10^{-5}]t
-0.0079870401\\right)t + 16.945176 \\right\\}t + 999.83952}
{1 + 0.01687985\\cdot t}
Parameters
----------
T : float
Temperature of fluid [K]
Returns
-------
rho_w : float
Water density, [kg/m^3]
Notes
-----
Original source not cited
No temperature range is used.
Examples
--------
>>> Laliberte_density_w(298.15)
997.0448954179155
>>> Laliberte_density_w(273.15 + 50)
988.0362916114763
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
"""
t = T - 273.15
rho_w = (((((-2.8054253e-10 * t + 1.0556302e-07) * t - 4.6170461e-05) * t - 0.0079870401) * t + 16.945176) * t + 999.83952) / (1 + 0.01687985 * t)
return rho_w |
def make_withitem(queue, stack):
    """
    Make an ast.withitem node.
    """
    context_expr = make_expr(stack)
    # The next instruction tells us whether the context manager is bound:
    # a STORE_* for "with <expr> as <name>:", a POP_TOP for "with <expr>:".
    binder = queue.popleft()
    store_types = (
        instrs.STORE_FAST,
        instrs.STORE_NAME,
        instrs.STORE_DEREF,
        instrs.STORE_GLOBAL,
    )
    if isinstance(binder, store_types):
        target = make_assign_target(binder, queue, stack)
        return ast.withitem(context_expr=context_expr, optional_vars=target)
    if isinstance(binder, instrs.POP_TOP):
        return ast.withitem(context_expr=context_expr, optional_vars=None)
    raise DecompilationError(
        "Don't know how to make withitem from %s" % binder,
    )
constant[
Make an ast.withitem node.
]
variable[context_expr] assign[=] call[name[make_expr], parameter[name[stack]]]
variable[as_instr] assign[=] call[name[queue].popleft, parameter[]]
if call[name[isinstance], parameter[name[as_instr], tuple[[<ast.Attribute object at 0x7da1b05b6b90>, <ast.Attribute object at 0x7da1b05b6dd0>, <ast.Attribute object at 0x7da1b05b6f80>, <ast.Attribute object at 0x7da1b05b6a70>]]]] begin[:]
return[call[name[ast].withitem, parameter[]]] | keyword[def] identifier[make_withitem] ( identifier[queue] , identifier[stack] ):
literal[string]
identifier[context_expr] = identifier[make_expr] ( identifier[stack] )
identifier[as_instr] = identifier[queue] . identifier[popleft] ()
keyword[if] identifier[isinstance] ( identifier[as_instr] ,( identifier[instrs] . identifier[STORE_FAST] ,
identifier[instrs] . identifier[STORE_NAME] ,
identifier[instrs] . identifier[STORE_DEREF] ,
identifier[instrs] . identifier[STORE_GLOBAL] )):
keyword[return] identifier[ast] . identifier[withitem] (
identifier[context_expr] = identifier[context_expr] ,
identifier[optional_vars] = identifier[make_assign_target] ( identifier[as_instr] , identifier[queue] , identifier[stack] ),
)
keyword[elif] identifier[isinstance] ( identifier[as_instr] , identifier[instrs] . identifier[POP_TOP] ):
keyword[return] identifier[ast] . identifier[withitem] ( identifier[context_expr] = identifier[context_expr] , identifier[optional_vars] = keyword[None] )
keyword[else] :
keyword[raise] identifier[DecompilationError] (
literal[string] % identifier[as_instr] ,
) | def make_withitem(queue, stack):
"""
Make an ast.withitem node.
"""
context_expr = make_expr(stack)
# This is a POP_TOP for just "with <expr>:".
# This is a STORE_NAME(name) for "with <expr> as <name>:".
as_instr = queue.popleft()
if isinstance(as_instr, (instrs.STORE_FAST, instrs.STORE_NAME, instrs.STORE_DEREF, instrs.STORE_GLOBAL)):
return ast.withitem(context_expr=context_expr, optional_vars=make_assign_target(as_instr, queue, stack)) # depends on [control=['if'], data=[]]
elif isinstance(as_instr, instrs.POP_TOP):
return ast.withitem(context_expr=context_expr, optional_vars=None) # depends on [control=['if'], data=[]]
else:
raise DecompilationError("Don't know how to make withitem from %s" % as_instr) |
def V_vertical_torispherical_concave(D, f, k, h):
    r'''Calculates volume of a vertical tank with a concave torispherical
    bottom, according to [1]_. No provision for the top of the tank is made
    here.

    The liquid volume is the cylinder volume at height ``h`` minus the
    (empty) concave head volume, plus the still-dry part of the head above
    the liquid.

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    f : float
        Dish-radius parameter; fD = dish radius [1/m]
    k : float
        knuckle-radius parameter ; kD = knuckle radius [1/m]
    h : float
        Height, as measured up to where the fluid ends, [m]

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    Matching example from [1]_, with inputs in inches and volume in gallons.

    >>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
    103.88569287163769

    References
    ----------
    .. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
       Processing. December 18, 2003.
       http://www.chemicalprocessing.com/articles/2003/193/
    '''
    # Geometry of the torispherical head
    alpha = asin((1 - 2*k)/(2.*(f - k)))
    a1 = f*D*(1 - cos(alpha))      # dish height
    a2 = k*D*cos(alpha)            # knuckle height
    D1 = 2*f*D*sin(alpha)          # dish diameter at the knuckle junction
    s = (k*D*sin(alpha))**2
    t = 2*a2

    def _head_volume(z):
        # Partial head (dish + knuckle) volume at liquid height z
        u = z - f*D*(1 - cos(alpha))
        v = pi/4*(2*a1**3/3. + a1*D1**2/2.) + pi*u*((D/2. - k*D)**2 + s)
        v += pi*t*u**2/2. - pi*u**3/3.
        v += pi*D*(1 - 2*k)*((2*u - t)/4.*(s + t*u - u**2)**0.5 + t*s**0.5/4.
                             + k**2*D**2/2.*(acos((t - 2*u)/(2*k*D)) - alpha))
        return v

    def _dish_volume(z):
        # Partial spherical-dish volume at liquid height z
        return pi*z**2/4.*(2*a1 + D1**2/(2.*a1) - 4*z/3.)

    cylinder = pi*D**2*h/4
    full_head = _head_volume(a1 + a2)
    if 0 <= h < a2:
        # Liquid surface is still within the knuckle region
        return cylinder - full_head + _head_volume(a1 + a2 - h)
    if a2 <= h < a1 + a2:
        # Liquid surface is within the dish region
        return cylinder - full_head + _dish_volume(a1 + a2 - h)
    # Head fully submerged
    return cylinder - full_head
constant[Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\; 0 \le h < a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\; a_2 \le h < a_1 + a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\; h \ge a_1 + a_2
.. math::
v_1 = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right)
+\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right]
+ \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[
\frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4}
+ \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right]
.. math::
v_2 = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right)
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
.. math::
D_1 = 2fD\sin\alpha
.. math::
s = (kD\sin\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\cos\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/
]
variable[alpha] assign[=] call[name[asin], parameter[binary_operation[binary_operation[constant[1] - binary_operation[constant[2] * name[k]]] / binary_operation[constant[2.0] * binary_operation[name[f] - name[k]]]]]]
variable[a1] assign[=] binary_operation[binary_operation[name[f] * name[D]] * binary_operation[constant[1] - call[name[cos], parameter[name[alpha]]]]]
variable[a2] assign[=] binary_operation[binary_operation[name[k] * name[D]] * call[name[cos], parameter[name[alpha]]]]
variable[D1] assign[=] binary_operation[binary_operation[binary_operation[constant[2] * name[f]] * name[D]] * call[name[sin], parameter[name[alpha]]]]
variable[s] assign[=] binary_operation[binary_operation[binary_operation[name[k] * name[D]] * call[name[sin], parameter[name[alpha]]]] ** constant[2]]
variable[t] assign[=] binary_operation[constant[2] * name[a2]]
def function[V1, parameter[h]]:
variable[u] assign[=] binary_operation[name[h] - binary_operation[binary_operation[name[f] * name[D]] * binary_operation[constant[1] - call[name[cos], parameter[name[alpha]]]]]]
variable[v1] assign[=] binary_operation[binary_operation[binary_operation[name[pi] / constant[4]] * binary_operation[binary_operation[binary_operation[constant[2] * binary_operation[name[a1] ** constant[3]]] / constant[3.0]] + binary_operation[binary_operation[name[a1] * binary_operation[name[D1] ** constant[2]]] / constant[2.0]]]] + binary_operation[binary_operation[name[pi] * name[u]] * binary_operation[binary_operation[binary_operation[binary_operation[name[D] / constant[2.0]] - binary_operation[name[k] * name[D]]] ** constant[2]] + name[s]]]]
<ast.AugAssign object at 0x7da1b11c6050>
<ast.AugAssign object at 0x7da1b11c5cc0>
return[name[v1]]
def function[V2, parameter[h]]:
variable[v2] assign[=] binary_operation[binary_operation[binary_operation[name[pi] * binary_operation[name[h] ** constant[2]]] / constant[4.0]] * binary_operation[binary_operation[binary_operation[constant[2] * name[a1]] + binary_operation[binary_operation[name[D1] ** constant[2]] / binary_operation[constant[2.0] * name[a1]]]] - binary_operation[binary_operation[constant[4] * name[h]] / constant[3.0]]]]
return[name[v2]]
if compare[constant[0] less_or_equal[<=] name[h]] begin[:]
variable[Vf] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[pi] * binary_operation[name[D] ** constant[2]]] * name[h]] / constant[4]] - call[name[V1], parameter[binary_operation[name[a1] + name[a2]]]]] + call[name[V1], parameter[binary_operation[binary_operation[name[a1] + name[a2]] - name[h]]]]]
return[name[Vf]] | keyword[def] identifier[V_vertical_torispherical_concave] ( identifier[D] , identifier[f] , identifier[k] , identifier[h] ):
literal[string]
identifier[alpha] = identifier[asin] (( literal[int] - literal[int] * identifier[k] )/( literal[int] *( identifier[f] - identifier[k] )))
identifier[a1] = identifier[f] * identifier[D] *( literal[int] - identifier[cos] ( identifier[alpha] ))
identifier[a2] = identifier[k] * identifier[D] * identifier[cos] ( identifier[alpha] )
identifier[D1] = literal[int] * identifier[f] * identifier[D] * identifier[sin] ( identifier[alpha] )
identifier[s] =( identifier[k] * identifier[D] * identifier[sin] ( identifier[alpha] ))** literal[int]
identifier[t] = literal[int] * identifier[a2]
keyword[def] identifier[V1] ( identifier[h] ):
identifier[u] = identifier[h] - identifier[f] * identifier[D] *( literal[int] - identifier[cos] ( identifier[alpha] ))
identifier[v1] = identifier[pi] / literal[int] *( literal[int] * identifier[a1] ** literal[int] / literal[int] + identifier[a1] * identifier[D1] ** literal[int] / literal[int] )+ identifier[pi] * identifier[u] *(( identifier[D] / literal[int] - identifier[k] * identifier[D] )** literal[int] + identifier[s] )
identifier[v1] += identifier[pi] * identifier[t] * identifier[u] ** literal[int] / literal[int] - identifier[pi] * identifier[u] ** literal[int] / literal[int]
identifier[v1] += identifier[pi] * identifier[D] *( literal[int] - literal[int] * identifier[k] )*(( literal[int] * identifier[u] - identifier[t] )/ literal[int] *( identifier[s] + identifier[t] * identifier[u] - identifier[u] ** literal[int] )** literal[int] + identifier[t] * identifier[s] ** literal[int] / literal[int]
+ identifier[k] ** literal[int] * identifier[D] ** literal[int] / literal[int] *( identifier[acos] (( identifier[t] - literal[int] * identifier[u] )/( literal[int] * identifier[k] * identifier[D] ))- identifier[alpha] ))
keyword[return] identifier[v1]
keyword[def] identifier[V2] ( identifier[h] ):
identifier[v2] = identifier[pi] * identifier[h] ** literal[int] / literal[int] *( literal[int] * identifier[a1] + identifier[D1] ** literal[int] /( literal[int] * identifier[a1] )- literal[int] * identifier[h] / literal[int] )
keyword[return] identifier[v2]
keyword[if] literal[int] <= identifier[h] < identifier[a2] :
identifier[Vf] = identifier[pi] * identifier[D] ** literal[int] * identifier[h] / literal[int] - identifier[V1] ( identifier[a1] + identifier[a2] )+ identifier[V1] ( identifier[a1] + identifier[a2] - identifier[h] )
keyword[elif] identifier[a2] <= identifier[h] < identifier[a1] + identifier[a2] :
identifier[Vf] = identifier[pi] * identifier[D] ** literal[int] * identifier[h] / literal[int] - identifier[V1] ( identifier[a1] + identifier[a2] )+ identifier[V2] ( identifier[a1] + identifier[a2] - identifier[h] )
keyword[else] :
identifier[Vf] = identifier[pi] * identifier[D] ** literal[int] * identifier[h] / literal[int] - identifier[V1] ( identifier[a1] + identifier[a2] )
keyword[return] identifier[Vf] | def V_vertical_torispherical_concave(D, f, k, h):
"""Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \\frac{\\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\\; 0 \\le h < a_2
.. math::
V = \\frac{\\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\\; a_2 \\le h < a_1 + a_2
.. math::
V = \\frac{\\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\\; h \\ge a_1 + a_2
.. math::
v_1 = \\frac{\\pi}{4}\\left(\\frac{2a_1^3}{3} + \\frac{a_1D_1^2}{2}\\right)
+\\pi u\\left[\\left(\\frac{D}{2}-kD\\right)^2 +s\\right]
+ \\frac{\\pi tu^2}{2} - \\frac{\\pi u^3}{3} + \\pi D(1-2k)\\left[
\\frac{2u-t}{4}\\sqrt{s+tu-u^2} + \\frac{t\\sqrt{s}}{4}
+ \\frac{k^2D^2}{2}\\left(\\cos^{-1}\\frac{t-2u}{2kD}-\\alpha\\right)\\right]
.. math::
v_2 = \\frac{\\pi h^2}{4}\\left(2a_1 + \\frac{D_1^2}{2a_1} - \\frac{4h}{3}\\right)
.. math::
\\alpha = \\sin^{-1}\\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\\cos\\alpha)
.. math::
a_2 = kD\\cos\\alpha
.. math::
D_1 = 2fD\\sin\\alpha
.. math::
s = (kD\\sin\\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\\cos\\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/
"""
alpha = asin((1 - 2 * k) / (2.0 * (f - k)))
a1 = f * D * (1 - cos(alpha))
a2 = k * D * cos(alpha)
D1 = 2 * f * D * sin(alpha)
s = (k * D * sin(alpha)) ** 2
t = 2 * a2
def V1(h):
u = h - f * D * (1 - cos(alpha))
v1 = pi / 4 * (2 * a1 ** 3 / 3.0 + a1 * D1 ** 2 / 2.0) + pi * u * ((D / 2.0 - k * D) ** 2 + s)
v1 += pi * t * u ** 2 / 2.0 - pi * u ** 3 / 3.0
v1 += pi * D * (1 - 2 * k) * ((2 * u - t) / 4.0 * (s + t * u - u ** 2) ** 0.5 + t * s ** 0.5 / 4.0 + k ** 2 * D ** 2 / 2.0 * (acos((t - 2 * u) / (2 * k * D)) - alpha))
return v1
def V2(h):
v2 = pi * h ** 2 / 4.0 * (2 * a1 + D1 ** 2 / (2.0 * a1) - 4 * h / 3.0)
return v2
if 0 <= h < a2:
Vf = pi * D ** 2 * h / 4 - V1(a1 + a2) + V1(a1 + a2 - h) # depends on [control=['if'], data=['h']]
elif a2 <= h < a1 + a2:
Vf = pi * D ** 2 * h / 4 - V1(a1 + a2) + V2(a1 + a2 - h) # depends on [control=['if'], data=['a2', 'h']]
else:
Vf = pi * D ** 2 * h / 4 - V1(a1 + a2)
return Vf |
def contains(bank, key):
    '''
    Checks if the specified bank contains the specified key.

    :param bank: cache bank (path prefix) to look under
    :param key: key name, or ``None`` to match the bank itself
    :return: ``True`` if the key exists (always ``True`` when ``key`` is
        ``None``), ``False`` otherwise
    :raises SaltCacheError: if the Consul lookup fails
    '''
    if key is None:
        # Any key could be a branch and a leaf at the same time in Consul,
        # so a bare bank always matches.
        return True
    # Build the key OUTSIDE the try block: the except handler below
    # formats c_key into its message, so it must always be bound even if
    # the lookup itself is what failed.
    c_key = '{0}/{1}'.format(bank, key)
    try:
        # Keep the try body minimal: only the remote call can fail here.
        _, value = api.kv.get(c_key)
    except Exception as exc:
        raise SaltCacheError(
            'There was an error getting the key, {0}: {1}'.format(
                c_key, exc
            )
        )
    return value is not None
constant[
Checks if the specified bank contains the specified key.
]
if compare[name[key] is constant[None]] begin[:]
return[constant[True]] | keyword[def] identifier[contains] ( identifier[bank] , identifier[key] ):
literal[string]
keyword[if] identifier[key] keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[else] :
keyword[try] :
identifier[c_key] = literal[string] . identifier[format] ( identifier[bank] , identifier[key] )
identifier[_] , identifier[value] = identifier[api] . identifier[kv] . identifier[get] ( identifier[c_key] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[raise] identifier[SaltCacheError] (
literal[string] . identifier[format] (
identifier[c_key] , identifier[exc]
)
)
keyword[return] identifier[value] keyword[is] keyword[not] keyword[None] | def contains(bank, key):
"""
Checks if the specified bank contains the specified key.
"""
if key is None:
return True # any key could be a branch and a leaf at the same time in Consul # depends on [control=['if'], data=[]]
else:
try:
c_key = '{0}/{1}'.format(bank, key)
(_, value) = api.kv.get(c_key) # depends on [control=['try'], data=[]]
except Exception as exc:
raise SaltCacheError('There was an error getting the key, {0}: {1}'.format(c_key, exc)) # depends on [control=['except'], data=['exc']]
return value is not None |
def create(cls, name, engines, include_core_files=False,
           include_slapcat_output=False, comment=None):
    """
    Create an sginfo task.

    :param str name: name of task
    :param engines: list of engines to apply the sginfo task
    :type engines: list(Engine)
    :param bool include_core_files: include core files in the
        sginfo backup (default: False)
    :param bool include_slapcat_output: include output from a
        slapcat command in output (default: False)
    :param str comment: optional comment stored on the task
    :raises ElementNotFound: engine not found
    :raises CreateElementFailed: create the task failed
    :return: the task
    :rtype: SGInfoTask
    """
    # Named json_data rather than "json" to avoid shadowing the stdlib
    # json module name.
    json_data = {
        'name': name,
        'comment': comment,
        'resources': [engine.href for engine in engines],
        'include_core_files': include_core_files,
        'include_slapcat_output': include_slapcat_output}
    return ElementCreator(cls, json_data)
constant[
Create an sginfo task.
:param str name: name of task
:param engines: list of engines to apply the sginfo task
:type engines: list(Engine)
:param bool include_core_files: include core files in the
sginfo backup (default: False)
:param bool include_slapcat_output: include output from a
slapcat command in output (default: False)
:raises ElementNotFound: engine not found
:raises CreateElementFailed: create the task failed
:return: the task
:rtype: SGInfoTask
]
variable[json] assign[=] dictionary[[<ast.Constant object at 0x7da1b1a29d80>, <ast.Constant object at 0x7da1b1a2b730>, <ast.Constant object at 0x7da1b1a2ac80>, <ast.Constant object at 0x7da1b1a288e0>, <ast.Constant object at 0x7da1b1a280a0>], [<ast.Name object at 0x7da1b1a2ae90>, <ast.Name object at 0x7da1b1a2bd90>, <ast.ListComp object at 0x7da1b1a28e80>, <ast.Name object at 0x7da1b1a29cf0>, <ast.Name object at 0x7da1b1a2b0d0>]]
return[call[name[ElementCreator], parameter[name[cls], name[json]]]] | keyword[def] identifier[create] ( identifier[cls] , identifier[name] , identifier[engines] , identifier[include_core_files] = keyword[False] ,
identifier[include_slapcat_output] = keyword[False] , identifier[comment] = keyword[None] ):
literal[string]
identifier[json] ={
literal[string] : identifier[name] ,
literal[string] : identifier[comment] ,
literal[string] :[ identifier[engine] . identifier[href] keyword[for] identifier[engine] keyword[in] identifier[engines] ],
literal[string] : identifier[include_core_files] ,
literal[string] : identifier[include_slapcat_output] }
keyword[return] identifier[ElementCreator] ( identifier[cls] , identifier[json] ) | def create(cls, name, engines, include_core_files=False, include_slapcat_output=False, comment=None):
"""
Create an sginfo task.
:param str name: name of task
:param engines: list of engines to apply the sginfo task
:type engines: list(Engine)
:param bool include_core_files: include core files in the
sginfo backup (default: False)
:param bool include_slapcat_output: include output from a
slapcat command in output (default: False)
:raises ElementNotFound: engine not found
:raises CreateElementFailed: create the task failed
:return: the task
:rtype: SGInfoTask
"""
json = {'name': name, 'comment': comment, 'resources': [engine.href for engine in engines], 'include_core_files': include_core_files, 'include_slapcat_output': include_slapcat_output}
return ElementCreator(cls, json) |
def _apply_scope(self, scope, builder):
    """
    Apply a single scope on the given builder instance.

    :param scope: The scope to apply
    :type scope: callable or Scope
    :param builder: The builder to apply the scope to
    :type builder: Builder
    """
    # Plain callables take precedence and receive the builder directly
    if callable(scope):
        scope(builder)
        return
    # Scope objects get the builder plus the underlying model
    if isinstance(scope, Scope):
        scope.apply(builder, self.get_model())
constant[
Apply a single scope on the given builder instance.
:param scope: The scope to apply
:type scope: callable or Scope
:param builder: The builder to apply the scope to
:type builder: Builder
]
if call[name[callable], parameter[name[scope]]] begin[:]
call[name[scope], parameter[name[builder]]] | keyword[def] identifier[_apply_scope] ( identifier[self] , identifier[scope] , identifier[builder] ):
literal[string]
keyword[if] identifier[callable] ( identifier[scope] ):
identifier[scope] ( identifier[builder] )
keyword[elif] identifier[isinstance] ( identifier[scope] , identifier[Scope] ):
identifier[scope] . identifier[apply] ( identifier[builder] , identifier[self] . identifier[get_model] ()) | def _apply_scope(self, scope, builder):
"""
Apply a single scope on the given builder instance.
:param scope: The scope to apply
:type scope: callable or Scope
:param builder: The builder to apply the scope to
:type builder: Builder
"""
if callable(scope):
scope(builder) # depends on [control=['if'], data=[]]
elif isinstance(scope, Scope):
scope.apply(builder, self.get_model()) # depends on [control=['if'], data=[]] |
def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param):
"""
variance of the linear source reconstruction in the source plane coordinates,
computed by the diagonal elements of the covariance matrix of the source reconstruction as a sum of the errors
of the basis set.
:param kwargs_source: keyword arguments of source model
:param x_grid: x-axis of positions to compute error map
:param y_grid: y-axis of positions to compute error map
:param cov_param: covariance matrix of liner inversion parameters
:return: diagonal covariance errors at the positions (x_grid, y_grid)
"""
error_map = np.zeros_like(x_grid)
basis_functions, n_source = self.SourceModel.functions_split(x_grid, y_grid, kwargs_source)
basis_functions = np.array(basis_functions)
if cov_param is not None:
for i in range(len(error_map)):
error_map[i] = basis_functions[:, i].T.dot(cov_param[:n_source, :n_source]).dot(basis_functions[:, i])
return error_map | def function[error_map_source, parameter[self, kwargs_source, x_grid, y_grid, cov_param]]:
constant[
variance of the linear source reconstruction in the source plane coordinates,
computed by the diagonal elements of the covariance matrix of the source reconstruction as a sum of the errors
of the basis set.
:param kwargs_source: keyword arguments of source model
:param x_grid: x-axis of positions to compute error map
:param y_grid: y-axis of positions to compute error map
:param cov_param: covariance matrix of liner inversion parameters
:return: diagonal covariance errors at the positions (x_grid, y_grid)
]
variable[error_map] assign[=] call[name[np].zeros_like, parameter[name[x_grid]]]
<ast.Tuple object at 0x7da20c6e5270> assign[=] call[name[self].SourceModel.functions_split, parameter[name[x_grid], name[y_grid], name[kwargs_source]]]
variable[basis_functions] assign[=] call[name[np].array, parameter[name[basis_functions]]]
if compare[name[cov_param] is_not constant[None]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[error_map]]]]]] begin[:]
call[name[error_map]][name[i]] assign[=] call[call[call[name[basis_functions]][tuple[[<ast.Slice object at 0x7da1b04a5c30>, <ast.Name object at 0x7da1b04a57b0>]]].T.dot, parameter[call[name[cov_param]][tuple[[<ast.Slice object at 0x7da1b04a4160>, <ast.Slice object at 0x7da1b04a66b0>]]]]].dot, parameter[call[name[basis_functions]][tuple[[<ast.Slice object at 0x7da1b04a56c0>, <ast.Name object at 0x7da1b04a7490>]]]]]
return[name[error_map]] | keyword[def] identifier[error_map_source] ( identifier[self] , identifier[kwargs_source] , identifier[x_grid] , identifier[y_grid] , identifier[cov_param] ):
literal[string]
identifier[error_map] = identifier[np] . identifier[zeros_like] ( identifier[x_grid] )
identifier[basis_functions] , identifier[n_source] = identifier[self] . identifier[SourceModel] . identifier[functions_split] ( identifier[x_grid] , identifier[y_grid] , identifier[kwargs_source] )
identifier[basis_functions] = identifier[np] . identifier[array] ( identifier[basis_functions] )
keyword[if] identifier[cov_param] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[error_map] )):
identifier[error_map] [ identifier[i] ]= identifier[basis_functions] [:, identifier[i] ]. identifier[T] . identifier[dot] ( identifier[cov_param] [: identifier[n_source] ,: identifier[n_source] ]). identifier[dot] ( identifier[basis_functions] [:, identifier[i] ])
keyword[return] identifier[error_map] | def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param):
"""
variance of the linear source reconstruction in the source plane coordinates,
computed by the diagonal elements of the covariance matrix of the source reconstruction as a sum of the errors
of the basis set.
:param kwargs_source: keyword arguments of source model
:param x_grid: x-axis of positions to compute error map
:param y_grid: y-axis of positions to compute error map
:param cov_param: covariance matrix of liner inversion parameters
:return: diagonal covariance errors at the positions (x_grid, y_grid)
"""
error_map = np.zeros_like(x_grid)
(basis_functions, n_source) = self.SourceModel.functions_split(x_grid, y_grid, kwargs_source)
basis_functions = np.array(basis_functions)
if cov_param is not None:
for i in range(len(error_map)):
error_map[i] = basis_functions[:, i].T.dot(cov_param[:n_source, :n_source]).dot(basis_functions[:, i]) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['cov_param']]
return error_map |
def expressionToAST(ex):
"""Take an expression tree made out of expressions.ExpressionNode,
and convert to an AST tree.
This is necessary as ExpressionNode overrides many methods to act
like a number.
"""
return ASTNode(ex.astType, ex.astKind, ex.value,
[expressionToAST(c) for c in ex.children]) | def function[expressionToAST, parameter[ex]]:
constant[Take an expression tree made out of expressions.ExpressionNode,
and convert to an AST tree.
This is necessary as ExpressionNode overrides many methods to act
like a number.
]
return[call[name[ASTNode], parameter[name[ex].astType, name[ex].astKind, name[ex].value, <ast.ListComp object at 0x7da18bc703a0>]]] | keyword[def] identifier[expressionToAST] ( identifier[ex] ):
literal[string]
keyword[return] identifier[ASTNode] ( identifier[ex] . identifier[astType] , identifier[ex] . identifier[astKind] , identifier[ex] . identifier[value] ,
[ identifier[expressionToAST] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[ex] . identifier[children] ]) | def expressionToAST(ex):
"""Take an expression tree made out of expressions.ExpressionNode,
and convert to an AST tree.
This is necessary as ExpressionNode overrides many methods to act
like a number.
"""
return ASTNode(ex.astType, ex.astKind, ex.value, [expressionToAST(c) for c in ex.children]) |
def GET_AUTH(self, courseid, taskid): # pylint: disable=arguments-differ
""" GET request """
course, task = self.get_course_and_check_rights(courseid, taskid)
return self.page(course, task) | def function[GET_AUTH, parameter[self, courseid, taskid]]:
constant[ GET request ]
<ast.Tuple object at 0x7da20c6a8730> assign[=] call[name[self].get_course_and_check_rights, parameter[name[courseid], name[taskid]]]
return[call[name[self].page, parameter[name[course], name[task]]]] | keyword[def] identifier[GET_AUTH] ( identifier[self] , identifier[courseid] , identifier[taskid] ):
literal[string]
identifier[course] , identifier[task] = identifier[self] . identifier[get_course_and_check_rights] ( identifier[courseid] , identifier[taskid] )
keyword[return] identifier[self] . identifier[page] ( identifier[course] , identifier[task] ) | def GET_AUTH(self, courseid, taskid): # pylint: disable=arguments-differ
' GET request '
(course, task) = self.get_course_and_check_rights(courseid, taskid)
return self.page(course, task) |
def _parse_acl(acl, user, group):
'''
Parse a single ACL rule
'''
comps = acl.split(':')
vals = {}
# What type of rule is this?
vals['type'] = 'acl'
if comps[0] == 'default':
vals['type'] = 'default'
comps.pop(0)
# If a user is not specified, use the owner of the file
if comps[0] == 'user' and not comps[1]:
comps[1] = user
elif comps[0] == 'group' and not comps[1]:
comps[1] = group
vals[comps[0]] = comps[1]
# Set the permissions fields
octal = 0
vals['permissions'] = {}
if 'r' in comps[-1]:
octal += 4
vals['permissions']['read'] = True
else:
vals['permissions']['read'] = False
if 'w' in comps[-1]:
octal += 2
vals['permissions']['write'] = True
else:
vals['permissions']['write'] = False
if 'x' in comps[-1]:
octal += 1
vals['permissions']['execute'] = True
else:
vals['permissions']['execute'] = False
vals['octal'] = octal
return vals | def function[_parse_acl, parameter[acl, user, group]]:
constant[
Parse a single ACL rule
]
variable[comps] assign[=] call[name[acl].split, parameter[constant[:]]]
variable[vals] assign[=] dictionary[[], []]
call[name[vals]][constant[type]] assign[=] constant[acl]
if compare[call[name[comps]][constant[0]] equal[==] constant[default]] begin[:]
call[name[vals]][constant[type]] assign[=] constant[default]
call[name[comps].pop, parameter[constant[0]]]
if <ast.BoolOp object at 0x7da1b1f3dea0> begin[:]
call[name[comps]][constant[1]] assign[=] name[user]
call[name[vals]][call[name[comps]][constant[0]]] assign[=] call[name[comps]][constant[1]]
variable[octal] assign[=] constant[0]
call[name[vals]][constant[permissions]] assign[=] dictionary[[], []]
if compare[constant[r] in call[name[comps]][<ast.UnaryOp object at 0x7da1b1f3e470>]] begin[:]
<ast.AugAssign object at 0x7da1b1f3c790>
call[call[name[vals]][constant[permissions]]][constant[read]] assign[=] constant[True]
if compare[constant[w] in call[name[comps]][<ast.UnaryOp object at 0x7da1b1f3e800>]] begin[:]
<ast.AugAssign object at 0x7da1b1f3c1f0>
call[call[name[vals]][constant[permissions]]][constant[write]] assign[=] constant[True]
if compare[constant[x] in call[name[comps]][<ast.UnaryOp object at 0x7da1b1f3e440>]] begin[:]
<ast.AugAssign object at 0x7da1b1f3c3d0>
call[call[name[vals]][constant[permissions]]][constant[execute]] assign[=] constant[True]
call[name[vals]][constant[octal]] assign[=] name[octal]
return[name[vals]] | keyword[def] identifier[_parse_acl] ( identifier[acl] , identifier[user] , identifier[group] ):
literal[string]
identifier[comps] = identifier[acl] . identifier[split] ( literal[string] )
identifier[vals] ={}
identifier[vals] [ literal[string] ]= literal[string]
keyword[if] identifier[comps] [ literal[int] ]== literal[string] :
identifier[vals] [ literal[string] ]= literal[string]
identifier[comps] . identifier[pop] ( literal[int] )
keyword[if] identifier[comps] [ literal[int] ]== literal[string] keyword[and] keyword[not] identifier[comps] [ literal[int] ]:
identifier[comps] [ literal[int] ]= identifier[user]
keyword[elif] identifier[comps] [ literal[int] ]== literal[string] keyword[and] keyword[not] identifier[comps] [ literal[int] ]:
identifier[comps] [ literal[int] ]= identifier[group]
identifier[vals] [ identifier[comps] [ literal[int] ]]= identifier[comps] [ literal[int] ]
identifier[octal] = literal[int]
identifier[vals] [ literal[string] ]={}
keyword[if] literal[string] keyword[in] identifier[comps] [- literal[int] ]:
identifier[octal] += literal[int]
identifier[vals] [ literal[string] ][ literal[string] ]= keyword[True]
keyword[else] :
identifier[vals] [ literal[string] ][ literal[string] ]= keyword[False]
keyword[if] literal[string] keyword[in] identifier[comps] [- literal[int] ]:
identifier[octal] += literal[int]
identifier[vals] [ literal[string] ][ literal[string] ]= keyword[True]
keyword[else] :
identifier[vals] [ literal[string] ][ literal[string] ]= keyword[False]
keyword[if] literal[string] keyword[in] identifier[comps] [- literal[int] ]:
identifier[octal] += literal[int]
identifier[vals] [ literal[string] ][ literal[string] ]= keyword[True]
keyword[else] :
identifier[vals] [ literal[string] ][ literal[string] ]= keyword[False]
identifier[vals] [ literal[string] ]= identifier[octal]
keyword[return] identifier[vals] | def _parse_acl(acl, user, group):
"""
Parse a single ACL rule
"""
comps = acl.split(':')
vals = {}
# What type of rule is this?
vals['type'] = 'acl'
if comps[0] == 'default':
vals['type'] = 'default'
comps.pop(0) # depends on [control=['if'], data=[]]
# If a user is not specified, use the owner of the file
if comps[0] == 'user' and (not comps[1]):
comps[1] = user # depends on [control=['if'], data=[]]
elif comps[0] == 'group' and (not comps[1]):
comps[1] = group # depends on [control=['if'], data=[]]
vals[comps[0]] = comps[1]
# Set the permissions fields
octal = 0
vals['permissions'] = {}
if 'r' in comps[-1]:
octal += 4
vals['permissions']['read'] = True # depends on [control=['if'], data=[]]
else:
vals['permissions']['read'] = False
if 'w' in comps[-1]:
octal += 2
vals['permissions']['write'] = True # depends on [control=['if'], data=[]]
else:
vals['permissions']['write'] = False
if 'x' in comps[-1]:
octal += 1
vals['permissions']['execute'] = True # depends on [control=['if'], data=[]]
else:
vals['permissions']['execute'] = False
vals['octal'] = octal
return vals |
def getFile(self, file_xml_uri):
""" This will execute cmd to fetch file data from FMServer """
find = re.match('/fmi/xml/cnt/([\w\d.-]+)\.([\w]+)?-*', file_xml_uri)
file_name = find.group(1)
file_extension = find.group(2)
file_binary = self._doRequest(is_file=True, file_xml_uri=file_xml_uri)
return (file_name, file_extension, file_binary) | def function[getFile, parameter[self, file_xml_uri]]:
constant[ This will execute cmd to fetch file data from FMServer ]
variable[find] assign[=] call[name[re].match, parameter[constant[/fmi/xml/cnt/([\w\d.-]+)\.([\w]+)?-*], name[file_xml_uri]]]
variable[file_name] assign[=] call[name[find].group, parameter[constant[1]]]
variable[file_extension] assign[=] call[name[find].group, parameter[constant[2]]]
variable[file_binary] assign[=] call[name[self]._doRequest, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b2850100>, <ast.Name object at 0x7da1b28510f0>, <ast.Name object at 0x7da1b28522c0>]]] | keyword[def] identifier[getFile] ( identifier[self] , identifier[file_xml_uri] ):
literal[string]
identifier[find] = identifier[re] . identifier[match] ( literal[string] , identifier[file_xml_uri] )
identifier[file_name] = identifier[find] . identifier[group] ( literal[int] )
identifier[file_extension] = identifier[find] . identifier[group] ( literal[int] )
identifier[file_binary] = identifier[self] . identifier[_doRequest] ( identifier[is_file] = keyword[True] , identifier[file_xml_uri] = identifier[file_xml_uri] )
keyword[return] ( identifier[file_name] , identifier[file_extension] , identifier[file_binary] ) | def getFile(self, file_xml_uri):
""" This will execute cmd to fetch file data from FMServer """
find = re.match('/fmi/xml/cnt/([\\w\\d.-]+)\\.([\\w]+)?-*', file_xml_uri)
file_name = find.group(1)
file_extension = find.group(2)
file_binary = self._doRequest(is_file=True, file_xml_uri=file_xml_uri)
return (file_name, file_extension, file_binary) |
def swapColors(self):
"""
Swaps the current :py:class:`Color` with the secondary :py:class:`Color`.
:rtype: Nothing.
"""
rgba = self.color.get_0_255()
self.color = self.secondColor
self.secondColor = Color(rgba, '0-255') | def function[swapColors, parameter[self]]:
constant[
Swaps the current :py:class:`Color` with the secondary :py:class:`Color`.
:rtype: Nothing.
]
variable[rgba] assign[=] call[name[self].color.get_0_255, parameter[]]
name[self].color assign[=] name[self].secondColor
name[self].secondColor assign[=] call[name[Color], parameter[name[rgba], constant[0-255]]] | keyword[def] identifier[swapColors] ( identifier[self] ):
literal[string]
identifier[rgba] = identifier[self] . identifier[color] . identifier[get_0_255] ()
identifier[self] . identifier[color] = identifier[self] . identifier[secondColor]
identifier[self] . identifier[secondColor] = identifier[Color] ( identifier[rgba] , literal[string] ) | def swapColors(self):
"""
Swaps the current :py:class:`Color` with the secondary :py:class:`Color`.
:rtype: Nothing.
"""
rgba = self.color.get_0_255()
self.color = self.secondColor
self.secondColor = Color(rgba, '0-255') |
def extract_native_client_tarball(dir):
r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
'''
assert_valid_dir(dir)
target_tarball = os.path.join(dir, 'native_client.tar.xz')
if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0:
return
subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir)
subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir)
os.unlink(os.path.join(dir, 'native_client.tar'))
open(target_tarball, 'w').close() | def function[extract_native_client_tarball, parameter[dir]]:
constant[
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
]
call[name[assert_valid_dir], parameter[name[dir]]]
variable[target_tarball] assign[=] call[name[os].path.join, parameter[name[dir], constant[native_client.tar.xz]]]
if <ast.BoolOp object at 0x7da1b20fb340> begin[:]
return[None]
call[name[subprocess].check_call, parameter[list[[<ast.Constant object at 0x7da1b20f9e10>, <ast.Constant object at 0x7da1b20f8fd0>, <ast.Constant object at 0x7da1b20f8880>]]]]
call[name[subprocess].check_call, parameter[list[[<ast.Constant object at 0x7da1b20fb2e0>, <ast.Constant object at 0x7da1b20fac50>, <ast.Constant object at 0x7da1b20f9540>]]]]
call[name[os].unlink, parameter[call[name[os].path.join, parameter[name[dir], constant[native_client.tar]]]]]
call[call[name[open], parameter[name[target_tarball], constant[w]]].close, parameter[]] | keyword[def] identifier[extract_native_client_tarball] ( identifier[dir] ):
literal[string]
identifier[assert_valid_dir] ( identifier[dir] )
identifier[target_tarball] = identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[target_tarball] ) keyword[and] identifier[os] . identifier[stat] ( identifier[target_tarball] ). identifier[st_size] == literal[int] :
keyword[return]
identifier[subprocess] . identifier[check_call] ([ literal[string] , literal[string] , literal[string] ], identifier[cwd] = identifier[dir] )
identifier[subprocess] . identifier[check_call] ([ literal[string] , literal[string] , literal[string] ], identifier[cwd] = identifier[dir] )
identifier[os] . identifier[unlink] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , literal[string] ))
identifier[open] ( identifier[target_tarball] , literal[string] ). identifier[close] () | def extract_native_client_tarball(dir):
"""
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
"""
assert_valid_dir(dir)
target_tarball = os.path.join(dir, 'native_client.tar.xz')
if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0:
return # depends on [control=['if'], data=[]]
subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir)
subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir)
os.unlink(os.path.join(dir, 'native_client.tar'))
open(target_tarball, 'w').close() |
def _matrix_add_column(matrix, column, default=0):
"""Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary.
"""
height_difference = len(column) - len(matrix)
# The width of the matrix is the length of its longest row.
width = max(len(row) for row in matrix) if matrix else 0
# For now our offset is 0. We may need to shift our column down later.
offset = 0
# If we need extra rows, add them to the top of the matrix.
if height_difference > 0:
for _ in range(height_difference):
matrix.insert(0, [default] * width)
# If the column is shorter, we'll need to shift it down.
if height_difference < 0:
offset = -height_difference
#column = ([default] * offset) + column
for index, value in enumerate(column):
# The row index is the index in the column plus our offset.
row_index = index + offset
row = matrix[row_index]
# If this row is short, pad it with default values.
width_difference = width - len(row)
row.extend([default] * width_difference)
row.append(value) | def function[_matrix_add_column, parameter[matrix, column, default]]:
constant[Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary.
]
variable[height_difference] assign[=] binary_operation[call[name[len], parameter[name[column]]] - call[name[len], parameter[name[matrix]]]]
variable[width] assign[=] <ast.IfExp object at 0x7da1b2780d90>
variable[offset] assign[=] constant[0]
if compare[name[height_difference] greater[>] constant[0]] begin[:]
for taget[name[_]] in starred[call[name[range], parameter[name[height_difference]]]] begin[:]
call[name[matrix].insert, parameter[constant[0], binary_operation[list[[<ast.Name object at 0x7da1b2782350>]] * name[width]]]]
if compare[name[height_difference] less[<] constant[0]] begin[:]
variable[offset] assign[=] <ast.UnaryOp object at 0x7da1b27828c0>
for taget[tuple[[<ast.Name object at 0x7da1b27801f0>, <ast.Name object at 0x7da1b2780490>]]] in starred[call[name[enumerate], parameter[name[column]]]] begin[:]
variable[row_index] assign[=] binary_operation[name[index] + name[offset]]
variable[row] assign[=] call[name[matrix]][name[row_index]]
variable[width_difference] assign[=] binary_operation[name[width] - call[name[len], parameter[name[row]]]]
call[name[row].extend, parameter[binary_operation[list[[<ast.Name object at 0x7da1b2780b80>]] * name[width_difference]]]]
call[name[row].append, parameter[name[value]]] | keyword[def] identifier[_matrix_add_column] ( identifier[matrix] , identifier[column] , identifier[default] = literal[int] ):
literal[string]
identifier[height_difference] = identifier[len] ( identifier[column] )- identifier[len] ( identifier[matrix] )
identifier[width] = identifier[max] ( identifier[len] ( identifier[row] ) keyword[for] identifier[row] keyword[in] identifier[matrix] ) keyword[if] identifier[matrix] keyword[else] literal[int]
identifier[offset] = literal[int]
keyword[if] identifier[height_difference] > literal[int] :
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[height_difference] ):
identifier[matrix] . identifier[insert] ( literal[int] ,[ identifier[default] ]* identifier[width] )
keyword[if] identifier[height_difference] < literal[int] :
identifier[offset] =- identifier[height_difference]
keyword[for] identifier[index] , identifier[value] keyword[in] identifier[enumerate] ( identifier[column] ):
identifier[row_index] = identifier[index] + identifier[offset]
identifier[row] = identifier[matrix] [ identifier[row_index] ]
identifier[width_difference] = identifier[width] - identifier[len] ( identifier[row] )
identifier[row] . identifier[extend] ([ identifier[default] ]* identifier[width_difference] )
identifier[row] . identifier[append] ( identifier[value] ) | def _matrix_add_column(matrix, column, default=0):
"""Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary.
"""
height_difference = len(column) - len(matrix)
# The width of the matrix is the length of its longest row.
width = max((len(row) for row in matrix)) if matrix else 0
# For now our offset is 0. We may need to shift our column down later.
offset = 0
# If we need extra rows, add them to the top of the matrix.
if height_difference > 0:
for _ in range(height_difference):
matrix.insert(0, [default] * width) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['height_difference']]
# If the column is shorter, we'll need to shift it down.
if height_difference < 0:
offset = -height_difference # depends on [control=['if'], data=['height_difference']]
#column = ([default] * offset) + column
for (index, value) in enumerate(column):
# The row index is the index in the column plus our offset.
row_index = index + offset
row = matrix[row_index]
# If this row is short, pad it with default values.
width_difference = width - len(row)
row.extend([default] * width_difference)
row.append(value) # depends on [control=['for'], data=[]] |
def diff(xi, yi, order=1) -> np.ndarray:
"""Take the numerical derivative of a 1D array.
Output is mapped onto the original coordinates using linear interpolation.
Expects monotonic xi values.
Parameters
----------
xi : 1D array-like
Coordinates.
yi : 1D array-like
Values.
order : positive integer (optional)
Order of differentiation.
Returns
-------
1D numpy array
Numerical derivative. Has the same shape as the input arrays.
"""
yi = np.array(yi).copy()
flip = False
if xi[-1] < xi[0]:
xi = np.flipud(xi.copy())
yi = np.flipud(yi)
flip = True
midpoints = (xi[1:] + xi[:-1]) / 2
for _ in range(order):
d = np.diff(yi)
d /= np.diff(xi)
yi = np.interp(xi, midpoints, d)
if flip:
yi = np.flipud(yi)
return yi | def function[diff, parameter[xi, yi, order]]:
constant[Take the numerical derivative of a 1D array.
Output is mapped onto the original coordinates using linear interpolation.
Expects monotonic xi values.
Parameters
----------
xi : 1D array-like
Coordinates.
yi : 1D array-like
Values.
order : positive integer (optional)
Order of differentiation.
Returns
-------
1D numpy array
Numerical derivative. Has the same shape as the input arrays.
]
variable[yi] assign[=] call[call[name[np].array, parameter[name[yi]]].copy, parameter[]]
variable[flip] assign[=] constant[False]
if compare[call[name[xi]][<ast.UnaryOp object at 0x7da1b0e6de70>] less[<] call[name[xi]][constant[0]]] begin[:]
variable[xi] assign[=] call[name[np].flipud, parameter[call[name[xi].copy, parameter[]]]]
variable[yi] assign[=] call[name[np].flipud, parameter[name[yi]]]
variable[flip] assign[=] constant[True]
variable[midpoints] assign[=] binary_operation[binary_operation[call[name[xi]][<ast.Slice object at 0x7da1b0e6d960>] + call[name[xi]][<ast.Slice object at 0x7da1b0e6c9d0>]] / constant[2]]
for taget[name[_]] in starred[call[name[range], parameter[name[order]]]] begin[:]
variable[d] assign[=] call[name[np].diff, parameter[name[yi]]]
<ast.AugAssign object at 0x7da1b0e6d450>
variable[yi] assign[=] call[name[np].interp, parameter[name[xi], name[midpoints], name[d]]]
if name[flip] begin[:]
variable[yi] assign[=] call[name[np].flipud, parameter[name[yi]]]
return[name[yi]] | keyword[def] identifier[diff] ( identifier[xi] , identifier[yi] , identifier[order] = literal[int] )-> identifier[np] . identifier[ndarray] :
literal[string]
identifier[yi] = identifier[np] . identifier[array] ( identifier[yi] ). identifier[copy] ()
identifier[flip] = keyword[False]
keyword[if] identifier[xi] [- literal[int] ]< identifier[xi] [ literal[int] ]:
identifier[xi] = identifier[np] . identifier[flipud] ( identifier[xi] . identifier[copy] ())
identifier[yi] = identifier[np] . identifier[flipud] ( identifier[yi] )
identifier[flip] = keyword[True]
identifier[midpoints] =( identifier[xi] [ literal[int] :]+ identifier[xi] [:- literal[int] ])/ literal[int]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[order] ):
identifier[d] = identifier[np] . identifier[diff] ( identifier[yi] )
identifier[d] /= identifier[np] . identifier[diff] ( identifier[xi] )
identifier[yi] = identifier[np] . identifier[interp] ( identifier[xi] , identifier[midpoints] , identifier[d] )
keyword[if] identifier[flip] :
identifier[yi] = identifier[np] . identifier[flipud] ( identifier[yi] )
keyword[return] identifier[yi] | def diff(xi, yi, order=1) -> np.ndarray:
"""Take the numerical derivative of a 1D array.
Output is mapped onto the original coordinates using linear interpolation.
Expects monotonic xi values.
Parameters
----------
xi : 1D array-like
Coordinates.
yi : 1D array-like
Values.
order : positive integer (optional)
Order of differentiation.
Returns
-------
1D numpy array
Numerical derivative. Has the same shape as the input arrays.
"""
yi = np.array(yi).copy()
flip = False
if xi[-1] < xi[0]:
xi = np.flipud(xi.copy())
yi = np.flipud(yi)
flip = True # depends on [control=['if'], data=[]]
midpoints = (xi[1:] + xi[:-1]) / 2
for _ in range(order):
d = np.diff(yi)
d /= np.diff(xi)
yi = np.interp(xi, midpoints, d) # depends on [control=['for'], data=[]]
if flip:
yi = np.flipud(yi) # depends on [control=['if'], data=[]]
return yi |
def describe_consumer_groups(self, group_ids, group_coordinator_id=None):
"""Describe a set of consumer groups.
Any errors are immediately raised.
:param group_ids: A list of consumer group IDs. These are typically the
group names as strings.
:param group_coordinator_id: The node_id of the groups' coordinator
broker. If set to None, it will query the cluster for each group to
find that group's coordinator. Explicitly specifying this can be
useful for avoiding extra network round trips if you already know
the group coordinator. This is only useful when all the group_ids
have the same coordinator, otherwise it will error. Default: None.
:return: A list of group descriptions. For now the group descriptions
are the raw results from the DescribeGroupsResponse. Long-term, we
plan to change this to return namedtuples as well as decoding the
partition assignments.
"""
group_descriptions = []
version = self._matching_api_version(DescribeGroupsRequest)
for group_id in group_ids:
if group_coordinator_id is not None:
this_groups_coordinator_id = group_coordinator_id
else:
this_groups_coordinator_id = self._find_group_coordinator_id(group_id)
if version <= 1:
# Note: KAFKA-6788 A potential optimization is to group the
# request per coordinator and send one request with a list of
# all consumer groups. Java still hasn't implemented this
# because the error checking is hard to get right when some
# groups error and others don't.
request = DescribeGroupsRequest[version](groups=(group_id,))
response = self._send_request_to_node(this_groups_coordinator_id, request)
assert len(response.groups) == 1
# TODO need to implement converting the response tuple into
# a more accessible interface like a namedtuple and then stop
# hardcoding tuple indices here. Several Java examples,
# including KafkaAdminClient.java
group_description = response.groups[0]
error_code = group_description[0]
error_type = Errors.for_code(error_code)
# Java has the note: KAFKA-6789, we can retry based on the error code
if error_type is not Errors.NoError:
raise error_type(
"Request '{}' failed with response '{}'."
.format(request, response))
# TODO Java checks the group protocol type, and if consumer
# (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes
# the members' partition assignments... that hasn't yet been
# implemented here so just return the raw struct results
group_descriptions.append(group_description)
else:
raise NotImplementedError(
"Support for DescribeGroups v{} has not yet been added to KafkaAdminClient."
.format(version))
return group_descriptions | def function[describe_consumer_groups, parameter[self, group_ids, group_coordinator_id]]:
constant[Describe a set of consumer groups.
Any errors are immediately raised.
:param group_ids: A list of consumer group IDs. These are typically the
group names as strings.
:param group_coordinator_id: The node_id of the groups' coordinator
broker. If set to None, it will query the cluster for each group to
find that group's coordinator. Explicitly specifying this can be
useful for avoiding extra network round trips if you already know
the group coordinator. This is only useful when all the group_ids
have the same coordinator, otherwise it will error. Default: None.
:return: A list of group descriptions. For now the group descriptions
are the raw results from the DescribeGroupsResponse. Long-term, we
plan to change this to return namedtuples as well as decoding the
partition assignments.
]
variable[group_descriptions] assign[=] list[[]]
variable[version] assign[=] call[name[self]._matching_api_version, parameter[name[DescribeGroupsRequest]]]
for taget[name[group_id]] in starred[name[group_ids]] begin[:]
if compare[name[group_coordinator_id] is_not constant[None]] begin[:]
variable[this_groups_coordinator_id] assign[=] name[group_coordinator_id]
if compare[name[version] less_or_equal[<=] constant[1]] begin[:]
variable[request] assign[=] call[call[name[DescribeGroupsRequest]][name[version]], parameter[]]
variable[response] assign[=] call[name[self]._send_request_to_node, parameter[name[this_groups_coordinator_id], name[request]]]
assert[compare[call[name[len], parameter[name[response].groups]] equal[==] constant[1]]]
variable[group_description] assign[=] call[name[response].groups][constant[0]]
variable[error_code] assign[=] call[name[group_description]][constant[0]]
variable[error_type] assign[=] call[name[Errors].for_code, parameter[name[error_code]]]
if compare[name[error_type] is_not name[Errors].NoError] begin[:]
<ast.Raise object at 0x7da1b1c994e0>
call[name[group_descriptions].append, parameter[name[group_description]]]
return[name[group_descriptions]] | keyword[def] identifier[describe_consumer_groups] ( identifier[self] , identifier[group_ids] , identifier[group_coordinator_id] = keyword[None] ):
literal[string]
identifier[group_descriptions] =[]
identifier[version] = identifier[self] . identifier[_matching_api_version] ( identifier[DescribeGroupsRequest] )
keyword[for] identifier[group_id] keyword[in] identifier[group_ids] :
keyword[if] identifier[group_coordinator_id] keyword[is] keyword[not] keyword[None] :
identifier[this_groups_coordinator_id] = identifier[group_coordinator_id]
keyword[else] :
identifier[this_groups_coordinator_id] = identifier[self] . identifier[_find_group_coordinator_id] ( identifier[group_id] )
keyword[if] identifier[version] <= literal[int] :
identifier[request] = identifier[DescribeGroupsRequest] [ identifier[version] ]( identifier[groups] =( identifier[group_id] ,))
identifier[response] = identifier[self] . identifier[_send_request_to_node] ( identifier[this_groups_coordinator_id] , identifier[request] )
keyword[assert] identifier[len] ( identifier[response] . identifier[groups] )== literal[int]
identifier[group_description] = identifier[response] . identifier[groups] [ literal[int] ]
identifier[error_code] = identifier[group_description] [ literal[int] ]
identifier[error_type] = identifier[Errors] . identifier[for_code] ( identifier[error_code] )
keyword[if] identifier[error_type] keyword[is] keyword[not] identifier[Errors] . identifier[NoError] :
keyword[raise] identifier[error_type] (
literal[string]
. identifier[format] ( identifier[request] , identifier[response] ))
identifier[group_descriptions] . identifier[append] ( identifier[group_description] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] (
literal[string]
. identifier[format] ( identifier[version] ))
keyword[return] identifier[group_descriptions] | def describe_consumer_groups(self, group_ids, group_coordinator_id=None):
"""Describe a set of consumer groups.
Any errors are immediately raised.
:param group_ids: A list of consumer group IDs. These are typically the
group names as strings.
:param group_coordinator_id: The node_id of the groups' coordinator
broker. If set to None, it will query the cluster for each group to
find that group's coordinator. Explicitly specifying this can be
useful for avoiding extra network round trips if you already know
the group coordinator. This is only useful when all the group_ids
have the same coordinator, otherwise it will error. Default: None.
:return: A list of group descriptions. For now the group descriptions
are the raw results from the DescribeGroupsResponse. Long-term, we
plan to change this to return namedtuples as well as decoding the
partition assignments.
"""
group_descriptions = []
version = self._matching_api_version(DescribeGroupsRequest)
for group_id in group_ids:
if group_coordinator_id is not None:
this_groups_coordinator_id = group_coordinator_id # depends on [control=['if'], data=['group_coordinator_id']]
else:
this_groups_coordinator_id = self._find_group_coordinator_id(group_id)
if version <= 1:
# Note: KAFKA-6788 A potential optimization is to group the
# request per coordinator and send one request with a list of
# all consumer groups. Java still hasn't implemented this
# because the error checking is hard to get right when some
# groups error and others don't.
request = DescribeGroupsRequest[version](groups=(group_id,))
response = self._send_request_to_node(this_groups_coordinator_id, request)
assert len(response.groups) == 1
# TODO need to implement converting the response tuple into
# a more accessible interface like a namedtuple and then stop
# hardcoding tuple indices here. Several Java examples,
# including KafkaAdminClient.java
group_description = response.groups[0]
error_code = group_description[0]
error_type = Errors.for_code(error_code)
# Java has the note: KAFKA-6789, we can retry based on the error code
if error_type is not Errors.NoError:
raise error_type("Request '{}' failed with response '{}'.".format(request, response)) # depends on [control=['if'], data=['error_type']]
# TODO Java checks the group protocol type, and if consumer
# (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes
# the members' partition assignments... that hasn't yet been
# implemented here so just return the raw struct results
group_descriptions.append(group_description) # depends on [control=['if'], data=['version']]
else:
raise NotImplementedError('Support for DescribeGroups v{} has not yet been added to KafkaAdminClient.'.format(version)) # depends on [control=['for'], data=['group_id']]
return group_descriptions |
def interactive_rewrite_schema(r, df, doc=None):
    """Rebuild the schema for a resource based on a dataframe and re-write the doc,
    but only if running the notebook interactively, not while building"""
    # During a package build the caller's locals contain 'metatab_doc';
    # in that case skip the rewrite entirely.
    if 'metatab_doc' in caller_locals():
        return False
    rewrite_schema(r, df, doc if doc is not None else open_source_package())
    return True
return True | def function[interactive_rewrite_schema, parameter[r, df, doc]]:
constant[Rebuild the schema for a resource based on a dataframe and re-write the doc,
but only if running the notebook interactively, not while building]
if compare[constant[metatab_doc] in call[name[caller_locals], parameter[]]] begin[:]
return[constant[False]]
if compare[name[doc] is constant[None]] begin[:]
variable[doc] assign[=] call[name[open_source_package], parameter[]]
call[name[rewrite_schema], parameter[name[r], name[df], name[doc]]]
return[constant[True]] | keyword[def] identifier[interactive_rewrite_schema] ( identifier[r] , identifier[df] , identifier[doc] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[caller_locals] ():
keyword[return] keyword[False]
keyword[if] identifier[doc] keyword[is] keyword[None] :
identifier[doc] = identifier[open_source_package] ()
identifier[rewrite_schema] ( identifier[r] , identifier[df] , identifier[doc] )
keyword[return] keyword[True] | def interactive_rewrite_schema(r, df, doc=None):
"""Rebuild the schema for a resource based on a dataframe and re-write the doc,
but only if running the notebook interactively, not while building"""
if 'metatab_doc' in caller_locals():
return False # depends on [control=['if'], data=[]]
if doc is None:
doc = open_source_package() # depends on [control=['if'], data=['doc']]
rewrite_schema(r, df, doc)
return True |
def qteRunMacro(self, macroName: str, widgetObj: QtGui.QWidget=None,
                keysequence: QtmacsKeysequence=None):
    """
    Queue a previously registered macro for execution once the
    event loop is idle.

    Macros are queued rather than run immediately so that the event
    loop can refresh every widget between any two macros; running them
    synchronously would let macros observe stale user-interface state
    and cause spurious, hard-to-find bugs.

    |Args|

    * ``macroName`` (**str**): name of macro.
    * ``widgetObj`` (**QWidget**): widget (if any) on which the
      macro should operate.
    * ``keysequence`` (**QtmacsKeysequence**): key sequence that
      triggered the macro.

    |Returns|

    * **None**

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    # Enqueue the macro; qteUpdate ensures the queue is processed once
    # the event loop becomes idle again.
    entry = (macroName, widgetObj, keysequence)
    self._qteMacroQueue.append(entry)
    self.qteUpdate()
constant[
Queue a previously registered macro for execution once the
event loop is idle.
The reason for queuing macros in the first place, instead of
running them straight away, is to ensure that the event loop
updates all the widgets in between any two macros. This will
avoid many spurious and hard to find bugs due to macros
assuming that all user interface elements have been updated
when in fact they were not.
|Args|
* ``macroName`` (**str**): name of macro.
* ``widgetObj`` (**QWidget**): widget (if any) on which the
macro should operate.
* ``keysequence`` (**QtmacsKeysequence**): key sequence that
triggered the macro.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
]
call[name[self]._qteMacroQueue.append, parameter[tuple[[<ast.Name object at 0x7da18bc726e0>, <ast.Name object at 0x7da18bc73310>, <ast.Name object at 0x7da18bc738b0>]]]]
call[name[self].qteUpdate, parameter[]] | keyword[def] identifier[qteRunMacro] ( identifier[self] , identifier[macroName] : identifier[str] , identifier[widgetObj] : identifier[QtGui] . identifier[QWidget] = keyword[None] ,
identifier[keysequence] : identifier[QtmacsKeysequence] = keyword[None] ):
literal[string]
identifier[self] . identifier[_qteMacroQueue] . identifier[append] (( identifier[macroName] , identifier[widgetObj] , identifier[keysequence] ))
identifier[self] . identifier[qteUpdate] () | def qteRunMacro(self, macroName: str, widgetObj: QtGui.QWidget=None, keysequence: QtmacsKeysequence=None):
"""
Queue a previously registered macro for execution once the
event loop is idle.
The reason for queuing macros in the first place, instead of
running them straight away, is to ensure that the event loop
updates all the widgets in between any two macros. This will
avoid many spurious and hard to find bugs due to macros
assuming that all user interface elements have been updated
when in fact they were not.
|Args|
* ``macroName`` (**str**): name of macro.
* ``widgetObj`` (**QWidget**): widget (if any) on which the
macro should operate.
* ``keysequence`` (**QtmacsKeysequence**): key sequence that
triggered the macro.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Add the new macro to the queue and call qteUpdate to ensure
# that the macro is processed once the event loop is idle again.
self._qteMacroQueue.append((macroName, widgetObj, keysequence))
self.qteUpdate() |
def sparkify(series):
    u"""Render <series> as a sparkline string.

    Each value is scaled into the range of available block characters
    (``spark_chars``) and mapped to one character of the result.

    Example:

    >>> sparkify([ 0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1,
    ... 1.9 ])
    u'▁▁▂▄▅▇▇██▆▄▂'

    >>> sparkify([1, 1, -2, 3, -5, 8, -13])
    u'▆▆▅▆▄█▁'

    Raises ValueError if input data cannot be converted to float.
    Raises TypeError if series is not an iterable.
    """
    values = [float(v) for v in series]
    low = min(values)
    span = max(values) - low
    if span == 0.0:
        # Every value is identical: emit a flat baseline of the lowest block.
        return u''.join(spark_chars[0] for _ in values)
    scale = (len(spark_chars) - 1.0) / span
    return u''.join(
        spark_chars[int(round((v - low) * scale))] for v in values
    )
constant[Converts <series> to a sparkline string.
Example:
>>> sparkify([ 0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1,
... 1.9 ])
u'▁▁▂▄▅▇▇██▆▄▂'
>>> sparkify([1, 1, -2, 3, -5, 8, -13])
u'▆▆▅▆▄█▁'
Raises ValueError if input data cannot be converted to float.
Raises TypeError if series is not an iterable.
]
variable[series] assign[=] <ast.ListComp object at 0x7da1b080b040>
variable[minimum] assign[=] call[name[min], parameter[name[series]]]
variable[maximum] assign[=] call[name[max], parameter[name[series]]]
variable[data_range] assign[=] binary_operation[name[maximum] - name[minimum]]
if compare[name[data_range] equal[==] constant[0.0]] begin[:]
return[call[constant[].join, parameter[<ast.ListComp object at 0x7da1b080a2c0>]]]
variable[coefficient] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[spark_chars]]] - constant[1.0]] / name[data_range]]
return[call[constant[].join, parameter[<ast.ListComp object at 0x7da1b080a380>]]] | keyword[def] identifier[sparkify] ( identifier[series] ):
literal[string]
identifier[series] =[ identifier[float] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[series] ]
identifier[minimum] = identifier[min] ( identifier[series] )
identifier[maximum] = identifier[max] ( identifier[series] )
identifier[data_range] = identifier[maximum] - identifier[minimum]
keyword[if] identifier[data_range] == literal[int] :
keyword[return] literal[string] . identifier[join] ([ identifier[spark_chars] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[series] ])
identifier[coefficient] =( identifier[len] ( identifier[spark_chars] )- literal[int] )/ identifier[data_range]
keyword[return] literal[string] . identifier[join] ([
identifier[spark_chars] [
identifier[int] ( identifier[round] (( identifier[x] - identifier[minimum] )* identifier[coefficient] ))
] keyword[for] identifier[x] keyword[in] identifier[series]
]) | def sparkify(series):
u"""Converts <series> to a sparkline string.
Example:
>>> sparkify([ 0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1,
... 1.9 ])
u'▁▁▂▄▅▇▇██▆▄▂'
>>> sparkify([1, 1, -2, 3, -5, 8, -13])
u'▆▆▅▆▄█▁'
Raises ValueError if input data cannot be converted to float.
Raises TypeError if series is not an iterable.
"""
series = [float(i) for i in series]
minimum = min(series)
maximum = max(series)
data_range = maximum - minimum
if data_range == 0.0:
# Graph a baseline if every input value is equal.
return u''.join([spark_chars[0] for i in series]) # depends on [control=['if'], data=[]]
coefficient = (len(spark_chars) - 1.0) / data_range
return u''.join([spark_chars[int(round((x - minimum) * coefficient))] for x in series]) |
def _to_bytes(self, data):
"""
Normalize a text data to bytes (type `bytes`) so that the go bindings can
handle it easily.
"""
# TODO: On Python 3, move this `if` line to the `except` branch
# as the common case will indeed no longer be bytes.
if not isinstance(data, bytes):
try:
return data.encode('utf-8')
except Exception:
return None
return data | def function[_to_bytes, parameter[self, data]]:
constant[
Normalize a text data to bytes (type `bytes`) so that the go bindings can
handle it easily.
]
if <ast.UnaryOp object at 0x7da18bcc8220> begin[:]
<ast.Try object at 0x7da18bccbc10>
return[name[data]] | keyword[def] identifier[_to_bytes] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[bytes] ):
keyword[try] :
keyword[return] identifier[data] . identifier[encode] ( literal[string] )
keyword[except] identifier[Exception] :
keyword[return] keyword[None]
keyword[return] identifier[data] | def _to_bytes(self, data):
"""
Normalize a text data to bytes (type `bytes`) so that the go bindings can
handle it easily.
"""
# TODO: On Python 3, move this `if` line to the `except` branch
# as the common case will indeed no longer be bytes.
if not isinstance(data, bytes):
try:
return data.encode('utf-8') # depends on [control=['try'], data=[]]
except Exception:
return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return data |
def pfLon(jd, lat, lon):
    """ Returns the ecliptic longitude of Pars Fortuna.
    It considers diurnal or nocturnal conditions.
    """
    sun_lon = swe.sweObjectLon(const.SUN, jd)
    moon_lon = swe.sweObjectLon(const.MOON, jd)
    asc_lon = swe.sweHousesLon(jd, lat, lon,
                               const.HOUSES_DEFAULT)[1][0]
    # Day charts use Asc + Moon - Sun; night charts invert Sun and Moon.
    if isDiurnal(jd, lat, lon):
        offset = moon_lon - sun_lon
    else:
        offset = sun_lon - moon_lon
    return angle.norm(asc_lon + offset)
constant[ Returns the ecliptic longitude of Pars Fortuna.
It considers diurnal or nocturnal conditions.
]
variable[sun] assign[=] call[name[swe].sweObjectLon, parameter[name[const].SUN, name[jd]]]
variable[moon] assign[=] call[name[swe].sweObjectLon, parameter[name[const].MOON, name[jd]]]
variable[asc] assign[=] call[call[call[name[swe].sweHousesLon, parameter[name[jd], name[lat], name[lon], name[const].HOUSES_DEFAULT]]][constant[1]]][constant[0]]
if call[name[isDiurnal], parameter[name[jd], name[lat], name[lon]]] begin[:]
return[call[name[angle].norm, parameter[binary_operation[binary_operation[name[asc] + name[moon]] - name[sun]]]]] | keyword[def] identifier[pfLon] ( identifier[jd] , identifier[lat] , identifier[lon] ):
literal[string]
identifier[sun] = identifier[swe] . identifier[sweObjectLon] ( identifier[const] . identifier[SUN] , identifier[jd] )
identifier[moon] = identifier[swe] . identifier[sweObjectLon] ( identifier[const] . identifier[MOON] , identifier[jd] )
identifier[asc] = identifier[swe] . identifier[sweHousesLon] ( identifier[jd] , identifier[lat] , identifier[lon] ,
identifier[const] . identifier[HOUSES_DEFAULT] )[ literal[int] ][ literal[int] ]
keyword[if] identifier[isDiurnal] ( identifier[jd] , identifier[lat] , identifier[lon] ):
keyword[return] identifier[angle] . identifier[norm] ( identifier[asc] + identifier[moon] - identifier[sun] )
keyword[else] :
keyword[return] identifier[angle] . identifier[norm] ( identifier[asc] + identifier[sun] - identifier[moon] ) | def pfLon(jd, lat, lon):
""" Returns the ecliptic longitude of Pars Fortuna.
It considers diurnal or nocturnal conditions.
"""
sun = swe.sweObjectLon(const.SUN, jd)
moon = swe.sweObjectLon(const.MOON, jd)
asc = swe.sweHousesLon(jd, lat, lon, const.HOUSES_DEFAULT)[1][0]
if isDiurnal(jd, lat, lon):
return angle.norm(asc + moon - sun) # depends on [control=['if'], data=[]]
else:
return angle.norm(asc + sun - moon) |
def write_lines(label, lines):
    '''write a list of lines with a header for a section.

    Lines ending with a backslash are continuations: the line that
    follows one is emitted verbatim, without the label prefix.

    Parameters
    ==========
    label: header string prepended to each non-continuation line
    lines: one or more lines to write, with header appended

    Returns
    =======
    list of formatted lines
    '''
    result = []
    continued = False
    for line in lines:
        if continued:
            result.append(line)
        else:
            result.append('%s %s' % (label, line))
        # BUGFIX: reset the flag on every iteration. Previously it was
        # only reset in the else branch, so once a continuation started
        # every subsequent line was (wrongly) treated as a continuation.
        continued = line.endswith('\\')
    return result
constant[write a list of lines with a header for a section.
Parameters
==========
lines: one or more lines to write, with header appended
]
variable[result] assign[=] list[[]]
variable[continued] assign[=] constant[False]
for taget[name[line]] in starred[name[lines]] begin[:]
if name[continued] begin[:]
call[name[result].append, parameter[name[line]]]
variable[continued] assign[=] constant[False]
if call[name[line].endswith, parameter[constant[\]]] begin[:]
variable[continued] assign[=] constant[True]
return[name[result]] | keyword[def] identifier[write_lines] ( identifier[label] , identifier[lines] ):
literal[string]
identifier[result] =[]
identifier[continued] = keyword[False]
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] identifier[continued] :
identifier[result] . identifier[append] ( identifier[line] )
keyword[else] :
identifier[result] . identifier[append] ( literal[string] %( identifier[label] , identifier[line] ))
identifier[continued] = keyword[False]
keyword[if] identifier[line] . identifier[endswith] ( literal[string] ):
identifier[continued] = keyword[True]
keyword[return] identifier[result] | def write_lines(label, lines):
"""write a list of lines with a header for a section.
Parameters
==========
lines: one or more lines to write, with header appended
"""
result = []
continued = False
for line in lines:
if continued:
result.append(line) # depends on [control=['if'], data=[]]
else:
result.append('%s %s' % (label, line))
continued = False
if line.endswith('\\'):
continued = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return result |
def _read_metadata(self, f, endianness):
    """
    Read the metadata for a DAQmx raw segment. This is the raw
    DAQmx-specific portion of the raw data index.

    :param f: file-like object positioned at the start of the DAQmx
        raw data index; consumed sequentially by the ``types`` readers.
    :param endianness: byte-order indicator passed through to every
        ``types`` reader.
    """
    # 0xFFFFFFFF is the sentinel type code used for DAQmx raw data.
    self.data_type = types.tds_data_types[0xFFFFFFFF]
    self.dimension = types.Uint32.read(f, endianness)
    # In TDMS format version 2.0, 1 is the only valid value for dimension
    if self.dimension != 1:
        log.warning("Data dimension is not 1")
    self.chunk_size = types.Uint64.read(f, endianness)

    # size of vector of format changing scalers
    self.scaler_vector_length = types.Uint32.read(f, endianness)
    # Size of the vector
    log.debug("mxDAQ format scaler vector size '%d'" %
              (self.scaler_vector_length,))
    if self.scaler_vector_length > 1:
        log.error("mxDAQ multiple format changing scalers not implemented")

    for idx in range(self.scaler_vector_length):
        # WARNING: This code overwrites previous values with new
        # values. At this time NI provides no documentation on
        # how to use these scalers and sample TDMS files do not
        # include more than one of these scalers.
        self.scaler_data_type_code = types.Uint32.read(f, endianness)
        self.scaler_data_type = (
            types.tds_data_types[self.scaler_data_type_code])

        # more info for format changing scaler
        self.scaler_raw_buffer_index = types.Uint32.read(f, endianness)
        self.scaler_raw_byte_offset = types.Uint32.read(f, endianness)
        self.scaler_sample_format_bitmap = types.Uint32.read(f, endianness)
        self.scale_id = types.Uint32.read(f, endianness)

    # Trailing vector of raw-data widths, one Uint32 per entry.
    raw_data_widths_length = types.Uint32.read(f, endianness)
    self.raw_data_widths = np.zeros(raw_data_widths_length, dtype=np.int32)
    for cnt in range(raw_data_widths_length):
        self.raw_data_widths[cnt] = types.Uint32.read(f, endianness)
constant[
Read the metadata for a DAQmx raw segment. This is the raw
DAQmx-specific portion of the raw data index.
]
name[self].data_type assign[=] call[name[types].tds_data_types][constant[4294967295]]
name[self].dimension assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
if compare[name[self].dimension not_equal[!=] constant[1]] begin[:]
call[name[log].warning, parameter[constant[Data dimension is not 1]]]
name[self].chunk_size assign[=] call[name[types].Uint64.read, parameter[name[f], name[endianness]]]
name[self].scaler_vector_length assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
call[name[log].debug, parameter[binary_operation[constant[mxDAQ format scaler vector size '%d'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b13a9420>]]]]]
if compare[name[self].scaler_vector_length greater[>] constant[1]] begin[:]
call[name[log].error, parameter[constant[mxDAQ multiple format changing scalers not implemented]]]
for taget[name[idx]] in starred[call[name[range], parameter[name[self].scaler_vector_length]]] begin[:]
name[self].scaler_data_type_code assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
name[self].scaler_data_type assign[=] call[name[types].tds_data_types][name[self].scaler_data_type_code]
name[self].scaler_raw_buffer_index assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
name[self].scaler_raw_byte_offset assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
name[self].scaler_sample_format_bitmap assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
name[self].scale_id assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
variable[raw_data_widths_length] assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]]
name[self].raw_data_widths assign[=] call[name[np].zeros, parameter[name[raw_data_widths_length]]]
for taget[name[cnt]] in starred[call[name[range], parameter[name[raw_data_widths_length]]]] begin[:]
call[name[self].raw_data_widths][name[cnt]] assign[=] call[name[types].Uint32.read, parameter[name[f], name[endianness]]] | keyword[def] identifier[_read_metadata] ( identifier[self] , identifier[f] , identifier[endianness] ):
literal[string]
identifier[self] . identifier[data_type] = identifier[types] . identifier[tds_data_types] [ literal[int] ]
identifier[self] . identifier[dimension] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
keyword[if] identifier[self] . identifier[dimension] != literal[int] :
identifier[log] . identifier[warning] ( literal[string] )
identifier[self] . identifier[chunk_size] = identifier[types] . identifier[Uint64] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[self] . identifier[scaler_vector_length] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[log] . identifier[debug] ( literal[string] %
( identifier[self] . identifier[scaler_vector_length] ,))
keyword[if] identifier[self] . identifier[scaler_vector_length] > literal[int] :
identifier[log] . identifier[error] ( literal[string] )
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[self] . identifier[scaler_vector_length] ):
identifier[self] . identifier[scaler_data_type_code] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[self] . identifier[scaler_data_type] =(
identifier[types] . identifier[tds_data_types] [ identifier[self] . identifier[scaler_data_type_code] ])
identifier[self] . identifier[scaler_raw_buffer_index] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[self] . identifier[scaler_raw_byte_offset] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[self] . identifier[scaler_sample_format_bitmap] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[self] . identifier[scale_id] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[raw_data_widths_length] = identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] )
identifier[self] . identifier[raw_data_widths] = identifier[np] . identifier[zeros] ( identifier[raw_data_widths_length] , identifier[dtype] = identifier[np] . identifier[int32] )
keyword[for] identifier[cnt] keyword[in] identifier[range] ( identifier[raw_data_widths_length] ):
identifier[self] . identifier[raw_data_widths] [ identifier[cnt] ]= identifier[types] . identifier[Uint32] . identifier[read] ( identifier[f] , identifier[endianness] ) | def _read_metadata(self, f, endianness):
"""
Read the metadata for a DAQmx raw segment. This is the raw
DAQmx-specific portion of the raw data index.
"""
self.data_type = types.tds_data_types[4294967295]
self.dimension = types.Uint32.read(f, endianness)
# In TDMS format version 2.0, 1 is the only valid value for dimension
if self.dimension != 1:
log.warning('Data dimension is not 1') # depends on [control=['if'], data=[]]
self.chunk_size = types.Uint64.read(f, endianness)
# size of vector of format changing scalers
self.scaler_vector_length = types.Uint32.read(f, endianness)
# Size of the vector
log.debug("mxDAQ format scaler vector size '%d'" % (self.scaler_vector_length,))
if self.scaler_vector_length > 1:
log.error('mxDAQ multiple format changing scalers not implemented') # depends on [control=['if'], data=[]]
for idx in range(self.scaler_vector_length):
# WARNING: This code overwrites previous values with new
# values. At this time NI provides no documentation on
# how to use these scalers and sample TDMS files do not
# include more than one of these scalers.
self.scaler_data_type_code = types.Uint32.read(f, endianness)
self.scaler_data_type = types.tds_data_types[self.scaler_data_type_code]
# more info for format changing scaler
self.scaler_raw_buffer_index = types.Uint32.read(f, endianness)
self.scaler_raw_byte_offset = types.Uint32.read(f, endianness)
self.scaler_sample_format_bitmap = types.Uint32.read(f, endianness)
self.scale_id = types.Uint32.read(f, endianness) # depends on [control=['for'], data=[]]
raw_data_widths_length = types.Uint32.read(f, endianness)
self.raw_data_widths = np.zeros(raw_data_widths_length, dtype=np.int32)
for cnt in range(raw_data_widths_length):
self.raw_data_widths[cnt] = types.Uint32.read(f, endianness) # depends on [control=['for'], data=['cnt']] |
def getCert(username, password,
            certHost=_SERVER,
            certfile=None,
            certQuery=_PROXY):
    """Access the cadc certificate server.

    Fetches a certificate over HTTP basic auth and writes it to
    ``certfile`` (a ``NamedTemporaryFile`` is created when ``None``).

    :param username: account name sent via HTTP basic auth.
    :param password: matching password.
    :param certHost: hostname of the certificate server.
    :param certfile: writable file object for the certificate, or None.
    :param certQuery: URL path of the certificate endpoint.
    :returns: the file object on success, ``False`` if the HTTP request
        fails.
        NOTE(review): mixed return types (file vs. bool) — callers must
        truth-test the result before using it as a file.
    """
    if certfile is None:
        certfile = tempfile.NamedTemporaryFile()

    # Add the username and password.
    # If we knew the realm, we could use it instead of ``None``.
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    top_level_url = "http://" + certHost
    logging.debug(top_level_url)
    password_mgr.add_password(None, top_level_url, username, password)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    logging.debug(str(handler))

    # create "opener" (OpenerDirector instance)
    opener = urllib2.build_opener(handler)

    # Install the opener globally for this process.
    urllib2.install_opener(opener)

    # build the url that will 'GET' a certificate using user_id/password info
    url = "http://" + certHost + certQuery
    logging.debug(url)
    r = None
    try:
        r = opener.open(url)
    except urllib2.HTTPError as e:
        # Authentication/lookup failure: log and signal with False.
        logging.debug(url)
        logging.debug(str(e))
        return False
    logging.debug(str(r))
    if r is not None:
        # Stream the response body into certfile in chunks.
        while True:
            buf = r.read()
            logging.debug(buf)
            if not buf:
                break
            certfile.write(buf)
        r.close()
    # NOTE(review): certfile is not flushed/rewound here — presumably the
    # caller handles that; confirm against call sites.
    return certfile
constant[Access the cadc certificate server.]
if compare[name[certfile] is constant[None]] begin[:]
variable[certfile] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]]
variable[password_mgr] assign[=] call[name[urllib2].HTTPPasswordMgrWithDefaultRealm, parameter[]]
variable[top_level_url] assign[=] binary_operation[constant[http://] + name[certHost]]
call[name[logging].debug, parameter[name[top_level_url]]]
call[name[password_mgr].add_password, parameter[constant[None], name[top_level_url], name[username], name[password]]]
variable[handler] assign[=] call[name[urllib2].HTTPBasicAuthHandler, parameter[name[password_mgr]]]
call[name[logging].debug, parameter[call[name[str], parameter[name[handler]]]]]
variable[opener] assign[=] call[name[urllib2].build_opener, parameter[name[handler]]]
call[name[urllib2].install_opener, parameter[name[opener]]]
variable[url] assign[=] binary_operation[binary_operation[constant[http://] + name[certHost]] + name[certQuery]]
call[name[logging].debug, parameter[name[url]]]
variable[r] assign[=] constant[None]
<ast.Try object at 0x7da1b191ffd0>
call[name[logging].debug, parameter[call[name[str], parameter[name[r]]]]]
if compare[name[r] is_not constant[None]] begin[:]
while constant[True] begin[:]
variable[buf] assign[=] call[name[r].read, parameter[]]
call[name[logging].debug, parameter[name[buf]]]
if <ast.UnaryOp object at 0x7da1b1a3f070> begin[:]
break
call[name[certfile].write, parameter[name[buf]]]
call[name[r].close, parameter[]]
return[name[certfile]] | keyword[def] identifier[getCert] ( identifier[username] , identifier[password] ,
identifier[certHost] = identifier[_SERVER] ,
identifier[certfile] = keyword[None] ,
identifier[certQuery] = identifier[_PROXY] ):
literal[string]
keyword[if] identifier[certfile] keyword[is] keyword[None] :
identifier[certfile] = identifier[tempfile] . identifier[NamedTemporaryFile] ()
identifier[password_mgr] = identifier[urllib2] . identifier[HTTPPasswordMgrWithDefaultRealm] ()
identifier[top_level_url] = literal[string] + identifier[certHost]
identifier[logging] . identifier[debug] ( identifier[top_level_url] )
identifier[password_mgr] . identifier[add_password] ( keyword[None] , identifier[top_level_url] , identifier[username] , identifier[password] )
identifier[handler] = identifier[urllib2] . identifier[HTTPBasicAuthHandler] ( identifier[password_mgr] )
identifier[logging] . identifier[debug] ( identifier[str] ( identifier[handler] ))
identifier[opener] = identifier[urllib2] . identifier[build_opener] ( identifier[handler] )
identifier[urllib2] . identifier[install_opener] ( identifier[opener] )
identifier[url] = literal[string] + identifier[certHost] + identifier[certQuery]
identifier[logging] . identifier[debug] ( identifier[url] )
identifier[r] = keyword[None]
keyword[try] :
identifier[r] = identifier[opener] . identifier[open] ( identifier[url] )
keyword[except] identifier[urllib2] . identifier[HTTPError] keyword[as] identifier[e] :
identifier[logging] . identifier[debug] ( identifier[url] )
identifier[logging] . identifier[debug] ( identifier[str] ( identifier[e] ))
keyword[return] keyword[False]
identifier[logging] . identifier[debug] ( identifier[str] ( identifier[r] ))
keyword[if] identifier[r] keyword[is] keyword[not] keyword[None] :
keyword[while] keyword[True] :
identifier[buf] = identifier[r] . identifier[read] ()
identifier[logging] . identifier[debug] ( identifier[buf] )
keyword[if] keyword[not] identifier[buf] :
keyword[break]
identifier[certfile] . identifier[write] ( identifier[buf] )
identifier[r] . identifier[close] ()
keyword[return] identifier[certfile] | def getCert(username, password, certHost=_SERVER, certfile=None, certQuery=_PROXY):
"""Access the cadc certificate server."""
if certfile is None:
certfile = tempfile.NamedTemporaryFile() # depends on [control=['if'], data=['certfile']]
# Add the username and password.
# If we knew the realm, we could use it instead of ``None``.
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
top_level_url = 'http://' + certHost
logging.debug(top_level_url)
password_mgr.add_password(None, top_level_url, username, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
logging.debug(str(handler))
# create "opener" (OpenerDirector instance)
opener = urllib2.build_opener(handler) # Install the opener.
urllib2.install_opener(opener)
# buuld the url that with 'GET' a certificat using user_id/password info
url = 'http://' + certHost + certQuery
logging.debug(url)
r = None
try:
r = opener.open(url) # depends on [control=['try'], data=[]]
except urllib2.HTTPError as e:
logging.debug(url)
logging.debug(str(e))
return False # depends on [control=['except'], data=['e']]
logging.debug(str(r))
if r is not None:
while True:
buf = r.read()
logging.debug(buf)
if not buf:
break # depends on [control=['if'], data=[]]
certfile.write(buf) # depends on [control=['while'], data=[]]
r.close() # depends on [control=['if'], data=['r']]
return certfile |
def doc_paragraph(s, indent=0):
'''Takes in a string without wrapping corresponding to a paragraph,
and returns a version of that string wrapped to be at most 80
characters in length on each line.
If indent is given, ensures each line is indented to that number
of spaces.
'''
return '\n'.join([' '*indent + l for l in wrap(s, width=80-indent)]) | def function[doc_paragraph, parameter[s, indent]]:
constant[Takes in a string without wrapping corresponding to a paragraph,
and returns a version of that string wrapped to be at most 80
characters in length on each line.
If indent is given, ensures each line is indented to that number
of spaces.
]
return[call[constant[
].join, parameter[<ast.ListComp object at 0x7da20c7ca1a0>]]] | keyword[def] identifier[doc_paragraph] ( identifier[s] , identifier[indent] = literal[int] ):
literal[string]
keyword[return] literal[string] . identifier[join] ([ literal[string] * identifier[indent] + identifier[l] keyword[for] identifier[l] keyword[in] identifier[wrap] ( identifier[s] , identifier[width] = literal[int] - identifier[indent] )]) | def doc_paragraph(s, indent=0):
"""Takes in a string without wrapping corresponding to a paragraph,
and returns a version of that string wrapped to be at most 80
characters in length on each line.
If indent is given, ensures each line is indented to that number
of spaces.
"""
return '\n'.join([' ' * indent + l for l in wrap(s, width=80 - indent)]) |
def remove_team(name, profile="github"):
'''
Remove a github team.
name
The name of the team to be removed.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_team 'team_name'
.. versionadded:: 2016.11.0
'''
team_info = get_team(name, profile=profile)
if not team_info:
log.error('Team %s to be removed does not exist.', name)
return False
try:
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
team = organization.get_team(team_info['id'])
team.delete()
return list_teams(ignore_cache=True, profile=profile).get(name) is None
except github.GithubException:
log.exception('Error deleting a team')
return False | def function[remove_team, parameter[name, profile]]:
constant[
Remove a github team.
name
The name of the team to be removed.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_team 'team_name'
.. versionadded:: 2016.11.0
]
variable[team_info] assign[=] call[name[get_team], parameter[name[name]]]
if <ast.UnaryOp object at 0x7da1b1c7eb30> begin[:]
call[name[log].error, parameter[constant[Team %s to be removed does not exist.], name[name]]]
return[constant[False]]
<ast.Try object at 0x7da1b1c7feb0> | keyword[def] identifier[remove_team] ( identifier[name] , identifier[profile] = literal[string] ):
literal[string]
identifier[team_info] = identifier[get_team] ( identifier[name] , identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[team_info] :
identifier[log] . identifier[error] ( literal[string] , identifier[name] )
keyword[return] keyword[False]
keyword[try] :
identifier[client] = identifier[_get_client] ( identifier[profile] )
identifier[organization] = identifier[client] . identifier[get_organization] (
identifier[_get_config_value] ( identifier[profile] , literal[string] )
)
identifier[team] = identifier[organization] . identifier[get_team] ( identifier[team_info] [ literal[string] ])
identifier[team] . identifier[delete] ()
keyword[return] identifier[list_teams] ( identifier[ignore_cache] = keyword[True] , identifier[profile] = identifier[profile] ). identifier[get] ( identifier[name] ) keyword[is] keyword[None]
keyword[except] identifier[github] . identifier[GithubException] :
identifier[log] . identifier[exception] ( literal[string] )
keyword[return] keyword[False] | def remove_team(name, profile='github'):
"""
Remove a github team.
name
The name of the team to be removed.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_team 'team_name'
.. versionadded:: 2016.11.0
"""
team_info = get_team(name, profile=profile)
if not team_info:
log.error('Team %s to be removed does not exist.', name)
return False # depends on [control=['if'], data=[]]
try:
client = _get_client(profile)
organization = client.get_organization(_get_config_value(profile, 'org_name'))
team = organization.get_team(team_info['id'])
team.delete()
return list_teams(ignore_cache=True, profile=profile).get(name) is None # depends on [control=['try'], data=[]]
except github.GithubException:
log.exception('Error deleting a team')
return False # depends on [control=['except'], data=[]] |
def update(self, **data):
"""
Update records in the table with +data+. Often combined with `where`,
as it acts on all records in the table unless restricted.
ex)
>>> Repo("foos").update(name="bar")
UPDATE foos SET name = "bar"
"""
data = data.items()
update_command_arg = ", ".join("{} = ?".format(entry[0])
for entry in data)
cmd = "update {table} set {update_command_arg} {where_clause}".format(
update_command_arg=update_command_arg,
where_clause=self.where_clause,
table=self.table_name).rstrip()
Repo.db.execute(cmd, [entry[1] for entry in data] + self.where_values) | def function[update, parameter[self]]:
constant[
Update records in the table with +data+. Often combined with `where`,
as it acts on all records in the table unless restricted.
ex)
>>> Repo("foos").update(name="bar")
UPDATE foos SET name = "bar"
]
variable[data] assign[=] call[name[data].items, parameter[]]
variable[update_command_arg] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da204566590>]]
variable[cmd] assign[=] call[call[constant[update {table} set {update_command_arg} {where_clause}].format, parameter[]].rstrip, parameter[]]
call[name[Repo].db.execute, parameter[name[cmd], binary_operation[<ast.ListComp object at 0x7da204565510> + name[self].where_values]]] | keyword[def] identifier[update] ( identifier[self] ,** identifier[data] ):
literal[string]
identifier[data] = identifier[data] . identifier[items] ()
identifier[update_command_arg] = literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[entry] [ literal[int] ])
keyword[for] identifier[entry] keyword[in] identifier[data] )
identifier[cmd] = literal[string] . identifier[format] (
identifier[update_command_arg] = identifier[update_command_arg] ,
identifier[where_clause] = identifier[self] . identifier[where_clause] ,
identifier[table] = identifier[self] . identifier[table_name] ). identifier[rstrip] ()
identifier[Repo] . identifier[db] . identifier[execute] ( identifier[cmd] ,[ identifier[entry] [ literal[int] ] keyword[for] identifier[entry] keyword[in] identifier[data] ]+ identifier[self] . identifier[where_values] ) | def update(self, **data):
"""
Update records in the table with +data+. Often combined with `where`,
as it acts on all records in the table unless restricted.
ex)
>>> Repo("foos").update(name="bar")
UPDATE foos SET name = "bar"
"""
data = data.items()
update_command_arg = ', '.join(('{} = ?'.format(entry[0]) for entry in data))
cmd = 'update {table} set {update_command_arg} {where_clause}'.format(update_command_arg=update_command_arg, where_clause=self.where_clause, table=self.table_name).rstrip()
Repo.db.execute(cmd, [entry[1] for entry in data] + self.where_values) |
def fnv(data, hval_init, fnv_prime, fnv_size):
"""
Core FNV hash algorithm used in FNV0 and FNV1.
"""
assert isinstance(data, bytes)
hval = hval_init
for byte in data:
hval = (hval * fnv_prime) % fnv_size
hval = hval ^ _get_byte(byte)
return hval | def function[fnv, parameter[data, hval_init, fnv_prime, fnv_size]]:
constant[
Core FNV hash algorithm used in FNV0 and FNV1.
]
assert[call[name[isinstance], parameter[name[data], name[bytes]]]]
variable[hval] assign[=] name[hval_init]
for taget[name[byte]] in starred[name[data]] begin[:]
variable[hval] assign[=] binary_operation[binary_operation[name[hval] * name[fnv_prime]] <ast.Mod object at 0x7da2590d6920> name[fnv_size]]
variable[hval] assign[=] binary_operation[name[hval] <ast.BitXor object at 0x7da2590d6b00> call[name[_get_byte], parameter[name[byte]]]]
return[name[hval]] | keyword[def] identifier[fnv] ( identifier[data] , identifier[hval_init] , identifier[fnv_prime] , identifier[fnv_size] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[data] , identifier[bytes] )
identifier[hval] = identifier[hval_init]
keyword[for] identifier[byte] keyword[in] identifier[data] :
identifier[hval] =( identifier[hval] * identifier[fnv_prime] )% identifier[fnv_size]
identifier[hval] = identifier[hval] ^ identifier[_get_byte] ( identifier[byte] )
keyword[return] identifier[hval] | def fnv(data, hval_init, fnv_prime, fnv_size):
"""
Core FNV hash algorithm used in FNV0 and FNV1.
"""
assert isinstance(data, bytes)
hval = hval_init
for byte in data:
hval = hval * fnv_prime % fnv_size
hval = hval ^ _get_byte(byte) # depends on [control=['for'], data=['byte']]
return hval |
async def on_closing_async(self, reason):
"""
Overides partition pump on closing.
:param reason: The reason for the shutdown.
:type reason: str
"""
self.partition_receiver.eh_partition_pump.set_pump_status("Errored")
try:
await self.running
except TypeError:
_logger.debug("No partition pump running.")
except Exception as err: # pylint: disable=broad-except
_logger.info("Error on closing partition pump: %r", err)
await self.clean_up_clients_async() | <ast.AsyncFunctionDef object at 0x7da2046225f0> | keyword[async] keyword[def] identifier[on_closing_async] ( identifier[self] , identifier[reason] ):
literal[string]
identifier[self] . identifier[partition_receiver] . identifier[eh_partition_pump] . identifier[set_pump_status] ( literal[string] )
keyword[try] :
keyword[await] identifier[self] . identifier[running]
keyword[except] identifier[TypeError] :
identifier[_logger] . identifier[debug] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[_logger] . identifier[info] ( literal[string] , identifier[err] )
keyword[await] identifier[self] . identifier[clean_up_clients_async] () | async def on_closing_async(self, reason):
"""
Overides partition pump on closing.
:param reason: The reason for the shutdown.
:type reason: str
"""
self.partition_receiver.eh_partition_pump.set_pump_status('Errored')
try:
await self.running # depends on [control=['try'], data=[]]
except TypeError:
_logger.debug('No partition pump running.') # depends on [control=['except'], data=[]]
except Exception as err: # pylint: disable=broad-except
_logger.info('Error on closing partition pump: %r', err) # depends on [control=['except'], data=['err']]
await self.clean_up_clients_async() |
def qteKillApplet(self, appletID: str):
"""
Destroy the applet with ID ``appletID``.
This method removes ``appletID`` from Qtmacs permanently - no
questions asked. It is the responsibility of the (macro)
programmer to use it responsibly.
If the applet was visible then the method also takes care of
replacing with the next invisible applet, if one is available.
If ``appletID`` does not refer to a valid applet then nothing
happens.
|Args|
* ``appletID`` (**str**): name of applet to be destroyed.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Compile list of all applet IDs.
ID_list = [_.qteAppletID() for _ in self._qteAppletList]
if appletID not in ID_list:
# Do nothing if the applet does not exist.
return
else:
# Get a reference to the actual applet object based on the
# name.
idx = ID_list.index(appletID)
appObj = self._qteAppletList[idx]
# Mini applets are killed with a special method.
if self.qteIsMiniApplet(appObj):
self.qteKillMiniApplet()
return
# Inform the applet that it is about to be killed.
appObj.qteToBeKilled()
# Determine the window of the applet.
window = appObj.qteParentWindow()
# Get the previous invisible applet (*may* come in handy a few
# lines below).
newApplet = self.qteNextApplet(numSkip=-1, skipInvisible=False,
skipVisible=True)
# If there is no invisible applet available, or the only available
# applet is the one to be killed, then set newApplet to None.
if (newApplet is None) or (newApplet is appObj):
newApplet = None
else:
self.qteReplaceAppletInLayout(newApplet, appObj, window)
# Ensure that _qteActiveApplet does not point to the applet
# to be killed as it will otherwise result in a dangling
# pointer.
if self._qteActiveApplet is appObj:
self._qteActiveApplet = newApplet
# Remove the applet object from the applet list.
self.qteLogger.debug('Kill applet: <b>{}</b>'.format(appletID))
self._qteAppletList.remove(appObj)
# Close the applet and schedule it for destruction. Explicitly
# call the sip.delete() method to ensure that all signals are
# *immediately* disconnected, as otherwise there is a good
# chance that Qtmacs segfaults if Python/Qt thinks the slots
# are still connected when really the object does not exist
# anymore.
appObj.close()
sip.delete(appObj) | def function[qteKillApplet, parameter[self, appletID]]:
constant[
Destroy the applet with ID ``appletID``.
This method removes ``appletID`` from Qtmacs permanently - no
questions asked. It is the responsibility of the (macro)
programmer to use it responsibly.
If the applet was visible then the method also takes care of
replacing with the next invisible applet, if one is available.
If ``appletID`` does not refer to a valid applet then nothing
happens.
|Args|
* ``appletID`` (**str**): name of applet to be destroyed.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
]
variable[ID_list] assign[=] <ast.ListComp object at 0x7da1b23e76a0>
if compare[name[appletID] <ast.NotIn object at 0x7da2590d7190> name[ID_list]] begin[:]
return[None]
if call[name[self].qteIsMiniApplet, parameter[name[appObj]]] begin[:]
call[name[self].qteKillMiniApplet, parameter[]]
return[None]
call[name[appObj].qteToBeKilled, parameter[]]
variable[window] assign[=] call[name[appObj].qteParentWindow, parameter[]]
variable[newApplet] assign[=] call[name[self].qteNextApplet, parameter[]]
if <ast.BoolOp object at 0x7da1b23e5480> begin[:]
variable[newApplet] assign[=] constant[None]
if compare[name[self]._qteActiveApplet is name[appObj]] begin[:]
name[self]._qteActiveApplet assign[=] name[newApplet]
call[name[self].qteLogger.debug, parameter[call[constant[Kill applet: <b>{}</b>].format, parameter[name[appletID]]]]]
call[name[self]._qteAppletList.remove, parameter[name[appObj]]]
call[name[appObj].close, parameter[]]
call[name[sip].delete, parameter[name[appObj]]] | keyword[def] identifier[qteKillApplet] ( identifier[self] , identifier[appletID] : identifier[str] ):
literal[string]
identifier[ID_list] =[ identifier[_] . identifier[qteAppletID] () keyword[for] identifier[_] keyword[in] identifier[self] . identifier[_qteAppletList] ]
keyword[if] identifier[appletID] keyword[not] keyword[in] identifier[ID_list] :
keyword[return]
keyword[else] :
identifier[idx] = identifier[ID_list] . identifier[index] ( identifier[appletID] )
identifier[appObj] = identifier[self] . identifier[_qteAppletList] [ identifier[idx] ]
keyword[if] identifier[self] . identifier[qteIsMiniApplet] ( identifier[appObj] ):
identifier[self] . identifier[qteKillMiniApplet] ()
keyword[return]
identifier[appObj] . identifier[qteToBeKilled] ()
identifier[window] = identifier[appObj] . identifier[qteParentWindow] ()
identifier[newApplet] = identifier[self] . identifier[qteNextApplet] ( identifier[numSkip] =- literal[int] , identifier[skipInvisible] = keyword[False] ,
identifier[skipVisible] = keyword[True] )
keyword[if] ( identifier[newApplet] keyword[is] keyword[None] ) keyword[or] ( identifier[newApplet] keyword[is] identifier[appObj] ):
identifier[newApplet] = keyword[None]
keyword[else] :
identifier[self] . identifier[qteReplaceAppletInLayout] ( identifier[newApplet] , identifier[appObj] , identifier[window] )
keyword[if] identifier[self] . identifier[_qteActiveApplet] keyword[is] identifier[appObj] :
identifier[self] . identifier[_qteActiveApplet] = identifier[newApplet]
identifier[self] . identifier[qteLogger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[appletID] ))
identifier[self] . identifier[_qteAppletList] . identifier[remove] ( identifier[appObj] )
identifier[appObj] . identifier[close] ()
identifier[sip] . identifier[delete] ( identifier[appObj] ) | def qteKillApplet(self, appletID: str):
"""
Destroy the applet with ID ``appletID``.
This method removes ``appletID`` from Qtmacs permanently - no
questions asked. It is the responsibility of the (macro)
programmer to use it responsibly.
If the applet was visible then the method also takes care of
replacing with the next invisible applet, if one is available.
If ``appletID`` does not refer to a valid applet then nothing
happens.
|Args|
* ``appletID`` (**str**): name of applet to be destroyed.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Compile list of all applet IDs.
ID_list = [_.qteAppletID() for _ in self._qteAppletList]
if appletID not in ID_list:
# Do nothing if the applet does not exist.
return # depends on [control=['if'], data=[]]
else:
# Get a reference to the actual applet object based on the
# name.
idx = ID_list.index(appletID)
appObj = self._qteAppletList[idx]
# Mini applets are killed with a special method.
if self.qteIsMiniApplet(appObj):
self.qteKillMiniApplet()
return # depends on [control=['if'], data=[]]
# Inform the applet that it is about to be killed.
appObj.qteToBeKilled()
# Determine the window of the applet.
window = appObj.qteParentWindow()
# Get the previous invisible applet (*may* come in handy a few
# lines below).
newApplet = self.qteNextApplet(numSkip=-1, skipInvisible=False, skipVisible=True)
# If there is no invisible applet available, or the only available
# applet is the one to be killed, then set newApplet to None.
if newApplet is None or newApplet is appObj:
newApplet = None # depends on [control=['if'], data=[]]
else:
self.qteReplaceAppletInLayout(newApplet, appObj, window)
# Ensure that _qteActiveApplet does not point to the applet
# to be killed as it will otherwise result in a dangling
# pointer.
if self._qteActiveApplet is appObj:
self._qteActiveApplet = newApplet # depends on [control=['if'], data=[]]
# Remove the applet object from the applet list.
self.qteLogger.debug('Kill applet: <b>{}</b>'.format(appletID))
self._qteAppletList.remove(appObj)
# Close the applet and schedule it for destruction. Explicitly
# call the sip.delete() method to ensure that all signals are
# *immediately* disconnected, as otherwise there is a good
# chance that Qtmacs segfaults if Python/Qt thinks the slots
# are still connected when really the object does not exist
# anymore.
appObj.close()
sip.delete(appObj) |
def interp_value(mass, age, feh, icol,
grid, mass_col, ages, fehs, grid_Ns):
# return_box):
"""mass, age, feh are *single values* at which values are desired
icol is the column index of desired value
grid is nfeh x nage x max(nmass) x ncols array
mass_col is the column index of mass
ages is grid of ages
fehs is grid of fehs
grid_Ns keeps track of nmass in each slice (beyond this are nans)
"""
Nage = len(ages)
Nfeh = len(fehs)
ifeh = searchsorted(fehs, Nfeh, feh)
iage = searchsorted(ages, Nage, age)
pts = np.zeros((8,3))
vals = np.zeros(8)
i_f = ifeh - 1
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[0, 0] = grid[i_f, i_a, imass, mass_col]
pts[0, 1] = ages[i_a]
pts[0, 2] = fehs[i_f]
vals[0] = grid[i_f, i_a, imass, icol]
pts[1, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[1, 1] = ages[i_a]
pts[1, 2] = fehs[i_f]
vals[1] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh - 1
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[2, 0] = grid[i_f, i_a, imass, mass_col]
pts[2, 1] = ages[i_a]
pts[2, 2] = fehs[i_f]
vals[2] = grid[i_f, i_a, imass, icol]
pts[3, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[3, 1] = ages[i_a]
pts[3, 2] = fehs[i_f]
vals[3] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[4, 0] = grid[i_f, i_a, imass, mass_col]
pts[4, 1] = ages[i_a]
pts[4, 2] = fehs[i_f]
vals[4] = grid[i_f, i_a, imass, icol]
pts[5, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[5, 1] = ages[i_a]
pts[5, 2] = fehs[i_f]
vals[5] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[6, 0] = grid[i_f, i_a, imass, mass_col]
pts[6, 1] = ages[i_a]
pts[6, 2] = fehs[i_f]
vals[6] = grid[i_f, i_a, imass, icol]
pts[7, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[7, 1] = ages[i_a]
pts[7, 2] = fehs[i_f]
vals[7] = grid[i_f, i_a, imass-1, icol]
# if return_box:
# return pts, vals
# else:
return interp_box(mass, age, feh, pts, vals) | def function[interp_value, parameter[mass, age, feh, icol, grid, mass_col, ages, fehs, grid_Ns]]:
constant[mass, age, feh are *single values* at which values are desired
icol is the column index of desired value
grid is nfeh x nage x max(nmass) x ncols array
mass_col is the column index of mass
ages is grid of ages
fehs is grid of fehs
grid_Ns keeps track of nmass in each slice (beyond this are nans)
]
variable[Nage] assign[=] call[name[len], parameter[name[ages]]]
variable[Nfeh] assign[=] call[name[len], parameter[name[fehs]]]
variable[ifeh] assign[=] call[name[searchsorted], parameter[name[fehs], name[Nfeh], name[feh]]]
variable[iage] assign[=] call[name[searchsorted], parameter[name[ages], name[Nage], name[age]]]
variable[pts] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da207f9b220>, <ast.Constant object at 0x7da207f9b580>]]]]
variable[vals] assign[=] call[name[np].zeros, parameter[constant[8]]]
variable[i_f] assign[=] binary_operation[name[ifeh] - constant[1]]
variable[i_a] assign[=] binary_operation[name[iage] - constant[1]]
variable[Nmass] assign[=] call[name[grid_Ns]][tuple[[<ast.Name object at 0x7da207f9b820>, <ast.Name object at 0x7da207f998d0>]]]
variable[imass] assign[=] call[name[searchsorted], parameter[call[name[grid]][tuple[[<ast.Name object at 0x7da207f9a470>, <ast.Name object at 0x7da207f993f0>, <ast.Slice object at 0x7da207f99ed0>, <ast.Name object at 0x7da207f9a7d0>]]], name[Nmass], name[mass]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f98d60>, <ast.Constant object at 0x7da207f99210>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da207f9ace0>, <ast.Name object at 0x7da207f99960>, <ast.Name object at 0x7da207f992d0>, <ast.Name object at 0x7da207f98af0>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f98310>, <ast.Constant object at 0x7da207f9a830>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f98730>, <ast.Constant object at 0x7da207f9aad0>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[0]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da207f98fa0>, <ast.Name object at 0x7da207f98040>, <ast.Name object at 0x7da207f9bca0>, <ast.Name object at 0x7da207f98400>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f9aef0>, <ast.Constant object at 0x7da207f99480>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da207f99780>, <ast.Name object at 0x7da207f98340>, <ast.BinOp object at 0x7da207f98e20>, <ast.Name object at 0x7da207f98bb0>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f9b640>, <ast.Constant object at 0x7da207f98070>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f98550>, <ast.Constant object at 0x7da207f98c70>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[1]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da207f9bdc0>, <ast.Name object at 0x7da207f9b0d0>, <ast.BinOp object at 0x7da207f9a260>, <ast.Name object at 0x7da207f982b0>]]]
variable[i_f] assign[=] binary_operation[name[ifeh] - constant[1]]
variable[i_a] assign[=] name[iage]
variable[Nmass] assign[=] call[name[grid_Ns]][tuple[[<ast.Name object at 0x7da207f99120>, <ast.Name object at 0x7da207f9b040>]]]
variable[imass] assign[=] call[name[searchsorted], parameter[call[name[grid]][tuple[[<ast.Name object at 0x7da207f99510>, <ast.Name object at 0x7da207f987f0>, <ast.Slice object at 0x7da207f9a950>, <ast.Name object at 0x7da207f99810>]]], name[Nmass], name[mass]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f99c30>, <ast.Constant object at 0x7da207f9a740>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da207f9b8e0>, <ast.Name object at 0x7da207f999c0>, <ast.Name object at 0x7da207f9b880>, <ast.Name object at 0x7da207f98100>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f98250>, <ast.Constant object at 0x7da207f9a1a0>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da207f98640>, <ast.Constant object at 0x7da207f9b700>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[2]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da1b060a260>, <ast.Name object at 0x7da1b06086a0>, <ast.Name object at 0x7da1b060a2c0>, <ast.Name object at 0x7da1b060ab00>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da1b0608eb0>, <ast.Constant object at 0x7da20e956b90>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da20e956b30>, <ast.Name object at 0x7da20e956620>, <ast.BinOp object at 0x7da20e957d90>, <ast.Name object at 0x7da20e955ff0>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da20e9579a0>, <ast.Constant object at 0x7da20e957fa0>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da20e956e60>, <ast.Constant object at 0x7da20e956440>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[3]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da2044c1f90>, <ast.Name object at 0x7da2044c2260>, <ast.BinOp object at 0x7da2044c29e0>, <ast.Name object at 0x7da2044c2fe0>]]]
variable[i_f] assign[=] name[ifeh]
variable[i_a] assign[=] binary_operation[name[iage] - constant[1]]
variable[Nmass] assign[=] call[name[grid_Ns]][tuple[[<ast.Name object at 0x7da2044c1300>, <ast.Name object at 0x7da2044c1480>]]]
variable[imass] assign[=] call[name[searchsorted], parameter[call[name[grid]][tuple[[<ast.Name object at 0x7da2044c2020>, <ast.Name object at 0x7da2044c1270>, <ast.Slice object at 0x7da2044c23b0>, <ast.Name object at 0x7da2044c27a0>]]], name[Nmass], name[mass]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da2044c3850>, <ast.Constant object at 0x7da2044c1ff0>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da2044c0ac0>, <ast.Name object at 0x7da2044c3580>, <ast.Name object at 0x7da2044c15d0>, <ast.Name object at 0x7da2044c2e00>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da2044c3970>, <ast.Constant object at 0x7da2044c34f0>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da2044c3640>, <ast.Constant object at 0x7da2044c0f70>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[4]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da2044c0a90>, <ast.Name object at 0x7da2044c35e0>, <ast.Name object at 0x7da2044c1030>, <ast.Name object at 0x7da2044c3a30>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da1b06d2e00>, <ast.Constant object at 0x7da1b06d10c0>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da1b06d1f00>, <ast.Name object at 0x7da1b06d2a10>, <ast.BinOp object at 0x7da1b06d0c70>, <ast.Name object at 0x7da1b06d3eb0>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da1b06d05e0>, <ast.Constant object at 0x7da1b06d16f0>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da1b06d1ae0>, <ast.Constant object at 0x7da1b06d0ca0>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[5]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da1b06d2560>, <ast.Name object at 0x7da1b06d3940>, <ast.BinOp object at 0x7da1b06d1900>, <ast.Name object at 0x7da1b0617ee0>]]]
variable[i_f] assign[=] name[ifeh]
variable[i_a] assign[=] name[iage]
variable[Nmass] assign[=] call[name[grid_Ns]][tuple[[<ast.Name object at 0x7da1b06156c0>, <ast.Name object at 0x7da1b0617fa0>]]]
variable[imass] assign[=] call[name[searchsorted], parameter[call[name[grid]][tuple[[<ast.Name object at 0x7da1b0617d90>, <ast.Name object at 0x7da1b0615b70>, <ast.Slice object at 0x7da1b0616710>, <ast.Name object at 0x7da1b0617370>]]], name[Nmass], name[mass]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da1b06174f0>, <ast.Constant object at 0x7da1b0617b80>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da1b0614d00>, <ast.Name object at 0x7da1b0614100>, <ast.Name object at 0x7da1b0614910>, <ast.Name object at 0x7da1b0616f50>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da1b0615600>, <ast.Constant object at 0x7da1b0616b90>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da1b06158d0>, <ast.Constant object at 0x7da1b0615a50>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[6]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da1b06162f0>, <ast.Name object at 0x7da1b0616950>, <ast.Name object at 0x7da1b0615d50>, <ast.Name object at 0x7da18f723850>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da18f723f70>, <ast.Constant object at 0x7da18f721630>]]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da18f722d70>, <ast.Name object at 0x7da18f721690>, <ast.BinOp object at 0x7da18f7224d0>, <ast.Name object at 0x7da18f720460>]]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da18f723ac0>, <ast.Constant object at 0x7da18f723a30>]]] assign[=] call[name[ages]][name[i_a]]
call[name[pts]][tuple[[<ast.Constant object at 0x7da18f721120>, <ast.Constant object at 0x7da18f721990>]]] assign[=] call[name[fehs]][name[i_f]]
call[name[vals]][constant[7]] assign[=] call[name[grid]][tuple[[<ast.Name object at 0x7da18f7202b0>, <ast.Name object at 0x7da18f720550>, <ast.BinOp object at 0x7da18f723670>, <ast.Name object at 0x7da18f723d90>]]]
return[call[name[interp_box], parameter[name[mass], name[age], name[feh], name[pts], name[vals]]]] | keyword[def] identifier[interp_value] ( identifier[mass] , identifier[age] , identifier[feh] , identifier[icol] ,
identifier[grid] , identifier[mass_col] , identifier[ages] , identifier[fehs] , identifier[grid_Ns] ):
literal[string]
identifier[Nage] = identifier[len] ( identifier[ages] )
identifier[Nfeh] = identifier[len] ( identifier[fehs] )
identifier[ifeh] = identifier[searchsorted] ( identifier[fehs] , identifier[Nfeh] , identifier[feh] )
identifier[iage] = identifier[searchsorted] ( identifier[ages] , identifier[Nage] , identifier[age] )
identifier[pts] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] ))
identifier[vals] = identifier[np] . identifier[zeros] ( literal[int] )
identifier[i_f] = identifier[ifeh] - literal[int]
identifier[i_a] = identifier[iage] - literal[int]
identifier[Nmass] = identifier[grid_Ns] [ identifier[i_f] , identifier[i_a] ]
identifier[imass] = identifier[searchsorted] ( identifier[grid] [ identifier[i_f] , identifier[i_a] ,:, identifier[mass_col] ], identifier[Nmass] , identifier[mass] )
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[icol] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[icol] ]
identifier[i_f] = identifier[ifeh] - literal[int]
identifier[i_a] = identifier[iage]
identifier[Nmass] = identifier[grid_Ns] [ identifier[i_f] , identifier[i_a] ]
identifier[imass] = identifier[searchsorted] ( identifier[grid] [ identifier[i_f] , identifier[i_a] ,:, identifier[mass_col] ], identifier[Nmass] , identifier[mass] )
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[icol] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[icol] ]
identifier[i_f] = identifier[ifeh]
identifier[i_a] = identifier[iage] - literal[int]
identifier[Nmass] = identifier[grid_Ns] [ identifier[i_f] , identifier[i_a] ]
identifier[imass] = identifier[searchsorted] ( identifier[grid] [ identifier[i_f] , identifier[i_a] ,:, identifier[mass_col] ], identifier[Nmass] , identifier[mass] )
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[icol] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[icol] ]
identifier[i_f] = identifier[ifeh]
identifier[i_a] = identifier[iage]
identifier[Nmass] = identifier[grid_Ns] [ identifier[i_f] , identifier[i_a] ]
identifier[imass] = identifier[searchsorted] ( identifier[grid] [ identifier[i_f] , identifier[i_a] ,:, identifier[mass_col] ], identifier[Nmass] , identifier[mass] )
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] , identifier[icol] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[mass_col] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[ages] [ identifier[i_a] ]
identifier[pts] [ literal[int] , literal[int] ]= identifier[fehs] [ identifier[i_f] ]
identifier[vals] [ literal[int] ]= identifier[grid] [ identifier[i_f] , identifier[i_a] , identifier[imass] - literal[int] , identifier[icol] ]
keyword[return] identifier[interp_box] ( identifier[mass] , identifier[age] , identifier[feh] , identifier[pts] , identifier[vals] ) | def interp_value(mass, age, feh, icol, grid, mass_col, ages, fehs, grid_Ns):
# return_box):
'mass, age, feh are *single values* at which values are desired\n\n icol is the column index of desired value\n grid is nfeh x nage x max(nmass) x ncols array\n mass_col is the column index of mass\n ages is grid of ages\n fehs is grid of fehs\n grid_Ns keeps track of nmass in each slice (beyond this are nans)\n \n '
Nage = len(ages)
Nfeh = len(fehs)
ifeh = searchsorted(fehs, Nfeh, feh)
iage = searchsorted(ages, Nage, age)
pts = np.zeros((8, 3))
vals = np.zeros(8)
i_f = ifeh - 1
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[0, 0] = grid[i_f, i_a, imass, mass_col]
pts[0, 1] = ages[i_a]
pts[0, 2] = fehs[i_f]
vals[0] = grid[i_f, i_a, imass, icol]
pts[1, 0] = grid[i_f, i_a, imass - 1, mass_col]
pts[1, 1] = ages[i_a]
pts[1, 2] = fehs[i_f]
vals[1] = grid[i_f, i_a, imass - 1, icol]
i_f = ifeh - 1
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[2, 0] = grid[i_f, i_a, imass, mass_col]
pts[2, 1] = ages[i_a]
pts[2, 2] = fehs[i_f]
vals[2] = grid[i_f, i_a, imass, icol]
pts[3, 0] = grid[i_f, i_a, imass - 1, mass_col]
pts[3, 1] = ages[i_a]
pts[3, 2] = fehs[i_f]
vals[3] = grid[i_f, i_a, imass - 1, icol]
i_f = ifeh
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[4, 0] = grid[i_f, i_a, imass, mass_col]
pts[4, 1] = ages[i_a]
pts[4, 2] = fehs[i_f]
vals[4] = grid[i_f, i_a, imass, icol]
pts[5, 0] = grid[i_f, i_a, imass - 1, mass_col]
pts[5, 1] = ages[i_a]
pts[5, 2] = fehs[i_f]
vals[5] = grid[i_f, i_a, imass - 1, icol]
i_f = ifeh
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[6, 0] = grid[i_f, i_a, imass, mass_col]
pts[6, 1] = ages[i_a]
pts[6, 2] = fehs[i_f]
vals[6] = grid[i_f, i_a, imass, icol]
pts[7, 0] = grid[i_f, i_a, imass - 1, mass_col]
pts[7, 1] = ages[i_a]
pts[7, 2] = fehs[i_f]
vals[7] = grid[i_f, i_a, imass - 1, icol]
# if return_box:
# return pts, vals
# else:
return interp_box(mass, age, feh, pts, vals) |
def notify(self, msg):
"""Send a notification to all registered listeners.
msg : str
Message to send to each listener
"""
# Fan the message out to every registered listener; the actual
# delivery mechanism is delegated to self._send.
for listener in self.listeners:
self._send(listener, msg) | def function[notify, parameter[self, msg]]:
constant[Send a notification to all registered listeners.
msg : str
Message to send to each listener
]
for taget[name[listener]] in starred[name[self].listeners] begin[:]
call[name[self]._send, parameter[name[listener], name[msg]]] | keyword[def] identifier[notify] ( identifier[self] , identifier[msg] ):
literal[string]
keyword[for] identifier[listener] keyword[in] identifier[self] . identifier[listeners] :
identifier[self] . identifier[_send] ( identifier[listener] , identifier[msg] ) | def notify(self, msg):
"""Send a notification to all registered listeners.
msg : str
Message to send to each listener
"""
for listener in self.listeners:
self._send(listener, msg) # depends on [control=['for'], data=['listener']] |
def execute(self, env, args):
""" Creates a new task.
`env`
Runtime ``Environment`` instance.
`args`
Arguments object from arg parser.
Raises ``errors.FocusError`` if the task cannot be created or its
config file cannot be opened for editing.
"""
# Create the task, optionally cloning settings from an existing task
# (clone_task may be falsy, in which case a fresh task is created).
task_name = args.task_name
clone_task = args.clone_task
if not env.task.create(task_name, clone_task):
raise errors.FocusError(u'Could not create task "{0}"'
.format(task_name))
# Open the new task's config in an editor, unless args.skip_edit is set.
if not args.skip_edit:
task_config = env.task.get_config_path(task_name)
if not _edit_task_config(env, task_config, confirm=True):
raise errors.FocusError(u'Could not open task config: {0}'
.format(task_config)) | def function[execute, parameter[self, env, args]]:
constant[ Creates a new task.
`env`
Runtime ``Environment`` instance.
`args`
Arguments object from arg parser.
]
variable[task_name] assign[=] name[args].task_name
variable[clone_task] assign[=] name[args].clone_task
if <ast.UnaryOp object at 0x7da1b13541c0> begin[:]
<ast.Raise object at 0x7da1b1357910>
if <ast.UnaryOp object at 0x7da1b1357640> begin[:]
variable[task_config] assign[=] call[name[env].task.get_config_path, parameter[name[task_name]]]
if <ast.UnaryOp object at 0x7da1b1354d00> begin[:]
<ast.Raise object at 0x7da1b13555a0> | keyword[def] identifier[execute] ( identifier[self] , identifier[env] , identifier[args] ):
literal[string]
identifier[task_name] = identifier[args] . identifier[task_name]
identifier[clone_task] = identifier[args] . identifier[clone_task]
keyword[if] keyword[not] identifier[env] . identifier[task] . identifier[create] ( identifier[task_name] , identifier[clone_task] ):
keyword[raise] identifier[errors] . identifier[FocusError] ( literal[string]
. identifier[format] ( identifier[task_name] ))
keyword[if] keyword[not] identifier[args] . identifier[skip_edit] :
identifier[task_config] = identifier[env] . identifier[task] . identifier[get_config_path] ( identifier[task_name] )
keyword[if] keyword[not] identifier[_edit_task_config] ( identifier[env] , identifier[task_config] , identifier[confirm] = keyword[True] ):
keyword[raise] identifier[errors] . identifier[FocusError] ( literal[string]
. identifier[format] ( identifier[task_config] )) | def execute(self, env, args):
""" Creates a new task.
`env`
Runtime ``Environment`` instance.
`args`
Arguments object from arg parser.
"""
task_name = args.task_name
clone_task = args.clone_task
if not env.task.create(task_name, clone_task):
raise errors.FocusError(u'Could not create task "{0}"'.format(task_name)) # depends on [control=['if'], data=[]]
# open in task config in editor
if not args.skip_edit:
task_config = env.task.get_config_path(task_name)
if not _edit_task_config(env, task_config, confirm=True):
raise errors.FocusError(u'Could not open task config: {0}'.format(task_config)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def _establish_authenticated_session(self, kik_node):
"""
Updates the kik node and creates a new connection to kik servers.
This new connection will be initiated with another payload which proves
we have the credentials for a specific user. This is how authentication is done.
:param kik_node: The user's kik node (everything before '@' in JID).
"""
# Store the node first — presumably _connect() reads self.kik_node to
# build the authenticated handshake payload; TODO confirm in _connect.
self.kik_node = kik_node
log.info("[+] Closing current connection and creating a new authenticated one.")
# Tear down the anonymous connection and re-dial as the known user.
self.disconnect()
self._connect() | def function[_establish_authenticated_session, parameter[self, kik_node]]:
constant[
Updates the kik node and creates a new connection to kik servers.
This new connection will be initiated with another payload which proves
we have the credentials for a specific user. This is how authentication is done.
:param kik_node: The user's kik node (everything before '@' in JID).
]
name[self].kik_node assign[=] name[kik_node]
call[name[log].info, parameter[constant[[+] Closing current connection and creating a new authenticated one.]]]
call[name[self].disconnect, parameter[]]
call[name[self]._connect, parameter[]] | keyword[def] identifier[_establish_authenticated_session] ( identifier[self] , identifier[kik_node] ):
literal[string]
identifier[self] . identifier[kik_node] = identifier[kik_node]
identifier[log] . identifier[info] ( literal[string] )
identifier[self] . identifier[disconnect] ()
identifier[self] . identifier[_connect] () | def _establish_authenticated_session(self, kik_node):
"""
Updates the kik node and creates a new connection to kik servers.
This new connection will be initiated with another payload which proves
we have the credentials for a specific user. This is how authentication is done.
:param kik_node: The user's kik node (everything before '@' in JID).
"""
self.kik_node = kik_node
log.info('[+] Closing current connection and creating a new authenticated one.')
self.disconnect()
self._connect() |
def prepare_query(query):
"""
Prepare a query object for the RAPI.
RAPI has lots of curious rules for coercing values.
This function operates on dicts in-place and has no return value.
@type query: dict
@param query: Query arguments
@raise ValueError: if a value is a dict (unsupported by the RAPI)
"""
for name in query:
value = query[name]
# None is sent as an empty string.
if value is None:
query[name] = ""
# Booleans are sent as 0 or 1. This check must come before any broader
# numeric handling because bool is a subclass of int in Python.
elif isinstance(value, bool):
query[name] = int(value)
# Nested dicts cannot be serialised into a flat query string.
# XXX shouldn't this just check for basestring instead?
elif isinstance(value, dict):
raise ValueError("Invalid query data type %r" %
type(value).__name__) | def function[prepare_query, parameter[query]]:
constant[
Prepare a query object for the RAPI.
RAPI has lots of curious rules for coercing values.
This function operates on dicts in-place and has no return value.
@type query: dict
@param query: Query arguments
]
for taget[name[name]] in starred[name[query]] begin[:]
variable[value] assign[=] call[name[query]][name[name]]
if compare[name[value] is constant[None]] begin[:]
call[name[query]][name[name]] assign[=] constant[] | keyword[def] identifier[prepare_query] ( identifier[query] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[query] :
identifier[value] = identifier[query] [ identifier[name] ]
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[query] [ identifier[name] ]= literal[string]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[bool] ):
identifier[query] [ identifier[name] ]= identifier[int] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[type] ( identifier[value] ). identifier[__name__] ) | def prepare_query(query):
"""
Prepare a query object for the RAPI.
RAPI has lots of curious rules for coercing values.
This function operates on dicts in-place and has no return value.
@type query: dict
@param query: Query arguments
"""
for name in query:
value = query[name]
# None is sent as an empty string.
if value is None:
query[name] = '' # depends on [control=['if'], data=[]]
# Booleans are sent as 0 or 1.
elif isinstance(value, bool):
query[name] = int(value) # depends on [control=['if'], data=[]]
# XXX shouldn't this just check for basestring instead?
elif isinstance(value, dict):
raise ValueError('Invalid query data type %r' % type(value).__name__) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] |
def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
"""Tries to remove any files created by this shard that aren't needed.
Args:
writer_spec: writer_spec for the MR.
exclude_list: A list of filenames (strings) that should not be
removed.
"""
# Try to remove garbage (if any). Note that listbucket is not strongly
# consistent so something might survive.
# Build the tmpfile name prefix unique to this mapreduce/shard pair so
# only this shard's temporary objects are considered for deletion.
tmpl = string.Template(self._TMPFILE_PREFIX)
prefix = tmpl.substitute(
id=self.status.mapreduce_id, shard=self.status.shard)
bucket = self._get_tmp_gcs_bucket(writer_spec)
account_id = self._get_tmp_account_id(writer_spec)
# Delete every matching temp object except the ones explicitly kept.
for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix),
_account_id=account_id):
if f.filename not in exclude_list:
self._remove_tmpfile(f.filename, self.status.writer_spec) | def function[_try_to_clean_garbage, parameter[self, writer_spec, exclude_list]]:
constant[Tries to remove any files created by this shard that aren't needed.
Args:
writer_spec: writer_spec for the MR.
exclude_list: A list of filenames (strings) that should not be
removed.
]
variable[tmpl] assign[=] call[name[string].Template, parameter[name[self]._TMPFILE_PREFIX]]
variable[prefix] assign[=] call[name[tmpl].substitute, parameter[]]
variable[bucket] assign[=] call[name[self]._get_tmp_gcs_bucket, parameter[name[writer_spec]]]
variable[account_id] assign[=] call[name[self]._get_tmp_account_id, parameter[name[writer_spec]]]
for taget[name[f]] in starred[call[name[cloudstorage].listbucket, parameter[binary_operation[constant[/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c7a00>, <ast.Name object at 0x7da20c6c4f70>]]]]]] begin[:]
if compare[name[f].filename <ast.NotIn object at 0x7da2590d7190> name[exclude_list]] begin[:]
call[name[self]._remove_tmpfile, parameter[name[f].filename, name[self].status.writer_spec]] | keyword[def] identifier[_try_to_clean_garbage] ( identifier[self] , identifier[writer_spec] , identifier[exclude_list] =()):
literal[string]
identifier[tmpl] = identifier[string] . identifier[Template] ( identifier[self] . identifier[_TMPFILE_PREFIX] )
identifier[prefix] = identifier[tmpl] . identifier[substitute] (
identifier[id] = identifier[self] . identifier[status] . identifier[mapreduce_id] , identifier[shard] = identifier[self] . identifier[status] . identifier[shard] )
identifier[bucket] = identifier[self] . identifier[_get_tmp_gcs_bucket] ( identifier[writer_spec] )
identifier[account_id] = identifier[self] . identifier[_get_tmp_account_id] ( identifier[writer_spec] )
keyword[for] identifier[f] keyword[in] identifier[cloudstorage] . identifier[listbucket] ( literal[string] %( identifier[bucket] , identifier[prefix] ),
identifier[_account_id] = identifier[account_id] ):
keyword[if] identifier[f] . identifier[filename] keyword[not] keyword[in] identifier[exclude_list] :
identifier[self] . identifier[_remove_tmpfile] ( identifier[f] . identifier[filename] , identifier[self] . identifier[status] . identifier[writer_spec] ) | def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
"""Tries to remove any files created by this shard that aren't needed.
Args:
writer_spec: writer_spec for the MR.
exclude_list: A list of filenames (strings) that should not be
removed.
"""
# Try to remove garbage (if any). Note that listbucket is not strongly
# consistent so something might survive.
tmpl = string.Template(self._TMPFILE_PREFIX)
prefix = tmpl.substitute(id=self.status.mapreduce_id, shard=self.status.shard)
bucket = self._get_tmp_gcs_bucket(writer_spec)
account_id = self._get_tmp_account_id(writer_spec)
for f in cloudstorage.listbucket('/%s/%s' % (bucket, prefix), _account_id=account_id):
if f.filename not in exclude_list:
self._remove_tmpfile(f.filename, self.status.writer_spec) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] |
def get_url_directory_string(url):
"""
Determines the url's directory string.
:param str url: the url to extract the directory string from
:return str: the directory string on the server
"""
# NOTE(review): domain is used directly as a regex pattern below, so
# dots in it match any character; re.escape would be stricter — confirm
# this looseness is intended.
domain = UrlExtractor.get_allowed_domain(url)
splitted_url = url.split('/')
# the following commented list comprehension could replace
# the following for, if not and break statement
# index = [index for index in range(len(splitted_url))
# if not re.search(domain, splitted_url[index]) is None][0]
# Find the first path segment containing the domain, then keep the
# directory components after it (dropping the trailing file segment,
# or the last two segments when the url ends with '/').
for index in range(len(splitted_url)):
if not re.search(domain, splitted_url[index]) is None:
# NOTE(review): 'is ""' compares identity, not equality — this
# relies on CPython interning the empty string; '== ""' (or
# 'not splitted_url[-1]') would be the safe spelling.
if splitted_url[-1] is "":
splitted_url = splitted_url[index + 1:-2]
else:
splitted_url = splitted_url[index + 1:-1]
break
return '_'.join(splitted_url) | def function[get_url_directory_string, parameter[url]]:
constant[
Determines the url's directory string.
:param str url: the url to extract the directory string from
:return str: the directory string on the server
]
variable[domain] assign[=] call[name[UrlExtractor].get_allowed_domain, parameter[name[url]]]
variable[splitted_url] assign[=] call[name[url].split, parameter[constant[/]]]
for taget[name[index]] in starred[call[name[range], parameter[call[name[len], parameter[name[splitted_url]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da2054a55a0> begin[:]
if compare[call[name[splitted_url]][<ast.UnaryOp object at 0x7da18c4cf400>] is constant[]] begin[:]
variable[splitted_url] assign[=] call[name[splitted_url]][<ast.Slice object at 0x7da18c4ceaa0>]
break
return[call[constant[_].join, parameter[name[splitted_url]]]] | keyword[def] identifier[get_url_directory_string] ( identifier[url] ):
literal[string]
identifier[domain] = identifier[UrlExtractor] . identifier[get_allowed_domain] ( identifier[url] )
identifier[splitted_url] = identifier[url] . identifier[split] ( literal[string] )
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[len] ( identifier[splitted_url] )):
keyword[if] keyword[not] identifier[re] . identifier[search] ( identifier[domain] , identifier[splitted_url] [ identifier[index] ]) keyword[is] keyword[None] :
keyword[if] identifier[splitted_url] [- literal[int] ] keyword[is] literal[string] :
identifier[splitted_url] = identifier[splitted_url] [ identifier[index] + literal[int] :- literal[int] ]
keyword[else] :
identifier[splitted_url] = identifier[splitted_url] [ identifier[index] + literal[int] :- literal[int] ]
keyword[break]
keyword[return] literal[string] . identifier[join] ( identifier[splitted_url] ) | def get_url_directory_string(url):
"""
Determines the url's directory string.
:param str url: the url to extract the directory string from
:return str: the directory string on the server
"""
domain = UrlExtractor.get_allowed_domain(url)
splitted_url = url.split('/')
# the following commented list comprehension could replace
# the following for, if not and break statement
# index = [index for index in range(len(splitted_url))
# if not re.search(domain, splitted_url[index]) is None][0]
for index in range(len(splitted_url)):
if not re.search(domain, splitted_url[index]) is None:
if splitted_url[-1] is '':
splitted_url = splitted_url[index + 1:-2] # depends on [control=['if'], data=[]]
else:
splitted_url = splitted_url[index + 1:-1]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
return '_'.join(splitted_url) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.