code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def close_trackbacks(self, request, queryset):
"""
Close the trackbacks for selected entries.
"""
queryset.update(trackback_enabled=False)
self.message_user(
request, _('Trackbacks are now closed for selected entries.')) | def function[close_trackbacks, parameter[self, request, queryset]]:
constant[
Close the trackbacks for selected entries.
]
call[name[queryset].update, parameter[]]
call[name[self].message_user, parameter[name[request], call[name[_], parameter[constant[Trackbacks are now closed for selected entries.]]]]] | keyword[def] identifier[close_trackbacks] ( identifier[self] , identifier[request] , identifier[queryset] ):
literal[string]
identifier[queryset] . identifier[update] ( identifier[trackback_enabled] = keyword[False] )
identifier[self] . identifier[message_user] (
identifier[request] , identifier[_] ( literal[string] )) | def close_trackbacks(self, request, queryset):
"""
Close the trackbacks for selected entries.
"""
queryset.update(trackback_enabled=False)
self.message_user(request, _('Trackbacks are now closed for selected entries.')) |
def info_gain_nominal(x, y, separate_max):
"""
Function calculates information gain for discrete features. If feature is continuous it is firstly discretized.
x: numpy array - numerical or discrete feature
y: numpy array - labels
ft: string - feature type ("c" - continuous, "d" - discrete)
split_fun: function - function for discretization of numerical features
"""
x_vals = np.unique(x) # unique values
if len(x_vals) < 3: # if there is just one unique value
return None
y_dist = Counter(y) # label distribution
h_y = h(y_dist.values()) # class entropy
# calculate distributions and splits in accordance with feature type
dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max)
indices, repeat = (range(1, len(dist)), 1) if len(dist) < 50 else (range(1, len(dist), len(dist) / 10), 3)
interval = len(dist) / 10
max_ig, max_i, iteration = 0, 1, 0
while iteration < repeat:
for i in indices:
dist0 = np.sum([el for el in dist[:i]]) # iter 0: take first distribution
dist1 = np.sum([el for el in dist[i:]]) # iter 0: take the other distributions without first
coef = np.true_divide([np.sum(dist0.values()), np.sum(dist1.values())], len(y))
ig = h_y - np.dot(coef, [h(dist0.values()), h(dist1.values())]) # calculate information gain
if ig > max_ig:
max_ig, max_i = ig, i # store index and value of maximal information gain
iteration += 1
if repeat > 1:
interval = int(interval * 0.5)
if max_i in indices and interval > 0:
middle_index = indices.index(max_i)
else:
break
min_index = middle_index if middle_index == 0 else middle_index - 1
max_index = middle_index if middle_index == len(indices) - 1 else middle_index + 1
indices = range(indices[min_index], indices[max_index], interval)
# store splits of maximal information gain in accordance with feature type
return float(max_ig), [splits[:max_i], splits[max_i:]] | def function[info_gain_nominal, parameter[x, y, separate_max]]:
constant[
Function calculates information gain for discrete features. If feature is continuous it is firstly discretized.
x: numpy array - numerical or discrete feature
y: numpy array - labels
ft: string - feature type ("c" - continuous, "d" - discrete)
split_fun: function - function for discretization of numerical features
]
variable[x_vals] assign[=] call[name[np].unique, parameter[name[x]]]
if compare[call[name[len], parameter[name[x_vals]]] less[<] constant[3]] begin[:]
return[constant[None]]
variable[y_dist] assign[=] call[name[Counter], parameter[name[y]]]
variable[h_y] assign[=] call[name[h], parameter[call[name[y_dist].values, parameter[]]]]
<ast.Tuple object at 0x7da204347400> assign[=] call[name[nominal_splits], parameter[name[x], name[y], name[x_vals], name[y_dist], name[separate_max]]]
<ast.Tuple object at 0x7da2043444f0> assign[=] <ast.IfExp object at 0x7da204345de0>
variable[interval] assign[=] binary_operation[call[name[len], parameter[name[dist]]] / constant[10]]
<ast.Tuple object at 0x7da204346770> assign[=] tuple[[<ast.Constant object at 0x7da204345450>, <ast.Constant object at 0x7da2043474c0>, <ast.Constant object at 0x7da204346740>]]
while compare[name[iteration] less[<] name[repeat]] begin[:]
for taget[name[i]] in starred[name[indices]] begin[:]
variable[dist0] assign[=] call[name[np].sum, parameter[<ast.ListComp object at 0x7da204344bb0>]]
variable[dist1] assign[=] call[name[np].sum, parameter[<ast.ListComp object at 0x7da204347a30>]]
variable[coef] assign[=] call[name[np].true_divide, parameter[list[[<ast.Call object at 0x7da204347070>, <ast.Call object at 0x7da204346110>]], call[name[len], parameter[name[y]]]]]
variable[ig] assign[=] binary_operation[name[h_y] - call[name[np].dot, parameter[name[coef], list[[<ast.Call object at 0x7da2043458a0>, <ast.Call object at 0x7da204345c30>]]]]]
if compare[name[ig] greater[>] name[max_ig]] begin[:]
<ast.Tuple object at 0x7da204344a30> assign[=] tuple[[<ast.Name object at 0x7da204346860>, <ast.Name object at 0x7da2043455a0>]]
<ast.AugAssign object at 0x7da2043460e0>
if compare[name[repeat] greater[>] constant[1]] begin[:]
variable[interval] assign[=] call[name[int], parameter[binary_operation[name[interval] * constant[0.5]]]]
if <ast.BoolOp object at 0x7da2043456c0> begin[:]
variable[middle_index] assign[=] call[name[indices].index, parameter[name[max_i]]]
variable[min_index] assign[=] <ast.IfExp object at 0x7da204347d90>
variable[max_index] assign[=] <ast.IfExp object at 0x7da204347bb0>
variable[indices] assign[=] call[name[range], parameter[call[name[indices]][name[min_index]], call[name[indices]][name[max_index]], name[interval]]]
return[tuple[[<ast.Call object at 0x7da204345510>, <ast.List object at 0x7da204345990>]]] | keyword[def] identifier[info_gain_nominal] ( identifier[x] , identifier[y] , identifier[separate_max] ):
literal[string]
identifier[x_vals] = identifier[np] . identifier[unique] ( identifier[x] )
keyword[if] identifier[len] ( identifier[x_vals] )< literal[int] :
keyword[return] keyword[None]
identifier[y_dist] = identifier[Counter] ( identifier[y] )
identifier[h_y] = identifier[h] ( identifier[y_dist] . identifier[values] ())
identifier[dist] , identifier[splits] = identifier[nominal_splits] ( identifier[x] , identifier[y] , identifier[x_vals] , identifier[y_dist] , identifier[separate_max] )
identifier[indices] , identifier[repeat] =( identifier[range] ( literal[int] , identifier[len] ( identifier[dist] )), literal[int] ) keyword[if] identifier[len] ( identifier[dist] )< literal[int] keyword[else] ( identifier[range] ( literal[int] , identifier[len] ( identifier[dist] ), identifier[len] ( identifier[dist] )/ literal[int] ), literal[int] )
identifier[interval] = identifier[len] ( identifier[dist] )/ literal[int]
identifier[max_ig] , identifier[max_i] , identifier[iteration] = literal[int] , literal[int] , literal[int]
keyword[while] identifier[iteration] < identifier[repeat] :
keyword[for] identifier[i] keyword[in] identifier[indices] :
identifier[dist0] = identifier[np] . identifier[sum] ([ identifier[el] keyword[for] identifier[el] keyword[in] identifier[dist] [: identifier[i] ]])
identifier[dist1] = identifier[np] . identifier[sum] ([ identifier[el] keyword[for] identifier[el] keyword[in] identifier[dist] [ identifier[i] :]])
identifier[coef] = identifier[np] . identifier[true_divide] ([ identifier[np] . identifier[sum] ( identifier[dist0] . identifier[values] ()), identifier[np] . identifier[sum] ( identifier[dist1] . identifier[values] ())], identifier[len] ( identifier[y] ))
identifier[ig] = identifier[h_y] - identifier[np] . identifier[dot] ( identifier[coef] ,[ identifier[h] ( identifier[dist0] . identifier[values] ()), identifier[h] ( identifier[dist1] . identifier[values] ())])
keyword[if] identifier[ig] > identifier[max_ig] :
identifier[max_ig] , identifier[max_i] = identifier[ig] , identifier[i]
identifier[iteration] += literal[int]
keyword[if] identifier[repeat] > literal[int] :
identifier[interval] = identifier[int] ( identifier[interval] * literal[int] )
keyword[if] identifier[max_i] keyword[in] identifier[indices] keyword[and] identifier[interval] > literal[int] :
identifier[middle_index] = identifier[indices] . identifier[index] ( identifier[max_i] )
keyword[else] :
keyword[break]
identifier[min_index] = identifier[middle_index] keyword[if] identifier[middle_index] == literal[int] keyword[else] identifier[middle_index] - literal[int]
identifier[max_index] = identifier[middle_index] keyword[if] identifier[middle_index] == identifier[len] ( identifier[indices] )- literal[int] keyword[else] identifier[middle_index] + literal[int]
identifier[indices] = identifier[range] ( identifier[indices] [ identifier[min_index] ], identifier[indices] [ identifier[max_index] ], identifier[interval] )
keyword[return] identifier[float] ( identifier[max_ig] ),[ identifier[splits] [: identifier[max_i] ], identifier[splits] [ identifier[max_i] :]] | def info_gain_nominal(x, y, separate_max):
"""
Function calculates information gain for discrete features. If feature is continuous it is firstly discretized.
x: numpy array - numerical or discrete feature
y: numpy array - labels
ft: string - feature type ("c" - continuous, "d" - discrete)
split_fun: function - function for discretization of numerical features
"""
x_vals = np.unique(x) # unique values
if len(x_vals) < 3: # if there is just one unique value
return None # depends on [control=['if'], data=[]]
y_dist = Counter(y) # label distribution
h_y = h(y_dist.values()) # class entropy
# calculate distributions and splits in accordance with feature type
(dist, splits) = nominal_splits(x, y, x_vals, y_dist, separate_max)
(indices, repeat) = (range(1, len(dist)), 1) if len(dist) < 50 else (range(1, len(dist), len(dist) / 10), 3)
interval = len(dist) / 10
(max_ig, max_i, iteration) = (0, 1, 0)
while iteration < repeat:
for i in indices:
dist0 = np.sum([el for el in dist[:i]]) # iter 0: take first distribution
dist1 = np.sum([el for el in dist[i:]]) # iter 0: take the other distributions without first
coef = np.true_divide([np.sum(dist0.values()), np.sum(dist1.values())], len(y))
ig = h_y - np.dot(coef, [h(dist0.values()), h(dist1.values())]) # calculate information gain
if ig > max_ig:
(max_ig, max_i) = (ig, i) # store index and value of maximal information gain # depends on [control=['if'], data=['ig', 'max_ig']] # depends on [control=['for'], data=['i']]
iteration += 1
if repeat > 1:
interval = int(interval * 0.5)
if max_i in indices and interval > 0:
middle_index = indices.index(max_i) # depends on [control=['if'], data=[]]
else:
break
min_index = middle_index if middle_index == 0 else middle_index - 1
max_index = middle_index if middle_index == len(indices) - 1 else middle_index + 1
indices = range(indices[min_index], indices[max_index], interval) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['iteration', 'repeat']]
# store splits of maximal information gain in accordance with feature type
return (float(max_ig), [splits[:max_i], splits[max_i:]]) |
def _extract_columns(self, table_name):
''' a method to extract the column properties of an existing table '''
import re
from sqlalchemy import MetaData, VARCHAR, INTEGER, BLOB, BOOLEAN, FLOAT
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, BIT, BYTEA
# retrieve list of tables
metadata_object = MetaData()
table_list = self.engine.table_names()
# determine columns
prior_columns = {}
if table_name in table_list:
metadata_object.reflect(self.engine)
existing_table = metadata_object.tables[table_name]
for column in existing_table.columns:
column_type = None
column_length = None
if column.type.__class__ == FLOAT().__class__:
column_type = 'float'
elif column.type.__class__ == DOUBLE_PRECISION().__class__: # Postgres
column_type = 'float'
elif column.type.__class__ == INTEGER().__class__:
column_type = 'integer'
elif column.type.__class__ == VARCHAR().__class__:
column_length = getattr(column.type, 'length', None)
if column_length == 1:
if column.primary_key:
column_length = None
column_type = 'string'
elif column.type.__class__ == BLOB().__class__:
column_type = 'list'
elif column.type.__class__ in (BIT().__class__, BYTEA().__class__):
column_type = 'list'
elif column.type.__class__ == BOOLEAN().__class__:
column_type = 'boolean'
prior_columns[column.key] = (column.key, column_type, '', column_length)
return prior_columns | def function[_extract_columns, parameter[self, table_name]]:
constant[ a method to extract the column properties of an existing table ]
import module[re]
from relative_module[sqlalchemy] import module[MetaData], module[VARCHAR], module[INTEGER], module[BLOB], module[BOOLEAN], module[FLOAT]
from relative_module[sqlalchemy.dialects.postgresql] import module[DOUBLE_PRECISION], module[BIT], module[BYTEA]
variable[metadata_object] assign[=] call[name[MetaData], parameter[]]
variable[table_list] assign[=] call[name[self].engine.table_names, parameter[]]
variable[prior_columns] assign[=] dictionary[[], []]
if compare[name[table_name] in name[table_list]] begin[:]
call[name[metadata_object].reflect, parameter[name[self].engine]]
variable[existing_table] assign[=] call[name[metadata_object].tables][name[table_name]]
for taget[name[column]] in starred[name[existing_table].columns] begin[:]
variable[column_type] assign[=] constant[None]
variable[column_length] assign[=] constant[None]
if compare[name[column].type.__class__ equal[==] call[name[FLOAT], parameter[]].__class__] begin[:]
variable[column_type] assign[=] constant[float]
call[name[prior_columns]][name[column].key] assign[=] tuple[[<ast.Attribute object at 0x7da20e9557b0>, <ast.Name object at 0x7da20e957ee0>, <ast.Constant object at 0x7da20e956230>, <ast.Name object at 0x7da20e957be0>]]
return[name[prior_columns]] | keyword[def] identifier[_extract_columns] ( identifier[self] , identifier[table_name] ):
literal[string]
keyword[import] identifier[re]
keyword[from] identifier[sqlalchemy] keyword[import] identifier[MetaData] , identifier[VARCHAR] , identifier[INTEGER] , identifier[BLOB] , identifier[BOOLEAN] , identifier[FLOAT]
keyword[from] identifier[sqlalchemy] . identifier[dialects] . identifier[postgresql] keyword[import] identifier[DOUBLE_PRECISION] , identifier[BIT] , identifier[BYTEA]
identifier[metadata_object] = identifier[MetaData] ()
identifier[table_list] = identifier[self] . identifier[engine] . identifier[table_names] ()
identifier[prior_columns] ={}
keyword[if] identifier[table_name] keyword[in] identifier[table_list] :
identifier[metadata_object] . identifier[reflect] ( identifier[self] . identifier[engine] )
identifier[existing_table] = identifier[metadata_object] . identifier[tables] [ identifier[table_name] ]
keyword[for] identifier[column] keyword[in] identifier[existing_table] . identifier[columns] :
identifier[column_type] = keyword[None]
identifier[column_length] = keyword[None]
keyword[if] identifier[column] . identifier[type] . identifier[__class__] == identifier[FLOAT] (). identifier[__class__] :
identifier[column_type] = literal[string]
keyword[elif] identifier[column] . identifier[type] . identifier[__class__] == identifier[DOUBLE_PRECISION] (). identifier[__class__] :
identifier[column_type] = literal[string]
keyword[elif] identifier[column] . identifier[type] . identifier[__class__] == identifier[INTEGER] (). identifier[__class__] :
identifier[column_type] = literal[string]
keyword[elif] identifier[column] . identifier[type] . identifier[__class__] == identifier[VARCHAR] (). identifier[__class__] :
identifier[column_length] = identifier[getattr] ( identifier[column] . identifier[type] , literal[string] , keyword[None] )
keyword[if] identifier[column_length] == literal[int] :
keyword[if] identifier[column] . identifier[primary_key] :
identifier[column_length] = keyword[None]
identifier[column_type] = literal[string]
keyword[elif] identifier[column] . identifier[type] . identifier[__class__] == identifier[BLOB] (). identifier[__class__] :
identifier[column_type] = literal[string]
keyword[elif] identifier[column] . identifier[type] . identifier[__class__] keyword[in] ( identifier[BIT] (). identifier[__class__] , identifier[BYTEA] (). identifier[__class__] ):
identifier[column_type] = literal[string]
keyword[elif] identifier[column] . identifier[type] . identifier[__class__] == identifier[BOOLEAN] (). identifier[__class__] :
identifier[column_type] = literal[string]
identifier[prior_columns] [ identifier[column] . identifier[key] ]=( identifier[column] . identifier[key] , identifier[column_type] , literal[string] , identifier[column_length] )
keyword[return] identifier[prior_columns] | def _extract_columns(self, table_name):
""" a method to extract the column properties of an existing table """
import re
from sqlalchemy import MetaData, VARCHAR, INTEGER, BLOB, BOOLEAN, FLOAT
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, BIT, BYTEA
# retrieve list of tables
metadata_object = MetaData()
table_list = self.engine.table_names()
# determine columns
prior_columns = {}
if table_name in table_list:
metadata_object.reflect(self.engine)
existing_table = metadata_object.tables[table_name]
for column in existing_table.columns:
column_type = None
column_length = None
if column.type.__class__ == FLOAT().__class__:
column_type = 'float' # depends on [control=['if'], data=[]]
elif column.type.__class__ == DOUBLE_PRECISION().__class__: # Postgres
column_type = 'float' # depends on [control=['if'], data=[]]
elif column.type.__class__ == INTEGER().__class__:
column_type = 'integer' # depends on [control=['if'], data=[]]
elif column.type.__class__ == VARCHAR().__class__:
column_length = getattr(column.type, 'length', None)
if column_length == 1:
if column.primary_key:
column_length = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['column_length']]
column_type = 'string' # depends on [control=['if'], data=[]]
elif column.type.__class__ == BLOB().__class__:
column_type = 'list' # depends on [control=['if'], data=[]]
elif column.type.__class__ in (BIT().__class__, BYTEA().__class__):
column_type = 'list' # depends on [control=['if'], data=[]]
elif column.type.__class__ == BOOLEAN().__class__:
column_type = 'boolean' # depends on [control=['if'], data=[]]
prior_columns[column.key] = (column.key, column_type, '', column_length) # depends on [control=['for'], data=['column']] # depends on [control=['if'], data=['table_name']]
return prior_columns |
def get_reservation_ports(session, reservation_id, model_name='Generic Traffic Generator Port'):
""" Get all Generic Traffic Generator Port in reservation.
:return: list of all Generic Traffic Generator Port resource objects in reservation
"""
reservation_ports = []
reservation = session.GetReservationDetails(reservation_id).ReservationDescription
for resource in reservation.Resources:
if resource.ResourceModelName == model_name:
reservation_ports.append(resource)
return reservation_ports | def function[get_reservation_ports, parameter[session, reservation_id, model_name]]:
constant[ Get all Generic Traffic Generator Port in reservation.
:return: list of all Generic Traffic Generator Port resource objects in reservation
]
variable[reservation_ports] assign[=] list[[]]
variable[reservation] assign[=] call[name[session].GetReservationDetails, parameter[name[reservation_id]]].ReservationDescription
for taget[name[resource]] in starred[name[reservation].Resources] begin[:]
if compare[name[resource].ResourceModelName equal[==] name[model_name]] begin[:]
call[name[reservation_ports].append, parameter[name[resource]]]
return[name[reservation_ports]] | keyword[def] identifier[get_reservation_ports] ( identifier[session] , identifier[reservation_id] , identifier[model_name] = literal[string] ):
literal[string]
identifier[reservation_ports] =[]
identifier[reservation] = identifier[session] . identifier[GetReservationDetails] ( identifier[reservation_id] ). identifier[ReservationDescription]
keyword[for] identifier[resource] keyword[in] identifier[reservation] . identifier[Resources] :
keyword[if] identifier[resource] . identifier[ResourceModelName] == identifier[model_name] :
identifier[reservation_ports] . identifier[append] ( identifier[resource] )
keyword[return] identifier[reservation_ports] | def get_reservation_ports(session, reservation_id, model_name='Generic Traffic Generator Port'):
""" Get all Generic Traffic Generator Port in reservation.
:return: list of all Generic Traffic Generator Port resource objects in reservation
"""
reservation_ports = []
reservation = session.GetReservationDetails(reservation_id).ReservationDescription
for resource in reservation.Resources:
if resource.ResourceModelName == model_name:
reservation_ports.append(resource) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['resource']]
return reservation_ports |
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol} | def function[as_dict, parameter[self]]:
constant[
Makes Element obey the general json interface used in pymatgen for
easier serialization.
]
return[dictionary[[<ast.Constant object at 0x7da18ede7310>, <ast.Constant object at 0x7da18ede7f40>, <ast.Constant object at 0x7da18ede60e0>], [<ast.Attribute object at 0x7da18ede5ff0>, <ast.Attribute object at 0x7da2041da4a0>, <ast.Attribute object at 0x7da2041d8940>]]] | keyword[def] identifier[as_dict] ( identifier[self] ):
literal[string]
keyword[return] { literal[string] : identifier[self] . identifier[__class__] . identifier[__module__] ,
literal[string] : identifier[self] . identifier[__class__] . identifier[__name__] ,
literal[string] : identifier[self] . identifier[symbol] } | def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'element': self.symbol} |
def _setToDefaults(self):
""" Load the default parameter settings into the GUI. """
# Create an empty object, where every item is set to it's default value
try:
tmpObj = cfgpars.ConfigObjPars(self._taskParsObj.filename,
associatedPkg=\
self._taskParsObj.getAssocPkg(),
setAllToDefaults=self.taskName,
strict=False)
except Exception as ex:
msg = "Error Determining Defaults"
showerror(message=msg+'\n\n'+ex.message, title="Error Determining Defaults")
return
# Set the GUI entries to these values (let the user Save after)
tmpObj.filename = self._taskParsObj.filename = '' # name it later
newParList = tmpObj.getParList()
try:
self.setAllEntriesFromParList(newParList) # needn't updateModel yet
self.checkAllTriggers('defaults')
self.updateTitle('')
self.showStatus("Loaded default "+self.taskName+" values via: "+ \
os.path.basename(tmpObj._original_configspec), keep=1)
except editpar.UnfoundParamError as pe:
showerror(message=str(pe), title="Error Setting to Default Values") | def function[_setToDefaults, parameter[self]]:
constant[ Load the default parameter settings into the GUI. ]
<ast.Try object at 0x7da1b0e830d0>
name[tmpObj].filename assign[=] constant[]
variable[newParList] assign[=] call[name[tmpObj].getParList, parameter[]]
<ast.Try object at 0x7da1b0e48b20> | keyword[def] identifier[_setToDefaults] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[tmpObj] = identifier[cfgpars] . identifier[ConfigObjPars] ( identifier[self] . identifier[_taskParsObj] . identifier[filename] ,
identifier[associatedPkg] = identifier[self] . identifier[_taskParsObj] . identifier[getAssocPkg] (),
identifier[setAllToDefaults] = identifier[self] . identifier[taskName] ,
identifier[strict] = keyword[False] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[msg] = literal[string]
identifier[showerror] ( identifier[message] = identifier[msg] + literal[string] + identifier[ex] . identifier[message] , identifier[title] = literal[string] )
keyword[return]
identifier[tmpObj] . identifier[filename] = identifier[self] . identifier[_taskParsObj] . identifier[filename] = literal[string]
identifier[newParList] = identifier[tmpObj] . identifier[getParList] ()
keyword[try] :
identifier[self] . identifier[setAllEntriesFromParList] ( identifier[newParList] )
identifier[self] . identifier[checkAllTriggers] ( literal[string] )
identifier[self] . identifier[updateTitle] ( literal[string] )
identifier[self] . identifier[showStatus] ( literal[string] + identifier[self] . identifier[taskName] + literal[string] + identifier[os] . identifier[path] . identifier[basename] ( identifier[tmpObj] . identifier[_original_configspec] ), identifier[keep] = literal[int] )
keyword[except] identifier[editpar] . identifier[UnfoundParamError] keyword[as] identifier[pe] :
identifier[showerror] ( identifier[message] = identifier[str] ( identifier[pe] ), identifier[title] = literal[string] ) | def _setToDefaults(self):
""" Load the default parameter settings into the GUI. """
# Create an empty object, where every item is set to it's default value
try:
tmpObj = cfgpars.ConfigObjPars(self._taskParsObj.filename, associatedPkg=self._taskParsObj.getAssocPkg(), setAllToDefaults=self.taskName, strict=False) # depends on [control=['try'], data=[]]
except Exception as ex:
msg = 'Error Determining Defaults'
showerror(message=msg + '\n\n' + ex.message, title='Error Determining Defaults')
return # depends on [control=['except'], data=['ex']]
# Set the GUI entries to these values (let the user Save after)
tmpObj.filename = self._taskParsObj.filename = '' # name it later
newParList = tmpObj.getParList()
try:
self.setAllEntriesFromParList(newParList) # needn't updateModel yet
self.checkAllTriggers('defaults')
self.updateTitle('')
self.showStatus('Loaded default ' + self.taskName + ' values via: ' + os.path.basename(tmpObj._original_configspec), keep=1) # depends on [control=['try'], data=[]]
except editpar.UnfoundParamError as pe:
showerror(message=str(pe), title='Error Setting to Default Values') # depends on [control=['except'], data=['pe']] |
def _set_tagged_outer_vlan(self, v, load=False):
"""
Setter method for tagged_outer_vlan, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/tagged_outer_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tagged_outer_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tagged_outer_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tagged_outer_vlan.tagged_outer_vlan, is_container='container', presence=False, yang_name="tagged-outer-vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Outer VLAN for this logical interface', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'alt-name': u'vlan'}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tagged_outer_vlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tagged_outer_vlan.tagged_outer_vlan, is_container='container', presence=False, yang_name="tagged-outer-vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Outer VLAN for this logical interface', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'alt-name': u'vlan'}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)""",
})
self.__tagged_outer_vlan = t
if hasattr(self, '_set'):
self._set() | def function[_set_tagged_outer_vlan, parameter[self, v, load]]:
constant[
Setter method for tagged_outer_vlan, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/tagged_outer_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tagged_outer_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tagged_outer_vlan() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f58d030>
name[self].__tagged_outer_vlan assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_tagged_outer_vlan] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[tagged_outer_vlan] . identifier[tagged_outer_vlan] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__tagged_outer_vlan] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_tagged_outer_vlan(self, v, load=False):
"""
Setter method for tagged_outer_vlan, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/tagged_outer_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tagged_outer_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tagged_outer_vlan() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=tagged_outer_vlan.tagged_outer_vlan, is_container='container', presence=False, yang_name='tagged-outer-vlan', rest_name='vlan', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Outer VLAN for this logical interface', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'alt-name': u'vlan'}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'tagged_outer_vlan must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=tagged_outer_vlan.tagged_outer_vlan, is_container=\'container\', presence=False, yang_name="tagged-outer-vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Configure Outer VLAN for this logical interface\', u\'cli-compact-syntax\': None, u\'cli-sequence-commands\': None, u\'alt-name\': u\'vlan\'}}, namespace=\'urn:brocade.com:mgmt:brocade-lif\', defining_module=\'brocade-lif\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__tagged_outer_vlan = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def add_leverage(self):
    """ Adds a leverage term to the model.

    The four most recently added latent variables (Skewness, v,
    Returns Constant, GARCH-M) are removed and re-added after the new
    Leverage Term, so the latent-variable ordering stays consistent
    with the rest of the model.

    Returns
    ----------
    None (changes instance attributes)
    """
    # `is True` kept deliberately: only an exact boolean True marks
    # leverage as already present (matches the original check).
    if self.leverage is True:
        return
    self.leverage = True
    self.z_no += 1
    # Drop the last four latent variables so the leverage term can be
    # inserted before them.
    for _ in range(4):
        self.latent_variables.z_list.pop()
    self.latent_variables.add_z('Leverage Term', fam.Flat(transform=None), fam.Normal(0, 3))
    self.latent_variables.add_z('Skewness', fam.Flat(transform='exp'), fam.Normal(0, 3))
    self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3))
    self.latent_variables.add_z('Returns Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
    self.latent_variables.add_z('GARCH-M', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
    # Give the third-from-last latent variable ('v') a starting value.
    self.latent_variables.z_list[-3].start = 2.0
constant[ Adds leverage term to the model
Returns
----------
None (changes instance attributes)
]
if compare[name[self].leverage is constant[True]] begin[:]
pass | keyword[def] identifier[add_leverage] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[leverage] keyword[is] keyword[True] :
keyword[pass]
keyword[else] :
identifier[self] . identifier[leverage] = keyword[True]
identifier[self] . identifier[z_no] += literal[int]
identifier[self] . identifier[latent_variables] . identifier[z_list] . identifier[pop] ()
identifier[self] . identifier[latent_variables] . identifier[z_list] . identifier[pop] ()
identifier[self] . identifier[latent_variables] . identifier[z_list] . identifier[pop] ()
identifier[self] . identifier[latent_variables] . identifier[z_list] . identifier[pop] ()
identifier[self] . identifier[latent_variables] . identifier[add_z] ( literal[string] , identifier[fam] . identifier[Flat] ( identifier[transform] = keyword[None] ), identifier[fam] . identifier[Normal] ( literal[int] , literal[int] ))
identifier[self] . identifier[latent_variables] . identifier[add_z] ( literal[string] , identifier[fam] . identifier[Flat] ( identifier[transform] = literal[string] ), identifier[fam] . identifier[Normal] ( literal[int] , literal[int] ))
identifier[self] . identifier[latent_variables] . identifier[add_z] ( literal[string] , identifier[fam] . identifier[Flat] ( identifier[transform] = literal[string] ), identifier[fam] . identifier[Normal] ( literal[int] , literal[int] ))
identifier[self] . identifier[latent_variables] . identifier[add_z] ( literal[string] , identifier[fam] . identifier[Normal] ( literal[int] , literal[int] , identifier[transform] = keyword[None] ), identifier[fam] . identifier[Normal] ( literal[int] , literal[int] ))
identifier[self] . identifier[latent_variables] . identifier[add_z] ( literal[string] , identifier[fam] . identifier[Normal] ( literal[int] , literal[int] , identifier[transform] = keyword[None] ), identifier[fam] . identifier[Normal] ( literal[int] , literal[int] ))
identifier[self] . identifier[latent_variables] . identifier[z_list] [- literal[int] ]. identifier[start] = literal[int] | def add_leverage(self):
""" Adds leverage term to the model
Returns
----------
None (changes instance attributes)
"""
if self.leverage is True:
pass # depends on [control=['if'], data=[]]
else:
self.leverage = True
self.z_no += 1
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.add_z('Leverage Term', fam.Flat(transform=None), fam.Normal(0, 3))
self.latent_variables.add_z('Skewness', fam.Flat(transform='exp'), fam.Normal(0, 3))
self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3))
self.latent_variables.add_z('Returns Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
self.latent_variables.add_z('GARCH-M', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
self.latent_variables.z_list[-3].start = 2.0 |
def stop(self):
    """Hard-stop the server and its worker sub-processes.

    Sets the shared ``_end`` flag so cooperative loops can exit, then
    terminates the background process and every current task process.
    Termination is best-effort: a process may already be dead or never
    started, so errors from ``terminate()`` are deliberately ignored.
    """
    self._end.value = True
    if self.background_process:
        try:
            self.background_process.terminate()
        except Exception:
            pass  # best-effort: process may already be gone
    # Only the per-task records are needed here, not the task ids.
    for values in self.current_tasks.values():
        try:
            values['proc'].terminate()
        except Exception:
            pass  # best-effort: the task may have exited on its own
constant[Hard stop the server and sub process]
name[self]._end.value assign[=] constant[True]
if name[self].background_process begin[:]
<ast.Try object at 0x7da20c6a8ee0>
for taget[tuple[[<ast.Name object at 0x7da20c6a9390>, <ast.Name object at 0x7da20c6aa410>]]] in starred[call[name[self].current_tasks.items, parameter[]]] begin[:]
<ast.Try object at 0x7da20c6a89a0> | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_end] . identifier[value] = keyword[True]
keyword[if] identifier[self] . identifier[background_process] :
keyword[try] :
identifier[self] . identifier[background_process] . identifier[terminate] ()
keyword[except] identifier[Exception] :
keyword[pass]
keyword[for] identifier[task_id] , identifier[values] keyword[in] identifier[self] . identifier[current_tasks] . identifier[items] ():
keyword[try] :
identifier[values] [ literal[string] ]. identifier[terminate] ()
keyword[except] identifier[Exception] :
keyword[pass] | def stop(self):
"""Hard stop the server and sub process"""
self._end.value = True
if self.background_process:
try:
self.background_process.terminate() # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
for (task_id, values) in self.current_tasks.items():
try:
values['proc'].terminate() # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] |
def _connect(self):
    """Connects to the cloud web services. If this is the first
    authentication, a web browser will be started to authenticate
    against google and provide access to elasticluster.
    :return: A Resource object with methods for interacting with the
    service.
    """
    # ensure only one thread runs the authentication process, if needed
    with GoogleCloudProvider.__gce_lock:
        # check for existing connection; the service is built lazily and
        # cached on the instance so authentication runs at most once
        if not self._gce:
            version = pkg_resources.get_distribution("elasticluster").version
            # tag outgoing requests with the client version for diagnostics
            http = googleapiclient.http.set_user_agent(httplib2.Http(), "elasticluster/%s" % version)
            credentials = self._get_credentials()
            self._auth_http = credentials.authorize(http)
            # NOTE(review): build() is given the plain `http` object rather
            # than the authorized `self._auth_http` returned above — confirm
            # this is intentional (whether authorize() mutates `http` in
            # place depends on the auth library version in use).
            self._gce = build(GCE_API_NAME, GCE_API_VERSION, http=http)
    return self._gce
constant[Connects to the cloud web services. If this is the first
authentication, a web browser will be started to authenticate
against google and provide access to elasticluster.
:return: A Resource object with methods for interacting with the
service.
]
with name[GoogleCloudProvider].__gce_lock begin[:]
if <ast.UnaryOp object at 0x7da1b08a5ae0> begin[:]
variable[version] assign[=] call[name[pkg_resources].get_distribution, parameter[constant[elasticluster]]].version
variable[http] assign[=] call[name[googleapiclient].http.set_user_agent, parameter[call[name[httplib2].Http, parameter[]], binary_operation[constant[elasticluster/%s] <ast.Mod object at 0x7da2590d6920> name[version]]]]
variable[credentials] assign[=] call[name[self]._get_credentials, parameter[]]
name[self]._auth_http assign[=] call[name[credentials].authorize, parameter[name[http]]]
name[self]._gce assign[=] call[name[build], parameter[name[GCE_API_NAME], name[GCE_API_VERSION]]]
return[name[self]._gce] | keyword[def] identifier[_connect] ( identifier[self] ):
literal[string]
keyword[with] identifier[GoogleCloudProvider] . identifier[__gce_lock] :
keyword[if] keyword[not] identifier[self] . identifier[_gce] :
identifier[version] = identifier[pkg_resources] . identifier[get_distribution] ( literal[string] ). identifier[version]
identifier[http] = identifier[googleapiclient] . identifier[http] . identifier[set_user_agent] ( identifier[httplib2] . identifier[Http] (), literal[string] % identifier[version] )
identifier[credentials] = identifier[self] . identifier[_get_credentials] ()
identifier[self] . identifier[_auth_http] = identifier[credentials] . identifier[authorize] ( identifier[http] )
identifier[self] . identifier[_gce] = identifier[build] ( identifier[GCE_API_NAME] , identifier[GCE_API_VERSION] , identifier[http] = identifier[http] )
keyword[return] identifier[self] . identifier[_gce] | def _connect(self):
"""Connects to the cloud web services. If this is the first
authentication, a web browser will be started to authenticate
against google and provide access to elasticluster.
:return: A Resource object with methods for interacting with the
service.
"""
# ensure only one thread runs the authentication process, if needed
with GoogleCloudProvider.__gce_lock:
# check for existing connection
if not self._gce:
version = pkg_resources.get_distribution('elasticluster').version
http = googleapiclient.http.set_user_agent(httplib2.Http(), 'elasticluster/%s' % version)
credentials = self._get_credentials()
self._auth_http = credentials.authorize(http)
self._gce = build(GCE_API_NAME, GCE_API_VERSION, http=http) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
return self._gce |
def _basic_debug(prefix, self):
    """
    Prints out basic information about an Asn1Value object. Extracted for reuse
    among different classes that customize the debug information.
    :param prefix:
        A unicode string of spaces to prefix output line with
    :param self:
        The object to print the debugging information about
    """
    print('%s%s Object #%s' % (prefix, type_name(self), id(self)))
    if self._header:
        header_hex = binascii.hexlify(self._header or b'').decode('utf-8')
        print('%s Header: 0x%s' % (prefix, header_hex))

    # The header line can only be printed once method, class and tag
    # are all known.
    has_header = not (self.method is None or self.class_ is None or self.tag is None)
    method_name = METHOD_NUM_TO_NAME_MAP.get(self.method) if has_header else None
    class_name = CLASS_NUM_TO_NAME_MAP.get(self.class_) if has_header else None

    if self.explicit is not None:
        # One line per explicit tagging layer, outermost first.
        for tag_class, tag_number in self.explicit:
            print(
                '%s %s tag %s (explicitly tagged)' %
                (prefix, CLASS_NUM_TO_NAME_MAP.get(tag_class), tag_number)
            )
        if has_header:
            print('%s %s %s %s' % (prefix, method_name, class_name, self.tag))
    elif self.implicit and has_header:
        print('%s %s %s tag %s (implicitly tagged)' % (prefix, method_name, class_name, self.tag))
    elif not self.implicit and has_header:
        print('%s %s %s tag %s' % (prefix, method_name, class_name, self.tag))
    print('%s Data: 0x%s' % (prefix, binascii.hexlify(self.contents or b'').decode('utf-8')))
constant[
Prints out basic information about an Asn1Value object. Extracted for reuse
among different classes that customize the debug information.
:param prefix:
A unicode string of spaces to prefix output line with
:param self:
The object to print the debugging information about
]
call[name[print], parameter[binary_operation[constant[%s%s Object #%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bcca5c0>, <ast.Call object at 0x7da18bccb0a0>, <ast.Call object at 0x7da18bcc8190>]]]]]
if name[self]._header begin[:]
call[name[print], parameter[binary_operation[constant[%s Header: 0x%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bcc8eb0>, <ast.Call object at 0x7da18bcc92d0>]]]]]
variable[has_header] assign[=] <ast.BoolOp object at 0x7da18bcc9450>
if name[has_header] begin[:]
variable[method_name] assign[=] call[name[METHOD_NUM_TO_NAME_MAP].get, parameter[name[self].method]]
variable[class_name] assign[=] call[name[CLASS_NUM_TO_NAME_MAP].get, parameter[name[self].class_]]
if compare[name[self].explicit is_not constant[None]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f00e620>, <ast.Name object at 0x7da18f00ebf0>]]] in starred[name[self].explicit] begin[:]
call[name[print], parameter[binary_operation[constant[%s %s tag %s (explicitly tagged)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00eda0>, <ast.Call object at 0x7da18f00e740>, <ast.Name object at 0x7da18f00ccd0>]]]]]
if name[has_header] begin[:]
call[name[print], parameter[binary_operation[constant[%s %s %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00f550>, <ast.Name object at 0x7da18f00f2b0>, <ast.Name object at 0x7da18f00cb50>, <ast.Attribute object at 0x7da18f00ee90>]]]]]
call[name[print], parameter[binary_operation[constant[%s Data: 0x%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00e7d0>, <ast.Call object at 0x7da18f00ec50>]]]]] | keyword[def] identifier[_basic_debug] ( identifier[prefix] , identifier[self] ):
literal[string]
identifier[print] ( literal[string] %( identifier[prefix] , identifier[type_name] ( identifier[self] ), identifier[id] ( identifier[self] )))
keyword[if] identifier[self] . identifier[_header] :
identifier[print] ( literal[string] %( identifier[prefix] , identifier[binascii] . identifier[hexlify] ( identifier[self] . identifier[_header] keyword[or] literal[string] ). identifier[decode] ( literal[string] )))
identifier[has_header] = identifier[self] . identifier[method] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[class_] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[tag] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[has_header] :
identifier[method_name] = identifier[METHOD_NUM_TO_NAME_MAP] . identifier[get] ( identifier[self] . identifier[method] )
identifier[class_name] = identifier[CLASS_NUM_TO_NAME_MAP] . identifier[get] ( identifier[self] . identifier[class_] )
keyword[if] identifier[self] . identifier[explicit] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[class_] , identifier[tag] keyword[in] identifier[self] . identifier[explicit] :
identifier[print] (
literal[string] %
(
identifier[prefix] ,
identifier[CLASS_NUM_TO_NAME_MAP] . identifier[get] ( identifier[class_] ),
identifier[tag]
)
)
keyword[if] identifier[has_header] :
identifier[print] ( literal[string] %( identifier[prefix] , identifier[method_name] , identifier[class_name] , identifier[self] . identifier[tag] ))
keyword[elif] identifier[self] . identifier[implicit] :
keyword[if] identifier[has_header] :
identifier[print] ( literal[string] %( identifier[prefix] , identifier[method_name] , identifier[class_name] , identifier[self] . identifier[tag] ))
keyword[elif] identifier[has_header] :
identifier[print] ( literal[string] %( identifier[prefix] , identifier[method_name] , identifier[class_name] , identifier[self] . identifier[tag] ))
identifier[print] ( literal[string] %( identifier[prefix] , identifier[binascii] . identifier[hexlify] ( identifier[self] . identifier[contents] keyword[or] literal[string] ). identifier[decode] ( literal[string] ))) | def _basic_debug(prefix, self):
"""
Prints out basic information about an Asn1Value object. Extracted for reuse
among different classes that customize the debug information.
:param prefix:
A unicode string of spaces to prefix output line with
:param self:
The object to print the debugging information about
"""
print('%s%s Object #%s' % (prefix, type_name(self), id(self)))
if self._header:
print('%s Header: 0x%s' % (prefix, binascii.hexlify(self._header or b'').decode('utf-8'))) # depends on [control=['if'], data=[]]
has_header = self.method is not None and self.class_ is not None and (self.tag is not None)
if has_header:
method_name = METHOD_NUM_TO_NAME_MAP.get(self.method)
class_name = CLASS_NUM_TO_NAME_MAP.get(self.class_) # depends on [control=['if'], data=[]]
if self.explicit is not None:
for (class_, tag) in self.explicit:
print('%s %s tag %s (explicitly tagged)' % (prefix, CLASS_NUM_TO_NAME_MAP.get(class_), tag)) # depends on [control=['for'], data=[]]
if has_header:
print('%s %s %s %s' % (prefix, method_name, class_name, self.tag)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.implicit:
if has_header:
print('%s %s %s tag %s (implicitly tagged)' % (prefix, method_name, class_name, self.tag)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif has_header:
print('%s %s %s tag %s' % (prefix, method_name, class_name, self.tag)) # depends on [control=['if'], data=[]]
print('%s Data: 0x%s' % (prefix, binascii.hexlify(self.contents or b'').decode('utf-8'))) |
def setDataFrameFromFile(self, filepath, **kwargs):
    """
    Reads a file and installs its contents as the model's dataFrame.

    Accepted file formats:
        - .xlsx (sheet1 is read unless specified in kwargs)
        - .csv (comma separated unless specified in kwargs)
        - .txt (any separator)

    :param filepath: (str)
        The path to the file to be read.
    :param kwargs:
        pandas.read_csv(**kwargs) or pandas.read_excel(**kwargs)
    :return: None
    """
    self.setDataFrame(superReadFile(filepath, **kwargs), filePath=filepath)
constant[
Sets the model's dataFrame by reading a file.
Accepted file formats:
- .xlsx (sheet1 is read unless specified in kwargs)
- .csv (comma separated unless specified in kwargs)
- .txt (any separator)
:param filepath: (str)
The path to the file to be read.
:param kwargs:
pandas.read_csv(**kwargs) or pandas.read_excel(**kwargs)
:return: None
]
variable[df] assign[=] call[name[superReadFile], parameter[name[filepath]]]
call[name[self].setDataFrame, parameter[name[df]]] | keyword[def] identifier[setDataFrameFromFile] ( identifier[self] , identifier[filepath] ,** identifier[kwargs] ):
literal[string]
identifier[df] = identifier[superReadFile] ( identifier[filepath] ,** identifier[kwargs] )
identifier[self] . identifier[setDataFrame] ( identifier[df] , identifier[filePath] = identifier[filepath] ) | def setDataFrameFromFile(self, filepath, **kwargs):
"""
Sets the model's dataFrame by reading a file.
Accepted file formats:
- .xlsx (sheet1 is read unless specified in kwargs)
- .csv (comma separated unless specified in kwargs)
- .txt (any separator)
:param filepath: (str)
The path to the file to be read.
:param kwargs:
pandas.read_csv(**kwargs) or pandas.read_excel(**kwargs)
:return: None
"""
df = superReadFile(filepath, **kwargs)
self.setDataFrame(df, filePath=filepath) |
def smart_import(mpath):
    """Import and return the module or attribute referred to by the
    dotted path *mpath* (e.g. ``'json'``, ``'os.path'``, ``'json.dumps'``).

    If *mpath* is not itself importable as a module, the longest
    importable prefix is imported and the remainder resolved with
    ``getattr``.
    """
    try:
        obj = __import__(mpath)
        # __import__('a.b.c') returns the top-level package 'a'; walk
        # down so that 'a.b.c' itself is returned (the original code
        # returned the top-level package for dotted module paths).
        for part in mpath.split('.')[1:]:
            obj = getattr(obj, part)
    except ImportError:
        # mpath names an attribute, not a module: resolve the parent
        # recursively and fetch the leaf attribute from it.
        parts = mpath.split('.')
        obj = getattr(smart_import('.'.join(parts[:-1])), parts[-1])
    return obj
return rest | def function[smart_import, parameter[mpath]]:
constant[Given a path smart_import will import the module and return the attr reffered to.]
<ast.Try object at 0x7da1b146cdf0>
return[name[rest]] | keyword[def] identifier[smart_import] ( identifier[mpath] ):
literal[string]
keyword[try] :
identifier[rest] = identifier[__import__] ( identifier[mpath] )
keyword[except] identifier[ImportError] :
identifier[split] = identifier[mpath] . identifier[split] ( literal[string] )
identifier[rest] = identifier[smart_import] ( literal[string] . identifier[join] ( identifier[split] [:- literal[int] ]))
identifier[rest] = identifier[getattr] ( identifier[rest] , identifier[split] [- literal[int] ])
keyword[return] identifier[rest] | def smart_import(mpath):
"""Given a path smart_import will import the module and return the attr reffered to."""
try:
rest = __import__(mpath) # depends on [control=['try'], data=[]]
except ImportError:
split = mpath.split('.')
rest = smart_import('.'.join(split[:-1]))
rest = getattr(rest, split[-1]) # depends on [control=['except'], data=[]]
return rest |
def _fixStringValue(s, p):
    """Clean up string value including special characters, etc.

    Strips the surrounding quotes from the string token *s* and resolves
    backslash escape sequences (\\", \\n, \\t, \\b, \\f, \\r, \\\\ and
    \\xNNNN / \\XNNNN with 1..4 hex digits).

    *p* is only used for error reporting via MOFParseError.

    Returns the unescaped string value.

    Raises MOFParseError for a '\\x' escape with no hex digits.

    Note: an escape character not in the recognized set (e.g. '\\q') is
    silently dropped together with its backslash.
    """
    # pylint: disable=too-many-branches
    s = s[1:-1]  # drop the surrounding quote characters
    rv = ''
    esc = False  # True when the previous character was an unconsumed backslash
    i = -1
    while i < len(s) - 1:
        i += 1
        ch = s[i]
        if ch == '\\' and not esc:
            # start of an escape sequence; consume the backslash
            esc = True
            continue
        if not esc:
            # ordinary character, copied through unchanged
            rv += ch
            continue
        # From here on, ch is the character following a backslash.
        if ch == '"':
            rv += '"'
        elif ch == 'n':
            rv += '\n'
        elif ch == 't':
            rv += '\t'
        elif ch == 'b':
            rv += '\b'
        elif ch == 'f':
            rv += '\f'
        elif ch == 'r':
            rv += '\r'
        elif ch == '\\':
            rv += '\\'
        elif ch in ['x', 'X']:
            # \xNNNN: accumulate up to 4 hex digits into hexc.
            # NOTE(review): if the 'x' is the last character of s,
            # s[i + j] below raises IndexError — presumably the lexer
            # guarantees at least a closing quote follows; confirm.
            hexc = 0
            j = 0
            i += 1  # advance to the first candidate hex digit
            while j < 4:
                c = s[i + j]
                c = c.upper()
                if not c.isdigit() and c not in 'ABCDEF':
                    break
                hexc <<= 4
                if c.isdigit():
                    hexc |= ord(c) - ord('0')
                else:
                    hexc |= ord(c) - ord('A') + 0XA
                j += 1
            if j == 0:
                # DSP0004 requires 1..4 hex chars - we have 0
                raise MOFParseError(
                    parser_token=p,
                    msg="Unicode escape sequence (e.g. '\\x12AB') requires "
                        "at least one hex character")
            rv += six.unichr(hexc)
            # skip the consumed hex digits (the loop's own i += 1
            # accounts for the last one)
            i += j - 1
        esc = False
    return rv
return rv | def function[_fixStringValue, parameter[s, p]]:
constant[Clean up string value including special characters, etc.]
variable[s] assign[=] call[name[s]][<ast.Slice object at 0x7da1b0c44a30>]
variable[rv] assign[=] constant[]
variable[esc] assign[=] constant[False]
variable[i] assign[=] <ast.UnaryOp object at 0x7da1b0c47070>
while compare[name[i] less[<] binary_operation[call[name[len], parameter[name[s]]] - constant[1]]] begin[:]
<ast.AugAssign object at 0x7da1b0c474f0>
variable[ch] assign[=] call[name[s]][name[i]]
if <ast.BoolOp object at 0x7da1b0c454e0> begin[:]
variable[esc] assign[=] constant[True]
continue
if <ast.UnaryOp object at 0x7da1b0e9cc40> begin[:]
<ast.AugAssign object at 0x7da1b0e9ff40>
continue
if compare[name[ch] equal[==] constant["]] begin[:]
<ast.AugAssign object at 0x7da1b0e9c0a0>
variable[esc] assign[=] constant[False]
return[name[rv]] | keyword[def] identifier[_fixStringValue] ( identifier[s] , identifier[p] ):
literal[string]
identifier[s] = identifier[s] [ literal[int] :- literal[int] ]
identifier[rv] = literal[string]
identifier[esc] = keyword[False]
identifier[i] =- literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[s] )- literal[int] :
identifier[i] += literal[int]
identifier[ch] = identifier[s] [ identifier[i] ]
keyword[if] identifier[ch] == literal[string] keyword[and] keyword[not] identifier[esc] :
identifier[esc] = keyword[True]
keyword[continue]
keyword[if] keyword[not] identifier[esc] :
identifier[rv] += identifier[ch]
keyword[continue]
keyword[if] identifier[ch] == literal[string] :
identifier[rv] += literal[string]
keyword[elif] identifier[ch] == literal[string] :
identifier[rv] += literal[string]
keyword[elif] identifier[ch] == literal[string] :
identifier[rv] += literal[string]
keyword[elif] identifier[ch] == literal[string] :
identifier[rv] += literal[string]
keyword[elif] identifier[ch] == literal[string] :
identifier[rv] += literal[string]
keyword[elif] identifier[ch] == literal[string] :
identifier[rv] += literal[string]
keyword[elif] identifier[ch] == literal[string] :
identifier[rv] += literal[string]
keyword[elif] identifier[ch] keyword[in] [ literal[string] , literal[string] ]:
identifier[hexc] = literal[int]
identifier[j] = literal[int]
identifier[i] += literal[int]
keyword[while] identifier[j] < literal[int] :
identifier[c] = identifier[s] [ identifier[i] + identifier[j] ]
identifier[c] = identifier[c] . identifier[upper] ()
keyword[if] keyword[not] identifier[c] . identifier[isdigit] () keyword[and] identifier[c] keyword[not] keyword[in] literal[string] :
keyword[break]
identifier[hexc] <<= literal[int]
keyword[if] identifier[c] . identifier[isdigit] ():
identifier[hexc] |= identifier[ord] ( identifier[c] )- identifier[ord] ( literal[string] )
keyword[else] :
identifier[hexc] |= identifier[ord] ( identifier[c] )- identifier[ord] ( literal[string] )+ literal[int]
identifier[j] += literal[int]
keyword[if] identifier[j] == literal[int] :
keyword[raise] identifier[MOFParseError] (
identifier[parser_token] = identifier[p] ,
identifier[msg] = literal[string]
literal[string] )
identifier[rv] += identifier[six] . identifier[unichr] ( identifier[hexc] )
identifier[i] += identifier[j] - literal[int]
identifier[esc] = keyword[False]
keyword[return] identifier[rv] | def _fixStringValue(s, p):
"""Clean up string value including special characters, etc."""
# pylint: disable=too-many-branches
s = s[1:-1]
rv = ''
esc = False
i = -1
while i < len(s) - 1:
i += 1
ch = s[i]
if ch == '\\' and (not esc):
esc = True
continue # depends on [control=['if'], data=[]]
if not esc:
rv += ch
continue # depends on [control=['if'], data=[]]
if ch == '"':
rv += '"' # depends on [control=['if'], data=[]]
elif ch == 'n':
rv += '\n' # depends on [control=['if'], data=[]]
elif ch == 't':
rv += '\t' # depends on [control=['if'], data=[]]
elif ch == 'b':
rv += '\x08' # depends on [control=['if'], data=[]]
elif ch == 'f':
rv += '\x0c' # depends on [control=['if'], data=[]]
elif ch == 'r':
rv += '\r' # depends on [control=['if'], data=[]]
elif ch == '\\':
rv += '\\' # depends on [control=['if'], data=[]]
elif ch in ['x', 'X']:
hexc = 0
j = 0
i += 1
while j < 4:
c = s[i + j]
c = c.upper()
if not c.isdigit() and c not in 'ABCDEF':
break # depends on [control=['if'], data=[]]
hexc <<= 4
if c.isdigit():
hexc |= ord(c) - ord('0') # depends on [control=['if'], data=[]]
else:
hexc |= ord(c) - ord('A') + 10
j += 1 # depends on [control=['while'], data=['j']]
if j == 0:
# DSP0004 requires 1..4 hex chars - we have 0
raise MOFParseError(parser_token=p, msg="Unicode escape sequence (e.g. '\\x12AB') requires at least one hex character") # depends on [control=['if'], data=[]]
rv += six.unichr(hexc)
i += j - 1 # depends on [control=['if'], data=[]]
esc = False # depends on [control=['while'], data=['i']]
return rv |
def smeft_toarray(wc_name, wc_dict):
    """Construct a numpy array with Wilson coefficient values from a
    dictionary of label-value pairs corresponding to the non-redundant
    elements."""
    values = np.zeros(smeftutil.C_keys_shape[wc_name], dtype=complex)
    for label, value in wc_dict.items():
        parts = label.split('_')
        # only keys whose leading underscore-separated field matches
        # the requested coefficient name belong to this array
        if parts[0] != wc_name:
            continue
        # the last field holds 1-based flavour indices, e.g. '1213'
        idx = tuple(int(digit) - 1 for digit in parts[-1])
        values[idx] = value
    # fill in the redundant elements implied by the symmetry relations
    return smeftutil.symmetrize({wc_name: values})[wc_name]
constant[Construct a numpy array with Wilson coefficient values from a
dictionary of label-value pairs corresponding to the non-redundant
elements.]
variable[shape] assign[=] call[name[smeftutil].C_keys_shape][name[wc_name]]
variable[C] assign[=] call[name[np].zeros, parameter[name[shape]]]
for taget[tuple[[<ast.Name object at 0x7da1b1afa5f0>, <ast.Name object at 0x7da1b1af8a90>]]] in starred[call[name[wc_dict].items, parameter[]]] begin[:]
if compare[call[call[name[k].split, parameter[constant[_]]]][constant[0]] not_equal[!=] name[wc_name]] begin[:]
continue
variable[indices] assign[=] call[call[name[k].split, parameter[constant[_]]]][<ast.UnaryOp object at 0x7da1b1af9750>]
variable[indices] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1afa6b0>]]
call[name[C]][name[indices]] assign[=] name[v]
variable[C] assign[=] call[call[name[smeftutil].symmetrize, parameter[dictionary[[<ast.Name object at 0x7da1b1b68e50>], [<ast.Name object at 0x7da1b1b6ba30>]]]]][name[wc_name]]
return[name[C]] | keyword[def] identifier[smeft_toarray] ( identifier[wc_name] , identifier[wc_dict] ):
literal[string]
identifier[shape] = identifier[smeftutil] . identifier[C_keys_shape] [ identifier[wc_name] ]
identifier[C] = identifier[np] . identifier[zeros] ( identifier[shape] , identifier[dtype] = identifier[complex] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[wc_dict] . identifier[items] ():
keyword[if] identifier[k] . identifier[split] ( literal[string] )[ literal[int] ]!= identifier[wc_name] :
keyword[continue]
identifier[indices] = identifier[k] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[indices] = identifier[tuple] ( identifier[int] ( identifier[s] )- literal[int] keyword[for] identifier[s] keyword[in] identifier[indices] )
identifier[C] [ identifier[indices] ]= identifier[v]
identifier[C] = identifier[smeftutil] . identifier[symmetrize] ({ identifier[wc_name] : identifier[C] })[ identifier[wc_name] ]
keyword[return] identifier[C] | def smeft_toarray(wc_name, wc_dict):
"""Construct a numpy array with Wilson coefficient values from a
dictionary of label-value pairs corresponding to the non-redundant
elements."""
shape = smeftutil.C_keys_shape[wc_name]
C = np.zeros(shape, dtype=complex)
for (k, v) in wc_dict.items():
if k.split('_')[0] != wc_name:
continue # depends on [control=['if'], data=[]]
indices = k.split('_')[-1] # e.g. '1213'
indices = tuple((int(s) - 1 for s in indices)) # e.g. (1, 2, 1, 3)
C[indices] = v # depends on [control=['for'], data=[]]
C = smeftutil.symmetrize({wc_name: C})[wc_name]
return C |
def _find_mapreduce_yaml(start, checked):
    """Traverse the directory tree identified by start until a directory already
    in checked is encountered or the path of mapreduce.yaml is found.

    Checked is present both to make loop termination easy to reason about and so
    that the same directories do not get rechecked.

    Args:
      start: the path to start in and work upward from
      checked: the set of already examined directories

    Returns:
      the path of mapreduce.yaml file or None if not found.
    """
    # local renamed from `dir` to avoid shadowing the builtin
    current = start
    # os.path.dirname() is a fixed point at the filesystem root, so the
    # walk terminates once the root has been added to `checked`.
    while current not in checked:
        checked.add(current)
        for mr_yaml_name in MR_YAML_NAMES:
            yaml_path = os.path.join(current, mr_yaml_name)
            if os.path.exists(yaml_path):
                return yaml_path
        current = os.path.dirname(current)
    return None
constant[Traverse the directory tree identified by start until a directory already
in checked is encountered or the path of mapreduce.yaml is found.
Checked is present both to make loop termination easy to reason about and so
that the same directories do not get rechecked.
Args:
start: the path to start in and work upward from
checked: the set of already examined directories
Returns:
the path of mapreduce.yaml file or None if not found.
]
variable[dir] assign[=] name[start]
while compare[name[dir] <ast.NotIn object at 0x7da2590d7190> name[checked]] begin[:]
call[name[checked].add, parameter[name[dir]]]
for taget[name[mr_yaml_name]] in starred[name[MR_YAML_NAMES]] begin[:]
variable[yaml_path] assign[=] call[name[os].path.join, parameter[name[dir], name[mr_yaml_name]]]
if call[name[os].path.exists, parameter[name[yaml_path]]] begin[:]
return[name[yaml_path]]
variable[dir] assign[=] call[name[os].path.dirname, parameter[name[dir]]]
return[constant[None]] | keyword[def] identifier[_find_mapreduce_yaml] ( identifier[start] , identifier[checked] ):
literal[string]
identifier[dir] = identifier[start]
keyword[while] identifier[dir] keyword[not] keyword[in] identifier[checked] :
identifier[checked] . identifier[add] ( identifier[dir] )
keyword[for] identifier[mr_yaml_name] keyword[in] identifier[MR_YAML_NAMES] :
identifier[yaml_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , identifier[mr_yaml_name] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[yaml_path] ):
keyword[return] identifier[yaml_path]
identifier[dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[dir] )
keyword[return] keyword[None] | def _find_mapreduce_yaml(start, checked):
"""Traverse the directory tree identified by start until a directory already
in checked is encountered or the path of mapreduce.yaml is found.
Checked is present both to make loop termination easy to reason about and so
that the same directories do not get rechecked.
Args:
start: the path to start in and work upward from
checked: the set of already examined directories
Returns:
the path of mapreduce.yaml file or None if not found.
"""
dir = start
while dir not in checked:
checked.add(dir)
for mr_yaml_name in MR_YAML_NAMES:
yaml_path = os.path.join(dir, mr_yaml_name)
if os.path.exists(yaml_path):
return yaml_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mr_yaml_name']]
dir = os.path.dirname(dir) # depends on [control=['while'], data=['dir', 'checked']]
return None |
def start(self, *args, **kwargs):
"""Starts the instance.
:raises RuntimeError: has been already started.
:raises TypeError: :meth:`run` is not canonical.
"""
if self.is_running():
raise RuntimeError('Already started')
self._running = self.run(*args, **kwargs)
try:
yielded = next(self._running)
except StopIteration:
raise TypeError('run() must yield just one time')
if yielded is not None:
raise TypeError('run() must yield without value') | def function[start, parameter[self]]:
constant[Starts the instance.
:raises RuntimeError: has been already started.
:raises TypeError: :meth:`run` is not canonical.
]
if call[name[self].is_running, parameter[]] begin[:]
<ast.Raise object at 0x7da1b11abd00>
name[self]._running assign[=] call[name[self].run, parameter[<ast.Starred object at 0x7da1b11a9780>]]
<ast.Try object at 0x7da1b11a9f90>
if compare[name[yielded] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b11ab100> | keyword[def] identifier[start] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[is_running] ():
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[_running] = identifier[self] . identifier[run] (* identifier[args] ,** identifier[kwargs] )
keyword[try] :
identifier[yielded] = identifier[next] ( identifier[self] . identifier[_running] )
keyword[except] identifier[StopIteration] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[yielded] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def start(self, *args, **kwargs):
"""Starts the instance.
:raises RuntimeError: has been already started.
:raises TypeError: :meth:`run` is not canonical.
"""
if self.is_running():
raise RuntimeError('Already started') # depends on [control=['if'], data=[]]
self._running = self.run(*args, **kwargs)
try:
yielded = next(self._running) # depends on [control=['try'], data=[]]
except StopIteration:
raise TypeError('run() must yield just one time') # depends on [control=['except'], data=[]]
if yielded is not None:
raise TypeError('run() must yield without value') # depends on [control=['if'], data=[]] |
def RtlGetVersion(os_version_info_struct):
"""Wraps the lowlevel RtlGetVersion routine.
Args:
os_version_info_struct: instance of either a RTL_OSVERSIONINFOW structure
or a RTL_OSVERSIONINFOEXW structure,
ctypes.Structure-wrapped, with the
dwOSVersionInfoSize field preset to
ctypes.sizeof(self).
Raises:
OSError: if the underlaying routine fails.
See: https://msdn.microsoft.com/en-us/library/
windows/hardware/ff561910(v=vs.85).aspx .
"""
rc = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version_info_struct))
if rc != 0:
raise OSError("Getting Windows version failed.") | def function[RtlGetVersion, parameter[os_version_info_struct]]:
constant[Wraps the lowlevel RtlGetVersion routine.
Args:
os_version_info_struct: instance of either a RTL_OSVERSIONINFOW structure
or a RTL_OSVERSIONINFOEXW structure,
ctypes.Structure-wrapped, with the
dwOSVersionInfoSize field preset to
ctypes.sizeof(self).
Raises:
OSError: if the underlaying routine fails.
See: https://msdn.microsoft.com/en-us/library/
windows/hardware/ff561910(v=vs.85).aspx .
]
variable[rc] assign[=] call[name[ctypes].windll.Ntdll.RtlGetVersion, parameter[call[name[ctypes].byref, parameter[name[os_version_info_struct]]]]]
if compare[name[rc] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da2044c27d0> | keyword[def] identifier[RtlGetVersion] ( identifier[os_version_info_struct] ):
literal[string]
identifier[rc] = identifier[ctypes] . identifier[windll] . identifier[Ntdll] . identifier[RtlGetVersion] ( identifier[ctypes] . identifier[byref] ( identifier[os_version_info_struct] ))
keyword[if] identifier[rc] != literal[int] :
keyword[raise] identifier[OSError] ( literal[string] ) | def RtlGetVersion(os_version_info_struct):
"""Wraps the lowlevel RtlGetVersion routine.
Args:
os_version_info_struct: instance of either a RTL_OSVERSIONINFOW structure
or a RTL_OSVERSIONINFOEXW structure,
ctypes.Structure-wrapped, with the
dwOSVersionInfoSize field preset to
ctypes.sizeof(self).
Raises:
OSError: if the underlaying routine fails.
See: https://msdn.microsoft.com/en-us/library/
windows/hardware/ff561910(v=vs.85).aspx .
"""
rc = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version_info_struct))
if rc != 0:
raise OSError('Getting Windows version failed.') # depends on [control=['if'], data=[]] |
def annual_heating_design_day_996(self):
"""A design day object representing the annual 99.6% heating design day."""
self._load_header_check()
if bool(self._heating_dict) is True:
avg_press = self.atmospheric_station_pressure.average
avg_press = None if avg_press == 999999 else avg_press
return DesignDay.from_ashrae_dict_heating(
self._heating_dict, self.location, False, avg_press)
else:
return None | def function[annual_heating_design_day_996, parameter[self]]:
constant[A design day object representing the annual 99.6% heating design day.]
call[name[self]._load_header_check, parameter[]]
if compare[call[name[bool], parameter[name[self]._heating_dict]] is constant[True]] begin[:]
variable[avg_press] assign[=] name[self].atmospheric_station_pressure.average
variable[avg_press] assign[=] <ast.IfExp object at 0x7da1b12aba00>
return[call[name[DesignDay].from_ashrae_dict_heating, parameter[name[self]._heating_dict, name[self].location, constant[False], name[avg_press]]]] | keyword[def] identifier[annual_heating_design_day_996] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_load_header_check] ()
keyword[if] identifier[bool] ( identifier[self] . identifier[_heating_dict] ) keyword[is] keyword[True] :
identifier[avg_press] = identifier[self] . identifier[atmospheric_station_pressure] . identifier[average]
identifier[avg_press] = keyword[None] keyword[if] identifier[avg_press] == literal[int] keyword[else] identifier[avg_press]
keyword[return] identifier[DesignDay] . identifier[from_ashrae_dict_heating] (
identifier[self] . identifier[_heating_dict] , identifier[self] . identifier[location] , keyword[False] , identifier[avg_press] )
keyword[else] :
keyword[return] keyword[None] | def annual_heating_design_day_996(self):
"""A design day object representing the annual 99.6% heating design day."""
self._load_header_check()
if bool(self._heating_dict) is True:
avg_press = self.atmospheric_station_pressure.average
avg_press = None if avg_press == 999999 else avg_press
return DesignDay.from_ashrae_dict_heating(self._heating_dict, self.location, False, avg_press) # depends on [control=['if'], data=[]]
else:
return None |
def create_user(self, username, password, tags=""):
"""
Creates a user.
:param string username: The name to give to the new user
:param string password: Password for the new user
:param string tags: Comma-separated list of tags for the user
:returns: boolean
"""
path = Client.urls['users_by_name'] % username
body = json.dumps({'password': password, 'tags': tags})
return self._call(path, 'PUT', body=body,
headers=Client.json_headers) | def function[create_user, parameter[self, username, password, tags]]:
constant[
Creates a user.
:param string username: The name to give to the new user
:param string password: Password for the new user
:param string tags: Comma-separated list of tags for the user
:returns: boolean
]
variable[path] assign[=] binary_operation[call[name[Client].urls][constant[users_by_name]] <ast.Mod object at 0x7da2590d6920> name[username]]
variable[body] assign[=] call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da1b0fd4d60>, <ast.Constant object at 0x7da1b0fd4850>], [<ast.Name object at 0x7da1b0fd5270>, <ast.Name object at 0x7da1b0fd4c40>]]]]
return[call[name[self]._call, parameter[name[path], constant[PUT]]]] | keyword[def] identifier[create_user] ( identifier[self] , identifier[username] , identifier[password] , identifier[tags] = literal[string] ):
literal[string]
identifier[path] = identifier[Client] . identifier[urls] [ literal[string] ]% identifier[username]
identifier[body] = identifier[json] . identifier[dumps] ({ literal[string] : identifier[password] , literal[string] : identifier[tags] })
keyword[return] identifier[self] . identifier[_call] ( identifier[path] , literal[string] , identifier[body] = identifier[body] ,
identifier[headers] = identifier[Client] . identifier[json_headers] ) | def create_user(self, username, password, tags=''):
"""
Creates a user.
:param string username: The name to give to the new user
:param string password: Password for the new user
:param string tags: Comma-separated list of tags for the user
:returns: boolean
"""
path = Client.urls['users_by_name'] % username
body = json.dumps({'password': password, 'tags': tags})
return self._call(path, 'PUT', body=body, headers=Client.json_headers) |
def substitute(template, mapping=None):
"""
Render the template *template*. *mapping* is a :class:`dict` with
values to add to the template.
"""
if mapping is None:
mapping = {}
templ = Template(template)
return templ.substitute(mapping) | def function[substitute, parameter[template, mapping]]:
constant[
Render the template *template*. *mapping* is a :class:`dict` with
values to add to the template.
]
if compare[name[mapping] is constant[None]] begin[:]
variable[mapping] assign[=] dictionary[[], []]
variable[templ] assign[=] call[name[Template], parameter[name[template]]]
return[call[name[templ].substitute, parameter[name[mapping]]]] | keyword[def] identifier[substitute] ( identifier[template] , identifier[mapping] = keyword[None] ):
literal[string]
keyword[if] identifier[mapping] keyword[is] keyword[None] :
identifier[mapping] ={}
identifier[templ] = identifier[Template] ( identifier[template] )
keyword[return] identifier[templ] . identifier[substitute] ( identifier[mapping] ) | def substitute(template, mapping=None):
"""
Render the template *template*. *mapping* is a :class:`dict` with
values to add to the template.
"""
if mapping is None:
mapping = {} # depends on [control=['if'], data=['mapping']]
templ = Template(template)
return templ.substitute(mapping) |
def _clear_interrupt(self, intbit):
"""Clear the specified interrupt bit in the interrupt status register.
"""
int_status = self._device.readU8(VCNL4010_INTSTAT);
int_status &= ~intbit;
self._device.write8(VCNL4010_INTSTAT, int_status); | def function[_clear_interrupt, parameter[self, intbit]]:
constant[Clear the specified interrupt bit in the interrupt status register.
]
variable[int_status] assign[=] call[name[self]._device.readU8, parameter[name[VCNL4010_INTSTAT]]]
<ast.AugAssign object at 0x7da18bcc9840>
call[name[self]._device.write8, parameter[name[VCNL4010_INTSTAT], name[int_status]]] | keyword[def] identifier[_clear_interrupt] ( identifier[self] , identifier[intbit] ):
literal[string]
identifier[int_status] = identifier[self] . identifier[_device] . identifier[readU8] ( identifier[VCNL4010_INTSTAT] );
identifier[int_status] &=~ identifier[intbit] ;
identifier[self] . identifier[_device] . identifier[write8] ( identifier[VCNL4010_INTSTAT] , identifier[int_status] ); | def _clear_interrupt(self, intbit):
"""Clear the specified interrupt bit in the interrupt status register.
"""
int_status = self._device.readU8(VCNL4010_INTSTAT)
int_status &= ~intbit
self._device.write8(VCNL4010_INTSTAT, int_status) |
def accept(self):
"""
Update `show_errors` and hide dialog box.
Overrides method of `QDialogBox`.
"""
AutosaveErrorDialog.show_errors = not self.dismiss_box.isChecked()
return QDialog.accept(self) | def function[accept, parameter[self]]:
constant[
Update `show_errors` and hide dialog box.
Overrides method of `QDialogBox`.
]
name[AutosaveErrorDialog].show_errors assign[=] <ast.UnaryOp object at 0x7da20c7c80d0>
return[call[name[QDialog].accept, parameter[name[self]]]] | keyword[def] identifier[accept] ( identifier[self] ):
literal[string]
identifier[AutosaveErrorDialog] . identifier[show_errors] = keyword[not] identifier[self] . identifier[dismiss_box] . identifier[isChecked] ()
keyword[return] identifier[QDialog] . identifier[accept] ( identifier[self] ) | def accept(self):
"""
Update `show_errors` and hide dialog box.
Overrides method of `QDialogBox`.
"""
AutosaveErrorDialog.show_errors = not self.dismiss_box.isChecked()
return QDialog.accept(self) |
def focusout(event):
"""Change style on focus out events."""
w = event.widget.spinbox
bc = w.style.lookup("TEntry", "bordercolor", ("!focus",))
dc = w.style.lookup("TEntry", "darkcolor", ("!focus",))
lc = w.style.lookup("TEntry", "lightcolor", ("!focus",))
w.style.configure("%s.spinbox.TFrame" % event.widget, bordercolor=bc,
darkcolor=dc, lightcolor=lc) | def function[focusout, parameter[event]]:
constant[Change style on focus out events.]
variable[w] assign[=] name[event].widget.spinbox
variable[bc] assign[=] call[name[w].style.lookup, parameter[constant[TEntry], constant[bordercolor], tuple[[<ast.Constant object at 0x7da18f00ff70>]]]]
variable[dc] assign[=] call[name[w].style.lookup, parameter[constant[TEntry], constant[darkcolor], tuple[[<ast.Constant object at 0x7da18f00c460>]]]]
variable[lc] assign[=] call[name[w].style.lookup, parameter[constant[TEntry], constant[lightcolor], tuple[[<ast.Constant object at 0x7da18f00d180>]]]]
call[name[w].style.configure, parameter[binary_operation[constant[%s.spinbox.TFrame] <ast.Mod object at 0x7da2590d6920> name[event].widget]]] | keyword[def] identifier[focusout] ( identifier[event] ):
literal[string]
identifier[w] = identifier[event] . identifier[widget] . identifier[spinbox]
identifier[bc] = identifier[w] . identifier[style] . identifier[lookup] ( literal[string] , literal[string] ,( literal[string] ,))
identifier[dc] = identifier[w] . identifier[style] . identifier[lookup] ( literal[string] , literal[string] ,( literal[string] ,))
identifier[lc] = identifier[w] . identifier[style] . identifier[lookup] ( literal[string] , literal[string] ,( literal[string] ,))
identifier[w] . identifier[style] . identifier[configure] ( literal[string] % identifier[event] . identifier[widget] , identifier[bordercolor] = identifier[bc] ,
identifier[darkcolor] = identifier[dc] , identifier[lightcolor] = identifier[lc] ) | def focusout(event):
"""Change style on focus out events."""
w = event.widget.spinbox
bc = w.style.lookup('TEntry', 'bordercolor', ('!focus',))
dc = w.style.lookup('TEntry', 'darkcolor', ('!focus',))
lc = w.style.lookup('TEntry', 'lightcolor', ('!focus',))
w.style.configure('%s.spinbox.TFrame' % event.widget, bordercolor=bc, darkcolor=dc, lightcolor=lc) |
def get_raw_access_token(self, method='POST', **kwargs):
'''
Returns a Requests' response over the
:attr:`OAuth2Service.access_token_url`.
Use this if your endpoint if you need the full `Response` object.
:param method: A string representation of the HTTP method to be used,
defaults to `POST`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
key = 'params'
if method in ENTITY_METHODS:
key = 'data'
kwargs.setdefault(key, {})
kwargs[key].update({'client_id': self.client_id,
'client_secret': self.client_secret})
session = self.get_session()
self.access_token_response = session.request(method,
self.access_token_url,
**kwargs)
return self.access_token_response | def function[get_raw_access_token, parameter[self, method]]:
constant[
Returns a Requests' response over the
:attr:`OAuth2Service.access_token_url`.
Use this if your endpoint if you need the full `Response` object.
:param method: A string representation of the HTTP method to be used,
defaults to `POST`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
]
variable[key] assign[=] constant[params]
if compare[name[method] in name[ENTITY_METHODS]] begin[:]
variable[key] assign[=] constant[data]
call[name[kwargs].setdefault, parameter[name[key], dictionary[[], []]]]
call[call[name[kwargs]][name[key]].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0685810>, <ast.Constant object at 0x7da1b0684eb0>], [<ast.Attribute object at 0x7da1b0686920>, <ast.Attribute object at 0x7da1b0687490>]]]]
variable[session] assign[=] call[name[self].get_session, parameter[]]
name[self].access_token_response assign[=] call[name[session].request, parameter[name[method], name[self].access_token_url]]
return[name[self].access_token_response] | keyword[def] identifier[get_raw_access_token] ( identifier[self] , identifier[method] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[key] = literal[string]
keyword[if] identifier[method] keyword[in] identifier[ENTITY_METHODS] :
identifier[key] = literal[string]
identifier[kwargs] . identifier[setdefault] ( identifier[key] ,{})
identifier[kwargs] [ identifier[key] ]. identifier[update] ({ literal[string] : identifier[self] . identifier[client_id] ,
literal[string] : identifier[self] . identifier[client_secret] })
identifier[session] = identifier[self] . identifier[get_session] ()
identifier[self] . identifier[access_token_response] = identifier[session] . identifier[request] ( identifier[method] ,
identifier[self] . identifier[access_token_url] ,
** identifier[kwargs] )
keyword[return] identifier[self] . identifier[access_token_response] | def get_raw_access_token(self, method='POST', **kwargs):
"""
Returns a Requests' response over the
:attr:`OAuth2Service.access_token_url`.
Use this if your endpoint if you need the full `Response` object.
:param method: A string representation of the HTTP method to be used,
defaults to `POST`.
:type method: str
:param \\*\\*kwargs: Optional arguments. Same as Requests.
:type \\*\\*kwargs: dict
"""
key = 'params'
if method in ENTITY_METHODS:
key = 'data' # depends on [control=['if'], data=[]]
kwargs.setdefault(key, {})
kwargs[key].update({'client_id': self.client_id, 'client_secret': self.client_secret})
session = self.get_session()
self.access_token_response = session.request(method, self.access_token_url, **kwargs)
return self.access_token_response |
def clean_ids(feed: "Feed") -> "Feed":
"""
In the given "Feed", strip whitespace from all string IDs and
then replace every remaining whitespace chunk with an underscore.
Return the resulting "Feed".
"""
# Alter feed inputs only, and build a new feed from them.
# The derived feed attributes, such as feed.trips_i,
# will be automatically handled when creating the new feed.
feed = feed.copy()
for table in cs.GTFS_REF["table"].unique():
f = getattr(feed, table)
if f is None:
continue
for column in cs.GTFS_REF.loc[cs.GTFS_REF["table"] == table, "column"]:
if column in f.columns and column.endswith("_id"):
try:
f[column] = f[column].str.strip().str.replace(r"\s+", "_")
setattr(feed, table, f)
except AttributeError:
# Column is not of string type
continue
return feed | def function[clean_ids, parameter[feed]]:
constant[
In the given "Feed", strip whitespace from all string IDs and
then replace every remaining whitespace chunk with an underscore.
Return the resulting "Feed".
]
variable[feed] assign[=] call[name[feed].copy, parameter[]]
for taget[name[table]] in starred[call[call[name[cs].GTFS_REF][constant[table]].unique, parameter[]]] begin[:]
variable[f] assign[=] call[name[getattr], parameter[name[feed], name[table]]]
if compare[name[f] is constant[None]] begin[:]
continue
for taget[name[column]] in starred[call[name[cs].GTFS_REF.loc][tuple[[<ast.Compare object at 0x7da1b0b92ef0>, <ast.Constant object at 0x7da1b0b92fb0>]]]] begin[:]
if <ast.BoolOp object at 0x7da1b0b92f80> begin[:]
<ast.Try object at 0x7da1b0b92da0>
return[name[feed]] | keyword[def] identifier[clean_ids] ( identifier[feed] : literal[string] )-> literal[string] :
literal[string]
identifier[feed] = identifier[feed] . identifier[copy] ()
keyword[for] identifier[table] keyword[in] identifier[cs] . identifier[GTFS_REF] [ literal[string] ]. identifier[unique] ():
identifier[f] = identifier[getattr] ( identifier[feed] , identifier[table] )
keyword[if] identifier[f] keyword[is] keyword[None] :
keyword[continue]
keyword[for] identifier[column] keyword[in] identifier[cs] . identifier[GTFS_REF] . identifier[loc] [ identifier[cs] . identifier[GTFS_REF] [ literal[string] ]== identifier[table] , literal[string] ]:
keyword[if] identifier[column] keyword[in] identifier[f] . identifier[columns] keyword[and] identifier[column] . identifier[endswith] ( literal[string] ):
keyword[try] :
identifier[f] [ identifier[column] ]= identifier[f] [ identifier[column] ]. identifier[str] . identifier[strip] (). identifier[str] . identifier[replace] ( literal[string] , literal[string] )
identifier[setattr] ( identifier[feed] , identifier[table] , identifier[f] )
keyword[except] identifier[AttributeError] :
keyword[continue]
keyword[return] identifier[feed] | def clean_ids(feed: 'Feed') -> 'Feed':
"""
In the given "Feed", strip whitespace from all string IDs and
then replace every remaining whitespace chunk with an underscore.
Return the resulting "Feed".
"""
# Alter feed inputs only, and build a new feed from them.
# The derived feed attributes, such as feed.trips_i,
# will be automatically handled when creating the new feed.
feed = feed.copy()
for table in cs.GTFS_REF['table'].unique():
f = getattr(feed, table)
if f is None:
continue # depends on [control=['if'], data=[]]
for column in cs.GTFS_REF.loc[cs.GTFS_REF['table'] == table, 'column']:
if column in f.columns and column.endswith('_id'):
try:
f[column] = f[column].str.strip().str.replace('\\s+', '_')
setattr(feed, table, f) # depends on [control=['try'], data=[]]
except AttributeError:
# Column is not of string type
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']] # depends on [control=['for'], data=['table']]
return feed |
def parse_date(date_string):
"""
Parse the given string as datetime object. This parser supports in almost any string formats.
For relative times, like `10min ago`, this parser computes the actual time relative to current UTC time. This
allows time to always be in UTC if an explicit time zone is not provided.
Parameters
----------
date_string : str
String representing the date
Returns
-------
datetime.datetime
Parsed datetime object. None, if the string cannot be parsed.
"""
parser_settings = {
# Relative times like '10m ago' must subtract from the current UTC time. Without this setting, dateparser
# will use current local time as the base for subtraction, but falsely assume it is a UTC time. Therefore
# the time that dateparser returns will be a `datetime` object that did not have any timezone information.
# So be explicit to set the time to UTC.
"RELATIVE_BASE": datetime.datetime.utcnow()
}
return dateparser.parse(date_string, settings=parser_settings) | def function[parse_date, parameter[date_string]]:
constant[
Parse the given string as datetime object. This parser supports in almost any string formats.
For relative times, like `10min ago`, this parser computes the actual time relative to current UTC time. This
allows time to always be in UTC if an explicit time zone is not provided.
Parameters
----------
date_string : str
String representing the date
Returns
-------
datetime.datetime
Parsed datetime object. None, if the string cannot be parsed.
]
variable[parser_settings] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f61f60>], [<ast.Call object at 0x7da1b1f63fa0>]]
return[call[name[dateparser].parse, parameter[name[date_string]]]] | keyword[def] identifier[parse_date] ( identifier[date_string] ):
literal[string]
identifier[parser_settings] ={
literal[string] : identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
}
keyword[return] identifier[dateparser] . identifier[parse] ( identifier[date_string] , identifier[settings] = identifier[parser_settings] ) | def parse_date(date_string):
"""
Parse the given string as datetime object. This parser supports in almost any string formats.
For relative times, like `10min ago`, this parser computes the actual time relative to current UTC time. This
allows time to always be in UTC if an explicit time zone is not provided.
Parameters
----------
date_string : str
String representing the date
Returns
-------
datetime.datetime
Parsed datetime object. None, if the string cannot be parsed.
"""
# Relative times like '10m ago' must subtract from the current UTC time. Without this setting, dateparser
# will use current local time as the base for subtraction, but falsely assume it is a UTC time. Therefore
# the time that dateparser returns will be a `datetime` object that did not have any timezone information.
# So be explicit to set the time to UTC.
parser_settings = {'RELATIVE_BASE': datetime.datetime.utcnow()}
return dateparser.parse(date_string, settings=parser_settings) |
def select_actions(root, action_space, max_episode_steps):
"""
Select actions from the tree
Normally we select the greedy action that has the highest reward
associated with that subtree. We have a small chance to select a
random action based on the exploration param and visit count of the
current node at each step.
We select actions for the longest possible episode, but normally these
will not all be used. They will instead be truncated to the length
of the actual episode and then used to update the tree.
"""
node = root
acts = []
steps = 0
while steps < max_episode_steps:
if node is None:
# we've fallen off the explored area of the tree, just select random actions
act = action_space.sample()
else:
epsilon = EXPLORATION_PARAM / np.log(node.visits + 2)
if random.random() < epsilon:
# random action
act = action_space.sample()
else:
# greedy action
act_value = {}
for act in range(action_space.n):
if node is not None and act in node.children:
act_value[act] = node.children[act].value
else:
act_value[act] = -np.inf
best_value = max(act_value.values())
best_acts = [
act for act, value in act_value.items() if value == best_value
]
act = random.choice(best_acts)
if act in node.children:
node = node.children[act]
else:
node = None
acts.append(act)
steps += 1
return acts | def function[select_actions, parameter[root, action_space, max_episode_steps]]:
constant[
Select actions from the tree
Normally we select the greedy action that has the highest reward
associated with that subtree. We have a small chance to select a
random action based on the exploration param and visit count of the
current node at each step.
We select actions for the longest possible episode, but normally these
will not all be used. They will instead be truncated to the length
of the actual episode and then used to update the tree.
]
variable[node] assign[=] name[root]
variable[acts] assign[=] list[[]]
variable[steps] assign[=] constant[0]
while compare[name[steps] less[<] name[max_episode_steps]] begin[:]
if compare[name[node] is constant[None]] begin[:]
variable[act] assign[=] call[name[action_space].sample, parameter[]]
call[name[acts].append, parameter[name[act]]]
<ast.AugAssign object at 0x7da2054a4bb0>
return[name[acts]] | keyword[def] identifier[select_actions] ( identifier[root] , identifier[action_space] , identifier[max_episode_steps] ):
literal[string]
identifier[node] = identifier[root]
identifier[acts] =[]
identifier[steps] = literal[int]
keyword[while] identifier[steps] < identifier[max_episode_steps] :
keyword[if] identifier[node] keyword[is] keyword[None] :
identifier[act] = identifier[action_space] . identifier[sample] ()
keyword[else] :
identifier[epsilon] = identifier[EXPLORATION_PARAM] / identifier[np] . identifier[log] ( identifier[node] . identifier[visits] + literal[int] )
keyword[if] identifier[random] . identifier[random] ()< identifier[epsilon] :
identifier[act] = identifier[action_space] . identifier[sample] ()
keyword[else] :
identifier[act_value] ={}
keyword[for] identifier[act] keyword[in] identifier[range] ( identifier[action_space] . identifier[n] ):
keyword[if] identifier[node] keyword[is] keyword[not] keyword[None] keyword[and] identifier[act] keyword[in] identifier[node] . identifier[children] :
identifier[act_value] [ identifier[act] ]= identifier[node] . identifier[children] [ identifier[act] ]. identifier[value]
keyword[else] :
identifier[act_value] [ identifier[act] ]=- identifier[np] . identifier[inf]
identifier[best_value] = identifier[max] ( identifier[act_value] . identifier[values] ())
identifier[best_acts] =[
identifier[act] keyword[for] identifier[act] , identifier[value] keyword[in] identifier[act_value] . identifier[items] () keyword[if] identifier[value] == identifier[best_value]
]
identifier[act] = identifier[random] . identifier[choice] ( identifier[best_acts] )
keyword[if] identifier[act] keyword[in] identifier[node] . identifier[children] :
identifier[node] = identifier[node] . identifier[children] [ identifier[act] ]
keyword[else] :
identifier[node] = keyword[None]
identifier[acts] . identifier[append] ( identifier[act] )
identifier[steps] += literal[int]
keyword[return] identifier[acts] | def select_actions(root, action_space, max_episode_steps):
"""
Select actions from the tree
Normally we select the greedy action that has the highest reward
associated with that subtree. We have a small chance to select a
random action based on the exploration param and visit count of the
current node at each step.
We select actions for the longest possible episode, but normally these
will not all be used. They will instead be truncated to the length
of the actual episode and then used to update the tree.
"""
node = root
acts = []
steps = 0
while steps < max_episode_steps:
if node is None:
# we've fallen off the explored area of the tree, just select random actions
act = action_space.sample() # depends on [control=['if'], data=[]]
else:
epsilon = EXPLORATION_PARAM / np.log(node.visits + 2)
if random.random() < epsilon:
# random action
act = action_space.sample() # depends on [control=['if'], data=[]]
else:
# greedy action
act_value = {}
for act in range(action_space.n):
if node is not None and act in node.children:
act_value[act] = node.children[act].value # depends on [control=['if'], data=[]]
else:
act_value[act] = -np.inf # depends on [control=['for'], data=['act']]
best_value = max(act_value.values())
best_acts = [act for (act, value) in act_value.items() if value == best_value]
act = random.choice(best_acts)
if act in node.children:
node = node.children[act] # depends on [control=['if'], data=['act']]
else:
node = None
acts.append(act)
steps += 1 # depends on [control=['while'], data=['steps']]
return acts |
def make_radial_kernel(psf, fn, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                       normalize=False, klims=None, sparse=False):
    """Make a kernel for a general radially symmetric 2D function.

    Evaluates ``fn`` on a radial grid for each energy bin and interpolates
    it onto an ``npix`` x ``npix`` pixel map centered at (``xpix``, ``ypix``),
    oversampling (up to 8x) when the pixel size is large compared to the
    PSF core.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`
    fn : callable
        Function that evaluates the kernel at a radial coordinate r.
    sigma : float
        68% containment radius in degrees.
    npix : int
        Number of pixels per side of the output kernel image.
    cdelt : float
        Pixel size (same angular units as the containment radii;
        presumably degrees -- TODO confirm against callers).
    xpix, ypix : float
        Pixel coordinates of the kernel center.
    psf_scale_fn : callable, optional
        Forwarded to ``eval_radial_kernel``.
    normalize : bool
        If True, rescale the kernel at the end (see NOTE below).
    klims : tuple, optional
        Inclusive (first, last) energy-bin index range to evaluate;
        None evaluates all energies.
    sparse : bool
        If True, only interpolate pixels inside the per-energy radius
        ``rmax`` and restrict the radial samples to that radius.

    Returns
    -------
    k : `~numpy.ndarray`
        Kernel cube with shape (n_energy,) + shape of the pixel map.
    """
    if klims is None:
        egy = psf.energies
    else:
        # klims is an inclusive index range into the energy axis.
        egy = psf.energies[klims[0]:klims[1] + 1]
    # Angular distance of every pixel from the kernel center.
    ang_dist = make_pixel_distance(npix, xpix, ypix) * cdelt
    max_ang_dist = np.max(ang_dist) + cdelt
    #dtheta = np.linspace(0.0, (np.max(ang_dist) * 1.05)**0.5, 200)**2.0
    # z = create_kernel_function_lookup(psf, fn, sigma, egy,
    #                                  dtheta, psf_scale_fn)
    shape = (len(egy), npix, npix)
    k = np.zeros(shape)
    # Containment radii set the radial evaluation range per energy bin.
    r99 = psf.containment_angle(energies=egy, fraction=0.997)
    r34 = psf.containment_angle(energies=egy, fraction=0.34)
    rmin = np.maximum(r34 / 4., 0.01)
    rmax = np.maximum(r99, 0.1)
    if sigma is not None:
        # Widen the range so a convolution kernel of width sigma fits.
        rmin = np.maximum(rmin, 0.5 * sigma)
        rmax = np.maximum(rmax, 2.0 * r34 + 3.0 * sigma)
    rmax = np.minimum(rmax, max_ang_dist)
    for i in range(len(egy)):
        # Oversampling factor: finer than the PSF core, capped at 8.
        rebin = min(int(np.ceil(cdelt / rmin[i])), 8)
        if sparse:
            # Quadratically spaced radial samples (denser near r = 0).
            dtheta = np.linspace(0.0, rmax[i]**0.5, 100)**2.0
        else:
            dtheta = np.linspace(0.0, max_ang_dist**0.5, 200)**2.0
        z = eval_radial_kernel(psf, fn, sigma, i, dtheta, psf_scale_fn)
        # Pixel distances on the oversampled grid; the (rebin - 1)/2
        # offset keeps the kernel center fixed after rebinning.
        xdist = make_pixel_distance(npix * rebin,
                                    xpix * rebin + (rebin - 1.0) / 2.,
                                    ypix * rebin + (rebin - 1.0) / 2.)
        xdist *= cdelt / float(rebin)
        #x = val_to_pix(dtheta, np.ravel(xdist))
        if sparse:
            # Only interpolate pixels within rmax; the rest stay zero.
            m = np.ravel(xdist) < rmax[i]
            kk = np.zeros(xdist.size)
            #kk[m] = map_coordinates(z, [x[m]], order=2, prefilter=False)
            kk[m] = np.interp(np.ravel(xdist)[m], dtheta, z)
            kk = kk.reshape(xdist.shape)
        else:
            kk = np.interp(np.ravel(xdist), dtheta, z).reshape(xdist.shape)
        #    kk = map_coordinates(z, [x], order=2,
        #                         prefilter=False).reshape(xdist.shape)
        if rebin > 1:
            # Collapse the oversampled grid back to npix x npix.
            kk = sum_bins(kk, 0, rebin)
            kk = sum_bins(kk, 1, rebin)
        # Average (not sum) over the rebin**2 subpixels.
        k[i] = kk / float(rebin)**2
    k = k.reshape((len(egy),) + ang_dist.shape)
    if normalize:
        # NOTE(review): the sum runs over axis=0 (the energy axis), not
        # the spatial axes -- confirm this is the intended normalization.
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
constant[Make a kernel for a general radially symmetric 2D function.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
fn : callable
Function that evaluates the kernel at a radial coordinate r.
sigma : float
68% containment radius in degrees.
]
if compare[name[klims] is constant[None]] begin[:]
variable[egy] assign[=] name[psf].energies
variable[ang_dist] assign[=] binary_operation[call[name[make_pixel_distance], parameter[name[npix], name[xpix], name[ypix]]] * name[cdelt]]
variable[max_ang_dist] assign[=] binary_operation[call[name[np].max, parameter[name[ang_dist]]] + name[cdelt]]
variable[shape] assign[=] tuple[[<ast.Call object at 0x7da207f009d0>, <ast.Name object at 0x7da207f03040>, <ast.Name object at 0x7da207f029e0>]]
variable[k] assign[=] call[name[np].zeros, parameter[name[shape]]]
variable[r99] assign[=] call[name[psf].containment_angle, parameter[]]
variable[r34] assign[=] call[name[psf].containment_angle, parameter[]]
variable[rmin] assign[=] call[name[np].maximum, parameter[binary_operation[name[r34] / constant[4.0]], constant[0.01]]]
variable[rmax] assign[=] call[name[np].maximum, parameter[name[r99], constant[0.1]]]
if compare[name[sigma] is_not constant[None]] begin[:]
variable[rmin] assign[=] call[name[np].maximum, parameter[name[rmin], binary_operation[constant[0.5] * name[sigma]]]]
variable[rmax] assign[=] call[name[np].maximum, parameter[name[rmax], binary_operation[binary_operation[constant[2.0] * name[r34]] + binary_operation[constant[3.0] * name[sigma]]]]]
variable[rmax] assign[=] call[name[np].minimum, parameter[name[rmax], name[max_ang_dist]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[egy]]]]]] begin[:]
variable[rebin] assign[=] call[name[min], parameter[call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[name[cdelt] / call[name[rmin]][name[i]]]]]]], constant[8]]]
if name[sparse] begin[:]
variable[dtheta] assign[=] binary_operation[call[name[np].linspace, parameter[constant[0.0], binary_operation[call[name[rmax]][name[i]] ** constant[0.5]], constant[100]]] ** constant[2.0]]
variable[z] assign[=] call[name[eval_radial_kernel], parameter[name[psf], name[fn], name[sigma], name[i], name[dtheta], name[psf_scale_fn]]]
variable[xdist] assign[=] call[name[make_pixel_distance], parameter[binary_operation[name[npix] * name[rebin]], binary_operation[binary_operation[name[xpix] * name[rebin]] + binary_operation[binary_operation[name[rebin] - constant[1.0]] / constant[2.0]]], binary_operation[binary_operation[name[ypix] * name[rebin]] + binary_operation[binary_operation[name[rebin] - constant[1.0]] / constant[2.0]]]]]
<ast.AugAssign object at 0x7da207f036a0>
if name[sparse] begin[:]
variable[m] assign[=] compare[call[name[np].ravel, parameter[name[xdist]]] less[<] call[name[rmax]][name[i]]]
variable[kk] assign[=] call[name[np].zeros, parameter[name[xdist].size]]
call[name[kk]][name[m]] assign[=] call[name[np].interp, parameter[call[call[name[np].ravel, parameter[name[xdist]]]][name[m]], name[dtheta], name[z]]]
variable[kk] assign[=] call[name[kk].reshape, parameter[name[xdist].shape]]
if compare[name[rebin] greater[>] constant[1]] begin[:]
variable[kk] assign[=] call[name[sum_bins], parameter[name[kk], constant[0], name[rebin]]]
variable[kk] assign[=] call[name[sum_bins], parameter[name[kk], constant[1], name[rebin]]]
call[name[k]][name[i]] assign[=] binary_operation[name[kk] / binary_operation[call[name[float], parameter[name[rebin]]] ** constant[2]]]
variable[k] assign[=] call[name[k].reshape, parameter[binary_operation[tuple[[<ast.Call object at 0x7da207f03dc0>]] + name[ang_dist].shape]]]
if name[normalize] begin[:]
<ast.AugAssign object at 0x7da18f00da50>
return[name[k]] | keyword[def] identifier[make_radial_kernel] ( identifier[psf] , identifier[fn] , identifier[sigma] , identifier[npix] , identifier[cdelt] , identifier[xpix] , identifier[ypix] , identifier[psf_scale_fn] = keyword[None] ,
identifier[normalize] = keyword[False] , identifier[klims] = keyword[None] , identifier[sparse] = keyword[False] ):
literal[string]
keyword[if] identifier[klims] keyword[is] keyword[None] :
identifier[egy] = identifier[psf] . identifier[energies]
keyword[else] :
identifier[egy] = identifier[psf] . identifier[energies] [ identifier[klims] [ literal[int] ]: identifier[klims] [ literal[int] ]+ literal[int] ]
identifier[ang_dist] = identifier[make_pixel_distance] ( identifier[npix] , identifier[xpix] , identifier[ypix] )* identifier[cdelt]
identifier[max_ang_dist] = identifier[np] . identifier[max] ( identifier[ang_dist] )+ identifier[cdelt]
identifier[shape] =( identifier[len] ( identifier[egy] ), identifier[npix] , identifier[npix] )
identifier[k] = identifier[np] . identifier[zeros] ( identifier[shape] )
identifier[r99] = identifier[psf] . identifier[containment_angle] ( identifier[energies] = identifier[egy] , identifier[fraction] = literal[int] )
identifier[r34] = identifier[psf] . identifier[containment_angle] ( identifier[energies] = identifier[egy] , identifier[fraction] = literal[int] )
identifier[rmin] = identifier[np] . identifier[maximum] ( identifier[r34] / literal[int] , literal[int] )
identifier[rmax] = identifier[np] . identifier[maximum] ( identifier[r99] , literal[int] )
keyword[if] identifier[sigma] keyword[is] keyword[not] keyword[None] :
identifier[rmin] = identifier[np] . identifier[maximum] ( identifier[rmin] , literal[int] * identifier[sigma] )
identifier[rmax] = identifier[np] . identifier[maximum] ( identifier[rmax] , literal[int] * identifier[r34] + literal[int] * identifier[sigma] )
identifier[rmax] = identifier[np] . identifier[minimum] ( identifier[rmax] , identifier[max_ang_dist] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[egy] )):
identifier[rebin] = identifier[min] ( identifier[int] ( identifier[np] . identifier[ceil] ( identifier[cdelt] / identifier[rmin] [ identifier[i] ])), literal[int] )
keyword[if] identifier[sparse] :
identifier[dtheta] = identifier[np] . identifier[linspace] ( literal[int] , identifier[rmax] [ identifier[i] ]** literal[int] , literal[int] )** literal[int]
keyword[else] :
identifier[dtheta] = identifier[np] . identifier[linspace] ( literal[int] , identifier[max_ang_dist] ** literal[int] , literal[int] )** literal[int]
identifier[z] = identifier[eval_radial_kernel] ( identifier[psf] , identifier[fn] , identifier[sigma] , identifier[i] , identifier[dtheta] , identifier[psf_scale_fn] )
identifier[xdist] = identifier[make_pixel_distance] ( identifier[npix] * identifier[rebin] ,
identifier[xpix] * identifier[rebin] +( identifier[rebin] - literal[int] )/ literal[int] ,
identifier[ypix] * identifier[rebin] +( identifier[rebin] - literal[int] )/ literal[int] )
identifier[xdist] *= identifier[cdelt] / identifier[float] ( identifier[rebin] )
keyword[if] identifier[sparse] :
identifier[m] = identifier[np] . identifier[ravel] ( identifier[xdist] )< identifier[rmax] [ identifier[i] ]
identifier[kk] = identifier[np] . identifier[zeros] ( identifier[xdist] . identifier[size] )
identifier[kk] [ identifier[m] ]= identifier[np] . identifier[interp] ( identifier[np] . identifier[ravel] ( identifier[xdist] )[ identifier[m] ], identifier[dtheta] , identifier[z] )
identifier[kk] = identifier[kk] . identifier[reshape] ( identifier[xdist] . identifier[shape] )
keyword[else] :
identifier[kk] = identifier[np] . identifier[interp] ( identifier[np] . identifier[ravel] ( identifier[xdist] ), identifier[dtheta] , identifier[z] ). identifier[reshape] ( identifier[xdist] . identifier[shape] )
keyword[if] identifier[rebin] > literal[int] :
identifier[kk] = identifier[sum_bins] ( identifier[kk] , literal[int] , identifier[rebin] )
identifier[kk] = identifier[sum_bins] ( identifier[kk] , literal[int] , identifier[rebin] )
identifier[k] [ identifier[i] ]= identifier[kk] / identifier[float] ( identifier[rebin] )** literal[int]
identifier[k] = identifier[k] . identifier[reshape] (( identifier[len] ( identifier[egy] ),)+ identifier[ang_dist] . identifier[shape] )
keyword[if] identifier[normalize] :
identifier[k] /=( identifier[np] . identifier[sum] ( identifier[k] , identifier[axis] = literal[int] )[ identifier[np] . identifier[newaxis] ,...]* identifier[np] . identifier[radians] ( identifier[cdelt] )** literal[int] )
keyword[return] identifier[k] | def make_radial_kernel(psf, fn, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False, klims=None, sparse=False):
"""Make a kernel for a general radially symmetric 2D function.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
fn : callable
Function that evaluates the kernel at a radial coordinate r.
sigma : float
68% containment radius in degrees.
"""
if klims is None:
egy = psf.energies # depends on [control=['if'], data=[]]
else:
egy = psf.energies[klims[0]:klims[1] + 1]
ang_dist = make_pixel_distance(npix, xpix, ypix) * cdelt
max_ang_dist = np.max(ang_dist) + cdelt
#dtheta = np.linspace(0.0, (np.max(ang_dist) * 1.05)**0.5, 200)**2.0
# z = create_kernel_function_lookup(psf, fn, sigma, egy,
# dtheta, psf_scale_fn)
shape = (len(egy), npix, npix)
k = np.zeros(shape)
r99 = psf.containment_angle(energies=egy, fraction=0.997)
r34 = psf.containment_angle(energies=egy, fraction=0.34)
rmin = np.maximum(r34 / 4.0, 0.01)
rmax = np.maximum(r99, 0.1)
if sigma is not None:
rmin = np.maximum(rmin, 0.5 * sigma)
rmax = np.maximum(rmax, 2.0 * r34 + 3.0 * sigma) # depends on [control=['if'], data=['sigma']]
rmax = np.minimum(rmax, max_ang_dist)
for i in range(len(egy)):
rebin = min(int(np.ceil(cdelt / rmin[i])), 8)
if sparse:
dtheta = np.linspace(0.0, rmax[i] ** 0.5, 100) ** 2.0 # depends on [control=['if'], data=[]]
else:
dtheta = np.linspace(0.0, max_ang_dist ** 0.5, 200) ** 2.0
z = eval_radial_kernel(psf, fn, sigma, i, dtheta, psf_scale_fn)
xdist = make_pixel_distance(npix * rebin, xpix * rebin + (rebin - 1.0) / 2.0, ypix * rebin + (rebin - 1.0) / 2.0)
xdist *= cdelt / float(rebin)
#x = val_to_pix(dtheta, np.ravel(xdist))
if sparse:
m = np.ravel(xdist) < rmax[i]
kk = np.zeros(xdist.size)
#kk[m] = map_coordinates(z, [x[m]], order=2, prefilter=False)
kk[m] = np.interp(np.ravel(xdist)[m], dtheta, z)
kk = kk.reshape(xdist.shape) # depends on [control=['if'], data=[]]
else:
kk = np.interp(np.ravel(xdist), dtheta, z).reshape(xdist.shape)
# kk = map_coordinates(z, [x], order=2,
# prefilter=False).reshape(xdist.shape)
if rebin > 1:
kk = sum_bins(kk, 0, rebin)
kk = sum_bins(kk, 1, rebin) # depends on [control=['if'], data=['rebin']]
k[i] = kk / float(rebin) ** 2 # depends on [control=['for'], data=['i']]
k = k.reshape((len(egy),) + ang_dist.shape)
if normalize:
k /= np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2 # depends on [control=['if'], data=[]]
return k |
def train_return_grad(self, input, grad):
    """
    Performs a forward pass from the input batch, followed by a backward
    pass using the provided gradient (in place of a loss function). Returns
    a MpsFloatArray representing the output (final gradient) of the backward
    pass. Calling asnumpy() on this value will wait for the batch to finish
    and yield the output as a numpy array.

    Parameters
    ----------
    input : numpy.ndarray
        Input batch; must match the graph's input shape (``self._ishape``).
    grad : numpy.ndarray
        Gradient fed into the backward pass; must match the graph's
        output shape (``self._oshape``).
    """
    # Only valid when the graph was built for forward + backward passes.
    assert self._mode == MpsGraphMode.TrainReturnGrad
    assert input.shape == self._ishape
    assert grad.shape == self._oshape
    # Wrap the numpy arrays so they can cross the ctypes boundary.
    input_array = MpsFloatArray(input)
    grad_array = MpsFloatArray(grad)
    # Out-parameter receiving an opaque handle to the result array.
    result_handle = _ctypes.c_void_p()
    # NOTE(review): the assertion messages below reference
    # TCMPSTrainReturnGradGraph, but the call is TCMPSTrainGraph --
    # confirm which C entry point is intended for this mode.
    status_code = self._LIB.TCMPSTrainGraph(
        self.handle, input_array.handle, grad_array.handle,
        _ctypes.byref(result_handle))
    assert status_code == 0, "Error calling TCMPSTrainReturnGradGraph"
    assert result_handle, "TCMPSTrainReturnGradGraph unexpectedly returned NULL pointer"
    result = MpsFloatArray(result_handle)
    # The returned gradient has the same shape as the input batch.
    assert result.shape() == self._ishape
    return result
constant[
Performs a forward pass from the input batch, followed by a backward
pass using the provided gradient (in place of a loss function). Returns
a MpsFloatArray representing the output (final gradient) of the backward
pass. Calling asnumpy() on this value will wait for the batch to finish
and yield the output as a numpy array.
]
assert[compare[name[self]._mode equal[==] name[MpsGraphMode].TrainReturnGrad]]
assert[compare[name[input].shape equal[==] name[self]._ishape]]
assert[compare[name[grad].shape equal[==] name[self]._oshape]]
variable[input_array] assign[=] call[name[MpsFloatArray], parameter[name[input]]]
variable[grad_array] assign[=] call[name[MpsFloatArray], parameter[name[grad]]]
variable[result_handle] assign[=] call[name[_ctypes].c_void_p, parameter[]]
variable[status_code] assign[=] call[name[self]._LIB.TCMPSTrainGraph, parameter[name[self].handle, name[input_array].handle, name[grad_array].handle, call[name[_ctypes].byref, parameter[name[result_handle]]]]]
assert[compare[name[status_code] equal[==] constant[0]]]
assert[name[result_handle]]
variable[result] assign[=] call[name[MpsFloatArray], parameter[name[result_handle]]]
assert[compare[call[name[result].shape, parameter[]] equal[==] name[self]._ishape]]
return[name[result]] | keyword[def] identifier[train_return_grad] ( identifier[self] , identifier[input] , identifier[grad] ):
literal[string]
keyword[assert] identifier[self] . identifier[_mode] == identifier[MpsGraphMode] . identifier[TrainReturnGrad]
keyword[assert] identifier[input] . identifier[shape] == identifier[self] . identifier[_ishape]
keyword[assert] identifier[grad] . identifier[shape] == identifier[self] . identifier[_oshape]
identifier[input_array] = identifier[MpsFloatArray] ( identifier[input] )
identifier[grad_array] = identifier[MpsFloatArray] ( identifier[grad] )
identifier[result_handle] = identifier[_ctypes] . identifier[c_void_p] ()
identifier[status_code] = identifier[self] . identifier[_LIB] . identifier[TCMPSTrainGraph] (
identifier[self] . identifier[handle] , identifier[input_array] . identifier[handle] , identifier[grad_array] . identifier[handle] ,
identifier[_ctypes] . identifier[byref] ( identifier[result_handle] ))
keyword[assert] identifier[status_code] == literal[int] , literal[string]
keyword[assert] identifier[result_handle] , literal[string]
identifier[result] = identifier[MpsFloatArray] ( identifier[result_handle] )
keyword[assert] identifier[result] . identifier[shape] ()== identifier[self] . identifier[_ishape]
keyword[return] identifier[result] | def train_return_grad(self, input, grad):
"""
Performs a forward pass from the input batch, followed by a backward
pass using the provided gradient (in place of a loss function). Returns
a MpsFloatArray representing the output (final gradient) of the backward
pass. Calling asnumpy() on this value will wait for the batch to finish
and yield the output as a numpy array.
"""
assert self._mode == MpsGraphMode.TrainReturnGrad
assert input.shape == self._ishape
assert grad.shape == self._oshape
input_array = MpsFloatArray(input)
grad_array = MpsFloatArray(grad)
result_handle = _ctypes.c_void_p()
status_code = self._LIB.TCMPSTrainGraph(self.handle, input_array.handle, grad_array.handle, _ctypes.byref(result_handle))
assert status_code == 0, 'Error calling TCMPSTrainReturnGradGraph'
assert result_handle, 'TCMPSTrainReturnGradGraph unexpectedly returned NULL pointer'
result = MpsFloatArray(result_handle)
assert result.shape() == self._ishape
return result |
def resolve_url(to, *args, **kwargs):
    """
    Return a URL appropriate for the arguments passed.
    The arguments could be:
    * A model: the model's `get_absolute_url()` function will be called.
    * A view name, possibly with arguments: `urlresolvers.reverse()` will
      be used to reverse-resolve the name.
    * A URL, which will be returned as-is.
    """
    from compat import six, force_text
    # Model instances know their own URL.
    if hasattr(to, 'get_absolute_url'):
        return to.get_absolute_url()
    # Expand lazy translation proxies before handing the value to
    # functions (e.g. urlparse) that expect a real string.
    if isinstance(to, Promise):
        to = force_text(to)
    # Relative URLs are passed through untouched.
    if isinstance(to, six.string_types) and to.startswith(('./', '../')):
        return to
    # Next try a reverse URL resolution.
    try:
        return urlresolvers.reverse(to, args=args, kwargs=kwargs)
    except urlresolvers.NoReverseMatch:
        # Re-raise for callables, and for strings that don't "feel"
        # like a URL (no slash and no dot).
        if callable(to) or ('/' not in to and '.' not in to):
            raise
    # Finally, fall back and assume it's a URL
    return to
constant[
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
]
from relative_module[compat] import module[six], module[force_text]
if call[name[hasattr], parameter[name[to], constant[get_absolute_url]]] begin[:]
return[call[name[to].get_absolute_url, parameter[]]]
if call[name[isinstance], parameter[name[to], name[Promise]]] begin[:]
variable[to] assign[=] call[name[force_text], parameter[name[to]]]
if call[name[isinstance], parameter[name[to], name[six].string_types]] begin[:]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da20c9916c0>]] begin[:]
return[name[to]]
<ast.Try object at 0x7da20c992980>
return[name[to]] | keyword[def] identifier[resolve_url] ( identifier[to] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[compat] keyword[import] identifier[six] , identifier[force_text]
keyword[if] identifier[hasattr] ( identifier[to] , literal[string] ):
keyword[return] identifier[to] . identifier[get_absolute_url] ()
keyword[if] identifier[isinstance] ( identifier[to] , identifier[Promise] ):
identifier[to] = identifier[force_text] ( identifier[to] )
keyword[if] identifier[isinstance] ( identifier[to] , identifier[six] . identifier[string_types] ):
keyword[if] identifier[any] ( identifier[to] . identifier[startswith] ( identifier[path] ) keyword[for] identifier[path] keyword[in] ( literal[string] , literal[string] )):
keyword[return] identifier[to]
keyword[try] :
keyword[return] identifier[urlresolvers] . identifier[reverse] ( identifier[to] , identifier[args] = identifier[args] , identifier[kwargs] = identifier[kwargs] )
keyword[except] identifier[urlresolvers] . identifier[NoReverseMatch] :
keyword[if] identifier[callable] ( identifier[to] ):
keyword[raise]
keyword[if] literal[string] keyword[not] keyword[in] identifier[to] keyword[and] literal[string] keyword[not] keyword[in] identifier[to] :
keyword[raise]
keyword[return] identifier[to] | def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
from compat import six, force_text
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url() # depends on [control=['if'], data=[]]
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to) # depends on [control=['if'], data=[]]
if isinstance(to, six.string_types):
# Handle relative URLs
if any((to.startswith(path) for path in ('./', '../'))):
return to # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs) # depends on [control=['try'], data=[]]
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise # depends on [control=['if'], data=[]]
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
# Finally, fall back and assume it's a URL
return to |
def typed_fs_cache(app_name, *args, **kwargs):
    """Build a pre-configured ``fs_cache`` decorator factory for one app.

    Freezes the application name and any shared settings, so call sites
    only need to supply the cache type name::

        my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)

        @my_fs_cache('first_method')
        def some_method(*args, **kwargs):
            ...

        @my_fs_cache('second_method')
        def some_other_method(*args, **kwargs):
            ...
    """
    # partial() keeps the wrapped fs_cache introspectable (func/args/keywords).
    bound = functools.partial(fs_cache, app_name, *args, **kwargs)
    return bound
constant[ Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass
]
return[call[name[functools].partial, parameter[name[fs_cache], name[app_name], <ast.Starred object at 0x7da2041d8be0>]]] | keyword[def] identifier[typed_fs_cache] ( identifier[app_name] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[functools] . identifier[partial] ( identifier[fs_cache] , identifier[app_name] ,* identifier[args] ,** identifier[kwargs] ) | def typed_fs_cache(app_name, *args, **kwargs):
""" Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass
"""
return functools.partial(fs_cache, app_name, *args, **kwargs) |
def commands(cls):
    """Returns a list of all methods that start with ``cmd_``."""
    prefix = 'cmd_'
    found = []
    for attr in dir(cls):
        if attr.startswith(prefix):
            # Strip the prefix so only the command name remains.
            found.append(attr[len(prefix):])
    return found
constant[Returns a list of all methods that start with ``cmd_``.]
variable[cmds] assign[=] <ast.ListComp object at 0x7da1b10e4790>
return[name[cmds]] | keyword[def] identifier[commands] ( identifier[cls] ):
literal[string]
identifier[cmds] =[ identifier[cmd] [ literal[int] :] keyword[for] identifier[cmd] keyword[in] identifier[dir] ( identifier[cls] ) keyword[if] identifier[cmd] . identifier[startswith] ( literal[string] )]
keyword[return] identifier[cmds] | def commands(cls):
"""Returns a list of all methods that start with ``cmd_``."""
cmds = [cmd[4:] for cmd in dir(cls) if cmd.startswith('cmd_')]
return cmds |
def set_regressor_interface_params(spec, features, output_features):
    """ Common utilities to set the regressor interface params.

    Fills in the input feature descriptions, the single Double output,
    and the predicted feature name on ``spec``.  When ``output_features``
    is None a default output named 'predicted_class' is used.
    """
    if output_features is None:
        output_features = [("predicted_class", datatypes.Double())]
    else:
        output_features = _fm.process_or_validate_features(output_features, 1)
    # A regressor must expose exactly one Double-valued output.
    if len(output_features) != 1:
        raise ValueError("Provided output features for a regressor must be "
                         "one Double feature.")
    if output_features[0][1] != datatypes.Double():
        raise ValueError("Output type of a regressor must be a Double.")
    prediction_name = output_features[0][0]
    spec.description.predictedFeatureName = prediction_name

    # Normalize the features list.
    features = _fm.process_or_validate_features(features)

    # Declare every input feature on the spec.
    for feature_name, feature_type in features:
        input_desc = spec.description.input.add()
        input_desc.name = feature_name
        datatypes._set_datatype(input_desc.type, feature_type)

    # Declare the single Double output.
    output_desc = spec.description.output.add()
    output_desc.name = prediction_name
    datatypes._set_datatype(output_desc.type, 'Double')
    return spec
constant[ Common utilities to set the regressor interface params.
]
if compare[name[output_features] is constant[None]] begin[:]
variable[output_features] assign[=] list[[<ast.Tuple object at 0x7da18bc72410>]]
if compare[call[name[len], parameter[name[output_features]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da18bc70ca0>
if compare[call[call[name[output_features]][constant[0]]][constant[1]] not_equal[!=] call[name[datatypes].Double, parameter[]]] begin[:]
<ast.Raise object at 0x7da18bc71c30>
variable[prediction_name] assign[=] call[call[name[output_features]][constant[0]]][constant[0]]
name[spec].description.predictedFeatureName assign[=] name[prediction_name]
variable[features] assign[=] call[name[_fm].process_or_validate_features, parameter[name[features]]]
for taget[tuple[[<ast.Name object at 0x7da18bc72140>, <ast.Name object at 0x7da18bc73190>]]] in starred[name[features]] begin[:]
variable[input_] assign[=] call[name[spec].description.input.add, parameter[]]
name[input_].name assign[=] name[cur_input_name]
call[name[datatypes]._set_datatype, parameter[name[input_].type, name[feature_type]]]
variable[output_] assign[=] call[name[spec].description.output.add, parameter[]]
name[output_].name assign[=] name[prediction_name]
call[name[datatypes]._set_datatype, parameter[name[output_].type, constant[Double]]]
return[name[spec]] | keyword[def] identifier[set_regressor_interface_params] ( identifier[spec] , identifier[features] , identifier[output_features] ):
literal[string]
keyword[if] identifier[output_features] keyword[is] keyword[None] :
identifier[output_features] =[( literal[string] , identifier[datatypes] . identifier[Double] ())]
keyword[else] :
identifier[output_features] = identifier[_fm] . identifier[process_or_validate_features] ( identifier[output_features] , literal[int] )
keyword[if] identifier[len] ( identifier[output_features] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[output_features] [ literal[int] ][ literal[int] ]!= identifier[datatypes] . identifier[Double] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[prediction_name] = identifier[output_features] [ literal[int] ][ literal[int] ]
identifier[spec] . identifier[description] . identifier[predictedFeatureName] = identifier[prediction_name]
identifier[features] = identifier[_fm] . identifier[process_or_validate_features] ( identifier[features] )
keyword[for] identifier[cur_input_name] , identifier[feature_type] keyword[in] identifier[features] :
identifier[input_] = identifier[spec] . identifier[description] . identifier[input] . identifier[add] ()
identifier[input_] . identifier[name] = identifier[cur_input_name]
identifier[datatypes] . identifier[_set_datatype] ( identifier[input_] . identifier[type] , identifier[feature_type] )
identifier[output_] = identifier[spec] . identifier[description] . identifier[output] . identifier[add] ()
identifier[output_] . identifier[name] = identifier[prediction_name]
identifier[datatypes] . identifier[_set_datatype] ( identifier[output_] . identifier[type] , literal[string] )
keyword[return] identifier[spec] | def set_regressor_interface_params(spec, features, output_features):
""" Common utilities to set the regressor interface params.
"""
if output_features is None:
output_features = [('predicted_class', datatypes.Double())] # depends on [control=['if'], data=['output_features']]
else:
output_features = _fm.process_or_validate_features(output_features, 1)
if len(output_features) != 1:
raise ValueError('Provided output features for a regressor must be one Double feature.') # depends on [control=['if'], data=[]]
if output_features[0][1] != datatypes.Double():
raise ValueError('Output type of a regressor must be a Double.') # depends on [control=['if'], data=[]]
prediction_name = output_features[0][0]
spec.description.predictedFeatureName = prediction_name
# Normalize the features list.
features = _fm.process_or_validate_features(features)
# add input and output features
for (cur_input_name, feature_type) in features:
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, feature_type) # depends on [control=['for'], data=[]]
output_ = spec.description.output.add()
output_.name = prediction_name
datatypes._set_datatype(output_.type, 'Double')
return spec |
def _post_activity(self, activity, unserialize=True):
    """ Posts a activity to feed """
    # Posting always targets the authenticated user's own feed.
    feed_url = "{proto}://{server}/api/user/{username}/feed".format(
        proto=self._pump.protocol,
        server=self._pump.client.server,
        username=self._pump.client.nickname
    )
    response = self._pump.request(feed_url, method="POST", data=activity)
    if not response:
        return False
    if "error" in response:
        raise PumpException(response["error"])
    if not unserialize:
        return True
    if "target" in response:
        # collection.{add,remove} put the interesting object in "target",
        # so unserialize that instead of "object".
        self.unserialize(response["target"])
        return True
    # Copy activity-level attributes down onto the object before
    # unserializing it.
    obj = response["object"]
    if "author" not in obj:
        obj["author"] = response["actor"]
    for key in ("to", "cc", "bto", "bcc"):
        if key in response and key not in obj:
            obj[key] = response[key]
    self.unserialize(obj)
    return True
constant[ Posts a activity to feed ]
variable[feed_url] assign[=] call[constant[{proto}://{server}/api/user/{username}/feed].format, parameter[]]
variable[data] assign[=] call[name[self]._pump.request, parameter[name[feed_url]]]
if <ast.UnaryOp object at 0x7da1b2626e30> begin[:]
return[constant[False]]
if compare[constant[error] in name[data]] begin[:]
<ast.Raise object at 0x7da1b26276d0>
if name[unserialize] begin[:]
if compare[constant[target] in name[data]] begin[:]
call[name[self].unserialize, parameter[call[name[data]][constant[target]]]]
return[constant[True]] | keyword[def] identifier[_post_activity] ( identifier[self] , identifier[activity] , identifier[unserialize] = keyword[True] ):
literal[string]
identifier[feed_url] = literal[string] . identifier[format] (
identifier[proto] = identifier[self] . identifier[_pump] . identifier[protocol] ,
identifier[server] = identifier[self] . identifier[_pump] . identifier[client] . identifier[server] ,
identifier[username] = identifier[self] . identifier[_pump] . identifier[client] . identifier[nickname]
)
identifier[data] = identifier[self] . identifier[_pump] . identifier[request] ( identifier[feed_url] , identifier[method] = literal[string] , identifier[data] = identifier[activity] )
keyword[if] keyword[not] identifier[data] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[in] identifier[data] :
keyword[raise] identifier[PumpException] ( identifier[data] [ literal[string] ])
keyword[if] identifier[unserialize] :
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[self] . identifier[unserialize] ( identifier[data] [ literal[string] ])
keyword[else] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] [ literal[string] ]:
identifier[data] [ literal[string] ][ literal[string] ]= identifier[data] [ literal[string] ]
keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[key] keyword[not] keyword[in] identifier[data] [ literal[string] ] keyword[and] identifier[key] keyword[in] identifier[data] :
identifier[data] [ literal[string] ][ identifier[key] ]= identifier[data] [ identifier[key] ]
identifier[self] . identifier[unserialize] ( identifier[data] [ literal[string] ])
keyword[return] keyword[True] | def _post_activity(self, activity, unserialize=True):
""" Posts a activity to feed """
# I think we always want to post to feed
feed_url = '{proto}://{server}/api/user/{username}/feed'.format(proto=self._pump.protocol, server=self._pump.client.server, username=self._pump.client.nickname)
data = self._pump.request(feed_url, method='POST', data=activity)
if not data:
return False # depends on [control=['if'], data=[]]
if 'error' in data:
raise PumpException(data['error']) # depends on [control=['if'], data=['data']]
if unserialize:
if 'target' in data:
# we probably want to unserialize target if it's there
# true for collection.{add,remove}
self.unserialize(data['target']) # depends on [control=['if'], data=['data']]
else:
# copy activity attributes into object
if 'author' not in data['object']:
data['object']['author'] = data['actor'] # depends on [control=['if'], data=[]]
for key in ['to', 'cc', 'bto', 'bcc']:
if key not in data['object'] and key in data:
data['object'][key] = data[key] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
self.unserialize(data['object']) # depends on [control=['if'], data=[]]
return True |
def generate_form(model, only=None, meta=None):
    """Generate a WTForms form class from an SQLAlchemy table.

    :param model: SQLAlchemy ``sa.Table`` whose columns drive the fields
    :param only: optional list/set of column names to include; all other
        columns are skipped
    :param meta: optional ``Meta`` class with settings carried onto the form
    :return: a new ``Form`` subclass named ``Add<Table>Form``
    """
    fields = OrderedDict()
    if meta:
        fields['Meta'] = meta
    for name, column in model.__dict__['columns'].items():
        # Skip columns not explicitly requested (idiomatic `not in`,
        # merged with the `only` presence check).
        if only and name not in only:
            continue
        # Only real Column objects map to form fields.
        if not isinstance(column, Column):
            continue
        # TYPE_MAP translates the SQLAlchemy column type class into the
        # matching WTForms field constructor.
        fields[name] = TYPE_MAP[column.type.__class__](
            name, render_kw={'placeholder': name}
        )
    form = type(
        'Add{}Form'.format(model.name.capitalize()),
        (Form,),
        fields
    )
    return form
constant[
Generate WTForm based on SQLAlchemy table
:param model: SQLAlchemy sa.Table
:param only: list or set of columns that should be used in final form
:param meta: Meta class with settings for form
:return: WTForm object
]
variable[fields] assign[=] call[name[OrderedDict], parameter[]]
if name[meta] begin[:]
call[name[fields]][constant[Meta]] assign[=] name[meta]
for taget[tuple[[<ast.Name object at 0x7da1b0a437c0>, <ast.Name object at 0x7da1b0a41870>]]] in starred[call[call[name[model].__dict__][constant[columns]].items, parameter[]]] begin[:]
if name[only] begin[:]
if <ast.UnaryOp object at 0x7da1b0a43910> begin[:]
continue
if <ast.UnaryOp object at 0x7da1b0a401c0> begin[:]
continue
call[name[fields]][name[name]] assign[=] call[call[name[TYPE_MAP]][name[column].type.__class__], parameter[name[name]]]
variable[form] assign[=] call[name[type], parameter[call[constant[Add{}Form].format, parameter[call[name[model].name.capitalize, parameter[]]]], tuple[[<ast.Name object at 0x7da1b0a43250>]], name[fields]]]
return[name[form]] | keyword[def] identifier[generate_form] ( identifier[model] , identifier[only] = keyword[None] , identifier[meta] = keyword[None] ):
literal[string]
identifier[fields] = identifier[OrderedDict] ()
keyword[if] identifier[meta] :
identifier[fields] [ literal[string] ]= identifier[meta]
keyword[for] identifier[name] , identifier[column] keyword[in] identifier[model] . identifier[__dict__] [ literal[string] ]. identifier[items] ():
keyword[if] identifier[only] :
keyword[if] keyword[not] identifier[name] keyword[in] identifier[only] :
keyword[continue]
keyword[if] keyword[not] identifier[isinstance] ( identifier[column] , identifier[Column] ):
keyword[continue]
identifier[fields] [ identifier[name] ]= identifier[TYPE_MAP] [ identifier[column] . identifier[type] . identifier[__class__] ](
identifier[name] , identifier[render_kw] ={ literal[string] : identifier[name] }
)
identifier[form] = identifier[type] (
literal[string] . identifier[format] ( identifier[model] . identifier[name] . identifier[capitalize] ()),
( identifier[Form] ,),
identifier[fields]
)
keyword[return] identifier[form] | def generate_form(model, only=None, meta=None):
"""
Generate WTForm based on SQLAlchemy table
:param model: SQLAlchemy sa.Table
:param only: list or set of columns that should be used in final form
:param meta: Meta class with settings for form
:return: WTForm object
"""
fields = OrderedDict()
if meta:
fields['Meta'] = meta # depends on [control=['if'], data=[]]
for (name, column) in model.__dict__['columns'].items():
if only:
if not name in only:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not isinstance(column, Column):
continue # depends on [control=['if'], data=[]]
fields[name] = TYPE_MAP[column.type.__class__](name, render_kw={'placeholder': name}) # depends on [control=['for'], data=[]]
form = type('Add{}Form'.format(model.name.capitalize()), (Form,), fields)
return form |
def _remove_unexpected_query_parameters(schema, req):
"""Remove unexpected properties from the req.GET."""
additional_properties = schema.get('addtionalProperties', True)
if additional_properties:
pattern_regexes = []
patterns = schema.get('patternProperties', None)
if patterns:
for regex in patterns:
pattern_regexes.append(re.compile(regex))
for param in set(req.GET.keys()):
if param not in schema['properties'].keys():
if not (list(regex for regex in pattern_regexes if
regex.match(param))):
del req.GET[param] | def function[_remove_unexpected_query_parameters, parameter[schema, req]]:
constant[Remove unexpected properties from the req.GET.]
variable[additional_properties] assign[=] call[name[schema].get, parameter[constant[addtionalProperties], constant[True]]]
if name[additional_properties] begin[:]
variable[pattern_regexes] assign[=] list[[]]
variable[patterns] assign[=] call[name[schema].get, parameter[constant[patternProperties], constant[None]]]
if name[patterns] begin[:]
for taget[name[regex]] in starred[name[patterns]] begin[:]
call[name[pattern_regexes].append, parameter[call[name[re].compile, parameter[name[regex]]]]]
for taget[name[param]] in starred[call[name[set], parameter[call[name[req].GET.keys, parameter[]]]]] begin[:]
if compare[name[param] <ast.NotIn object at 0x7da2590d7190> call[call[name[schema]][constant[properties]].keys, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da20e954d00> begin[:]
<ast.Delete object at 0x7da20e955840> | keyword[def] identifier[_remove_unexpected_query_parameters] ( identifier[schema] , identifier[req] ):
literal[string]
identifier[additional_properties] = identifier[schema] . identifier[get] ( literal[string] , keyword[True] )
keyword[if] identifier[additional_properties] :
identifier[pattern_regexes] =[]
identifier[patterns] = identifier[schema] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[patterns] :
keyword[for] identifier[regex] keyword[in] identifier[patterns] :
identifier[pattern_regexes] . identifier[append] ( identifier[re] . identifier[compile] ( identifier[regex] ))
keyword[for] identifier[param] keyword[in] identifier[set] ( identifier[req] . identifier[GET] . identifier[keys] ()):
keyword[if] identifier[param] keyword[not] keyword[in] identifier[schema] [ literal[string] ]. identifier[keys] ():
keyword[if] keyword[not] ( identifier[list] ( identifier[regex] keyword[for] identifier[regex] keyword[in] identifier[pattern_regexes] keyword[if]
identifier[regex] . identifier[match] ( identifier[param] ))):
keyword[del] identifier[req] . identifier[GET] [ identifier[param] ] | def _remove_unexpected_query_parameters(schema, req):
"""Remove unexpected properties from the req.GET."""
additional_properties = schema.get('addtionalProperties', True)
if additional_properties:
pattern_regexes = []
patterns = schema.get('patternProperties', None)
if patterns:
for regex in patterns:
pattern_regexes.append(re.compile(regex)) # depends on [control=['for'], data=['regex']] # depends on [control=['if'], data=[]]
for param in set(req.GET.keys()):
if param not in schema['properties'].keys():
if not list((regex for regex in pattern_regexes if regex.match(param))):
del req.GET[param] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['param']] # depends on [control=['for'], data=['param']] # depends on [control=['if'], data=[]] |
def context_value(name):
    """Build a step that swaps the current value for ``context[name]``.

    The returned callable ignores the incoming value and resolves (via a
    fired Deferred) to the entry stored under ``name`` in the context.
    """
    def _lookup(_value, context, **_params):
        return defer.succeed(context[name])
    return _lookup
constant[
Returns an effect that drops the current value, and replaces it with
the value from the context with the given name.
]
def function[context_value, parameter[_value, context]]:
return[call[name[defer].succeed, parameter[call[name[context]][name[name]]]]]
return[name[context_value]] | keyword[def] identifier[context_value] ( identifier[name] ):
literal[string]
keyword[def] identifier[context_value] ( identifier[_value] , identifier[context] ,** identifier[_params] ):
keyword[return] identifier[defer] . identifier[succeed] ( identifier[context] [ identifier[name] ])
keyword[return] identifier[context_value] | def context_value(name):
"""
Returns an effect that drops the current value, and replaces it with
the value from the context with the given name.
"""
def context_value(_value, context, **_params):
return defer.succeed(context[name])
return context_value |
def cpr(self) -> str:
    """Generate a random CPR number (Central Person Registry).

    :return: CPR number.

    :Example:
        0105865167
    """
    # Draw the four components in the same order as before:
    # day, month, two-digit year, then the four-digit serial.
    dd = self.random.randint(1, 31)
    mm = self.random.randint(1, 12)
    yy = self.random.randint(0, 99)
    serial = self.random.randint(0, 9999)
    return f'{dd:02d}{mm:02d}{yy:02d}{serial:04d}'
constant[Generate a random CPR number (Central Person Registry).
:return: CPR number.
:Example:
0105865167
]
variable[day] assign[=] call[constant[{:02d}].format, parameter[call[name[self].random.randint, parameter[constant[1], constant[31]]]]]
variable[month] assign[=] call[constant[{:02d}].format, parameter[call[name[self].random.randint, parameter[constant[1], constant[12]]]]]
variable[year] assign[=] call[constant[{:02d}].format, parameter[call[name[self].random.randint, parameter[constant[0], constant[99]]]]]
variable[serial_number] assign[=] call[constant[{:04d}].format, parameter[call[name[self].random.randint, parameter[constant[0], constant[9999]]]]]
variable[cpr_nr] assign[=] call[constant[{}{}{}{}].format, parameter[name[day], name[month], name[year], name[serial_number]]]
return[name[cpr_nr]] | keyword[def] identifier[cpr] ( identifier[self] )-> identifier[str] :
literal[string]
identifier[day] = literal[string] . identifier[format] ( identifier[self] . identifier[random] . identifier[randint] ( literal[int] , literal[int] ))
identifier[month] = literal[string] . identifier[format] ( identifier[self] . identifier[random] . identifier[randint] ( literal[int] , literal[int] ))
identifier[year] = literal[string] . identifier[format] ( identifier[self] . identifier[random] . identifier[randint] ( literal[int] , literal[int] ))
identifier[serial_number] = literal[string] . identifier[format] ( identifier[self] . identifier[random] . identifier[randint] ( literal[int] , literal[int] ))
identifier[cpr_nr] = literal[string] . identifier[format] ( identifier[day] , identifier[month] , identifier[year] , identifier[serial_number] )
keyword[return] identifier[cpr_nr] | def cpr(self) -> str:
"""Generate a random CPR number (Central Person Registry).
:return: CPR number.
:Example:
0105865167
"""
day = '{:02d}'.format(self.random.randint(1, 31))
month = '{:02d}'.format(self.random.randint(1, 12))
year = '{:02d}'.format(self.random.randint(0, 99))
serial_number = '{:04d}'.format(self.random.randint(0, 9999))
cpr_nr = '{}{}{}{}'.format(day, month, year, serial_number)
return cpr_nr |
def rename(args):
    """
    %prog rename in.gff3 switch.ids > reindexed.gff3
    Change the IDs within the gff3.
    """
    p = OptionParser(rename.__doc__)
    opts, args = p.parse_args(args)

    # Exactly two positional arguments: the gff3 file and the ID map.
    if len(args) != 2:
        sys.exit(not p.print_help())

    ingff3, switch = args
    mapping = DictFile(switch)

    for g in Gff(ingff3):
        # Rewrite the record's own ID through the switch table,
        # falling back to the existing ID when unmapped.
        (old_id,) = g.attributes["ID"]
        g.attributes["ID"] = [mapping.get(old_id, old_id)]
        # Parent references must be rewritten the same way.
        if "Parent" in g.attributes:
            g.attributes["Parent"] = [
                mapping.get(parent, parent) for parent in g.attributes["Parent"]
            ]
        g.update_attributes()
        print(g)
constant[
%prog rename in.gff3 switch.ids > reindexed.gff3
Change the IDs within the gff3.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[rename].__doc__]]
<ast.Tuple object at 0x7da1b086b8e0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b086b5b0>]]
<ast.Tuple object at 0x7da1b086bd00> assign[=] name[args]
variable[switch] assign[=] call[name[DictFile], parameter[name[switch]]]
variable[gff] assign[=] call[name[Gff], parameter[name[ingff3]]]
for taget[name[g]] in starred[name[gff]] begin[:]
<ast.Tuple object at 0x7da1b086be50> assign[=] call[name[g].attributes][constant[ID]]
variable[newname] assign[=] call[name[switch].get, parameter[name[id], name[id]]]
call[name[g].attributes][constant[ID]] assign[=] list[[<ast.Name object at 0x7da1b08eadd0>]]
if compare[constant[Parent] in name[g].attributes] begin[:]
variable[parents] assign[=] call[name[g].attributes][constant[Parent]]
call[name[g].attributes][constant[Parent]] assign[=] <ast.ListComp object at 0x7da1b08e87c0>
call[name[g].update_attributes, parameter[]]
call[name[print], parameter[name[g]]] | keyword[def] identifier[rename] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[rename] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[ingff3] , identifier[switch] = identifier[args]
identifier[switch] = identifier[DictFile] ( identifier[switch] )
identifier[gff] = identifier[Gff] ( identifier[ingff3] )
keyword[for] identifier[g] keyword[in] identifier[gff] :
identifier[id] ,= identifier[g] . identifier[attributes] [ literal[string] ]
identifier[newname] = identifier[switch] . identifier[get] ( identifier[id] , identifier[id] )
identifier[g] . identifier[attributes] [ literal[string] ]=[ identifier[newname] ]
keyword[if] literal[string] keyword[in] identifier[g] . identifier[attributes] :
identifier[parents] = identifier[g] . identifier[attributes] [ literal[string] ]
identifier[g] . identifier[attributes] [ literal[string] ]=[ identifier[switch] . identifier[get] ( identifier[x] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[parents] ]
identifier[g] . identifier[update_attributes] ()
identifier[print] ( identifier[g] ) | def rename(args):
"""
%prog rename in.gff3 switch.ids > reindexed.gff3
Change the IDs within the gff3.
"""
p = OptionParser(rename.__doc__)
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(ingff3, switch) = args
switch = DictFile(switch)
gff = Gff(ingff3)
for g in gff:
(id,) = g.attributes['ID']
newname = switch.get(id, id)
g.attributes['ID'] = [newname]
if 'Parent' in g.attributes:
parents = g.attributes['Parent']
g.attributes['Parent'] = [switch.get(x, x) for x in parents] # depends on [control=['if'], data=[]]
g.update_attributes()
print(g) # depends on [control=['for'], data=['g']] |
def update_pipe_channel(self, uid, channel_name, label):  # pylint: disable=unused-argument
    """Subscribe this consumer to the pipe group for ``channel_name``.

    Re-points the group registered for the js widget ``uid``: any previous
    group membership is discarded before joining the new group.  ``label``
    is accepted for interface compatibility but unused.
    """
    target_group = _form_pipe_channel_name(channel_name)
    if not self.channel_layer:
        return
    previous_group = self.channel_maps.get(uid, None)
    if previous_group == target_group:
        # Already listening on the right group; nothing to do.
        return
    if previous_group:
        async_to_sync(self.channel_layer.group_discard)(previous_group, self.channel_name)
    self.channel_maps[uid] = target_group
    async_to_sync(self.channel_layer.group_add)(target_group, self.channel_name)
constant[
Update this consumer to listen on channel_name for the js widget associated with uid
]
variable[pipe_group_name] assign[=] call[name[_form_pipe_channel_name], parameter[name[channel_name]]]
if name[self].channel_layer begin[:]
variable[current] assign[=] call[name[self].channel_maps.get, parameter[name[uid], constant[None]]]
if compare[name[current] not_equal[!=] name[pipe_group_name]] begin[:]
if name[current] begin[:]
call[call[name[async_to_sync], parameter[name[self].channel_layer.group_discard]], parameter[name[current], name[self].channel_name]]
call[name[self].channel_maps][name[uid]] assign[=] name[pipe_group_name]
call[call[name[async_to_sync], parameter[name[self].channel_layer.group_add]], parameter[name[pipe_group_name], name[self].channel_name]] | keyword[def] identifier[update_pipe_channel] ( identifier[self] , identifier[uid] , identifier[channel_name] , identifier[label] ):
literal[string]
identifier[pipe_group_name] = identifier[_form_pipe_channel_name] ( identifier[channel_name] )
keyword[if] identifier[self] . identifier[channel_layer] :
identifier[current] = identifier[self] . identifier[channel_maps] . identifier[get] ( identifier[uid] , keyword[None] )
keyword[if] identifier[current] != identifier[pipe_group_name] :
keyword[if] identifier[current] :
identifier[async_to_sync] ( identifier[self] . identifier[channel_layer] . identifier[group_discard] )( identifier[current] , identifier[self] . identifier[channel_name] )
identifier[self] . identifier[channel_maps] [ identifier[uid] ]= identifier[pipe_group_name]
identifier[async_to_sync] ( identifier[self] . identifier[channel_layer] . identifier[group_add] )( identifier[pipe_group_name] , identifier[self] . identifier[channel_name] ) | def update_pipe_channel(self, uid, channel_name, label): # pylint: disable=unused-argument
'\n Update this consumer to listen on channel_name for the js widget associated with uid\n '
pipe_group_name = _form_pipe_channel_name(channel_name)
if self.channel_layer:
current = self.channel_maps.get(uid, None)
if current != pipe_group_name:
if current:
async_to_sync(self.channel_layer.group_discard)(current, self.channel_name) # depends on [control=['if'], data=[]]
self.channel_maps[uid] = pipe_group_name
async_to_sync(self.channel_layer.group_add)(pipe_group_name, self.channel_name) # depends on [control=['if'], data=['current', 'pipe_group_name']] # depends on [control=['if'], data=[]] |
def seqannotation(self, seqrecord, allele, loc):
    """Build an Annotation from the features of a matched sequence.

    :return: The Annotation from the found sequence
    :rtype: Annotation
    """
    annotation = Annotation(
        annotation=get_features(seqrecord),
        method='match',
        complete_annotation=True,
    )
    if self.alignments:
        # Map each annotated feature name to its aligned sequence string.
        aligned = self.annoated_alignments[loc][allele]
        annotation.aligned = {feat: entry['Seq'] for feat, entry in aligned.items()}
    return annotation
constant[
Gets the Annotation from the found sequence
:return: The Annotation from the found sequence
:rtype: Annotation
]
variable[complete_annotation] assign[=] call[name[get_features], parameter[name[seqrecord]]]
variable[annotation] assign[=] call[name[Annotation], parameter[]]
if name[self].alignments begin[:]
variable[alignment] assign[=] <ast.DictComp object at 0x7da18bccab30>
name[annotation].aligned assign[=] name[alignment]
return[name[annotation]] | keyword[def] identifier[seqannotation] ( identifier[self] , identifier[seqrecord] , identifier[allele] , identifier[loc] ):
literal[string]
identifier[complete_annotation] = identifier[get_features] ( identifier[seqrecord] )
identifier[annotation] = identifier[Annotation] ( identifier[annotation] = identifier[complete_annotation] ,
identifier[method] = literal[string] ,
identifier[complete_annotation] = keyword[True] )
keyword[if] identifier[self] . identifier[alignments] :
identifier[alignment] ={ identifier[f] : identifier[self] . identifier[annoated_alignments] [ identifier[loc] ][ identifier[allele] ][ identifier[f] ][ literal[string] ]
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[annoated_alignments] [ identifier[loc] ][ identifier[allele] ]. identifier[keys] ()}
identifier[annotation] . identifier[aligned] = identifier[alignment]
keyword[return] identifier[annotation] | def seqannotation(self, seqrecord, allele, loc):
"""
Gets the Annotation from the found sequence
:return: The Annotation from the found sequence
:rtype: Annotation
"""
#seqrecord = self.seqrecord(allele, loc)
complete_annotation = get_features(seqrecord)
annotation = Annotation(annotation=complete_annotation, method='match', complete_annotation=True)
if self.alignments:
alignment = {f: self.annoated_alignments[loc][allele][f]['Seq'] for f in self.annoated_alignments[loc][allele].keys()}
annotation.aligned = alignment # depends on [control=['if'], data=[]]
return annotation |
def _getitem_via_pathlist(external_dict,path_list,**kwargs):
'''
y = {'c': {'b': 200}}
_getitem_via_pathlist(y,['c','b'])
'''
if('s2n' in kwargs):
s2n = kwargs['s2n']
else:
s2n = 0
if('n2s' in kwargs):
n2s = kwargs['n2s']
else:
n2s = 0
this = external_dict
for i in range(0,path_list.__len__()):
key = path_list[i]
if(n2s ==1):
key = str(key)
if(s2n==1):
try:
int(key)
except:
pass
else:
key = int(key)
this = this.__getitem__(key)
return(this) | def function[_getitem_via_pathlist, parameter[external_dict, path_list]]:
constant[
y = {'c': {'b': 200}}
_getitem_via_pathlist(y,['c','b'])
]
if compare[constant[s2n] in name[kwargs]] begin[:]
variable[s2n] assign[=] call[name[kwargs]][constant[s2n]]
if compare[constant[n2s] in name[kwargs]] begin[:]
variable[n2s] assign[=] call[name[kwargs]][constant[n2s]]
variable[this] assign[=] name[external_dict]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[path_list].__len__, parameter[]]]]] begin[:]
variable[key] assign[=] call[name[path_list]][name[i]]
if compare[name[n2s] equal[==] constant[1]] begin[:]
variable[key] assign[=] call[name[str], parameter[name[key]]]
if compare[name[s2n] equal[==] constant[1]] begin[:]
<ast.Try object at 0x7da204963fa0>
variable[this] assign[=] call[name[this].__getitem__, parameter[name[key]]]
return[name[this]] | keyword[def] identifier[_getitem_via_pathlist] ( identifier[external_dict] , identifier[path_list] ,** identifier[kwargs] ):
literal[string]
keyword[if] ( literal[string] keyword[in] identifier[kwargs] ):
identifier[s2n] = identifier[kwargs] [ literal[string] ]
keyword[else] :
identifier[s2n] = literal[int]
keyword[if] ( literal[string] keyword[in] identifier[kwargs] ):
identifier[n2s] = identifier[kwargs] [ literal[string] ]
keyword[else] :
identifier[n2s] = literal[int]
identifier[this] = identifier[external_dict]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[path_list] . identifier[__len__] ()):
identifier[key] = identifier[path_list] [ identifier[i] ]
keyword[if] ( identifier[n2s] == literal[int] ):
identifier[key] = identifier[str] ( identifier[key] )
keyword[if] ( identifier[s2n] == literal[int] ):
keyword[try] :
identifier[int] ( identifier[key] )
keyword[except] :
keyword[pass]
keyword[else] :
identifier[key] = identifier[int] ( identifier[key] )
identifier[this] = identifier[this] . identifier[__getitem__] ( identifier[key] )
keyword[return] ( identifier[this] ) | def _getitem_via_pathlist(external_dict, path_list, **kwargs):
"""
y = {'c': {'b': 200}}
_getitem_via_pathlist(y,['c','b'])
"""
if 's2n' in kwargs:
s2n = kwargs['s2n'] # depends on [control=['if'], data=['kwargs']]
else:
s2n = 0
if 'n2s' in kwargs:
n2s = kwargs['n2s'] # depends on [control=['if'], data=['kwargs']]
else:
n2s = 0
this = external_dict
for i in range(0, path_list.__len__()):
key = path_list[i]
if n2s == 1:
key = str(key) # depends on [control=['if'], data=[]]
if s2n == 1:
try:
int(key) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
else:
key = int(key) # depends on [control=['if'], data=[]]
this = this.__getitem__(key) # depends on [control=['for'], data=['i']]
return this |
def _get(self, url,
         param_dict={},
         securityHandler=None,
         additional_headers=[],
         handlers=[],
         proxy_url=None,
         proxy_port=None,
         compress=True,
         custom_handlers=[],
         out_folder=None,
         file_name=None):
    """
    Performs a GET operation.

    Builds a urllib opener from the security handler, cookie jar and
    optional proxy, issues the request (falling back to body-encoded
    parameters when the encoded query string would make the URL too long),
    then either streams an attachment response to disk or decodes the body,
    returning parsed JSON when possible.

    Output:
        file path (attachment/image download), parsed JSON dict, or the
        raw response text when the body is not valid JSON.
    """
    # NOTE(review): the {}/[] defaults are shared mutable objects; the body
    # only reads or rebinds them here, but callers should not rely on that.
    self._last_method = "GET"
    CHUNK = 4056
    # NOTE(review): 4056 looks like a typo for the usual 4096 buffer size.
    param_dict, handler, cj = self._processHandler(securityHandler, param_dict)
    # `[] +` copies so the additional_headers default list is not mutated.
    headers = [] + additional_headers
    if compress:
        headers.append(('Accept-encoding', 'gzip'))
    else:
        headers.append(('Accept-encoding', ''))
    headers.append(('User-Agent', self.useragent))
    if len(param_dict.keys()) == 0:
        param_dict = None
    if handlers is None:
        handlers = []
    if handler is not None:
        handlers.append(handler)
    handlers.append(RedirectHandler())
    if cj is not None:
        handlers.append(request.HTTPCookieProcessor(cj))
    if proxy_url is not None:
        if proxy_port is None:
            proxy_port = 80
        proxies = {"http":"http://%s:%s" % (proxy_url, proxy_port),
                   "https":"https://%s:%s" % (proxy_url, proxy_port)}
        proxy_support = request.ProxyHandler(proxies)
        handlers.append(proxy_support)
    opener = request.build_opener(*handlers)
    opener.addheaders = headers
    if param_dict is None:
        # No parameters at all: plain GET (data=None).
        resp = opener.open(url, data=param_dict)
    elif len(str(urlencode(param_dict))) + len(url) >= 1999:
        # URL would exceed ~2000 chars: send the parameters in the body
        # instead of the query string.
        resp = opener.open(url, data=urlencode(param_dict))
    else:
        format_url = url + "?%s" % urlencode(param_dict)
        resp = opener.open(fullurl=format_url)
    self._last_code = resp.getcode()
    self._last_url = resp.geturl()
    # Get some headers from the response.
    # NOTE(review): contentEncoding is read but never used below.
    maintype = self._mainType(resp)
    contentDisposition = resp.headers.get('content-disposition')
    contentEncoding = resp.headers.get('content-encoding')
    contentType = resp.headers.get('content-Type').split(';')[0].lower()
    contentLength = resp.headers.get('content-length')
    # Attachment / image / zip responses are streamed to a file on disk.
    if maintype.lower() in ('image',
                            'application/x-zip-compressed') or \
       contentType == 'application/x-zip-compressed' or \
       (contentDisposition is not None and \
        contentDisposition.lower().find('attachment;') > -1):
        fname = self._get_file_name(
            contentDisposition=contentDisposition,
            url=url)
        if out_folder is None:
            out_folder = tempfile.gettempdir()
        if contentLength is not None:
            # Shrink the chunk size for bodies smaller than one chunk.
            max_length = int(contentLength)
            if max_length < CHUNK:
                CHUNK = max_length
        file_name = os.path.join(out_folder, fname)
        with open(file_name, 'wb') as writer:
            for data in self._chunk(response=resp,
                                    size=CHUNK):
                writer.write(data)
                writer.flush()
            writer.flush()
        del writer
        return file_name
    else:
        # Text/JSON path: accumulate the decoded body chunk by chunk.
        read = ""
        for data in self._chunk(response=resp,
                                size=CHUNK):
            if self.PY3 == True:
                read += data.decode('utf-8')
            else:
                read += data
            del data
        try:
            results = json.loads(read)
            if 'error' in results:
                if 'message' in results['error']:
                    if results['error']['message'] == 'Request not made over ssl':
                        if url.startswith('http://'):
                            # Server demands TLS: retry the identical call
                            # over https once.
                            url = url.replace('http://', 'https://')
                            return self._get(url,
                                             param_dict,
                                             securityHandler,
                                             additional_headers,
                                             handlers,
                                             proxy_url,
                                             proxy_port,
                                             compress,
                                             custom_handlers,
                                             out_folder,
                                             file_name)
            return results
        except:
            # NOTE(review): bare except — intent is "body was not JSON,
            # return it verbatim", but this also hides unrelated errors.
            return read | def function[_get, parameter[self, url, param_dict, securityHandler, additional_headers, handlers, proxy_url, proxy_port, compress, custom_handlers, out_folder, file_name]]:
constant[
Performs a GET operation
Inputs:
Output:
returns dictionary, string or None
]
name[self]._last_method assign[=] constant[GET]
variable[CHUNK] assign[=] constant[4056]
<ast.Tuple object at 0x7da1b12556c0> assign[=] call[name[self]._processHandler, parameter[name[securityHandler], name[param_dict]]]
variable[headers] assign[=] binary_operation[list[[]] + name[additional_headers]]
if name[compress] begin[:]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da1b1255000>, <ast.Constant object at 0x7da1b1254a00>]]]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da1b12554b0>, <ast.Attribute object at 0x7da1b1254610>]]]]
if compare[call[name[len], parameter[call[name[param_dict].keys, parameter[]]]] equal[==] constant[0]] begin[:]
variable[param_dict] assign[=] constant[None]
if compare[name[handlers] is constant[None]] begin[:]
variable[handlers] assign[=] list[[]]
if compare[name[handler] is_not constant[None]] begin[:]
call[name[handlers].append, parameter[name[handler]]]
call[name[handlers].append, parameter[call[name[RedirectHandler], parameter[]]]]
if compare[name[cj] is_not constant[None]] begin[:]
call[name[handlers].append, parameter[call[name[request].HTTPCookieProcessor, parameter[name[cj]]]]]
if compare[name[proxy_url] is_not constant[None]] begin[:]
if compare[name[proxy_port] is constant[None]] begin[:]
variable[proxy_port] assign[=] constant[80]
variable[proxies] assign[=] dictionary[[<ast.Constant object at 0x7da1b1254f70>, <ast.Constant object at 0x7da1b12560e0>], [<ast.BinOp object at 0x7da1b1254f40>, <ast.BinOp object at 0x7da1b1256a40>]]
variable[proxy_support] assign[=] call[name[request].ProxyHandler, parameter[name[proxies]]]
call[name[handlers].append, parameter[name[proxy_support]]]
variable[opener] assign[=] call[name[request].build_opener, parameter[<ast.Starred object at 0x7da1b124d150>]]
name[opener].addheaders assign[=] name[headers]
if compare[name[param_dict] is constant[None]] begin[:]
variable[resp] assign[=] call[name[opener].open, parameter[name[url]]]
name[self]._last_code assign[=] call[name[resp].getcode, parameter[]]
name[self]._last_url assign[=] call[name[resp].geturl, parameter[]]
variable[maintype] assign[=] call[name[self]._mainType, parameter[name[resp]]]
variable[contentDisposition] assign[=] call[name[resp].headers.get, parameter[constant[content-disposition]]]
variable[contentEncoding] assign[=] call[name[resp].headers.get, parameter[constant[content-encoding]]]
variable[contentType] assign[=] call[call[call[call[name[resp].headers.get, parameter[constant[content-Type]]].split, parameter[constant[;]]]][constant[0]].lower, parameter[]]
variable[contentLength] assign[=] call[name[resp].headers.get, parameter[constant[content-length]]]
if <ast.BoolOp object at 0x7da1b124e7a0> begin[:]
variable[fname] assign[=] call[name[self]._get_file_name, parameter[]]
if compare[name[out_folder] is constant[None]] begin[:]
variable[out_folder] assign[=] call[name[tempfile].gettempdir, parameter[]]
if compare[name[contentLength] is_not constant[None]] begin[:]
variable[max_length] assign[=] call[name[int], parameter[name[contentLength]]]
if compare[name[max_length] less[<] name[CHUNK]] begin[:]
variable[CHUNK] assign[=] name[max_length]
variable[file_name] assign[=] call[name[os].path.join, parameter[name[out_folder], name[fname]]]
with call[name[open], parameter[name[file_name], constant[wb]]] begin[:]
for taget[name[data]] in starred[call[name[self]._chunk, parameter[]]] begin[:]
call[name[writer].write, parameter[name[data]]]
call[name[writer].flush, parameter[]]
call[name[writer].flush, parameter[]]
<ast.Delete object at 0x7da1b124d180>
return[name[file_name]] | keyword[def] identifier[_get] ( identifier[self] , identifier[url] ,
identifier[param_dict] ={},
identifier[securityHandler] = keyword[None] ,
identifier[additional_headers] =[],
identifier[handlers] =[],
identifier[proxy_url] = keyword[None] ,
identifier[proxy_port] = keyword[None] ,
identifier[compress] = keyword[True] ,
identifier[custom_handlers] =[],
identifier[out_folder] = keyword[None] ,
identifier[file_name] = keyword[None] ):
literal[string]
identifier[self] . identifier[_last_method] = literal[string]
identifier[CHUNK] = literal[int]
identifier[param_dict] , identifier[handler] , identifier[cj] = identifier[self] . identifier[_processHandler] ( identifier[securityHandler] , identifier[param_dict] )
identifier[headers] =[]+ identifier[additional_headers]
keyword[if] identifier[compress] :
identifier[headers] . identifier[append] (( literal[string] , literal[string] ))
keyword[else] :
identifier[headers] . identifier[append] (( literal[string] , literal[string] ))
identifier[headers] . identifier[append] (( literal[string] , identifier[self] . identifier[useragent] ))
keyword[if] identifier[len] ( identifier[param_dict] . identifier[keys] ())== literal[int] :
identifier[param_dict] = keyword[None]
keyword[if] identifier[handlers] keyword[is] keyword[None] :
identifier[handlers] =[]
keyword[if] identifier[handler] keyword[is] keyword[not] keyword[None] :
identifier[handlers] . identifier[append] ( identifier[handler] )
identifier[handlers] . identifier[append] ( identifier[RedirectHandler] ())
keyword[if] identifier[cj] keyword[is] keyword[not] keyword[None] :
identifier[handlers] . identifier[append] ( identifier[request] . identifier[HTTPCookieProcessor] ( identifier[cj] ))
keyword[if] identifier[proxy_url] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[proxy_port] keyword[is] keyword[None] :
identifier[proxy_port] = literal[int]
identifier[proxies] ={ literal[string] : literal[string] %( identifier[proxy_url] , identifier[proxy_port] ),
literal[string] : literal[string] %( identifier[proxy_url] , identifier[proxy_port] )}
identifier[proxy_support] = identifier[request] . identifier[ProxyHandler] ( identifier[proxies] )
identifier[handlers] . identifier[append] ( identifier[proxy_support] )
identifier[opener] = identifier[request] . identifier[build_opener] (* identifier[handlers] )
identifier[opener] . identifier[addheaders] = identifier[headers]
keyword[if] identifier[param_dict] keyword[is] keyword[None] :
identifier[resp] = identifier[opener] . identifier[open] ( identifier[url] , identifier[data] = identifier[param_dict] )
keyword[elif] identifier[len] ( identifier[str] ( identifier[urlencode] ( identifier[param_dict] )))+ identifier[len] ( identifier[url] )>= literal[int] :
identifier[resp] = identifier[opener] . identifier[open] ( identifier[url] , identifier[data] = identifier[urlencode] ( identifier[param_dict] ))
keyword[else] :
identifier[format_url] = identifier[url] + literal[string] % identifier[urlencode] ( identifier[param_dict] )
identifier[resp] = identifier[opener] . identifier[open] ( identifier[fullurl] = identifier[format_url] )
identifier[self] . identifier[_last_code] = identifier[resp] . identifier[getcode] ()
identifier[self] . identifier[_last_url] = identifier[resp] . identifier[geturl] ()
identifier[maintype] = identifier[self] . identifier[_mainType] ( identifier[resp] )
identifier[contentDisposition] = identifier[resp] . identifier[headers] . identifier[get] ( literal[string] )
identifier[contentEncoding] = identifier[resp] . identifier[headers] . identifier[get] ( literal[string] )
identifier[contentType] = identifier[resp] . identifier[headers] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] )[ literal[int] ]. identifier[lower] ()
identifier[contentLength] = identifier[resp] . identifier[headers] . identifier[get] ( literal[string] )
keyword[if] identifier[maintype] . identifier[lower] () keyword[in] ( literal[string] ,
literal[string] ) keyword[or] identifier[contentType] == literal[string] keyword[or] ( identifier[contentDisposition] keyword[is] keyword[not] keyword[None] keyword[and] identifier[contentDisposition] . identifier[lower] (). identifier[find] ( literal[string] )>- literal[int] ):
identifier[fname] = identifier[self] . identifier[_get_file_name] (
identifier[contentDisposition] = identifier[contentDisposition] ,
identifier[url] = identifier[url] )
keyword[if] identifier[out_folder] keyword[is] keyword[None] :
identifier[out_folder] = identifier[tempfile] . identifier[gettempdir] ()
keyword[if] identifier[contentLength] keyword[is] keyword[not] keyword[None] :
identifier[max_length] = identifier[int] ( identifier[contentLength] )
keyword[if] identifier[max_length] < identifier[CHUNK] :
identifier[CHUNK] = identifier[max_length]
identifier[file_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_folder] , identifier[fname] )
keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[writer] :
keyword[for] identifier[data] keyword[in] identifier[self] . identifier[_chunk] ( identifier[response] = identifier[resp] ,
identifier[size] = identifier[CHUNK] ):
identifier[writer] . identifier[write] ( identifier[data] )
identifier[writer] . identifier[flush] ()
identifier[writer] . identifier[flush] ()
keyword[del] identifier[writer]
keyword[return] identifier[file_name]
keyword[else] :
identifier[read] = literal[string]
keyword[for] identifier[data] keyword[in] identifier[self] . identifier[_chunk] ( identifier[response] = identifier[resp] ,
identifier[size] = identifier[CHUNK] ):
keyword[if] identifier[self] . identifier[PY3] == keyword[True] :
identifier[read] += identifier[data] . identifier[decode] ( literal[string] )
keyword[else] :
identifier[read] += identifier[data]
keyword[del] identifier[data]
keyword[try] :
identifier[results] = identifier[json] . identifier[loads] ( identifier[read] )
keyword[if] literal[string] keyword[in] identifier[results] :
keyword[if] literal[string] keyword[in] identifier[results] [ literal[string] ]:
keyword[if] identifier[results] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[if] identifier[url] . identifier[startswith] ( literal[string] ):
identifier[url] = identifier[url] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[self] . identifier[_get] ( identifier[url] ,
identifier[param_dict] ,
identifier[securityHandler] ,
identifier[additional_headers] ,
identifier[handlers] ,
identifier[proxy_url] ,
identifier[proxy_port] ,
identifier[compress] ,
identifier[custom_handlers] ,
identifier[out_folder] ,
identifier[file_name] )
keyword[return] identifier[results]
keyword[except] :
keyword[return] identifier[read] | def _get(self, url, param_dict={}, securityHandler=None, additional_headers=[], handlers=[], proxy_url=None, proxy_port=None, compress=True, custom_handlers=[], out_folder=None, file_name=None):
"""
Performs a GET operation
Inputs:
Output:
returns dictionary, string or None
"""
self._last_method = 'GET'
CHUNK = 4056
(param_dict, handler, cj) = self._processHandler(securityHandler, param_dict)
headers = [] + additional_headers
if compress:
headers.append(('Accept-encoding', 'gzip')) # depends on [control=['if'], data=[]]
else:
headers.append(('Accept-encoding', ''))
headers.append(('User-Agent', self.useragent))
if len(param_dict.keys()) == 0:
param_dict = None # depends on [control=['if'], data=[]]
if handlers is None:
handlers = [] # depends on [control=['if'], data=['handlers']]
if handler is not None:
handlers.append(handler) # depends on [control=['if'], data=['handler']]
handlers.append(RedirectHandler())
if cj is not None:
handlers.append(request.HTTPCookieProcessor(cj)) # depends on [control=['if'], data=['cj']]
if proxy_url is not None:
if proxy_port is None:
proxy_port = 80 # depends on [control=['if'], data=['proxy_port']]
proxies = {'http': 'http://%s:%s' % (proxy_url, proxy_port), 'https': 'https://%s:%s' % (proxy_url, proxy_port)}
proxy_support = request.ProxyHandler(proxies)
handlers.append(proxy_support) # depends on [control=['if'], data=['proxy_url']]
opener = request.build_opener(*handlers)
opener.addheaders = headers
if param_dict is None:
resp = opener.open(url, data=param_dict) # depends on [control=['if'], data=['param_dict']]
elif len(str(urlencode(param_dict))) + len(url) >= 1999:
resp = opener.open(url, data=urlencode(param_dict)) # depends on [control=['if'], data=[]]
else:
format_url = url + '?%s' % urlencode(param_dict)
resp = opener.open(fullurl=format_url)
self._last_code = resp.getcode()
self._last_url = resp.geturl()
# Get some headers from the response
maintype = self._mainType(resp)
contentDisposition = resp.headers.get('content-disposition')
contentEncoding = resp.headers.get('content-encoding')
contentType = resp.headers.get('content-Type').split(';')[0].lower()
contentLength = resp.headers.get('content-length')
if maintype.lower() in ('image', 'application/x-zip-compressed') or contentType == 'application/x-zip-compressed' or (contentDisposition is not None and contentDisposition.lower().find('attachment;') > -1):
fname = self._get_file_name(contentDisposition=contentDisposition, url=url)
if out_folder is None:
out_folder = tempfile.gettempdir() # depends on [control=['if'], data=['out_folder']]
if contentLength is not None:
max_length = int(contentLength)
if max_length < CHUNK:
CHUNK = max_length # depends on [control=['if'], data=['max_length', 'CHUNK']] # depends on [control=['if'], data=['contentLength']]
file_name = os.path.join(out_folder, fname)
with open(file_name, 'wb') as writer:
for data in self._chunk(response=resp, size=CHUNK):
writer.write(data)
writer.flush() # depends on [control=['for'], data=['data']]
writer.flush()
del writer # depends on [control=['with'], data=['writer']]
return file_name # depends on [control=['if'], data=[]]
else:
read = ''
for data in self._chunk(response=resp, size=CHUNK):
if self.PY3 == True:
read += data.decode('utf-8') # depends on [control=['if'], data=[]]
else:
read += data
del data # depends on [control=['for'], data=['data']]
try:
results = json.loads(read)
if 'error' in results:
if 'message' in results['error']:
if results['error']['message'] == 'Request not made over ssl':
if url.startswith('http://'):
url = url.replace('http://', 'https://')
return self._get(url, param_dict, securityHandler, additional_headers, handlers, proxy_url, proxy_port, compress, custom_handlers, out_folder, file_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['results']]
return results # depends on [control=['try'], data=[]]
except:
return read # depends on [control=['except'], data=[]] |
def remove_handler(self, handler):
    """Remove every registered occurrence of *handler*.

    ``list.remove`` deletes only the first match, so removal is repeated
    until the handler no longer appears in ``self.handlers``.  Handlers
    that were never registered are silently ignored.
    """
    try:
        while True:
            self.handlers.remove(handler)
    except ValueError:
        # No occurrence left (or the handler was never added).
        pass
constant[ Removes a previously added event handler. ]
while compare[name[handler] in name[self].handlers] begin[:]
call[name[self].handlers.remove, parameter[name[handler]]] | keyword[def] identifier[remove_handler] ( identifier[self] , identifier[handler] ):
literal[string]
keyword[while] identifier[handler] keyword[in] identifier[self] . identifier[handlers] :
identifier[self] . identifier[handlers] . identifier[remove] ( identifier[handler] ) | def remove_handler(self, handler):
""" Removes a previously added event handler. """
while handler in self.handlers:
self.handlers.remove(handler) # depends on [control=['while'], data=['handler']] |
def plot(self, figsize=None, ax=None, **kwargs):
    """Display ``self.raster`` as a matrix image.

    Parameters
    ----------
    figsize : tuple, optional
        Figure size used only when *ax* is not supplied.
    ax : matplotlib axes, optional
        Axes to draw into; a fresh figure/axes pair is created when None.
    **kwargs
        Extra keyword arguments forwarded to ``ax.matshow``.

    Returns
    -------
    The axes object the raster was drawn on.
    """
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    ax.set_aspect('equal')
    ax.matshow(self.raster, **kwargs)
    plt.draw()
    return ax
return ax | def function[plot, parameter[self, figsize, ax]]:
constant[
geo.plot()
Returns plot of raster data
]
if compare[name[ax] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b2864550> assign[=] call[name[plt].subplots, parameter[]]
call[name[ax].set_aspect, parameter[constant[equal]]]
call[name[ax].matshow, parameter[name[self].raster]]
call[name[plt].draw, parameter[]]
return[name[ax]] | keyword[def] identifier[plot] ( identifier[self] , identifier[figsize] = keyword[None] , identifier[ax] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[ax] keyword[is] keyword[None] :
identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] ( identifier[figsize] = identifier[figsize] )
identifier[ax] . identifier[set_aspect] ( literal[string] )
identifier[ax] . identifier[matshow] ( identifier[self] . identifier[raster] ,** identifier[kwargs] )
identifier[plt] . identifier[draw] ()
keyword[return] identifier[ax] | def plot(self, figsize=None, ax=None, **kwargs):
"""
geo.plot()
Returns plot of raster data
"""
if ax is None:
(fig, ax) = plt.subplots(figsize=figsize) # depends on [control=['if'], data=['ax']]
ax.set_aspect('equal')
ax.matshow(self.raster, **kwargs)
plt.draw()
return ax |
def draw_sparse_matrix(
    array_filename,
    output_image,
    vmax=DEFAULT_SATURATION_THRESHOLD,
    max_size_matrix=DEFAULT_MAX_SIZE_MATRIX,
):
    """Draw a quick preview of a sparse matrix with automated
    binning and normalization.

    The input file is expected to hold (row, col, value) integer triplets,
    one per line, with a single header line that is skipped.
    """
    triplets = np.loadtxt(array_filename, dtype=np.int32, skiprows=1)
    # Accept both an (n, 3) listing and an already-transposed (3, n) array.
    try:
        rows, cols, values = triplets.T
    except ValueError:
        rows, cols, values = triplets
    matrix_size = max(np.amax(rows), np.amax(cols)) + 1
    contact_map = sparse.coo_matrix(
        (values, (rows, cols)), shape=(matrix_size, matrix_size)
    )
    # Bin down so the preview stays at most ~max_size_matrix per side.
    binning = 1 if max_size_matrix <= 0 else matrix_size // max_size_matrix + 1
    binned = hcs.bin_sparse(contact_map, subsampling_factor=binning)
    dense = binned.todense()
    # Symmetrize without double-counting the diagonal.
    dense = dense + dense.T - np.diag(np.diag(dense))
    normalized = hcs.normalize_dense(dense)
    spaceless_pdf_plot_maker(normalized, output_image, vmax=vmax)
constant[Draw a quick preview of a sparse matrix with automated
binning and normalization.
]
variable[matrix] assign[=] call[name[np].loadtxt, parameter[name[array_filename]]]
<ast.Try object at 0x7da1b235eda0>
variable[size] assign[=] binary_operation[call[name[max], parameter[call[name[np].amax, parameter[name[row]]], call[name[np].amax, parameter[name[col]]]]] + constant[1]]
variable[S] assign[=] call[name[sparse].coo_matrix, parameter[tuple[[<ast.Name object at 0x7da1b23726e0>, <ast.Tuple object at 0x7da1b2371d50>]]]]
if compare[name[max_size_matrix] less_or_equal[<=] constant[0]] begin[:]
variable[binning] assign[=] constant[1]
variable[binned_S] assign[=] call[name[hcs].bin_sparse, parameter[name[S]]]
variable[dense_S] assign[=] call[name[binned_S].todense, parameter[]]
variable[dense_S] assign[=] binary_operation[binary_operation[name[dense_S] + name[dense_S].T] - call[name[np].diag, parameter[call[name[np].diag, parameter[name[dense_S]]]]]]
variable[normed_S] assign[=] call[name[hcs].normalize_dense, parameter[name[dense_S]]]
call[name[spaceless_pdf_plot_maker], parameter[name[normed_S], name[output_image]]] | keyword[def] identifier[draw_sparse_matrix] (
identifier[array_filename] ,
identifier[output_image] ,
identifier[vmax] = identifier[DEFAULT_SATURATION_THRESHOLD] ,
identifier[max_size_matrix] = identifier[DEFAULT_MAX_SIZE_MATRIX] ,
):
literal[string]
identifier[matrix] = identifier[np] . identifier[loadtxt] ( identifier[array_filename] , identifier[dtype] = identifier[np] . identifier[int32] , identifier[skiprows] = literal[int] )
keyword[try] :
identifier[row] , identifier[col] , identifier[data] = identifier[matrix] . identifier[T]
keyword[except] identifier[ValueError] :
identifier[row] , identifier[col] , identifier[data] = identifier[matrix]
identifier[size] = identifier[max] ( identifier[np] . identifier[amax] ( identifier[row] ), identifier[np] . identifier[amax] ( identifier[col] ))+ literal[int]
identifier[S] = identifier[sparse] . identifier[coo_matrix] (( identifier[data] ,( identifier[row] , identifier[col] )), identifier[shape] =( identifier[size] , identifier[size] ))
keyword[if] identifier[max_size_matrix] <= literal[int] :
identifier[binning] = literal[int]
keyword[else] :
identifier[binning] =( identifier[size] // identifier[max_size_matrix] )+ literal[int]
identifier[binned_S] = identifier[hcs] . identifier[bin_sparse] ( identifier[S] , identifier[subsampling_factor] = identifier[binning] )
identifier[dense_S] = identifier[binned_S] . identifier[todense] ()
identifier[dense_S] = identifier[dense_S] + identifier[dense_S] . identifier[T] - identifier[np] . identifier[diag] ( identifier[np] . identifier[diag] ( identifier[dense_S] ))
identifier[normed_S] = identifier[hcs] . identifier[normalize_dense] ( identifier[dense_S] )
identifier[spaceless_pdf_plot_maker] ( identifier[normed_S] , identifier[output_image] , identifier[vmax] = identifier[vmax] ) | def draw_sparse_matrix(array_filename, output_image, vmax=DEFAULT_SATURATION_THRESHOLD, max_size_matrix=DEFAULT_MAX_SIZE_MATRIX):
"""Draw a quick preview of a sparse matrix with automated
binning and normalization.
"""
matrix = np.loadtxt(array_filename, dtype=np.int32, skiprows=1)
try:
(row, col, data) = matrix.T # depends on [control=['try'], data=[]]
except ValueError:
(row, col, data) = matrix # depends on [control=['except'], data=[]]
size = max(np.amax(row), np.amax(col)) + 1
S = sparse.coo_matrix((data, (row, col)), shape=(size, size))
if max_size_matrix <= 0:
binning = 1 # depends on [control=['if'], data=[]]
else:
binning = size // max_size_matrix + 1
binned_S = hcs.bin_sparse(S, subsampling_factor=binning)
dense_S = binned_S.todense()
dense_S = dense_S + dense_S.T - np.diag(np.diag(dense_S))
normed_S = hcs.normalize_dense(dense_S)
spaceless_pdf_plot_maker(normed_S, output_image, vmax=vmax) |
def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None):
    """
    Retrieve information about your VpnGateways. You can filter results to
    return information only about those VpnGateways that match your search
    parameters. Otherwise, all VpnGateways associated with your account
    are returned.
    :type vpn_gateway_ids: list
    :param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's
    :type filters: list of tuples
    :param filters: A list of tuples containing filters. Each tuple
                    consists of a filter key and a filter value.
                    Possible filter keys are:
                    - *state*, the state of the VpnGateway
                    (pending,available,deleting,deleted)
                    - *type*, the type of customer gateway (ipsec.1)
                    - *availabilityZone*, the Availability zone the
                    VPN gateway is in.
    :rtype: list
    :return: A list of :class:`boto.vpc.customergateway.VpnGateway`
    """
    params = {}
    if vpn_gateway_ids:
        self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
    if filters:
        # EC2 filters are numbered starting at 1:
        # Filter.N.Name / Filter.N.Value.1
        for i, filter_pair in enumerate(filters, start=1):
            params['Filter.%d.Name' % i] = filter_pair[0]
            # Bug fix: the value key previously omitted the '% i'
            # substitution, storing every value under the literal key
            # 'Filter.%d.Value.1' so only the last filter value survived.
            params['Filter.%d.Value.1' % i] = filter_pair[1]
    return self.get_list('DescribeVpnGateways', params, [('item', VpnGateway)])
constant[
Retrieve information about your VpnGateways. You can filter results to
return information only about those VpnGateways that match your search
parameters. Otherwise, all VpnGateways associated with your account
are returned.
:type vpn_gateway_ids: list
:param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the VpnGateway
(pending,available,deleting,deleted)
- *type*, the type of customer gateway (ipsec.1)
- *availabilityZone*, the Availability zone the
VPN gateway is in.
:rtype: list
:return: A list of :class:`boto.vpc.customergateway.VpnGateway`
]
variable[params] assign[=] dictionary[[], []]
if name[vpn_gateway_ids] begin[:]
call[name[self].build_list_params, parameter[name[params], name[vpn_gateway_ids], constant[VpnGatewayId]]]
if name[filters] begin[:]
variable[i] assign[=] constant[1]
for taget[name[filter]] in starred[name[filters]] begin[:]
call[name[params]][binary_operation[constant[Filter.%d.Name] <ast.Mod object at 0x7da2590d6920> name[i]]] assign[=] call[name[filter]][constant[0]]
call[name[params]][constant[Filter.%d.Value.1]] assign[=] call[name[filter]][constant[1]]
<ast.AugAssign object at 0x7da1b26ee8f0>
return[call[name[self].get_list, parameter[constant[DescribeVpnGateways], name[params], list[[<ast.Tuple object at 0x7da1b26edfc0>]]]]] | keyword[def] identifier[get_all_vpn_gateways] ( identifier[self] , identifier[vpn_gateway_ids] = keyword[None] , identifier[filters] = keyword[None] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[vpn_gateway_ids] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[vpn_gateway_ids] , literal[string] )
keyword[if] identifier[filters] :
identifier[i] = literal[int]
keyword[for] identifier[filter] keyword[in] identifier[filters] :
identifier[params] [( literal[string] % identifier[i] )]= identifier[filter] [ literal[int] ]
identifier[params] [( literal[string] )]= identifier[filter] [ literal[int] ]
identifier[i] += literal[int]
keyword[return] identifier[self] . identifier[get_list] ( literal[string] , identifier[params] ,[( literal[string] , identifier[VpnGateway] )]) | def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None):
"""
Retrieve information about your VpnGateways. You can filter results to
return information only about those VpnGateways that match your search
parameters. Otherwise, all VpnGateways associated with your account
are returned.
:type vpn_gateway_ids: list
:param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the VpnGateway
(pending,available,deleting,deleted)
- *type*, the type of customer gateway (ipsec.1)
- *availabilityZone*, the Availability zone the
VPN gateway is in.
:rtype: list
:return: A list of :class:`boto.vpc.customergateway.VpnGateway`
"""
params = {}
if vpn_gateway_ids:
self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId') # depends on [control=['if'], data=[]]
if filters:
i = 1
for filter in filters:
params['Filter.%d.Name' % i] = filter[0]
params['Filter.%d.Value.1'] = filter[1]
i += 1 # depends on [control=['for'], data=['filter']] # depends on [control=['if'], data=[]]
return self.get_list('DescribeVpnGateways', params, [('item', VpnGateway)]) |
def verify_merkle_path(merkle_root_hex, serialized_path, leaf_hash_hex, hash_function=bin_double_sha256):
    """
    Verify a merkle path. The given path is the path from two leaf nodes to the root itself.
    merkle_root_hex is a little-endian, hex-encoded hash.
    serialized_path is the serialized merkle path
    leaf_hash_hex is the little-endian, hex-encoded hash of the leaf.
    Return True if the path is consistent with the merkle root.
    Return False if not.
    Raise ValueError if the path is empty or contains a malformed node.
    """
    merkle_root = hex_to_bin_reversed(merkle_root_hex)
    cur_hash = hex_to_bin_reversed(leaf_hash_hex)
    path = MerkleTree.path_deserialize(serialized_path)
    path = [{'order': p['order'], 'hash': hex_to_bin_reversed(p['hash'])} for p in path]
    if len(path) == 0:
        raise ValueError("Empty path")
    for node in path:
        order = node['order']
        if order == 'l':
            # left sibling: its hash is the first half of the preimage
            cur_hash = hash_function(node['hash'] + cur_hash)
        elif order == 'r':
            # right sibling: its hash is the second half of the preimage
            cur_hash = hash_function(cur_hash + node['hash'])
        elif order == 'm':
            # merkle root node: only valid as a single-element path.
            # (Was an `assert`, which `python -O` strips away.)
            if len(path) != 1:
                raise ValueError("Merkle root node in a multi-element path")
            return cur_hash == node['hash']
        else:
            # Previously an unknown order fell through silently, leaving
            # cur_hash unchanged and producing a bogus verification result.
            raise ValueError("Invalid path node order '{}'".format(order))
    return cur_hash == merkle_root
constant[
Verify a merkle path. The given path is the path from two leaf nodes to the root itself.
merkle_root_hex is a little-endian, hex-encoded hash.
serialized_path is the serialized merkle path
path_hex is a list of little-endian, hex-encoded hashes.
Return True if the path is consistent with the merkle root.
Return False if not.
]
variable[merkle_root] assign[=] call[name[hex_to_bin_reversed], parameter[name[merkle_root_hex]]]
variable[leaf_hash] assign[=] call[name[hex_to_bin_reversed], parameter[name[leaf_hash_hex]]]
variable[path] assign[=] call[name[MerkleTree].path_deserialize, parameter[name[serialized_path]]]
variable[path] assign[=] <ast.ListComp object at 0x7da18ede4dc0>
if compare[call[name[len], parameter[name[path]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18ede6260>
variable[cur_hash] assign[=] name[leaf_hash]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[path]]]]]] begin[:]
if compare[call[call[name[path]][name[i]]][constant[order]] equal[==] constant[l]] begin[:]
variable[cur_hash] assign[=] call[name[hash_function], parameter[binary_operation[call[call[name[path]][name[i]]][constant[hash]] + name[cur_hash]]]]
return[compare[name[cur_hash] equal[==] name[merkle_root]]] | keyword[def] identifier[verify_merkle_path] ( identifier[merkle_root_hex] , identifier[serialized_path] , identifier[leaf_hash_hex] , identifier[hash_function] = identifier[bin_double_sha256] ):
literal[string]
identifier[merkle_root] = identifier[hex_to_bin_reversed] ( identifier[merkle_root_hex] )
identifier[leaf_hash] = identifier[hex_to_bin_reversed] ( identifier[leaf_hash_hex] )
identifier[path] = identifier[MerkleTree] . identifier[path_deserialize] ( identifier[serialized_path] )
identifier[path] =[{ literal[string] : identifier[p] [ literal[string] ], literal[string] : identifier[hex_to_bin_reversed] ( identifier[p] [ literal[string] ])} keyword[for] identifier[p] keyword[in] identifier[path] ]
keyword[if] identifier[len] ( identifier[path] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[cur_hash] = identifier[leaf_hash]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[path] )):
keyword[if] identifier[path] [ identifier[i] ][ literal[string] ]== literal[string] :
identifier[cur_hash] = identifier[hash_function] ( identifier[path] [ identifier[i] ][ literal[string] ]+ identifier[cur_hash] )
keyword[elif] identifier[path] [ identifier[i] ][ literal[string] ]== literal[string] :
identifier[cur_hash] = identifier[hash_function] ( identifier[cur_hash] + identifier[path] [ identifier[i] ][ literal[string] ])
keyword[elif] identifier[path] [ identifier[i] ][ literal[string] ]== literal[string] :
keyword[assert] identifier[len] ( identifier[path] )== literal[int]
keyword[return] identifier[cur_hash] == identifier[path] [ identifier[i] ][ literal[string] ]
keyword[return] identifier[cur_hash] == identifier[merkle_root] | def verify_merkle_path(merkle_root_hex, serialized_path, leaf_hash_hex, hash_function=bin_double_sha256):
"""
Verify a merkle path. The given path is the path from two leaf nodes to the root itself.
merkle_root_hex is a little-endian, hex-encoded hash.
serialized_path is the serialized merkle path
path_hex is a list of little-endian, hex-encoded hashes.
Return True if the path is consistent with the merkle root.
Return False if not.
"""
merkle_root = hex_to_bin_reversed(merkle_root_hex)
leaf_hash = hex_to_bin_reversed(leaf_hash_hex)
path = MerkleTree.path_deserialize(serialized_path)
path = [{'order': p['order'], 'hash': hex_to_bin_reversed(p['hash'])} for p in path]
if len(path) == 0:
raise ValueError('Empty path') # depends on [control=['if'], data=[]]
cur_hash = leaf_hash
for i in range(0, len(path)):
if path[i]['order'] == 'l':
# left sibling
cur_hash = hash_function(path[i]['hash'] + cur_hash) # depends on [control=['if'], data=[]]
elif path[i]['order'] == 'r':
# right sibling
cur_hash = hash_function(cur_hash + path[i]['hash']) # depends on [control=['if'], data=[]]
elif path[i]['order'] == 'm':
# merkle root
assert len(path) == 1
return cur_hash == path[i]['hash'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return cur_hash == merkle_root |
def get_package_counts(package_descriptors, targets, repos_data):
    """
    Get the number of packages per target and repository.
    :return: a dict indexed by targets containing
      a list of integer values (one for each repo)
    """
    # One zeroed counter per repository, for every target.
    counts = {target: [0] * len(repos_data) for target in targets}
    for descriptor in package_descriptors.values():
        pkg_name = descriptor.debian_pkg_name
        for target in targets:
            for index, repo_data in enumerate(repos_data):
                # Count the package when the repo lists any (truthy) version.
                if repo_data.get(target, {}).get(pkg_name, None):
                    counts[target][index] += 1
    return counts
constant[
Get the number of packages per target and repository.
:return: a dict indexed by targets containing
a list of integer values (one for each repo)
]
variable[counts] assign[=] dictionary[[], []]
for taget[name[target]] in starred[name[targets]] begin[:]
call[name[counts]][name[target]] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b00ea4d0>]] * call[name[len], parameter[name[repos_data]]]]
for taget[name[package_descriptor]] in starred[call[name[package_descriptors].values, parameter[]]] begin[:]
variable[debian_pkg_name] assign[=] name[package_descriptor].debian_pkg_name
for taget[name[target]] in starred[name[targets]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b00ea860>, <ast.Name object at 0x7da1b00ea830>]]] in starred[call[name[enumerate], parameter[name[repos_data]]]] begin[:]
variable[version] assign[=] call[call[name[repo_data].get, parameter[name[target], dictionary[[], []]]].get, parameter[name[debian_pkg_name], constant[None]]]
if name[version] begin[:]
<ast.AugAssign object at 0x7da1b00de1d0>
return[name[counts]] | keyword[def] identifier[get_package_counts] ( identifier[package_descriptors] , identifier[targets] , identifier[repos_data] ):
literal[string]
identifier[counts] ={}
keyword[for] identifier[target] keyword[in] identifier[targets] :
identifier[counts] [ identifier[target] ]=[ literal[int] ]* identifier[len] ( identifier[repos_data] )
keyword[for] identifier[package_descriptor] keyword[in] identifier[package_descriptors] . identifier[values] ():
identifier[debian_pkg_name] = identifier[package_descriptor] . identifier[debian_pkg_name]
keyword[for] identifier[target] keyword[in] identifier[targets] :
keyword[for] identifier[i] , identifier[repo_data] keyword[in] identifier[enumerate] ( identifier[repos_data] ):
identifier[version] = identifier[repo_data] . identifier[get] ( identifier[target] ,{}). identifier[get] ( identifier[debian_pkg_name] , keyword[None] )
keyword[if] identifier[version] :
identifier[counts] [ identifier[target] ][ identifier[i] ]+= literal[int]
keyword[return] identifier[counts] | def get_package_counts(package_descriptors, targets, repos_data):
"""
Get the number of packages per target and repository.
:return: a dict indexed by targets containing
a list of integer values (one for each repo)
"""
counts = {}
for target in targets:
counts[target] = [0] * len(repos_data) # depends on [control=['for'], data=['target']]
for package_descriptor in package_descriptors.values():
debian_pkg_name = package_descriptor.debian_pkg_name
for target in targets:
for (i, repo_data) in enumerate(repos_data):
version = repo_data.get(target, {}).get(debian_pkg_name, None)
if version:
counts[target][i] += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['target']] # depends on [control=['for'], data=['package_descriptor']]
return counts |
def parse_on_start(self, node):
    """
    Parses <OnStart>
    @param node: Node containing the <OnStart> element
    @type node: xml.etree.Element
    """
    handler = OnStart()
    self.current_regime.add_event_handler(handler)
    # Nested tags are parsed in the context of this event handler,
    # then the context is cleared again.
    self.current_event_handler = handler
    self.process_nested_tags(node)
    self.current_event_handler = None
constant[
Parses <OnStart>
@param node: Node containing the <OnStart> element
@type node: xml.etree.Element
]
variable[event_handler] assign[=] call[name[OnStart], parameter[]]
call[name[self].current_regime.add_event_handler, parameter[name[event_handler]]]
name[self].current_event_handler assign[=] name[event_handler]
call[name[self].process_nested_tags, parameter[name[node]]]
name[self].current_event_handler assign[=] constant[None] | keyword[def] identifier[parse_on_start] ( identifier[self] , identifier[node] ):
literal[string]
identifier[event_handler] = identifier[OnStart] ()
identifier[self] . identifier[current_regime] . identifier[add_event_handler] ( identifier[event_handler] )
identifier[self] . identifier[current_event_handler] = identifier[event_handler]
identifier[self] . identifier[process_nested_tags] ( identifier[node] )
identifier[self] . identifier[current_event_handler] = keyword[None] | def parse_on_start(self, node):
"""
Parses <OnStart>
@param node: Node containing the <OnStart> element
@type node: xml.etree.Element
"""
event_handler = OnStart()
self.current_regime.add_event_handler(event_handler)
self.current_event_handler = event_handler
self.process_nested_tags(node)
self.current_event_handler = None |
def launch_R_script(template, arguments, output_function=None,
                    verbose=True, debug=False):
    """Launch an R script, starting from a template and replacing text in file
    before execution.

    Args:
        template (str): path to the template of the R script
        arguments (dict): mapping of template placeholders to their
            replacement text
        output_function (function): Function to execute **after** the
            execution of the R script; its return value is returned by this
            function. Used traditionally to retrieve the results of the
            execution. When ``None``, the ``Rscript`` exit status is
            returned instead.
        verbose (bool): Sets the verbosity of the R subprocess.
        debug (bool): If True, the generated scripts are not deleted.

    Return:
        Returns the output of the ``output_function`` if not `None`,
        else the exit status of the ``Rscript`` subprocess.
    """
    run_id = str(uuid.uuid4())  # renamed from `id`, which shadows the builtin
    workdir = '/tmp/cdt_R_script_' + run_id + '/'
    os.makedirs(workdir)
    # A single try/finally replaces the original's three duplicated cleanup
    # branches (Exception, KeyboardInterrupt and success paths). It also
    # preserves the traceback that `raise KeyboardInterrupt` used to discard
    # by constructing a brand-new exception.
    try:
        scriptpath = workdir + 'instance_{}'.format(os.path.basename(template))
        copy(template, scriptpath)
        # Substitute every placeholder in place, line by line.
        with fileinput.FileInput(scriptpath, inplace=True) as script:
            for line in script:
                for placeholder, value in arguments.items():
                    line = line.replace(placeholder, value)
                print(line, end='')
        command = "Rscript --vanilla {}".format(scriptpath)
        if output_function is None:
            output = subprocess.call(command, shell=True,
                                     stdout=subprocess.DEVNULL,
                                     stderr=subprocess.DEVNULL)
        else:
            if verbose:
                process = subprocess.Popen(command, shell=True)
            else:
                process = subprocess.Popen(command, shell=True,
                                           stdout=subprocess.DEVNULL,
                                           stderr=subprocess.DEVNULL)
            process.wait()
            output = output_function()
    finally:
        if not debug:
            rmtree(workdir)
    return output
constant[Launch an R script, starting from a template and replacing text in file
before execution.
Args:
template (str): path to the template of the R script
arguments (dict): Arguments that modify the template's placeholders
with arguments
output_function (function): Function to execute **after** the execution
of the R script, and its output is returned by this function. Used
traditionally as a function to retrieve the results of the
execution.
verbose (bool): Sets the verbosity of the R subprocess.
debug (bool): If True, the generated scripts are not deleted.
Return:
Returns the output of the ``output_function`` if not `None`
else `True` or `False` depending on whether the execution was
successful.
]
variable[id] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]
call[name[os].makedirs, parameter[binary_operation[binary_operation[constant[/tmp/cdt_R_script_] + name[id]] + constant[/]]]]
<ast.Try object at 0x7da2046220e0>
if <ast.UnaryOp object at 0x7da1b020c400> begin[:]
call[name[rmtree], parameter[binary_operation[binary_operation[constant[/tmp/cdt_R_script_] + name[id]] + constant[/]]]]
return[name[output]] | keyword[def] identifier[launch_R_script] ( identifier[template] , identifier[arguments] , identifier[output_function] = keyword[None] ,
identifier[verbose] = keyword[True] , identifier[debug] = keyword[False] ):
literal[string]
identifier[id] = identifier[str] ( identifier[uuid] . identifier[uuid4] ())
identifier[os] . identifier[makedirs] ( literal[string] + identifier[id] + literal[string] )
keyword[try] :
identifier[scriptpath] = literal[string] + identifier[id] + literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[template] ))
identifier[copy] ( identifier[template] , identifier[scriptpath] )
keyword[with] identifier[fileinput] . identifier[FileInput] ( identifier[scriptpath] , identifier[inplace] = keyword[True] ) keyword[as] identifier[file] :
keyword[for] identifier[line] keyword[in] identifier[file] :
identifier[mline] = identifier[line]
keyword[for] identifier[elt] keyword[in] identifier[arguments] :
identifier[mline] = identifier[mline] . identifier[replace] ( identifier[elt] , identifier[arguments] [ identifier[elt] ])
identifier[print] ( identifier[mline] , identifier[end] = literal[string] )
keyword[if] identifier[output_function] keyword[is] keyword[None] :
identifier[output] = identifier[subprocess] . identifier[call] ( literal[string] . identifier[format] ( identifier[scriptpath] ), identifier[shell] = keyword[True] ,
identifier[stdout] = identifier[subprocess] . identifier[DEVNULL] , identifier[stderr] = identifier[subprocess] . identifier[DEVNULL] )
keyword[else] :
keyword[if] identifier[verbose] :
identifier[process] = identifier[subprocess] . identifier[Popen] ( literal[string] . identifier[format] ( identifier[scriptpath] ), identifier[shell] = keyword[True] )
keyword[else] :
identifier[process] = identifier[subprocess] . identifier[Popen] ( literal[string] . identifier[format] ( identifier[scriptpath] ), identifier[shell] = keyword[True] ,
identifier[stdout] = identifier[subprocess] . identifier[DEVNULL] , identifier[stderr] = identifier[subprocess] . identifier[DEVNULL] )
identifier[process] . identifier[wait] ()
identifier[output] = identifier[output_function] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] keyword[not] identifier[debug] :
identifier[rmtree] ( literal[string] + identifier[id] + literal[string] )
keyword[raise] identifier[e]
keyword[except] identifier[KeyboardInterrupt] :
keyword[if] keyword[not] identifier[debug] :
identifier[rmtree] ( literal[string] + identifier[id] + literal[string] )
keyword[raise] identifier[KeyboardInterrupt]
keyword[if] keyword[not] identifier[debug] :
identifier[rmtree] ( literal[string] + identifier[id] + literal[string] )
keyword[return] identifier[output] | def launch_R_script(template, arguments, output_function=None, verbose=True, debug=False):
"""Launch an R script, starting from a template and replacing text in file
before execution.
Args:
template (str): path to the template of the R script
arguments (dict): Arguments that modify the template's placeholders
with arguments
output_function (function): Function to execute **after** the execution
of the R script, and its output is returned by this function. Used
traditionally as a function to retrieve the results of the
execution.
verbose (bool): Sets the verbosity of the R subprocess.
debug (bool): If True, the generated scripts are not deleted.
Return:
Returns the output of the ``output_function`` if not `None`
else `True` or `False` depending on whether the execution was
successful.
"""
id = str(uuid.uuid4())
os.makedirs('/tmp/cdt_R_script_' + id + '/')
try:
scriptpath = '/tmp/cdt_R_script_' + id + '/instance_{}'.format(os.path.basename(template))
copy(template, scriptpath)
with fileinput.FileInput(scriptpath, inplace=True) as file:
for line in file:
mline = line
for elt in arguments:
mline = mline.replace(elt, arguments[elt]) # depends on [control=['for'], data=['elt']]
print(mline, end='') # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['file']]
if output_function is None:
output = subprocess.call('Rscript --vanilla {}'.format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # depends on [control=['if'], data=[]]
else:
if verbose:
process = subprocess.Popen('Rscript --vanilla {}'.format(scriptpath), shell=True) # depends on [control=['if'], data=[]]
else:
process = subprocess.Popen('Rscript --vanilla {}'.format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
process.wait()
output = output_function() # depends on [control=['try'], data=[]]
# Cleaning up
except Exception as e:
if not debug:
rmtree('/tmp/cdt_R_script_' + id + '/') # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']]
except KeyboardInterrupt:
if not debug:
rmtree('/tmp/cdt_R_script_' + id + '/') # depends on [control=['if'], data=[]]
raise KeyboardInterrupt # depends on [control=['except'], data=[]]
if not debug:
rmtree('/tmp/cdt_R_script_' + id + '/') # depends on [control=['if'], data=[]]
return output |
def _parse_qualimap_rnaseq(table):
"""
Retrieve metrics of interest from globals table.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
col = col.replace(":", "").strip()
val = val.replace(",", "")
m = {col: val}
if val.find("/") > -1:
m = _parse_num_pct(col, val.replace("%", ""))
out.update(m)
return out | def function[_parse_qualimap_rnaseq, parameter[table]]:
constant[
Retrieve metrics of interest from globals table.
]
variable[out] assign[=] dictionary[[], []]
for taget[name[row]] in starred[call[name[table].find_all, parameter[constant[tr]]]] begin[:]
<ast.Tuple object at 0x7da1b19855a0> assign[=] <ast.ListComp object at 0x7da1b1987700>
variable[col] assign[=] call[call[name[col].replace, parameter[constant[:], constant[]]].strip, parameter[]]
variable[val] assign[=] call[name[val].replace, parameter[constant[,], constant[]]]
variable[m] assign[=] dictionary[[<ast.Name object at 0x7da1b1985fc0>], [<ast.Name object at 0x7da1b1984070>]]
if compare[call[name[val].find, parameter[constant[/]]] greater[>] <ast.UnaryOp object at 0x7da1b1986dd0>] begin[:]
variable[m] assign[=] call[name[_parse_num_pct], parameter[name[col], call[name[val].replace, parameter[constant[%], constant[]]]]]
call[name[out].update, parameter[name[m]]]
return[name[out]] | keyword[def] identifier[_parse_qualimap_rnaseq] ( identifier[table] ):
literal[string]
identifier[out] ={}
keyword[for] identifier[row] keyword[in] identifier[table] . identifier[find_all] ( literal[string] ):
identifier[col] , identifier[val] =[ identifier[x] . identifier[text] keyword[for] identifier[x] keyword[in] identifier[row] . identifier[find_all] ( literal[string] )]
identifier[col] = identifier[col] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] ()
identifier[val] = identifier[val] . identifier[replace] ( literal[string] , literal[string] )
identifier[m] ={ identifier[col] : identifier[val] }
keyword[if] identifier[val] . identifier[find] ( literal[string] )>- literal[int] :
identifier[m] = identifier[_parse_num_pct] ( identifier[col] , identifier[val] . identifier[replace] ( literal[string] , literal[string] ))
identifier[out] . identifier[update] ( identifier[m] )
keyword[return] identifier[out] | def _parse_qualimap_rnaseq(table):
"""
Retrieve metrics of interest from globals table.
"""
out = {}
for row in table.find_all('tr'):
(col, val) = [x.text for x in row.find_all('td')]
col = col.replace(':', '').strip()
val = val.replace(',', '')
m = {col: val}
if val.find('/') > -1:
m = _parse_num_pct(col, val.replace('%', '')) # depends on [control=['if'], data=[]]
out.update(m) # depends on [control=['for'], data=['row']]
return out |
def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20, db_from=-20.0, db_to=-20.0):
    """Append the sox 'compand' effect to manipulate dynamic range.

    Takes 6 parameters: attack (seconds), decay (seconds), soft_knee
    (ex. 6 results in 6:1 compression ratio), threshold (a negative value
    in dB), the level below which the signal will NOT be companded
    (a negative value in dB), and the level above which the signal will
    NOT be companded (a negative value in dB).
    """
    self.command.extend([
        'compand',
        '{},{}'.format(attack, decay),
        '{}:{},{},{}'.format(soft_knee, threshold, db_from, db_to),
    ])
    return self
constant[compand takes 6 parameters:
attack (seconds), decay (seconds), soft_knee (ex. 6 results
in 6:1 compression ratio), threshold (a negative value
in dB), the level below which the signal will NOT be companded
(a negative value in dB), the level above which the signal will
NOT be companded (a negative value in dB). This effect
manipulates dynamic range of the input file.
]
call[name[self].command.append, parameter[constant[compand]]]
call[name[self].command.append, parameter[binary_operation[binary_operation[call[name[str], parameter[name[attack]]] + constant[,]] + call[name[str], parameter[name[decay]]]]]]
call[name[self].command.append, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[str], parameter[name[soft_knee]]] + constant[:]] + call[name[str], parameter[name[threshold]]]] + constant[,]] + call[name[str], parameter[name[db_from]]]] + constant[,]] + call[name[str], parameter[name[db_to]]]]]]
return[name[self]] | keyword[def] identifier[compand] ( identifier[self] , identifier[attack] = literal[int] , identifier[decay] = literal[int] , identifier[soft_knee] = literal[int] , identifier[threshold] =- literal[int] , identifier[db_from] =- literal[int] , identifier[db_to] =- literal[int] ):
literal[string]
identifier[self] . identifier[command] . identifier[append] ( literal[string] )
identifier[self] . identifier[command] . identifier[append] ( identifier[str] ( identifier[attack] )+ literal[string] + identifier[str] ( identifier[decay] ))
identifier[self] . identifier[command] . identifier[append] ( identifier[str] ( identifier[soft_knee] )+ literal[string] + identifier[str] ( identifier[threshold] )+ literal[string] + identifier[str] ( identifier[db_from] )+ literal[string] + identifier[str] ( identifier[db_to] ))
keyword[return] identifier[self] | def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20, db_from=-20.0, db_to=-20.0):
"""compand takes 6 parameters:
attack (seconds), decay (seconds), soft_knee (ex. 6 results
in 6:1 compression ratio), threshold (a negative value
in dB), the level below which the signal will NOT be companded
(a negative value in dB), the level above which the signal will
NOT be companded (a negative value in dB). This effect
manipulates dynamic range of the input file.
"""
self.command.append('compand')
self.command.append(str(attack) + ',' + str(decay))
self.command.append(str(soft_knee) + ':' + str(threshold) + ',' + str(db_from) + ',' + str(db_to))
return self |
def prepare_schedule(schedule):
    """Make the schedule JSON-serializable.

    datetime.time start/end values are converted into plain
    (hour, minute) tuples; the event payload is passed through.
    """
    return {
        room: [((ev[0].hour, ev[0].minute),
                (ev[1].hour, ev[1].minute),
                ev[2])
               for ev in events]
        for room, events in schedule.items()
    }
constant[ Prepares the schedule to be JSONified. That is: convert the non
JSON serializable objects such as the datetime.time's ]
variable[ret] assign[=] dictionary[[], []]
for taget[name[room]] in starred[name[schedule]] begin[:]
call[name[ret]][name[room]] assign[=] list[[]]
for taget[name[event]] in starred[call[name[schedule]][name[room]]] begin[:]
call[call[name[ret]][name[room]].append, parameter[tuple[[<ast.Tuple object at 0x7da20c76e5c0>, <ast.Tuple object at 0x7da20c76e200>, <ast.Subscript object at 0x7da20c76d5d0>]]]]
return[name[ret]] | keyword[def] identifier[prepare_schedule] ( identifier[schedule] ):
literal[string]
identifier[ret] ={}
keyword[for] identifier[room] keyword[in] identifier[schedule] :
identifier[ret] [ identifier[room] ]=[]
keyword[for] identifier[event] keyword[in] identifier[schedule] [ identifier[room] ]:
identifier[ret] [ identifier[room] ]. identifier[append] ((( identifier[event] [ literal[int] ]. identifier[hour] ,
identifier[event] [ literal[int] ]. identifier[minute] ),
( identifier[event] [ literal[int] ]. identifier[hour] ,
identifier[event] [ literal[int] ]. identifier[minute] ),
identifier[event] [ literal[int] ]))
keyword[return] identifier[ret] | def prepare_schedule(schedule):
""" Prepares the schedule to be JSONified. That is: convert the non
JSON serializable objects such as the datetime.time's """
ret = {}
for room in schedule:
ret[room] = []
for event in schedule[room]:
ret[room].append(((event[0].hour, event[0].minute), (event[1].hour, event[1].minute), event[2])) # depends on [control=['for'], data=['event']] # depends on [control=['for'], data=['room']]
return ret |
def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):
    """Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
    after receiving a callback.
    Parameters
    ----------
    file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
        A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
        case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
        single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
        or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
    file_name : `string`
        The file_name you wish to associate this fastx file with at One Codex.
    fields : `dict`
        Additional data fields to include as JSON in the POST.
    callback_url : `string`
        API callback at One Codex which will trigger a pull from this S3 bucket.
    Raises
    ------
    UploadException
        In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
    Returns
    -------
    `dict` : JSON results from internal confirm import callback URL
    """
    # Deferred imports keep boto3 an optional dependency for callers that
    # never hit this code path.
    import boto3
    from boto3.s3.transfer import TransferConfig
    from boto3.exceptions import S3UploadFailedError
    # actually do the upload, using the scoped credentials supplied by the
    # One Codex API in `fields`
    client = boto3.client(
        "s3",
        aws_access_key_id=fields["upload_aws_access_key_id"],
        aws_secret_access_key=fields["upload_aws_secret_access_key"],
    )
    # if boto uses threads, ctrl+c won't work
    config = TransferConfig(use_threads=False)
    # let boto3 update our progressbar rather than our FASTX wrappers, if applicable
    boto_kwargs = {}
    if hasattr(file_obj, "progressbar"):
        boto_kwargs["Callback"] = file_obj.progressbar.update
        # detach the progressbar so the wrapper does not also advance it
        file_obj.progressbar = None
    try:
        # Server-side AES256 encryption is required by the destination bucket.
        client.upload_fileobj(
            file_obj,
            fields["s3_bucket"],
            fields["file_id"],
            ExtraArgs={"ServerSideEncryption": "AES256"},
            Config=config,
            **boto_kwargs
        )
    except S3UploadFailedError:
        # NOTE(review): raise_connectivity_error is assumed to always raise;
        # otherwise execution would fall through with no upload -- confirm.
        raise_connectivity_error(file_name)
    # issue a callback so One Codex pulls the object out of the intermediate
    # bucket and imports it
    try:
        resp = session.post(
            callback_url,
            json={
                "s3_path": "s3://{}/{}".format(fields["s3_bucket"], fields["file_id"]),
                "filename": file_name,
                "import_as_document": fields.get("import_as_document", False),
            },
        )
    except requests.exceptions.ConnectionError:
        raise_connectivity_error(file_name)
    # any non-200 response from the callback is treated as a fatal error
    if resp.status_code != 200:
        raise_connectivity_error(file_name)
    try:
        return resp.json()
    except ValueError:
        # callback succeeded but returned a non-JSON body
        return {}
constant[Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
callback_url : `string`
API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL
]
import module[boto3]
from relative_module[boto3.s3.transfer] import module[TransferConfig]
from relative_module[boto3.exceptions] import module[S3UploadFailedError]
variable[client] assign[=] call[name[boto3].client, parameter[constant[s3]]]
variable[config] assign[=] call[name[TransferConfig], parameter[]]
variable[boto_kwargs] assign[=] dictionary[[], []]
if call[name[hasattr], parameter[name[file_obj], constant[progressbar]]] begin[:]
call[name[boto_kwargs]][constant[Callback]] assign[=] name[file_obj].progressbar.update
name[file_obj].progressbar assign[=] constant[None]
<ast.Try object at 0x7da20c993700>
<ast.Try object at 0x7da20c992da0>
if compare[name[resp].status_code not_equal[!=] constant[200]] begin[:]
call[name[raise_connectivity_error], parameter[name[file_name]]]
<ast.Try object at 0x7da20c991180> | keyword[def] identifier[_s3_intermediate_upload] ( identifier[file_obj] , identifier[file_name] , identifier[fields] , identifier[session] , identifier[callback_url] ):
literal[string]
keyword[import] identifier[boto3]
keyword[from] identifier[boto3] . identifier[s3] . identifier[transfer] keyword[import] identifier[TransferConfig]
keyword[from] identifier[boto3] . identifier[exceptions] keyword[import] identifier[S3UploadFailedError]
identifier[client] = identifier[boto3] . identifier[client] (
literal[string] ,
identifier[aws_access_key_id] = identifier[fields] [ literal[string] ],
identifier[aws_secret_access_key] = identifier[fields] [ literal[string] ],
)
identifier[config] = identifier[TransferConfig] ( identifier[use_threads] = keyword[False] )
identifier[boto_kwargs] ={}
keyword[if] identifier[hasattr] ( identifier[file_obj] , literal[string] ):
identifier[boto_kwargs] [ literal[string] ]= identifier[file_obj] . identifier[progressbar] . identifier[update]
identifier[file_obj] . identifier[progressbar] = keyword[None]
keyword[try] :
identifier[client] . identifier[upload_fileobj] (
identifier[file_obj] ,
identifier[fields] [ literal[string] ],
identifier[fields] [ literal[string] ],
identifier[ExtraArgs] ={ literal[string] : literal[string] },
identifier[Config] = identifier[config] ,
** identifier[boto_kwargs]
)
keyword[except] identifier[S3UploadFailedError] :
identifier[raise_connectivity_error] ( identifier[file_name] )
keyword[try] :
identifier[resp] = identifier[session] . identifier[post] (
identifier[callback_url] ,
identifier[json] ={
literal[string] : literal[string] . identifier[format] ( identifier[fields] [ literal[string] ], identifier[fields] [ literal[string] ]),
literal[string] : identifier[file_name] ,
literal[string] : identifier[fields] . identifier[get] ( literal[string] , keyword[False] ),
},
)
keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] :
identifier[raise_connectivity_error] ( identifier[file_name] )
keyword[if] identifier[resp] . identifier[status_code] != literal[int] :
identifier[raise_connectivity_error] ( identifier[file_name] )
keyword[try] :
keyword[return] identifier[resp] . identifier[json] ()
keyword[except] identifier[ValueError] :
keyword[return] {} | def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):
"""Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
callback_url : `string`
API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# actually do the upload
client = boto3.client('s3', aws_access_key_id=fields['upload_aws_access_key_id'], aws_secret_access_key=fields['upload_aws_secret_access_key'])
# if boto uses threads, ctrl+c won't work
config = TransferConfig(use_threads=False)
# let boto3 update our progressbar rather than our FASTX wrappers, if applicable
boto_kwargs = {}
if hasattr(file_obj, 'progressbar'):
boto_kwargs['Callback'] = file_obj.progressbar.update
file_obj.progressbar = None # depends on [control=['if'], data=[]]
try:
client.upload_fileobj(file_obj, fields['s3_bucket'], fields['file_id'], ExtraArgs={'ServerSideEncryption': 'AES256'}, Config=config, **boto_kwargs) # depends on [control=['try'], data=[]]
except S3UploadFailedError:
raise_connectivity_error(file_name) # depends on [control=['except'], data=[]]
# issue a callback
try:
resp = session.post(callback_url, json={'s3_path': 's3://{}/{}'.format(fields['s3_bucket'], fields['file_id']), 'filename': file_name, 'import_as_document': fields.get('import_as_document', False)}) # depends on [control=['try'], data=[]]
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name) # depends on [control=['except'], data=[]]
if resp.status_code != 200:
raise_connectivity_error(file_name) # depends on [control=['if'], data=[]]
try:
return resp.json() # depends on [control=['try'], data=[]]
except ValueError:
return {} # depends on [control=['except'], data=[]] |
def on_created(self, event):
    """Watchdog 'created' handler: index newly created files (not dirs)."""
    logger.debug("file created: %s", event.src_path)
    if event.is_directory:
        return
    self.update_file(event.src_path)
constant[ on_created handler ]
call[name[logger].debug, parameter[constant[file created: %s], name[event].src_path]]
if <ast.UnaryOp object at 0x7da1b26ad720> begin[:]
call[name[self].update_file, parameter[name[event].src_path]] | keyword[def] identifier[on_created] ( identifier[self] , identifier[event] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[event] . identifier[src_path] )
keyword[if] keyword[not] identifier[event] . identifier[is_directory] :
identifier[self] . identifier[update_file] ( identifier[event] . identifier[src_path] ) | def on_created(self, event):
""" on_created handler """
logger.debug('file created: %s', event.src_path)
if not event.is_directory:
self.update_file(event.src_path) # depends on [control=['if'], data=[]] |
def set_high_water_mark(socket, config):
    """Apply the configured high water mark to a zmq socket in a way that
    is cross-compatible with zeromq2 and zeromq3.
    """
    hwm = config['high_water_mark']
    if not hwm:
        return
    if hasattr(zmq, 'HWM'):
        # zeromq2 exposes a single combined send/receive option.
        socket.setsockopt(zmq.HWM, hwm)
    else:
        # zeromq3 split the option into send and receive sides.
        socket.setsockopt(zmq.SNDHWM, hwm)
        socket.setsockopt(zmq.RCVHWM, hwm)
constant[ Set a high water mark on the zmq socket. Do so in a way that is
cross-compatible with zeromq2 and zeromq3.
]
if call[name[config]][constant[high_water_mark]] begin[:]
if call[name[hasattr], parameter[name[zmq], constant[HWM]]] begin[:]
call[name[socket].setsockopt, parameter[name[zmq].HWM, call[name[config]][constant[high_water_mark]]]] | keyword[def] identifier[set_high_water_mark] ( identifier[socket] , identifier[config] ):
literal[string]
keyword[if] identifier[config] [ literal[string] ]:
keyword[if] identifier[hasattr] ( identifier[zmq] , literal[string] ):
identifier[socket] . identifier[setsockopt] ( identifier[zmq] . identifier[HWM] , identifier[config] [ literal[string] ])
keyword[else] :
identifier[socket] . identifier[setsockopt] ( identifier[zmq] . identifier[SNDHWM] , identifier[config] [ literal[string] ])
identifier[socket] . identifier[setsockopt] ( identifier[zmq] . identifier[RCVHWM] , identifier[config] [ literal[string] ]) | def set_high_water_mark(socket, config):
""" Set a high water mark on the zmq socket. Do so in a way that is
cross-compatible with zeromq2 and zeromq3.
"""
if config['high_water_mark']:
if hasattr(zmq, 'HWM'):
# zeromq2
socket.setsockopt(zmq.HWM, config['high_water_mark']) # depends on [control=['if'], data=[]]
else:
# zeromq3
socket.setsockopt(zmq.SNDHWM, config['high_water_mark'])
socket.setsockopt(zmq.RCVHWM, config['high_water_mark']) # depends on [control=['if'], data=[]] |
def update(self):
    """Refresh this object's cached JSON data via a GET to its API URL."""
    response = self._request(method='GET', url=self.API)
    self._json = response._json
constant[
Update our object's data
]
name[self]._json assign[=] call[name[self]._request, parameter[]]._json | keyword[def] identifier[update] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_json] = identifier[self] . identifier[_request] (
identifier[method] = literal[string] ,
identifier[url] = identifier[self] . identifier[API]
). identifier[_json] | def update(self):
"""
Update our object's data
"""
self._json = self._request(method='GET', url=self.API)._json |
def date2seas( date, seas=['DJF','MAM','JJA','SON'], shift=1):
    """
    Map julian dates (CNES days) to CDO-style season labels.

    :param date: julian dates (CNES days), array-like.
    :param seas: season labels, one per row of the internal 4x3 month
        table. !! CAUTION !! must be accompanied by a matching ``shift``.
    :param shift: backward-in-time offset of the first month of the first
        season, in months from January (e.g. December -> 1, November -> 2).
    :return: tuple ``(outseas, outseas_ind)`` -- the season label per date
        and the corresponding row index into ``seas``.
    """
    # 4x3 table of month numbers, one row per season. np.roll on a 2-D
    # array with no axis rolls the *flattened* sequence, so with shift=1
    # row 0 becomes (12, 1, 2), i.e. DJF.
    corr_months=np.roll(np.arange(12).reshape((4,3))+1,shift)
    nt = len(date)
    # NOTE(review): cnes_convert is defined elsewhere; assumed to return a
    # 2-tuple whose second element is a sequence of datetime objects with a
    # .month attribute -- confirm against its definition.
    outvec=(cnes_convert(date))[1]
    month=np.array([d.month for d in outvec])
    # Season index per date; initialized to -1 ("not yet assigned").
    outseas_ind=-np.ones(nt)
    # outseas=np.repeat(' ',nt)
    # np.where(m == corr_months) yields (row, col) index arrays; the single
    # matching row is the season index for month m.
    for i,m in enumerate(month) : outseas_ind[i] = int(np.where(m == corr_months)[0])
    outseas =[seas[int(i)] for i in outseas_ind]
    return outseas, outseas_ind
constant[
#===============================================================================
# ;+
# ;
# ; CDO_date2seas : provides the equivalent CDO season to a julian date array
# ;
# ; @param date {in}{required}{type=NUMERIC} julian date (CNES days)
# ; @keyword seas {in}{optional}{type=STRING} seasons vector !! CAUTION !! This <br />
# ; must be accompanied by the SHIFT variable
# ; @keyword shift {in}{optional}{type=NUMERIC} Backward in time offset of the
# ; first element of SEAS vector in number of months from January <br />
# ; (e.g. Decembre -> 1, Novembre -> 2, etc... )
# ;
# ;
# ;-
#===============================================================================
]
variable[corr_months] assign[=] call[name[np].roll, parameter[binary_operation[call[call[name[np].arange, parameter[constant[12]]].reshape, parameter[tuple[[<ast.Constant object at 0x7da1b0862320>, <ast.Constant object at 0x7da1b0861120>]]]] + constant[1]], name[shift]]]
variable[nt] assign[=] call[name[len], parameter[name[date]]]
variable[outvec] assign[=] call[call[name[cnes_convert], parameter[name[date]]]][constant[1]]
variable[month] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b0861090>]]
variable[outseas_ind] assign[=] <ast.UnaryOp object at 0x7da1b0860280>
for taget[tuple[[<ast.Name object at 0x7da1b08619c0>, <ast.Name object at 0x7da1b0861b70>]]] in starred[call[name[enumerate], parameter[name[month]]]] begin[:]
call[name[outseas_ind]][name[i]] assign[=] call[name[int], parameter[call[call[name[np].where, parameter[compare[name[m] equal[==] name[corr_months]]]]][constant[0]]]]
variable[outseas] assign[=] <ast.ListComp object at 0x7da1b08627a0>
return[tuple[[<ast.Name object at 0x7da1b0860e20>, <ast.Name object at 0x7da1b0860dc0>]]] | keyword[def] identifier[date2seas] ( identifier[date] , identifier[seas] =[ literal[string] , literal[string] , literal[string] , literal[string] ], identifier[shift] = literal[int] ):
literal[string]
identifier[corr_months] = identifier[np] . identifier[roll] ( identifier[np] . identifier[arange] ( literal[int] ). identifier[reshape] (( literal[int] , literal[int] ))+ literal[int] , identifier[shift] )
identifier[nt] = identifier[len] ( identifier[date] )
identifier[outvec] =( identifier[cnes_convert] ( identifier[date] ))[ literal[int] ]
identifier[month] = identifier[np] . identifier[array] ([ identifier[d] . identifier[month] keyword[for] identifier[d] keyword[in] identifier[outvec] ])
identifier[outseas_ind] =- identifier[np] . identifier[ones] ( identifier[nt] )
keyword[for] identifier[i] , identifier[m] keyword[in] identifier[enumerate] ( identifier[month] ): identifier[outseas_ind] [ identifier[i] ]= identifier[int] ( identifier[np] . identifier[where] ( identifier[m] == identifier[corr_months] )[ literal[int] ])
identifier[outseas] =[ identifier[seas] [ identifier[int] ( identifier[i] )] keyword[for] identifier[i] keyword[in] identifier[outseas_ind] ]
keyword[return] identifier[outseas] , identifier[outseas_ind] | def date2seas(date, seas=['DJF', 'MAM', 'JJA', 'SON'], shift=1):
"""
#===============================================================================
# ;+
# ;
# ; CDO_date2seas : provides the equivalent CDO season to a julian date array
# ;
# ; @param date {in}{required}{type=NUMERIC} julian date (CNES days)
# ; @keyword seas {in}{optional}{type=STRING} seasons vector !! CAUTION !! This <br />
# ; must be accompanied by the SHIFT variable
# ; @keyword shift {in}{optional}{type=NUMERIC} Backward in time offset of the
# ; first element of SEAS vector in number of months from January <br />
# ; (e.g. Decembre -> 1, Novembre -> 2, etc... )
# ;
# ;
# ;-
#===============================================================================
"""
corr_months = np.roll(np.arange(12).reshape((4, 3)) + 1, shift)
nt = len(date)
outvec = cnes_convert(date)[1]
month = np.array([d.month for d in outvec])
outseas_ind = -np.ones(nt) # outseas=np.repeat(' ',nt)
for (i, m) in enumerate(month):
outseas_ind[i] = int(np.where(m == corr_months)[0]) # depends on [control=['for'], data=[]]
outseas = [seas[int(i)] for i in outseas_ind]
return (outseas, outseas_ind) |
def view_limits(self, vmin, vmax):
"""
Try to choose the view limits intelligently.
"""
b = self._transform.base
if vmax < vmin:
vmin, vmax = vmax, vmin
if not matplotlib.ticker.is_decade(abs(vmin), b):
if vmin < 0:
vmin = -matplotlib.ticker.decade_up(-vmin, b)
else:
vmin = matplotlib.ticker.decade_down(vmin, b)
if not matplotlib.ticker.is_decade(abs(vmax), b):
if vmax < 0:
vmax = -matplotlib.ticker.decade_down(-vmax, b)
else:
vmax = matplotlib.ticker.decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -matplotlib.ticker.decade_up(-vmin, b)
vmax = -matplotlib.ticker.decade_down(-vmax, b)
else:
vmin = matplotlib.ticker.decade_down(vmin, b)
vmax = matplotlib.ticker.decade_up(vmax, b)
result = matplotlib.transforms.nonsingular(vmin, vmax)
return result | def function[view_limits, parameter[self, vmin, vmax]]:
constant[
Try to choose the view limits intelligently.
]
variable[b] assign[=] name[self]._transform.base
if compare[name[vmax] less[<] name[vmin]] begin[:]
<ast.Tuple object at 0x7da204620d60> assign[=] tuple[[<ast.Name object at 0x7da204620ee0>, <ast.Name object at 0x7da204623790>]]
if <ast.UnaryOp object at 0x7da204622410> begin[:]
if compare[name[vmin] less[<] constant[0]] begin[:]
variable[vmin] assign[=] <ast.UnaryOp object at 0x7da2044c0940>
if <ast.UnaryOp object at 0x7da2044c3b20> begin[:]
if compare[name[vmax] less[<] constant[0]] begin[:]
variable[vmax] assign[=] <ast.UnaryOp object at 0x7da1b1bb0e20>
if compare[name[vmin] equal[==] name[vmax]] begin[:]
if compare[name[vmin] less[<] constant[0]] begin[:]
variable[vmin] assign[=] <ast.UnaryOp object at 0x7da1b1bb32b0>
variable[vmax] assign[=] <ast.UnaryOp object at 0x7da1b1bb30d0>
variable[result] assign[=] call[name[matplotlib].transforms.nonsingular, parameter[name[vmin], name[vmax]]]
return[name[result]] | keyword[def] identifier[view_limits] ( identifier[self] , identifier[vmin] , identifier[vmax] ):
literal[string]
identifier[b] = identifier[self] . identifier[_transform] . identifier[base]
keyword[if] identifier[vmax] < identifier[vmin] :
identifier[vmin] , identifier[vmax] = identifier[vmax] , identifier[vmin]
keyword[if] keyword[not] identifier[matplotlib] . identifier[ticker] . identifier[is_decade] ( identifier[abs] ( identifier[vmin] ), identifier[b] ):
keyword[if] identifier[vmin] < literal[int] :
identifier[vmin] =- identifier[matplotlib] . identifier[ticker] . identifier[decade_up] (- identifier[vmin] , identifier[b] )
keyword[else] :
identifier[vmin] = identifier[matplotlib] . identifier[ticker] . identifier[decade_down] ( identifier[vmin] , identifier[b] )
keyword[if] keyword[not] identifier[matplotlib] . identifier[ticker] . identifier[is_decade] ( identifier[abs] ( identifier[vmax] ), identifier[b] ):
keyword[if] identifier[vmax] < literal[int] :
identifier[vmax] =- identifier[matplotlib] . identifier[ticker] . identifier[decade_down] (- identifier[vmax] , identifier[b] )
keyword[else] :
identifier[vmax] = identifier[matplotlib] . identifier[ticker] . identifier[decade_up] ( identifier[vmax] , identifier[b] )
keyword[if] identifier[vmin] == identifier[vmax] :
keyword[if] identifier[vmin] < literal[int] :
identifier[vmin] =- identifier[matplotlib] . identifier[ticker] . identifier[decade_up] (- identifier[vmin] , identifier[b] )
identifier[vmax] =- identifier[matplotlib] . identifier[ticker] . identifier[decade_down] (- identifier[vmax] , identifier[b] )
keyword[else] :
identifier[vmin] = identifier[matplotlib] . identifier[ticker] . identifier[decade_down] ( identifier[vmin] , identifier[b] )
identifier[vmax] = identifier[matplotlib] . identifier[ticker] . identifier[decade_up] ( identifier[vmax] , identifier[b] )
identifier[result] = identifier[matplotlib] . identifier[transforms] . identifier[nonsingular] ( identifier[vmin] , identifier[vmax] )
keyword[return] identifier[result] | def view_limits(self, vmin, vmax):
"""
Try to choose the view limits intelligently.
"""
b = self._transform.base
if vmax < vmin:
(vmin, vmax) = (vmax, vmin) # depends on [control=['if'], data=['vmax', 'vmin']]
if not matplotlib.ticker.is_decade(abs(vmin), b):
if vmin < 0:
vmin = -matplotlib.ticker.decade_up(-vmin, b) # depends on [control=['if'], data=['vmin']]
else:
vmin = matplotlib.ticker.decade_down(vmin, b) # depends on [control=['if'], data=[]]
if not matplotlib.ticker.is_decade(abs(vmax), b):
if vmax < 0:
vmax = -matplotlib.ticker.decade_down(-vmax, b) # depends on [control=['if'], data=['vmax']]
else:
vmax = matplotlib.ticker.decade_up(vmax, b) # depends on [control=['if'], data=[]]
if vmin == vmax:
if vmin < 0:
vmin = -matplotlib.ticker.decade_up(-vmin, b)
vmax = -matplotlib.ticker.decade_down(-vmax, b) # depends on [control=['if'], data=['vmin']]
else:
vmin = matplotlib.ticker.decade_down(vmin, b)
vmax = matplotlib.ticker.decade_up(vmax, b) # depends on [control=['if'], data=['vmin', 'vmax']]
result = matplotlib.transforms.nonsingular(vmin, vmax)
return result |
def create_subnet_group(name, description, subnet_ids, tags=None,
region=None, key=None, keyid=None, profile=None):
'''
Create an RDS subnet group
CLI example to create an RDS subnet group::
salt myminion boto_rds.create_subnet_group my-subnet-group \
"group description" '[subnet-12345678, subnet-87654321]' \
region=us-east-1
'''
res = __salt__['boto_rds.subnet_group_exists'](name, tags, region, key,
keyid, profile)
if res.get('exists'):
return {'exists': bool(res)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)}
taglist = _tag_doc(tags)
rds = conn.create_db_subnet_group(DBSubnetGroupName=name,
DBSubnetGroupDescription=description,
SubnetIds=subnet_ids, Tags=taglist)
return {'created': bool(rds)}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | def function[create_subnet_group, parameter[name, description, subnet_ids, tags, region, key, keyid, profile]]:
constant[
Create an RDS subnet group
CLI example to create an RDS subnet group::
salt myminion boto_rds.create_subnet_group my-subnet-group "group description" '[subnet-12345678, subnet-87654321]' region=us-east-1
]
variable[res] assign[=] call[call[name[__salt__]][constant[boto_rds.subnet_group_exists]], parameter[name[name], name[tags], name[region], name[key], name[keyid], name[profile]]]
if call[name[res].get, parameter[constant[exists]]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b2344e20>], [<ast.Call object at 0x7da1b2346860>]]]
<ast.Try object at 0x7da1b2344df0> | keyword[def] identifier[create_subnet_group] ( identifier[name] , identifier[description] , identifier[subnet_ids] , identifier[tags] = keyword[None] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
identifier[res] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[tags] , identifier[region] , identifier[key] ,
identifier[keyid] , identifier[profile] )
keyword[if] identifier[res] . identifier[get] ( literal[string] ):
keyword[return] { literal[string] : identifier[bool] ( identifier[res] )}
keyword[try] :
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[conn] :
keyword[return] { literal[string] : identifier[bool] ( identifier[conn] )}
identifier[taglist] = identifier[_tag_doc] ( identifier[tags] )
identifier[rds] = identifier[conn] . identifier[create_db_subnet_group] ( identifier[DBSubnetGroupName] = identifier[name] ,
identifier[DBSubnetGroupDescription] = identifier[description] ,
identifier[SubnetIds] = identifier[subnet_ids] , identifier[Tags] = identifier[taglist] )
keyword[return] { literal[string] : identifier[bool] ( identifier[rds] )}
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
keyword[return] { literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )} | def create_subnet_group(name, description, subnet_ids, tags=None, region=None, key=None, keyid=None, profile=None):
"""
Create an RDS subnet group
CLI example to create an RDS subnet group::
salt myminion boto_rds.create_subnet_group my-subnet-group "group description" '[subnet-12345678, subnet-87654321]' region=us-east-1
"""
res = __salt__['boto_rds.subnet_group_exists'](name, tags, region, key, keyid, profile)
if res.get('exists'):
return {'exists': bool(res)} # depends on [control=['if'], data=[]]
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)} # depends on [control=['if'], data=[]]
taglist = _tag_doc(tags)
rds = conn.create_db_subnet_group(DBSubnetGroupName=name, DBSubnetGroupDescription=description, SubnetIds=subnet_ids, Tags=taglist)
return {'created': bool(rds)} # depends on [control=['try'], data=[]]
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']] |
def set_cache(self, value):
"""
Assign the cache in cache.
"""
value.update(self.cache)
return self.cache_backend.set(self.cache_key, value) | def function[set_cache, parameter[self, value]]:
constant[
Assign the cache in cache.
]
call[name[value].update, parameter[name[self].cache]]
return[call[name[self].cache_backend.set, parameter[name[self].cache_key, name[value]]]] | keyword[def] identifier[set_cache] ( identifier[self] , identifier[value] ):
literal[string]
identifier[value] . identifier[update] ( identifier[self] . identifier[cache] )
keyword[return] identifier[self] . identifier[cache_backend] . identifier[set] ( identifier[self] . identifier[cache_key] , identifier[value] ) | def set_cache(self, value):
"""
Assign the cache in cache.
"""
value.update(self.cache)
return self.cache_backend.set(self.cache_key, value) |
def path_entry(self, path):
"""
The parsed data given a path, which is separated into its directory
and entry name.
"""
if path[0] != '/':
return None
path_parts = path.split('/')
# Note that here the first element will be '' because it's before the
# first separator. That's OK, the join puts it back together.
directory = '/'.join(path_parts[:-1])
name = path_parts[-1]
if directory not in self.listings:
return None
if name not in self.listings[directory]['entries']:
return None
return self.listings[directory]['entries'][name] | def function[path_entry, parameter[self, path]]:
constant[
The parsed data given a path, which is separated into its directory
and entry name.
]
if compare[call[name[path]][constant[0]] not_equal[!=] constant[/]] begin[:]
return[constant[None]]
variable[path_parts] assign[=] call[name[path].split, parameter[constant[/]]]
variable[directory] assign[=] call[constant[/].join, parameter[call[name[path_parts]][<ast.Slice object at 0x7da20c991840>]]]
variable[name] assign[=] call[name[path_parts]][<ast.UnaryOp object at 0x7da20c9910f0>]
if compare[name[directory] <ast.NotIn object at 0x7da2590d7190> name[self].listings] begin[:]
return[constant[None]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> call[call[name[self].listings][name[directory]]][constant[entries]]] begin[:]
return[constant[None]]
return[call[call[call[name[self].listings][name[directory]]][constant[entries]]][name[name]]] | keyword[def] identifier[path_entry] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] identifier[path] [ literal[int] ]!= literal[string] :
keyword[return] keyword[None]
identifier[path_parts] = identifier[path] . identifier[split] ( literal[string] )
identifier[directory] = literal[string] . identifier[join] ( identifier[path_parts] [:- literal[int] ])
identifier[name] = identifier[path_parts] [- literal[int] ]
keyword[if] identifier[directory] keyword[not] keyword[in] identifier[self] . identifier[listings] :
keyword[return] keyword[None]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[listings] [ identifier[directory] ][ literal[string] ]:
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[listings] [ identifier[directory] ][ literal[string] ][ identifier[name] ] | def path_entry(self, path):
"""
The parsed data given a path, which is separated into its directory
and entry name.
"""
if path[0] != '/':
return None # depends on [control=['if'], data=[]]
path_parts = path.split('/')
# Note that here the first element will be '' because it's before the
# first separator. That's OK, the join puts it back together.
directory = '/'.join(path_parts[:-1])
name = path_parts[-1]
if directory not in self.listings:
return None # depends on [control=['if'], data=[]]
if name not in self.listings[directory]['entries']:
return None # depends on [control=['if'], data=[]]
return self.listings[directory]['entries'][name] |
def metrics(ty, query, query_type, **kwargs):
"""
Outputs runtime metrics collected from cocaine-runtime and its services.
This command shows runtime metrics collected from cocaine-runtime and its services during their
lifetime.
There are four kind of metrics available: gauges, counters, meters and timers.
\b
- Gauges - an instantaneous measurement of a value.
- Counters - just a gauge for an atomic integer instance.
- Meters - measures the rate of events over time (e.g., "requests per second"). In addition
to the mean rate, meters also track 1-, 5-, and 15-minute moving averages.
- Timers - measures both the rate that a particular piece of code is called and the
distribution of its duration.
Every metric in has a unique name, which is just a dotted-name string like "connections.count"
or "node.queue.size".
An output type can be configured using --type option. The default one results in plain
formatting where there is only one depth level.
As an alternative you can expanded the JSON tree by specifying --type=json option. The depth of
the result tree depends on metric name which is split by dot symbol.
The result output will be probably too large without any customization. To reduce this output
there are custom filters, which can be specified using --query option. Technically it's a
special metrics query language (MQL) which supports the following operations and functions:
\b
- contains(<expr>, <expr>) - checks whether the result of second expression contains in the
result of first expression. These expressions must resolve in strings. An output type of this
function is bool.
- name() - resolves in metric name.
- type() - resolves in metric type (counter, meter, etc.).
- tag(<expr>) - extracts custom metric tag and results in string.
- && - combines several expressions in one, which applies when all of them apply.
- || - combines several expressions in one, which applies when any of them apply.
- == - compares two expressions for equality.
- != - compares two expressions for an non-equality.
- Also string literals (alphanumeric with dots) can be used as an expressions, for
example "name() == locator.connections.accepted".
Priorities can be specified using braces as in usual math expressions.
The grammar for this query language is:
\b
expr ::= term ((AND | OR) term)*
term ::= factor ((EQ | NE) factor)*
factor ::= func | literal | number | LPAREN expr RPAREN
func ::= literal LPAREN expr (,expr)* RPAREN
literal ::= alphanum | .
number ::= <floating point number>
An example of the query, which returns all meters (for all services) and the number of accepted
connections for the Locator
service: "contains(type(), meter) || name() == locator.connections.accepted".
"""
ctx = Context(**kwargs)
ctx.execute_action('metrics', **{
'metrics': ctx.repo.create_secure_service('metrics'),
'ty': ty,
'query': query,
'query_type': query_type,
}) | def function[metrics, parameter[ty, query, query_type]]:
constant[
Outputs runtime metrics collected from cocaine-runtime and its services.
This command shows runtime metrics collected from cocaine-runtime and its services during their
lifetime.
There are four kind of metrics available: gauges, counters, meters and timers.
- Gauges - an instantaneous measurement of a value.
- Counters - just a gauge for an atomic integer instance.
- Meters - measures the rate of events over time (e.g., "requests per second"). In addition
to the mean rate, meters also track 1-, 5-, and 15-minute moving averages.
- Timers - measures both the rate that a particular piece of code is called and the
distribution of its duration.
Every metric in has a unique name, which is just a dotted-name string like "connections.count"
or "node.queue.size".
An output type can be configured using --type option. The default one results in plain
formatting where there is only one depth level.
As an alternative you can expanded the JSON tree by specifying --type=json option. The depth of
the result tree depends on metric name which is split by dot symbol.
The result output will be probably too large without any customization. To reduce this output
there are custom filters, which can be specified using --query option. Technically it's a
special metrics query language (MQL) which supports the following operations and functions:
- contains(<expr>, <expr>) - checks whether the result of second expression contains in the
result of first expression. These expressions must resolve in strings. An output type of this
function is bool.
- name() - resolves in metric name.
- type() - resolves in metric type (counter, meter, etc.).
- tag(<expr>) - extracts custom metric tag and results in string.
- && - combines several expressions in one, which applies when all of them apply.
- || - combines several expressions in one, which applies when any of them apply.
- == - compares two expressions for equality.
- != - compares two expressions for an non-equality.
- Also string literals (alphanumeric with dots) can be used as an expressions, for
example "name() == locator.connections.accepted".
Priorities can be specified using braces as in usual math expressions.
The grammar for this query language is:
expr ::= term ((AND | OR) term)*
term ::= factor ((EQ | NE) factor)*
factor ::= func | literal | number | LPAREN expr RPAREN
func ::= literal LPAREN expr (,expr)* RPAREN
literal ::= alphanum | .
number ::= <floating point number>
An example of the query, which returns all meters (for all services) and the number of accepted
connections for the Locator
service: "contains(type(), meter) || name() == locator.connections.accepted".
]
variable[ctx] assign[=] call[name[Context], parameter[]]
call[name[ctx].execute_action, parameter[constant[metrics]]] | keyword[def] identifier[metrics] ( identifier[ty] , identifier[query] , identifier[query_type] ,** identifier[kwargs] ):
literal[string]
identifier[ctx] = identifier[Context] (** identifier[kwargs] )
identifier[ctx] . identifier[execute_action] ( literal[string] ,**{
literal[string] : identifier[ctx] . identifier[repo] . identifier[create_secure_service] ( literal[string] ),
literal[string] : identifier[ty] ,
literal[string] : identifier[query] ,
literal[string] : identifier[query_type] ,
}) | def metrics(ty, query, query_type, **kwargs):
"""
Outputs runtime metrics collected from cocaine-runtime and its services.
This command shows runtime metrics collected from cocaine-runtime and its services during their
lifetime.
There are four kind of metrics available: gauges, counters, meters and timers.
\x08
- Gauges - an instantaneous measurement of a value.
- Counters - just a gauge for an atomic integer instance.
- Meters - measures the rate of events over time (e.g., "requests per second"). In addition
to the mean rate, meters also track 1-, 5-, and 15-minute moving averages.
- Timers - measures both the rate that a particular piece of code is called and the
distribution of its duration.
Every metric in has a unique name, which is just a dotted-name string like "connections.count"
or "node.queue.size".
An output type can be configured using --type option. The default one results in plain
formatting where there is only one depth level.
As an alternative you can expanded the JSON tree by specifying --type=json option. The depth of
the result tree depends on metric name which is split by dot symbol.
The result output will be probably too large without any customization. To reduce this output
there are custom filters, which can be specified using --query option. Technically it's a
special metrics query language (MQL) which supports the following operations and functions:
\x08
- contains(<expr>, <expr>) - checks whether the result of second expression contains in the
result of first expression. These expressions must resolve in strings. An output type of this
function is bool.
- name() - resolves in metric name.
- type() - resolves in metric type (counter, meter, etc.).
- tag(<expr>) - extracts custom metric tag and results in string.
- && - combines several expressions in one, which applies when all of them apply.
- || - combines several expressions in one, which applies when any of them apply.
- == - compares two expressions for equality.
- != - compares two expressions for an non-equality.
- Also string literals (alphanumeric with dots) can be used as an expressions, for
example "name() == locator.connections.accepted".
Priorities can be specified using braces as in usual math expressions.
The grammar for this query language is:
\x08
expr ::= term ((AND | OR) term)*
term ::= factor ((EQ | NE) factor)*
factor ::= func | literal | number | LPAREN expr RPAREN
func ::= literal LPAREN expr (,expr)* RPAREN
literal ::= alphanum | .
number ::= <floating point number>
An example of the query, which returns all meters (for all services) and the number of accepted
connections for the Locator
service: "contains(type(), meter) || name() == locator.connections.accepted".
"""
ctx = Context(**kwargs)
ctx.execute_action('metrics', **{'metrics': ctx.repo.create_secure_service('metrics'), 'ty': ty, 'query': query, 'query_type': query_type}) |
def regions(self):
"""gets the regions value"""
url = "%s/regions" % self.root
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | def function[regions, parameter[self]]:
constant[gets the regions value]
variable[url] assign[=] binary_operation[constant[%s/regions] <ast.Mod object at 0x7da2590d6920> name[self].root]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1228130>], [<ast.Constant object at 0x7da1b1228160>]]
return[call[name[self]._get, parameter[]]] | keyword[def] identifier[regions] ( identifier[self] ):
literal[string]
identifier[url] = literal[string] % identifier[self] . identifier[root]
identifier[params] ={ literal[string] : literal[string] }
keyword[return] identifier[self] . identifier[_get] ( identifier[url] = identifier[url] ,
identifier[param_dict] = identifier[params] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ) | def regions(self):
"""gets the regions value"""
url = '%s/regions' % self.root
params = {'f': 'json'}
return self._get(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port) |
def tx(self, message):
"""
Transmit a series of bytes
:param message: a list of bytes to send
:return: None
"""
message = message if isinstance(message, list) else [message]
length = len(message)
length_high_byte = (length & 0xff00) >> 8
length_low_byte = length & 0x00ff
message_with_length = [length_low_byte, length_high_byte] + message
sum1, sum2 = self._fletcher16_checksum(message_with_length)
message_with_length.append(sum1)
message_with_length.append(sum2)
message = [self._START_OF_FRAME]
for b in message_with_length:
if b in [self._START_OF_FRAME, self._END_OF_FRAME, self._ESC]:
message.append(self._ESC)
message.append(b ^ self._ESC_XOR)
else:
message.append(b)
message.append(self._END_OF_FRAME)
self._port.write(message) | def function[tx, parameter[self, message]]:
constant[
Transmit a series of bytes
:param message: a list of bytes to send
:return: None
]
variable[message] assign[=] <ast.IfExp object at 0x7da1afe0d0c0>
variable[length] assign[=] call[name[len], parameter[name[message]]]
variable[length_high_byte] assign[=] binary_operation[binary_operation[name[length] <ast.BitAnd object at 0x7da2590d6b60> constant[65280]] <ast.RShift object at 0x7da2590d6a40> constant[8]]
variable[length_low_byte] assign[=] binary_operation[name[length] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]
variable[message_with_length] assign[=] binary_operation[list[[<ast.Name object at 0x7da1afe0f760>, <ast.Name object at 0x7da1afe0ff70>]] + name[message]]
<ast.Tuple object at 0x7da1afe0ee90> assign[=] call[name[self]._fletcher16_checksum, parameter[name[message_with_length]]]
call[name[message_with_length].append, parameter[name[sum1]]]
call[name[message_with_length].append, parameter[name[sum2]]]
variable[message] assign[=] list[[<ast.Attribute object at 0x7da1afe0f0a0>]]
for taget[name[b]] in starred[name[message_with_length]] begin[:]
if compare[name[b] in list[[<ast.Attribute object at 0x7da1afe0eda0>, <ast.Attribute object at 0x7da1afe0d810>, <ast.Attribute object at 0x7da1afe0e050>]]] begin[:]
call[name[message].append, parameter[name[self]._ESC]]
call[name[message].append, parameter[binary_operation[name[b] <ast.BitXor object at 0x7da2590d6b00> name[self]._ESC_XOR]]]
call[name[message].append, parameter[name[self]._END_OF_FRAME]]
call[name[self]._port.write, parameter[name[message]]] | keyword[def] identifier[tx] ( identifier[self] , identifier[message] ):
literal[string]
identifier[message] = identifier[message] keyword[if] identifier[isinstance] ( identifier[message] , identifier[list] ) keyword[else] [ identifier[message] ]
identifier[length] = identifier[len] ( identifier[message] )
identifier[length_high_byte] =( identifier[length] & literal[int] )>> literal[int]
identifier[length_low_byte] = identifier[length] & literal[int]
identifier[message_with_length] =[ identifier[length_low_byte] , identifier[length_high_byte] ]+ identifier[message]
identifier[sum1] , identifier[sum2] = identifier[self] . identifier[_fletcher16_checksum] ( identifier[message_with_length] )
identifier[message_with_length] . identifier[append] ( identifier[sum1] )
identifier[message_with_length] . identifier[append] ( identifier[sum2] )
identifier[message] =[ identifier[self] . identifier[_START_OF_FRAME] ]
keyword[for] identifier[b] keyword[in] identifier[message_with_length] :
keyword[if] identifier[b] keyword[in] [ identifier[self] . identifier[_START_OF_FRAME] , identifier[self] . identifier[_END_OF_FRAME] , identifier[self] . identifier[_ESC] ]:
identifier[message] . identifier[append] ( identifier[self] . identifier[_ESC] )
identifier[message] . identifier[append] ( identifier[b] ^ identifier[self] . identifier[_ESC_XOR] )
keyword[else] :
identifier[message] . identifier[append] ( identifier[b] )
identifier[message] . identifier[append] ( identifier[self] . identifier[_END_OF_FRAME] )
identifier[self] . identifier[_port] . identifier[write] ( identifier[message] ) | def tx(self, message):
"""
Transmit a series of bytes
:param message: a list of bytes to send
:return: None
"""
message = message if isinstance(message, list) else [message]
length = len(message)
length_high_byte = (length & 65280) >> 8
length_low_byte = length & 255
message_with_length = [length_low_byte, length_high_byte] + message
(sum1, sum2) = self._fletcher16_checksum(message_with_length)
message_with_length.append(sum1)
message_with_length.append(sum2)
message = [self._START_OF_FRAME]
for b in message_with_length:
if b in [self._START_OF_FRAME, self._END_OF_FRAME, self._ESC]:
message.append(self._ESC)
message.append(b ^ self._ESC_XOR) # depends on [control=['if'], data=['b']]
else:
message.append(b) # depends on [control=['for'], data=['b']]
message.append(self._END_OF_FRAME)
self._port.write(message) |
def do_file_sub(self, srcpath, regexp, subst):
'''Apply a regexp substitution to a file archived by sosreport.
srcpath is the path in the archive where the file can be found. regexp
can be a regexp string or a compiled re object. subst is a string to
replace each occurance of regexp in the content of srcpath.
This function returns the number of replacements made.
'''
try:
path = self._get_dest_for_srcpath(srcpath)
self._log_debug("substituting scrpath '%s'" % srcpath)
self._log_debug("substituting '%s' for '%s' in '%s'"
% (subst, regexp, path))
if not path:
return 0
readable = self.archive.open_file(path)
content = readable.read()
if not isinstance(content, six.string_types):
content = content.decode('utf8', 'ignore')
result, replacements = re.subn(regexp, subst, content)
if replacements:
self.archive.add_string(result, srcpath)
else:
replacements = 0
except (OSError, IOError) as e:
# if trying to regexp a nonexisting file, dont log it as an
# error to stdout
if e.errno == errno.ENOENT:
msg = "file '%s' not collected, substitution skipped"
self._log_debug(msg % path)
else:
msg = "regex substitution failed for '%s' with: '%s'"
self._log_error(msg % (path, e))
replacements = 0
return replacements | def function[do_file_sub, parameter[self, srcpath, regexp, subst]]:
constant[Apply a regexp substitution to a file archived by sosreport.
srcpath is the path in the archive where the file can be found. regexp
can be a regexp string or a compiled re object. subst is a string to
replace each occurance of regexp in the content of srcpath.
This function returns the number of replacements made.
]
<ast.Try object at 0x7da1b17a8700>
return[name[replacements]] | keyword[def] identifier[do_file_sub] ( identifier[self] , identifier[srcpath] , identifier[regexp] , identifier[subst] ):
literal[string]
keyword[try] :
identifier[path] = identifier[self] . identifier[_get_dest_for_srcpath] ( identifier[srcpath] )
identifier[self] . identifier[_log_debug] ( literal[string] % identifier[srcpath] )
identifier[self] . identifier[_log_debug] ( literal[string]
%( identifier[subst] , identifier[regexp] , identifier[path] ))
keyword[if] keyword[not] identifier[path] :
keyword[return] literal[int]
identifier[readable] = identifier[self] . identifier[archive] . identifier[open_file] ( identifier[path] )
identifier[content] = identifier[readable] . identifier[read] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[content] , identifier[six] . identifier[string_types] ):
identifier[content] = identifier[content] . identifier[decode] ( literal[string] , literal[string] )
identifier[result] , identifier[replacements] = identifier[re] . identifier[subn] ( identifier[regexp] , identifier[subst] , identifier[content] )
keyword[if] identifier[replacements] :
identifier[self] . identifier[archive] . identifier[add_string] ( identifier[result] , identifier[srcpath] )
keyword[else] :
identifier[replacements] = literal[int]
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
identifier[msg] = literal[string]
identifier[self] . identifier[_log_debug] ( identifier[msg] % identifier[path] )
keyword[else] :
identifier[msg] = literal[string]
identifier[self] . identifier[_log_error] ( identifier[msg] %( identifier[path] , identifier[e] ))
identifier[replacements] = literal[int]
keyword[return] identifier[replacements] | def do_file_sub(self, srcpath, regexp, subst):
"""Apply a regexp substitution to a file archived by sosreport.
srcpath is the path in the archive where the file can be found. regexp
can be a regexp string or a compiled re object. subst is a string to
replace each occurance of regexp in the content of srcpath.
This function returns the number of replacements made.
"""
try:
path = self._get_dest_for_srcpath(srcpath)
self._log_debug("substituting scrpath '%s'" % srcpath)
self._log_debug("substituting '%s' for '%s' in '%s'" % (subst, regexp, path))
if not path:
return 0 # depends on [control=['if'], data=[]]
readable = self.archive.open_file(path)
content = readable.read()
if not isinstance(content, six.string_types):
content = content.decode('utf8', 'ignore') # depends on [control=['if'], data=[]]
(result, replacements) = re.subn(regexp, subst, content)
if replacements:
self.archive.add_string(result, srcpath) # depends on [control=['if'], data=[]]
else:
replacements = 0 # depends on [control=['try'], data=[]]
except (OSError, IOError) as e:
# if trying to regexp a nonexisting file, dont log it as an
# error to stdout
if e.errno == errno.ENOENT:
msg = "file '%s' not collected, substitution skipped"
self._log_debug(msg % path) # depends on [control=['if'], data=[]]
else:
msg = "regex substitution failed for '%s' with: '%s'"
self._log_error(msg % (path, e))
replacements = 0 # depends on [control=['except'], data=['e']]
return replacements |
def edit(self, name=None, description=None, version=None, **kwargs):
    """
    Edit Service details.

    .. versionadded:: 1.13

    :param name: (optional) name of the service to change.
    :type name: basestring or None
    :param description: (optional) description of the service.
    :type description: basestring or None
    :param version: (optional) version number of the service.
    :type version: basestring or None
    :param kwargs: (optional) additional keyword arguments to change.
    :type kwargs: dict or None
    :raises IllegalArgumentError: when you provide an illegal argument.
    :raises APIError: if the service could not be updated.
    """
    update_dict = {'id': self.id}
    if name:
        if not isinstance(name, str):
            raise IllegalArgumentError("name should be provided as a string")
        update_dict.update({'name': name})
    if description:
        if not isinstance(description, str):
            raise IllegalArgumentError("description should be provided as a string")
        update_dict.update({'description': description})
    if version:
        if not isinstance(version, str):
            # bugfix: this error previously (incorrectly) said "description"
            raise IllegalArgumentError("version should be provided as a string")
        # The API expects the version under the 'script_version' key.
        update_dict.update({'script_version': version})
    if kwargs:  # pragma: no cover
        update_dict.update(**kwargs)

    response = self._client._request(
        'PUT',
        self._client._build_url('service', service_id=self.id),
        json=update_dict)
    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise APIError("Could not update Service ({})".format(response))

    # Keep the local object in sync with the server-side changes.
    # NOTE(review): `self.description` is not updated here even when a new
    # description was sent — confirm whether that is intentional.
    if name:
        self.name = name
    if version:
        self.version = version
constant[
Edit Service details.
.. versionadded:: 1.13
:param name: (optional) name of the service to change.
:type name: basestring or None
:param description: (optional) description of the service.
:type description: basestring or None
:param version: (optional) version number of the service.
:type version: basestring or None
:param kwargs: (optional) additional keyword arguments to change.
:type kwargs: dict or None
:raises IllegalArgumentError: when you provide an illegal argument.
:raises APIError: if the service could not be updated.
]
variable[update_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b25ef160>], [<ast.Attribute object at 0x7da1b25ee650>]]
if name[name] begin[:]
if <ast.UnaryOp object at 0x7da1b25ef070> begin[:]
<ast.Raise object at 0x7da1b25ee350>
call[name[update_dict].update, parameter[dictionary[[<ast.Constant object at 0x7da1b25ed1b0>], [<ast.Name object at 0x7da1b25ef460>]]]]
if name[description] begin[:]
if <ast.UnaryOp object at 0x7da1b25ec6d0> begin[:]
<ast.Raise object at 0x7da1b25edc30>
call[name[update_dict].update, parameter[dictionary[[<ast.Constant object at 0x7da1b25efdf0>], [<ast.Name object at 0x7da1b25ef100>]]]]
if name[version] begin[:]
if <ast.UnaryOp object at 0x7da1b25ec520> begin[:]
<ast.Raise object at 0x7da1b25ee0e0>
call[name[update_dict].update, parameter[dictionary[[<ast.Constant object at 0x7da1b25ee4a0>], [<ast.Name object at 0x7da1b25ec670>]]]]
if name[kwargs] begin[:]
call[name[update_dict].update, parameter[]]
variable[response] assign[=] call[name[self]._client._request, parameter[constant[PUT], call[name[self]._client._build_url, parameter[constant[service]]]]]
if compare[name[response].status_code not_equal[!=] name[requests].codes.ok] begin[:]
<ast.Raise object at 0x7da1b25ed690>
if name[name] begin[:]
name[self].name assign[=] name[name]
if name[version] begin[:]
name[self].version assign[=] name[version] | keyword[def] identifier[edit] ( identifier[self] , identifier[name] = keyword[None] , identifier[description] = keyword[None] , identifier[version] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[update_dict] ={ literal[string] : identifier[self] . identifier[id] }
keyword[if] identifier[name] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[str] ):
keyword[raise] identifier[IllegalArgumentError] ( literal[string] )
identifier[update_dict] . identifier[update] ({ literal[string] : identifier[name] })
keyword[if] identifier[description] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[description] , identifier[str] ):
keyword[raise] identifier[IllegalArgumentError] ( literal[string] )
identifier[update_dict] . identifier[update] ({ literal[string] : identifier[description] })
keyword[if] identifier[version] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[version] , identifier[str] ):
keyword[raise] identifier[IllegalArgumentError] ( literal[string] )
identifier[update_dict] . identifier[update] ({ literal[string] : identifier[version] })
keyword[if] identifier[kwargs] :
identifier[update_dict] . identifier[update] (** identifier[kwargs] )
identifier[response] = identifier[self] . identifier[_client] . identifier[_request] ( literal[string] ,
identifier[self] . identifier[_client] . identifier[_build_url] ( literal[string] , identifier[service_id] = identifier[self] . identifier[id] ), identifier[json] = identifier[update_dict] )
keyword[if] identifier[response] . identifier[status_code] != identifier[requests] . identifier[codes] . identifier[ok] :
keyword[raise] identifier[APIError] ( literal[string] . identifier[format] ( identifier[response] ))
keyword[if] identifier[name] :
identifier[self] . identifier[name] = identifier[name]
keyword[if] identifier[version] :
identifier[self] . identifier[version] = identifier[version] | def edit(self, name=None, description=None, version=None, **kwargs):
"""
Edit Service details.
.. versionadded:: 1.13
:param name: (optional) name of the service to change.
:type name: basestring or None
:param description: (optional) description of the service.
:type description: basestring or None
:param version: (optional) version number of the service.
:type version: basestring or None
:param kwargs: (optional) additional keyword arguments to change.
:type kwargs: dict or None
:raises IllegalArgumentError: when you provide an illegal argument.
:raises APIError: if the service could not be updated.
"""
update_dict = {'id': self.id}
if name:
if not isinstance(name, str):
raise IllegalArgumentError('name should be provided as a string') # depends on [control=['if'], data=[]]
update_dict.update({'name': name}) # depends on [control=['if'], data=[]]
if description:
if not isinstance(description, str):
raise IllegalArgumentError('description should be provided as a string') # depends on [control=['if'], data=[]]
update_dict.update({'description': description}) # depends on [control=['if'], data=[]]
if version:
if not isinstance(version, str):
raise IllegalArgumentError('description should be provided as a string') # depends on [control=['if'], data=[]]
update_dict.update({'script_version': version}) # depends on [control=['if'], data=[]]
if kwargs: # pragma: no cover
update_dict.update(**kwargs) # depends on [control=['if'], data=[]]
response = self._client._request('PUT', self._client._build_url('service', service_id=self.id), json=update_dict)
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError('Could not update Service ({})'.format(response)) # depends on [control=['if'], data=[]]
if name:
self.name = name # depends on [control=['if'], data=[]]
if version:
self.version = version # depends on [control=['if'], data=[]] |
def iter_list_market_profit_and_loss(
        self, market_ids, chunk_size, **kwargs):
    """Split call to `list_market_profit_and_loss` into separate requests.

    :param list market_ids: List of market IDs
    :param int chunk_size: Number of records per chunk
    :param dict kwargs: Arguments passed to `list_market_profit_and_loss`
    """
    # Issue one request per chunk up front (the original starred generator
    # was also evaluated eagerly by chain(*...)), then flatten the results.
    per_chunk_results = [
        self.list_market_profit_and_loss(chunk, **kwargs)
        for chunk in utils.get_chunks(market_ids, chunk_size)
    ]
    return itertools.chain(*per_chunk_results)
constant[Split call to `list_market_profit_and_loss` into separate requests.
:param list market_ids: List of market IDs
:param int chunk_size: Number of records per chunk
:param dict kwargs: Arguments passed to `list_market_profit_and_loss`
]
return[call[name[itertools].chain, parameter[<ast.Starred object at 0x7da1b0ec21d0>]]] | keyword[def] identifier[iter_list_market_profit_and_loss] (
identifier[self] , identifier[market_ids] , identifier[chunk_size] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[itertools] . identifier[chain] (*(
identifier[self] . identifier[list_market_profit_and_loss] ( identifier[market_chunk] ,** identifier[kwargs] )
keyword[for] identifier[market_chunk] keyword[in] identifier[utils] . identifier[get_chunks] ( identifier[market_ids] , identifier[chunk_size] )
)) | def iter_list_market_profit_and_loss(self, market_ids, chunk_size, **kwargs):
"""Split call to `list_market_profit_and_loss` into separate requests.
:param list market_ids: List of market IDs
:param int chunk_size: Number of records per chunk
:param dict kwargs: Arguments passed to `list_market_profit_and_loss`
"""
return itertools.chain(*(self.list_market_profit_and_loss(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size))) |
def kill_window(self):
    """Kill the current :class:`Window` object. ``$ tmux kill-window``."""
    # Target the window by "<session_id>:<window_index>".
    target = '-t%s:%s' % (self.get('session_id'), self.index)
    proc = self.cmd('kill-window', target)
    if proc.stderr:
        raise exc.LibTmuxException(proc.stderr)
    self.server._update_windows()
constant[Kill the current :class:`Window` object. ``$ tmux kill-window``.]
variable[proc] assign[=] call[name[self].cmd, parameter[constant[kill-window], binary_operation[constant[-t%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20e961330>, <ast.Attribute object at 0x7da1b2345690>]]]]]
if name[proc].stderr begin[:]
<ast.Raise object at 0x7da1b2344100>
call[name[self].server._update_windows, parameter[]] | keyword[def] identifier[kill_window] ( identifier[self] ):
literal[string]
identifier[proc] = identifier[self] . identifier[cmd] (
literal[string] ,
literal[string] %( identifier[self] . identifier[get] ( literal[string] ), identifier[self] . identifier[index] ),
)
keyword[if] identifier[proc] . identifier[stderr] :
keyword[raise] identifier[exc] . identifier[LibTmuxException] ( identifier[proc] . identifier[stderr] )
identifier[self] . identifier[server] . identifier[_update_windows] () | def kill_window(self):
"""Kill the current :class:`Window` object. ``$ tmux kill-window``."""
# '-t:%s' % self.id
proc = self.cmd('kill-window', '-t%s:%s' % (self.get('session_id'), self.index))
if proc.stderr:
raise exc.LibTmuxException(proc.stderr) # depends on [control=['if'], data=[]]
self.server._update_windows() |
def main():
    """
    $ ls --color=always | ansi2html > directories.html
    $ sudo tail /var/log/messages | ccze -A | ansi2html > logs.html
    $ task burndown | ansi2html > burndown.html
    """
    scheme_names = sorted(six.iterkeys(SCHEME))
    version_str = pkg_resources.get_distribution('ansi2html').version
    parser = optparse.OptionParser(
        usage=main.__doc__,
        version="%%prog %s" % version_str)
    parser.add_option(
        "-p", "--partial", dest="partial",
        default=False, action="store_true",
        help="Process lines as them come in. No headers are produced.")
    parser.add_option(
        "-L", "--latex", dest="latex",
        default=False, action="store_true",
        help="Export as LaTeX instead of HTML.")
    parser.add_option(
        "-i", "--inline", dest="inline",
        default=False, action="store_true",
        help="Inline style without headers or template.")
    parser.add_option(
        "-H", "--headers", dest="headers",
        default=False, action="store_true",
        help="Just produce the <style> tag.")
    parser.add_option(
        "-f", '--font-size', dest='font_size', metavar='SIZE',
        default="normal",
        help="Set the global font size in the output.")
    parser.add_option(
        "-l", '--light-background', dest='light_background',
        default=False, action="store_true",
        help="Set output to 'light background' mode.")
    parser.add_option(
        "-W", '--no-line-wrap', dest='no_line_wrap',
        default=False, action="store_true",
        help="Disable line wrapping.")
    parser.add_option(
        "-a", '--linkify', dest='linkify',
        default=False, action="store_true",
        help="Transform URLs into <a> links.")
    parser.add_option(
        "-u", '--unescape', dest='escaped',
        default=True, action="store_false",
        help="Do not escape XML tags found in the input.")
    parser.add_option(
        "-m", '--markup-lines', dest="markup_lines",
        default=False, action="store_true",
        help="Surround lines with <span id='line-n'>..</span>.")
    parser.add_option(
        '--input-encoding', dest='input_encoding', metavar='ENCODING',
        default='utf-8',
        help="Specify input encoding")
    parser.add_option(
        '--output-encoding', dest='output_encoding', metavar='ENCODING',
        default='utf-8',
        help="Specify output encoding")
    parser.add_option(
        '-s', '--scheme', dest='scheme', metavar='SCHEME',
        default='ansi2html', choices=scheme_names,
        help=("Specify color palette scheme. Default: %%default. Choices: %s"
              % scheme_names))
    parser.add_option(
        '-t', '--title', dest='output_title',
        default='',
        help="Specify output title")

    opts, args = parser.parse_args()

    conv = Ansi2HTMLConverter(
        latex=opts.latex,
        inline=opts.inline,
        dark_bg=not opts.light_background,
        line_wrap=not opts.no_line_wrap,
        font_size=opts.font_size,
        linkify=opts.linkify,
        escaped=opts.escaped,
        markup_lines=opts.markup_lines,
        output_encoding=opts.output_encoding,
        scheme=opts.scheme,
        title=opts.output_title,
    )

    if six.PY3:
        # Re-wrap stdin so the requested input encoding (with 'replace'
        # error handling) is honoured on Python 3.
        try:
            sys.stdin = io.TextIOWrapper(
                sys.stdin.detach(), opts.input_encoding, "replace")
        except io.UnsupportedOperation:
            # This only fails in the test suite...
            pass

    def _read(input_bytes):
        """Decode raw input on Python 2; identity on Python 3 (stdin is text)."""
        if six.PY3:
            return input_bytes
        return input_bytes.decode(opts.input_encoding)

    def _print(output_unicode, end='\n'):
        """Write *output_unicode* to stdout in the requested output encoding."""
        if hasattr(sys.stdout, 'buffer'):
            output_bytes = (output_unicode + end).encode(opts.output_encoding)
            sys.stdout.buffer.write(output_bytes)
        elif not six.PY3:
            sys.stdout.write((output_unicode + end).encode(opts.output_encoding))
        else:
            sys.stdout.write(output_unicode + end)

    # Produce only the headers and quit
    if opts.headers:
        _print(conv.produce_headers(), end='')
        return

    full = not bool(opts.partial or opts.inline)
    # Single code path for both Python versions: _read is the identity on
    # Python 3, so this is equivalent to the formerly duplicated PY2/PY3
    # branches (which differed only in the decode step).
    source_text = six.u("").join(map(_read, sys.stdin.readlines()))
    output = conv.convert(source_text, full=full, ensure_trailing_newline=True)
    _print(output, end='')
constant[
$ ls --color=always | ansi2html > directories.html
$ sudo tail /var/log/messages | ccze -A | ansi2html > logs.html
$ task burndown | ansi2html > burndown.html
]
variable[scheme_names] assign[=] call[name[sorted], parameter[call[name[six].iterkeys, parameter[name[SCHEME]]]]]
variable[version_str] assign[=] call[name[pkg_resources].get_distribution, parameter[constant[ansi2html]]].version
variable[parser] assign[=] call[name[optparse].OptionParser, parameter[]]
call[name[parser].add_option, parameter[constant[-p], constant[--partial]]]
call[name[parser].add_option, parameter[constant[-L], constant[--latex]]]
call[name[parser].add_option, parameter[constant[-i], constant[--inline]]]
call[name[parser].add_option, parameter[constant[-H], constant[--headers]]]
call[name[parser].add_option, parameter[constant[-f], constant[--font-size]]]
call[name[parser].add_option, parameter[constant[-l], constant[--light-background]]]
call[name[parser].add_option, parameter[constant[-W], constant[--no-line-wrap]]]
call[name[parser].add_option, parameter[constant[-a], constant[--linkify]]]
call[name[parser].add_option, parameter[constant[-u], constant[--unescape]]]
call[name[parser].add_option, parameter[constant[-m], constant[--markup-lines]]]
call[name[parser].add_option, parameter[constant[--input-encoding]]]
call[name[parser].add_option, parameter[constant[--output-encoding]]]
call[name[parser].add_option, parameter[constant[-s], constant[--scheme]]]
call[name[parser].add_option, parameter[constant[-t], constant[--title]]]
<ast.Tuple object at 0x7da1b12bed10> assign[=] call[name[parser].parse_args, parameter[]]
variable[conv] assign[=] call[name[Ansi2HTMLConverter], parameter[]]
if name[six].PY3 begin[:]
<ast.Try object at 0x7da1b12be920>
def function[_read, parameter[input_bytes]]:
if name[six].PY3 begin[:]
return[name[input_bytes]]
def function[_print, parameter[output_unicode, end]]:
if call[name[hasattr], parameter[name[sys].stdout, constant[buffer]]] begin[:]
variable[output_bytes] assign[=] call[binary_operation[name[output_unicode] + name[end]].encode, parameter[name[opts].output_encoding]]
call[name[sys].stdout.buffer.write, parameter[name[output_bytes]]]
if name[opts].headers begin[:]
call[name[_print], parameter[call[name[conv].produce_headers, parameter[]]]]
return[None]
variable[full] assign[=] <ast.UnaryOp object at 0x7da1b1291210>
if name[six].PY3 begin[:]
variable[output] assign[=] call[name[conv].convert, parameter[call[constant[].join, parameter[call[name[sys].stdin.readlines, parameter[]]]]]]
call[name[_print], parameter[name[output]]] | keyword[def] identifier[main] ():
literal[string]
identifier[scheme_names] = identifier[sorted] ( identifier[six] . identifier[iterkeys] ( identifier[SCHEME] ))
identifier[version_str] = identifier[pkg_resources] . identifier[get_distribution] ( literal[string] ). identifier[version]
identifier[parser] = identifier[optparse] . identifier[OptionParser] (
identifier[usage] = identifier[main] . identifier[__doc__] ,
identifier[version] = literal[string] % identifier[version_str] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[True] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = literal[string] , identifier[choices] = identifier[scheme_names] ,
identifier[help] =( literal[string]
% identifier[scheme_names] ))
identifier[parser] . identifier[add_option] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[conv] = identifier[Ansi2HTMLConverter] (
identifier[latex] = identifier[opts] . identifier[latex] ,
identifier[inline] = identifier[opts] . identifier[inline] ,
identifier[dark_bg] = keyword[not] identifier[opts] . identifier[light_background] ,
identifier[line_wrap] = keyword[not] identifier[opts] . identifier[no_line_wrap] ,
identifier[font_size] = identifier[opts] . identifier[font_size] ,
identifier[linkify] = identifier[opts] . identifier[linkify] ,
identifier[escaped] = identifier[opts] . identifier[escaped] ,
identifier[markup_lines] = identifier[opts] . identifier[markup_lines] ,
identifier[output_encoding] = identifier[opts] . identifier[output_encoding] ,
identifier[scheme] = identifier[opts] . identifier[scheme] ,
identifier[title] = identifier[opts] . identifier[output_title] ,
)
keyword[if] identifier[six] . identifier[PY3] :
keyword[try] :
identifier[sys] . identifier[stdin] = identifier[io] . identifier[TextIOWrapper] ( identifier[sys] . identifier[stdin] . identifier[detach] (), identifier[opts] . identifier[input_encoding] , literal[string] )
keyword[except] identifier[io] . identifier[UnsupportedOperation] :
keyword[pass]
keyword[def] identifier[_read] ( identifier[input_bytes] ):
keyword[if] identifier[six] . identifier[PY3] :
keyword[return] identifier[input_bytes]
keyword[else] :
keyword[return] identifier[input_bytes] . identifier[decode] ( identifier[opts] . identifier[input_encoding] )
keyword[def] identifier[_print] ( identifier[output_unicode] , identifier[end] = literal[string] ):
keyword[if] identifier[hasattr] ( identifier[sys] . identifier[stdout] , literal[string] ):
identifier[output_bytes] =( identifier[output_unicode] + identifier[end] ). identifier[encode] ( identifier[opts] . identifier[output_encoding] )
identifier[sys] . identifier[stdout] . identifier[buffer] . identifier[write] ( identifier[output_bytes] )
keyword[elif] keyword[not] identifier[six] . identifier[PY3] :
identifier[sys] . identifier[stdout] . identifier[write] (( identifier[output_unicode] + identifier[end] ). identifier[encode] ( identifier[opts] . identifier[output_encoding] ))
keyword[else] :
identifier[sys] . identifier[stdout] . identifier[write] ( identifier[output_unicode] + identifier[end] )
keyword[if] identifier[opts] . identifier[headers] :
identifier[_print] ( identifier[conv] . identifier[produce_headers] (), identifier[end] = literal[string] )
keyword[return]
identifier[full] = keyword[not] identifier[bool] ( identifier[opts] . identifier[partial] keyword[or] identifier[opts] . identifier[inline] )
keyword[if] identifier[six] . identifier[PY3] :
identifier[output] = identifier[conv] . identifier[convert] ( literal[string] . identifier[join] ( identifier[sys] . identifier[stdin] . identifier[readlines] ()), identifier[full] = identifier[full] , identifier[ensure_trailing_newline] = keyword[True] )
identifier[_print] ( identifier[output] , identifier[end] = literal[string] )
keyword[else] :
identifier[output] = identifier[conv] . identifier[convert] ( identifier[six] . identifier[u] ( literal[string] ). identifier[join] (
identifier[map] ( identifier[_read] , identifier[sys] . identifier[stdin] . identifier[readlines] ())
), identifier[full] = identifier[full] , identifier[ensure_trailing_newline] = keyword[True] )
identifier[_print] ( identifier[output] , identifier[end] = literal[string] ) | def main():
"""
$ ls --color=always | ansi2html > directories.html
$ sudo tail /var/log/messages | ccze -A | ansi2html > logs.html
$ task burndown | ansi2html > burndown.html
"""
scheme_names = sorted(six.iterkeys(SCHEME))
version_str = pkg_resources.get_distribution('ansi2html').version
parser = optparse.OptionParser(usage=main.__doc__, version='%%prog %s' % version_str)
parser.add_option('-p', '--partial', dest='partial', default=False, action='store_true', help='Process lines as them come in. No headers are produced.')
parser.add_option('-L', '--latex', dest='latex', default=False, action='store_true', help='Export as LaTeX instead of HTML.')
parser.add_option('-i', '--inline', dest='inline', default=False, action='store_true', help='Inline style without headers or template.')
parser.add_option('-H', '--headers', dest='headers', default=False, action='store_true', help='Just produce the <style> tag.')
parser.add_option('-f', '--font-size', dest='font_size', metavar='SIZE', default='normal', help='Set the global font size in the output.')
parser.add_option('-l', '--light-background', dest='light_background', default=False, action='store_true', help="Set output to 'light background' mode.")
parser.add_option('-W', '--no-line-wrap', dest='no_line_wrap', default=False, action='store_true', help='Disable line wrapping.')
parser.add_option('-a', '--linkify', dest='linkify', default=False, action='store_true', help='Transform URLs into <a> links.')
parser.add_option('-u', '--unescape', dest='escaped', default=True, action='store_false', help='Do not escape XML tags found in the input.')
parser.add_option('-m', '--markup-lines', dest='markup_lines', default=False, action='store_true', help="Surround lines with <span id='line-n'>..</span>.")
parser.add_option('--input-encoding', dest='input_encoding', metavar='ENCODING', default='utf-8', help='Specify input encoding')
parser.add_option('--output-encoding', dest='output_encoding', metavar='ENCODING', default='utf-8', help='Specify output encoding')
parser.add_option('-s', '--scheme', dest='scheme', metavar='SCHEME', default='ansi2html', choices=scheme_names, help='Specify color palette scheme. Default: %%default. Choices: %s' % scheme_names)
parser.add_option('-t', '--title', dest='output_title', default='', help='Specify output title')
(opts, args) = parser.parse_args()
conv = Ansi2HTMLConverter(latex=opts.latex, inline=opts.inline, dark_bg=not opts.light_background, line_wrap=not opts.no_line_wrap, font_size=opts.font_size, linkify=opts.linkify, escaped=opts.escaped, markup_lines=opts.markup_lines, output_encoding=opts.output_encoding, scheme=opts.scheme, title=opts.output_title)
if six.PY3:
try:
sys.stdin = io.TextIOWrapper(sys.stdin.detach(), opts.input_encoding, 'replace') # depends on [control=['try'], data=[]]
except io.UnsupportedOperation:
# This only fails in the test suite...
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
def _read(input_bytes):
if six.PY3:
return input_bytes # depends on [control=['if'], data=[]]
else:
return input_bytes.decode(opts.input_encoding)
def _print(output_unicode, end='\n'):
if hasattr(sys.stdout, 'buffer'):
output_bytes = (output_unicode + end).encode(opts.output_encoding)
sys.stdout.buffer.write(output_bytes) # depends on [control=['if'], data=[]]
elif not six.PY3:
sys.stdout.write((output_unicode + end).encode(opts.output_encoding)) # depends on [control=['if'], data=[]]
else:
sys.stdout.write(output_unicode + end)
# Produce only the headers and quit
if opts.headers:
_print(conv.produce_headers(), end='')
return # depends on [control=['if'], data=[]]
full = not bool(opts.partial or opts.inline)
if six.PY3:
output = conv.convert(''.join(sys.stdin.readlines()), full=full, ensure_trailing_newline=True)
_print(output, end='') # depends on [control=['if'], data=[]]
else:
output = conv.convert(six.u('').join(map(_read, sys.stdin.readlines())), full=full, ensure_trailing_newline=True)
_print(output, end='') |
def _supplementary_files_download_worker(*args):
"""A worker to download supplementary files.
To be used with multiprocessing.
"""
gsm = args[0][0]
download_sra = args[0][1]
email = args[0][2]
dirpath = args[0][3]
sra_kwargs = args[0][4]
return (gsm.get_accession(), gsm.download_supplementary_files(
directory=dirpath,
download_sra=download_sra,
email=email, **sra_kwargs)) | def function[_supplementary_files_download_worker, parameter[]]:
constant[A worker to download supplementary files.
To be used with multiprocessing.
]
variable[gsm] assign[=] call[call[name[args]][constant[0]]][constant[0]]
variable[download_sra] assign[=] call[call[name[args]][constant[0]]][constant[1]]
variable[email] assign[=] call[call[name[args]][constant[0]]][constant[2]]
variable[dirpath] assign[=] call[call[name[args]][constant[0]]][constant[3]]
variable[sra_kwargs] assign[=] call[call[name[args]][constant[0]]][constant[4]]
return[tuple[[<ast.Call object at 0x7da1b0889450>, <ast.Call object at 0x7da1b088aa70>]]] | keyword[def] identifier[_supplementary_files_download_worker] (* identifier[args] ):
literal[string]
identifier[gsm] = identifier[args] [ literal[int] ][ literal[int] ]
identifier[download_sra] = identifier[args] [ literal[int] ][ literal[int] ]
identifier[email] = identifier[args] [ literal[int] ][ literal[int] ]
identifier[dirpath] = identifier[args] [ literal[int] ][ literal[int] ]
identifier[sra_kwargs] = identifier[args] [ literal[int] ][ literal[int] ]
keyword[return] ( identifier[gsm] . identifier[get_accession] (), identifier[gsm] . identifier[download_supplementary_files] (
identifier[directory] = identifier[dirpath] ,
identifier[download_sra] = identifier[download_sra] ,
identifier[email] = identifier[email] ,** identifier[sra_kwargs] )) | def _supplementary_files_download_worker(*args):
"""A worker to download supplementary files.
To be used with multiprocessing.
"""
gsm = args[0][0]
download_sra = args[0][1]
email = args[0][2]
dirpath = args[0][3]
sra_kwargs = args[0][4]
return (gsm.get_accession(), gsm.download_supplementary_files(directory=dirpath, download_sra=download_sra, email=email, **sra_kwargs)) |
def transmit(self, payload, **kwargs):
    """
    Transmit content metadata items to the integrated channel.
    """
    create_items, update_items, delete_items, channel_map = \
        self._partition_items(payload)
    # Preserve the delete -> create -> update transmission order.
    self._transmit_delete(delete_items)
    self._transmit_create(create_items)
    self._transmit_update(update_items, channel_map)
constant[
Transmit content metadata items to the integrated channel.
]
<ast.Tuple object at 0x7da1b0124eb0> assign[=] call[name[self]._partition_items, parameter[name[payload]]]
call[name[self]._transmit_delete, parameter[name[items_to_delete]]]
call[name[self]._transmit_create, parameter[name[items_to_create]]]
call[name[self]._transmit_update, parameter[name[items_to_update], name[transmission_map]]] | keyword[def] identifier[transmit] ( identifier[self] , identifier[payload] ,** identifier[kwargs] ):
literal[string]
identifier[items_to_create] , identifier[items_to_update] , identifier[items_to_delete] , identifier[transmission_map] = identifier[self] . identifier[_partition_items] ( identifier[payload] )
identifier[self] . identifier[_transmit_delete] ( identifier[items_to_delete] )
identifier[self] . identifier[_transmit_create] ( identifier[items_to_create] )
identifier[self] . identifier[_transmit_update] ( identifier[items_to_update] , identifier[transmission_map] ) | def transmit(self, payload, **kwargs):
"""
Transmit content metadata items to the integrated channel.
"""
(items_to_create, items_to_update, items_to_delete, transmission_map) = self._partition_items(payload)
self._transmit_delete(items_to_delete)
self._transmit_create(items_to_create)
self._transmit_update(items_to_update, transmission_map) |
def GetServices():
    """
    Obtains all the connected eDNA services.
    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format.
    # pulKey is an opaque iteration handle: it is filled in by
    # DnaGetServiceEntry and then passed to each DnaGetNextServiceEntry call.
    pulKey = c_ulong(0)
    # Empty filter strings: match any service type, start from the first name.
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    # Output buffers (name/type/status: 30 bytes, description: 90 bytes).
    # A second identical set is used inside the loop so the first entry's
    # buffers are not overwritten while iterating.
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    # Buffer capacities passed to the DLL alongside the matching buffers.
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    # _FormatServices decodes the raw buffers; falsy means no usable entry.
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    # (an empty DataFrame is still returned to the caller in that case).
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
return df | def function[GetServices, parameter[]]:
constant[
Obtains all the connected eDNA services.
:return: A pandas DataFrame of connected eDNA services in the form [Name,
Description, Type, Status]
]
variable[pulKey] assign[=] call[name[c_ulong], parameter[constant[0]]]
variable[szType] assign[=] call[name[c_char_p], parameter[call[constant[].encode, parameter[constant[utf-8]]]]]
variable[szStartSvcName] assign[=] call[name[c_char_p], parameter[call[constant[].encode, parameter[constant[utf-8]]]]]
<ast.Tuple object at 0x7da1b1ff1f30> assign[=] tuple[[<ast.Call object at 0x7da1b1ff0fa0>, <ast.Call object at 0x7da1b1ff14e0>]]
<ast.Tuple object at 0x7da1b1ff10c0> assign[=] tuple[[<ast.Call object at 0x7da1b1ff02b0>, <ast.Call object at 0x7da1b1ff1630>]]
<ast.Tuple object at 0x7da1b1ff2080> assign[=] tuple[[<ast.Call object at 0x7da1b1ff04c0>, <ast.Call object at 0x7da1b1ff0820>]]
<ast.Tuple object at 0x7da1b1ff0520> assign[=] tuple[[<ast.Call object at 0x7da1b1ff0a90>, <ast.Call object at 0x7da18f09c640>]]
<ast.Tuple object at 0x7da18f09e950> assign[=] tuple[[<ast.Call object at 0x7da18f09ef50>, <ast.Call object at 0x7da18f09e410>]]
<ast.Tuple object at 0x7da18f09c9a0> assign[=] tuple[[<ast.Call object at 0x7da18f09fbb0>, <ast.Call object at 0x7da18f09dfc0>]]
variable[services] assign[=] list[[]]
variable[nRet] assign[=] call[name[dna_dll].DnaGetServiceEntry, parameter[name[szType], name[szStartSvcName], call[name[byref], parameter[name[pulKey]]], call[name[byref], parameter[name[szSvcName]]], name[nSvcName], call[name[byref], parameter[name[szSvcDesc]]], name[nSvcDesc], call[name[byref], parameter[name[szSvcType]]], name[nSvcType], call[name[byref], parameter[name[szSvcStat]]], name[nSvcStat]]]
variable[serv] assign[=] call[name[_FormatServices], parameter[name[szSvcName], name[szSvcDesc], name[szSvcType], name[szSvcStat]]]
if name[serv] begin[:]
call[name[services].append, parameter[name[serv]]]
while compare[name[nRet] equal[==] constant[0]] begin[:]
variable[nRet] assign[=] call[name[dna_dll].DnaGetNextServiceEntry, parameter[name[pulKey], call[name[byref], parameter[name[szSvcName2]]], name[nSvcName], call[name[byref], parameter[name[szSvcDesc2]]], name[nSvcDesc], call[name[byref], parameter[name[szSvcType2]]], name[nSvcType], call[name[byref], parameter[name[szSvcStat2]]], name[nSvcStat]]]
variable[serv] assign[=] call[name[_FormatServices], parameter[name[szSvcName2], name[szSvcDesc2], name[szSvcType2], name[szSvcStat2]]]
if name[serv] begin[:]
call[name[services].append, parameter[name[serv]]]
variable[df] assign[=] call[name[pd].DataFrame, parameter[]]
if name[services] begin[:]
variable[df] assign[=] call[name[pd].DataFrame, parameter[name[services]]]
return[name[df]] | keyword[def] identifier[GetServices] ():
literal[string]
identifier[pulKey] = identifier[c_ulong] ( literal[int] )
identifier[szType] = identifier[c_char_p] ( literal[string] . identifier[encode] ( literal[string] ))
identifier[szStartSvcName] = identifier[c_char_p] ( literal[string] . identifier[encode] ( literal[string] ))
identifier[szSvcName] , identifier[szSvcDesc] = identifier[create_string_buffer] ( literal[int] ), identifier[create_string_buffer] ( literal[int] )
identifier[szSvcType] , identifier[szSvcStat] = identifier[create_string_buffer] ( literal[int] ), identifier[create_string_buffer] ( literal[int] )
identifier[szSvcName2] , identifier[szSvcDesc2] = identifier[create_string_buffer] ( literal[int] ), identifier[create_string_buffer] ( literal[int] )
identifier[szSvcType2] , identifier[szSvcStat2] = identifier[create_string_buffer] ( literal[int] ), identifier[create_string_buffer] ( literal[int] )
identifier[nSvcName] , identifier[nSvcDesc] = identifier[c_ushort] ( literal[int] ), identifier[c_ushort] ( literal[int] )
identifier[nSvcType] , identifier[nSvcStat] = identifier[c_ushort] ( literal[int] ), identifier[c_ushort] ( literal[int] )
identifier[services] =[]
identifier[nRet] = identifier[dna_dll] . identifier[DnaGetServiceEntry] ( identifier[szType] , identifier[szStartSvcName] , identifier[byref] ( identifier[pulKey] ),
identifier[byref] ( identifier[szSvcName] ), identifier[nSvcName] , identifier[byref] ( identifier[szSvcDesc] ), identifier[nSvcDesc] ,
identifier[byref] ( identifier[szSvcType] ), identifier[nSvcType] , identifier[byref] ( identifier[szSvcStat] ), identifier[nSvcStat] )
identifier[serv] = identifier[_FormatServices] ( identifier[szSvcName] , identifier[szSvcDesc] , identifier[szSvcType] , identifier[szSvcStat] )
keyword[if] identifier[serv] :
identifier[services] . identifier[append] ( identifier[serv] )
keyword[while] identifier[nRet] == literal[int] :
identifier[nRet] = identifier[dna_dll] . identifier[DnaGetNextServiceEntry] ( identifier[pulKey] ,
identifier[byref] ( identifier[szSvcName2] ), identifier[nSvcName] , identifier[byref] ( identifier[szSvcDesc2] ), identifier[nSvcDesc] ,
identifier[byref] ( identifier[szSvcType2] ), identifier[nSvcType] , identifier[byref] ( identifier[szSvcStat2] ), identifier[nSvcStat] )
identifier[serv] = identifier[_FormatServices] ( identifier[szSvcName2] , identifier[szSvcDesc2] , identifier[szSvcType2] , identifier[szSvcStat2] )
keyword[if] identifier[serv] :
identifier[services] . identifier[append] ( identifier[serv] )
identifier[df] = identifier[pd] . identifier[DataFrame] ()
keyword[if] identifier[services] :
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[services] , identifier[columns] =[ literal[string] , literal[string] , literal[string] ,
literal[string] ])
keyword[else] :
identifier[warnings] . identifier[warn] ( literal[string] +
literal[string] )
keyword[return] identifier[df] | def GetServices():
"""
Obtains all the connected eDNA services.
:return: A pandas DataFrame of connected eDNA services in the form [Name,
Description, Type, Status]
""" # Define all required variables in the correct ctypes format
pulKey = c_ulong(0)
szType = c_char_p(''.encode('utf-8'))
szStartSvcName = c_char_p(''.encode('utf-8'))
(szSvcName, szSvcDesc) = (create_string_buffer(30), create_string_buffer(90))
(szSvcType, szSvcStat) = (create_string_buffer(30), create_string_buffer(30))
(szSvcName2, szSvcDesc2) = (create_string_buffer(30), create_string_buffer(90))
(szSvcType2, szSvcStat2) = (create_string_buffer(30), create_string_buffer(30))
(nSvcName, nSvcDesc) = (c_ushort(30), c_ushort(90))
(nSvcType, nSvcStat) = (c_ushort(30), c_ushort(30)) # Call the eDNA function. nRet is zero if the function is successful.
services = []
nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey), byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc, byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
if serv:
services.append(serv) # depends on [control=['if'], data=[]] # Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextServiceEntry(pulKey, byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc, byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat) # We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
if serv:
services.append(serv) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['nRet']] # If no results were returned, raise a warning
df = pd.DataFrame()
if services:
df = pd.DataFrame(services, columns=['Name', 'Description', 'Type', 'Status']) # depends on [control=['if'], data=[]]
else:
warnings.warn('WARNING- No connected eDNA services detected. Check ' + 'your DNASys.ini file and your network connection.')
return df |
def hasmethod(obj, meth):
    """
    Checks if an object, obj, has a callable method, meth.

    :param obj: object to inspect
    :param meth: attribute name (string) to look up on *obj*
    :return: True if ``getattr(obj, meth)`` exists and is callable,
        False otherwise
    """
    # getattr with a default folds the hasattr check and the lookup into a
    # single attribute access; a missing attribute yields None, which is
    # not callable, so the result is False exactly as before.
    return callable(getattr(obj, meth, None))
constant[
Checks if an object, obj, has a callable method, meth
return True or False
]
if call[name[hasattr], parameter[name[obj], name[meth]]] begin[:]
return[call[name[callable], parameter[call[name[getattr], parameter[name[obj], name[meth]]]]]]
return[constant[False]] | keyword[def] identifier[hasmethod] ( identifier[obj] , identifier[meth] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[obj] , identifier[meth] ):
keyword[return] identifier[callable] ( identifier[getattr] ( identifier[obj] , identifier[meth] ))
keyword[return] keyword[False] | def hasmethod(obj, meth):
"""
Checks if an object, obj, has a callable method, meth
return True or False
"""
if hasattr(obj, meth):
return callable(getattr(obj, meth)) # depends on [control=['if'], data=[]]
return False |
def reduce(self, body):
    '''
    Remove nodes from *body* in place.

    Each statement is visited in order; when ``self.visit`` returns a
    truthy value the statement is deleted immediately.  A manual cursor is
    used (rather than a ``for`` loop) because the list shrinks while it is
    being scanned.
    '''
    cursor = 0
    while cursor < len(body):
        if self.visit(body[cursor]):
            del body[cursor]
        else:
            cursor += 1
constant[
remove nodes from a list
]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] call[name[len], parameter[name[body]]]] begin[:]
variable[stmnt] assign[=] call[name[body]][name[i]]
if call[name[self].visit, parameter[name[stmnt]]] begin[:]
call[name[body].pop, parameter[name[i]]] | keyword[def] identifier[reduce] ( identifier[self] , identifier[body] ):
literal[string]
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[body] ):
identifier[stmnt] = identifier[body] [ identifier[i] ]
keyword[if] identifier[self] . identifier[visit] ( identifier[stmnt] ):
identifier[body] . identifier[pop] ( identifier[i] )
keyword[else] :
identifier[i] += literal[int] | def reduce(self, body):
"""
remove nodes from a list
"""
i = 0
while i < len(body):
stmnt = body[i]
if self.visit(stmnt):
body.pop(i) # depends on [control=['if'], data=[]]
else:
i += 1 # depends on [control=['while'], data=['i']] |
def flip_constraint(self, twig=None, solve_for=None, **kwargs):
    """
    Flip an existing constraint to solve for a different parameter
    :parameter str twig: twig to filter the constraint
    :parameter solve_for: twig or actual parameter object of the new
        parameter which this constraint should constraint (solve for).
        If None, the matching constraint parameter is returned unchanged.
    :type solve_for: str or :class:`phoebe.parameters.parameters.Parameter
    :parameter **kwargs: any other tags to do the filter
        (except twig or context); ``check_nan=False`` skips the nan guard.
    :return: the (possibly flipped) constraint parameter
    :raises ValueError: if any variable involved in the constraint is
        currently nan (unless ``check_nan=False``)
    """
    # 'check_nan' is consumed locally below, so let it pass the generic
    # kwargs validation.
    self._kwargs_checks(kwargs, additional_allowed_keys=['check_nan'])
    kwargs['twig'] = twig
    # Snapshot the filter kwargs for the undo/redo history entries before
    # they are extended with 'solve_for' below.
    redo_kwargs = deepcopy(kwargs)
    undo_kwargs = deepcopy(kwargs)
    # Run any delayed constraints first so the nan check sees up-to-date
    # values.
    changed_params = self.run_delayed_constraints()
    param = self.get_constraint(**kwargs)
    # Refuse to flip while any involved variable is nan: flipping would
    # propagate nan into the newly constrained parameter.
    if kwargs.pop('check_nan', True) and np.any(np.isnan([p.get_value() for p in param.vars.to_list()])):
        raise ValueError("cannot flip constraint while the value of {} is nan".format([p.twig for p in param.vars.to_list() if np.isnan(p.get_value())]))
    # With no solve_for this behaves as a getter for the constraint.
    if solve_for is None:
        return param
    if isinstance(solve_for, Parameter):
        solve_for = solve_for.uniquetwig
    redo_kwargs['solve_for'] = solve_for
    # Undo must flip back to the parameter that was constrained *before*
    # this flip, so record it now.
    undo_kwargs['solve_for'] = param.constrained_parameter.uniquetwig
    logger.info("flipping constraint '{}' to solve for '{}'".format(param.uniquetwig, solve_for))
    param.flip_for(solve_for)
    # Re-run the constraint immediately so the newly constrained parameter
    # gets a consistent value.
    result = self.run_constraint(uniqueid=param.uniqueid, skip_kwargs_checks=True)
    self._add_history(redo_func='flip_constraint',
                      redo_kwargs=redo_kwargs,
                      undo_func='flip_constraint',
                      undo_kwargs=undo_kwargs)
return param | def function[flip_constraint, parameter[self, twig, solve_for]]:
constant[
Flip an existing constraint to solve for a different parameter
:parameter str twig: twig to filter the constraint
:parameter solve_for: twig or actual parameter object of the new
parameter which this constraint should constraint (solve for).
:type solve_for: str or :class:`phoebe.parameters.parameters.Parameter
:parameter **kwargs: any other tags to do the filter
(except twig or context)
]
call[name[self]._kwargs_checks, parameter[name[kwargs]]]
call[name[kwargs]][constant[twig]] assign[=] name[twig]
variable[redo_kwargs] assign[=] call[name[deepcopy], parameter[name[kwargs]]]
variable[undo_kwargs] assign[=] call[name[deepcopy], parameter[name[kwargs]]]
variable[changed_params] assign[=] call[name[self].run_delayed_constraints, parameter[]]
variable[param] assign[=] call[name[self].get_constraint, parameter[]]
if <ast.BoolOp object at 0x7da18bc71060> begin[:]
<ast.Raise object at 0x7da18bc73a00>
if compare[name[solve_for] is constant[None]] begin[:]
return[name[param]]
if call[name[isinstance], parameter[name[solve_for], name[Parameter]]] begin[:]
variable[solve_for] assign[=] name[solve_for].uniquetwig
call[name[redo_kwargs]][constant[solve_for]] assign[=] name[solve_for]
call[name[undo_kwargs]][constant[solve_for]] assign[=] name[param].constrained_parameter.uniquetwig
call[name[logger].info, parameter[call[constant[flipping constraint '{}' to solve for '{}'].format, parameter[name[param].uniquetwig, name[solve_for]]]]]
call[name[param].flip_for, parameter[name[solve_for]]]
variable[result] assign[=] call[name[self].run_constraint, parameter[]]
call[name[self]._add_history, parameter[]]
return[name[param]] | keyword[def] identifier[flip_constraint] ( identifier[self] , identifier[twig] = keyword[None] , identifier[solve_for] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_kwargs_checks] ( identifier[kwargs] , identifier[additional_allowed_keys] =[ literal[string] ])
identifier[kwargs] [ literal[string] ]= identifier[twig]
identifier[redo_kwargs] = identifier[deepcopy] ( identifier[kwargs] )
identifier[undo_kwargs] = identifier[deepcopy] ( identifier[kwargs] )
identifier[changed_params] = identifier[self] . identifier[run_delayed_constraints] ()
identifier[param] = identifier[self] . identifier[get_constraint] (** identifier[kwargs] )
keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] ) keyword[and] identifier[np] . identifier[any] ( identifier[np] . identifier[isnan] ([ identifier[p] . identifier[get_value] () keyword[for] identifier[p] keyword[in] identifier[param] . identifier[vars] . identifier[to_list] ()])):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ([ identifier[p] . identifier[twig] keyword[for] identifier[p] keyword[in] identifier[param] . identifier[vars] . identifier[to_list] () keyword[if] identifier[np] . identifier[isnan] ( identifier[p] . identifier[get_value] ())]))
keyword[if] identifier[solve_for] keyword[is] keyword[None] :
keyword[return] identifier[param]
keyword[if] identifier[isinstance] ( identifier[solve_for] , identifier[Parameter] ):
identifier[solve_for] = identifier[solve_for] . identifier[uniquetwig]
identifier[redo_kwargs] [ literal[string] ]= identifier[solve_for]
identifier[undo_kwargs] [ literal[string] ]= identifier[param] . identifier[constrained_parameter] . identifier[uniquetwig]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[param] . identifier[uniquetwig] , identifier[solve_for] ))
identifier[param] . identifier[flip_for] ( identifier[solve_for] )
identifier[result] = identifier[self] . identifier[run_constraint] ( identifier[uniqueid] = identifier[param] . identifier[uniqueid] , identifier[skip_kwargs_checks] = keyword[True] )
identifier[self] . identifier[_add_history] ( identifier[redo_func] = literal[string] ,
identifier[redo_kwargs] = identifier[redo_kwargs] ,
identifier[undo_func] = literal[string] ,
identifier[undo_kwargs] = identifier[undo_kwargs] )
keyword[return] identifier[param] | def flip_constraint(self, twig=None, solve_for=None, **kwargs):
"""
Flip an existing constraint to solve for a different parameter
:parameter str twig: twig to filter the constraint
:parameter solve_for: twig or actual parameter object of the new
parameter which this constraint should constraint (solve for).
:type solve_for: str or :class:`phoebe.parameters.parameters.Parameter
:parameter **kwargs: any other tags to do the filter
(except twig or context)
"""
self._kwargs_checks(kwargs, additional_allowed_keys=['check_nan'])
kwargs['twig'] = twig
redo_kwargs = deepcopy(kwargs)
undo_kwargs = deepcopy(kwargs)
changed_params = self.run_delayed_constraints()
param = self.get_constraint(**kwargs)
if kwargs.pop('check_nan', True) and np.any(np.isnan([p.get_value() for p in param.vars.to_list()])):
raise ValueError('cannot flip constraint while the value of {} is nan'.format([p.twig for p in param.vars.to_list() if np.isnan(p.get_value())])) # depends on [control=['if'], data=[]]
if solve_for is None:
return param # depends on [control=['if'], data=[]]
if isinstance(solve_for, Parameter):
solve_for = solve_for.uniquetwig # depends on [control=['if'], data=[]]
redo_kwargs['solve_for'] = solve_for
undo_kwargs['solve_for'] = param.constrained_parameter.uniquetwig
logger.info("flipping constraint '{}' to solve for '{}'".format(param.uniquetwig, solve_for))
param.flip_for(solve_for)
result = self.run_constraint(uniqueid=param.uniqueid, skip_kwargs_checks=True)
self._add_history(redo_func='flip_constraint', redo_kwargs=redo_kwargs, undo_func='flip_constraint', undo_kwargs=undo_kwargs)
return param |
def print_ipython(self):
    """
    Renders the javascript table to a jupyter/ipython notebook cell.

    Usage example:
        >>> t = Table(ind)
        >>> t.print_ipython()
        ... renders the table to notebook cell
    """
    from IPython.display import display, HTML
    self._listen()
    # Remove any stale copy of the viz assets before copying; a missing
    # directory (or any removal failure) is ignored, matching the previous
    # best-effort behaviour but without a bare except.
    shutil.rmtree('viz', ignore_errors=True)
    shutil.copytree(self.html_path, 'viz')
    pth = "viz/index.html"
    # Use a context manager so the file handle is closed deterministically.
    with open(pth) as fh:
        html = fh.read()
    # Point the client-side code at the local server started by _listen().
    html = html.replace("__SERVER_DATA__", '"http://localhost:' + str(self.port) + '"')
    display(HTML(html))
constant[
Renders the javascript table to a jupyter/ipython notebook cell
Usage example:
>>> t = Table(ind)
>>> t.print_ipython()
... renders the table to notebook cell
]
from relative_module[IPython.display] import module[display], module[HTML]
call[name[self]._listen, parameter[]]
<ast.Try object at 0x7da18dc06800>
call[name[shutil].copytree, parameter[name[self].html_path, constant[viz]]]
variable[pth] assign[=] constant[viz/index.html]
variable[html] assign[=] call[call[name[open], parameter[name[pth]]].read, parameter[]]
variable[html] assign[=] call[name[html].replace, parameter[constant[__SERVER_DATA__], binary_operation[binary_operation[constant["http://localhost:] + call[name[str], parameter[name[self].port]]] + constant["]]]]
call[name[display], parameter[call[name[HTML], parameter[name[html]]]]] | keyword[def] identifier[print_ipython] ( identifier[self] ):
literal[string]
keyword[from] identifier[IPython] . identifier[display] keyword[import] identifier[display] , identifier[HTML]
identifier[self] . identifier[_listen] ()
keyword[try] : identifier[shutil] . identifier[rmtree] ( literal[string] )
keyword[except] : keyword[None]
identifier[shutil] . identifier[copytree] ( identifier[self] . identifier[html_path] , literal[string] )
identifier[pth] = literal[string]
identifier[html] = identifier[open] ( identifier[pth] ). identifier[read] ()
identifier[html] = identifier[html] . identifier[replace] ( literal[string] , literal[string] + identifier[str] ( identifier[self] . identifier[port] )+ literal[string] )
identifier[display] ( identifier[HTML] ( identifier[html] )) | def print_ipython(self):
"""
Renders the javascript table to a jupyter/ipython notebook cell
Usage example:
>>> t = Table(ind)
>>> t.print_ipython()
... renders the table to notebook cell
"""
from IPython.display import display, HTML
self._listen()
try:
shutil.rmtree('viz') # depends on [control=['try'], data=[]]
except:
None # depends on [control=['except'], data=[]]
shutil.copytree(self.html_path, 'viz')
pth = 'viz/index.html'
html = open(pth).read()
html = html.replace('__SERVER_DATA__', '"http://localhost:' + str(self.port) + '"')
display(HTML(html)) |
def post_series_publish(self, id, **data):
    """
    POST /series/:id/publish/

    Publishes a repeating event series and all of its occurrences that are
    not already canceled or deleted. Once a date is cancelled it can still
    be uncancelled and can be viewed by the public; a deleted date cannot
    be undeleted and cannot be viewed by the public.

    For the publish to be permitted, the event must have all necessary
    information, including a name and description, an organizer, at least
    one ticket, and valid payment options. This API endpoint will return
    argument errors for event fields that fail to validate the publish
    requirements, and a boolean indicating success or failure of the
    publish otherwise.

    Possible field errors:
    field_error event.name MISSING -- a name is required to publish.
    field_error event.start MISSING -- a start date is required to publish.
    field_error event.end MISSING -- an end date is required to publish.
    field_error event.start.timezone MISSING -- start and end dates must
        have matching time zones.
    field_error event.organizer MISSING -- an organizer is required.
    field_error event.currency MISSING -- a currency is required.
    field_error event.currency INVALID -- the currency must be valid.
    field_error event.tickets MISSING -- at least one ticket is required.
    field_error event.tickets.N.name MISSING -- all tickets must have
        names (N is the offending ticket class ID).
    field_error event.tickets.N.quantity_total MISSING -- all non-donation
        tickets must have an available quantity (N is the ticket class ID).
    field_error event.tickets.N.cost MISSING -- all non-donation tickets
        must have a cost, which can be ``0.00`` for free tickets (N is the
        ticket class ID).
    .. _unpublish-series-by-id:
    """
    endpoint = "/series/{0}/publish/".format(id)
    return self.post(endpoint, data=data)
constant[
POST /series/:id/publish/
Publishes a repeating event series and all of its occurrences that are not already canceled or deleted. Once a date is cancelled it can still be uncancelled and can be viewed by the public. A deleted date cannot be undeleted and cannot by viewed by the public. In order for
publish to be permitted, the event must have all necessary information, including a name and description, an organizer,
at least one ticket, and valid payment options. This API endpoint will return argument errors for event fields that
fail to validate the publish requirements. Returns a boolean indicating success or failure of the publish.
field_error event.name MISSING
Your event must have a name to be published.
field_error event.start MISSING
Your event must have a start date to be published.
field_error event.end MISSING
Your event must have an end date to be published.
field_error event.start.timezone MISSING
Your event start and end dates must have matching time zones to be published.
field_error event.organizer MISSING
Your event must have an organizer to be published.
field_error event.currency MISSING
Your event must have a currency to be published.
field_error event.currency INVALID
Your event must have a valid currency to be published.
field_error event.tickets MISSING
Your event must have at least one ticket to be published.
field_error event.tickets.N.name MISSING
All tickets must have names in order for your event to be published. The N will be the ticket class ID with the
error.
field_error event.tickets.N.quantity_total MISSING
All non-donation tickets must have an available quantity value in order for your event to be published. The N
will be the ticket class ID with the error.
field_error event.tickets.N.cost MISSING
All non-donation tickets must have a cost (which can be ``0.00`` for free tickets) in order for your event to
be published. The N will be the ticket class ID with the error.
.. _unpublish-series-by-id:
]
return[call[name[self].post, parameter[call[constant[/series/{0}/publish/].format, parameter[name[id]]]]]] | keyword[def] identifier[post_series_publish] ( identifier[self] , identifier[id] ,** identifier[data] ):
literal[string]
keyword[return] identifier[self] . identifier[post] ( literal[string] . identifier[format] ( identifier[id] ), identifier[data] = identifier[data] ) | def post_series_publish(self, id, **data):
"""
POST /series/:id/publish/
Publishes a repeating event series and all of its occurrences that are not already canceled or deleted. Once a date is cancelled it can still be uncancelled and can be viewed by the public. A deleted date cannot be undeleted and cannot by viewed by the public. In order for
publish to be permitted, the event must have all necessary information, including a name and description, an organizer,
at least one ticket, and valid payment options. This API endpoint will return argument errors for event fields that
fail to validate the publish requirements. Returns a boolean indicating success or failure of the publish.
field_error event.name MISSING
Your event must have a name to be published.
field_error event.start MISSING
Your event must have a start date to be published.
field_error event.end MISSING
Your event must have an end date to be published.
field_error event.start.timezone MISSING
Your event start and end dates must have matching time zones to be published.
field_error event.organizer MISSING
Your event must have an organizer to be published.
field_error event.currency MISSING
Your event must have a currency to be published.
field_error event.currency INVALID
Your event must have a valid currency to be published.
field_error event.tickets MISSING
Your event must have at least one ticket to be published.
field_error event.tickets.N.name MISSING
All tickets must have names in order for your event to be published. The N will be the ticket class ID with the
error.
field_error event.tickets.N.quantity_total MISSING
All non-donation tickets must have an available quantity value in order for your event to be published. The N
will be the ticket class ID with the error.
field_error event.tickets.N.cost MISSING
All non-donation tickets must have a cost (which can be ``0.00`` for free tickets) in order for your event to
be published. The N will be the ticket class ID with the error.
.. _unpublish-series-by-id:
"""
return self.post('/series/{0}/publish/'.format(id), data=data) |
def dispatk(keyer):
    """This is the decorator for the generic function and it accepts
    only one argument *keyer*, it'll be called with the same arguments
    of the function call and it must return an hashable object
    (int, tuple, etc.).
    The generic function has a *register* method used to decorate the
    function for some specific keys.
    *register* accepts one or more keys and returns the decorated
    function.
    """
    # Maps key -> specialized implementation; one table per decorated
    # generic function.
    calls = {}
    def _dispatk(main):
        def register(*keys):
            def _register(spec):
                for key in keys:
                    if key in calls:
                        # BUGFIX: the original format string had a single
                        # %r placeholder but a two-item tuple, so this
                        # raise crashed with a TypeError instead of the
                        # intended ValueError.
                        raise ValueError(
                            "%s: function already registered for key %r"
                            % (main.__name__, key))
                    calls[key] = spec
                return spec
            return _register
        @wraps(main)
        def run(*args, **kwargs):
            # Dispatch on the computed key; fall back to the generic
            # implementation when no specialization is registered.
            return calls.get(keyer(*args, **kwargs), main)(*args, **kwargs)
        run.register = register
        return run
    return _dispatk
constant[This is the decorator for the generic function and it accepts
only one argument *keyer*, it'll be called with the same arguments
of the function call and it must return an hashable object
(int, tuple, etc.).
Rhe generic function has a *register* method used to decorate the
function for some specific keys.
*register* accepts one or more keys and returns the decorated
function.
]
variable[calls] assign[=] dictionary[[], []]
def function[_dispatk, parameter[main]]:
def function[register, parameter[]]:
def function[_register, parameter[spec]]:
for taget[name[key]] in starred[name[keys]] begin[:]
if compare[name[key] in name[calls]] begin[:]
<ast.Raise object at 0x7da2044c25f0>
call[name[calls]][name[key]] assign[=] name[spec]
return[name[spec]]
return[name[_register]]
def function[run, parameter[]]:
return[call[call[name[calls].get, parameter[call[name[keyer], parameter[<ast.Starred object at 0x7da20c6abca0>]], name[main]]], parameter[<ast.Starred object at 0x7da20c6a8c40>]]]
name[run].register assign[=] name[register]
return[name[run]]
return[name[_dispatk]] | keyword[def] identifier[dispatk] ( identifier[keyer] ):
literal[string]
identifier[calls] ={}
keyword[def] identifier[_dispatk] ( identifier[main] ):
keyword[def] identifier[register] (* identifier[keys] ):
keyword[def] identifier[_register] ( identifier[spec] ):
keyword[for] identifier[key] keyword[in] identifier[keys] :
keyword[if] identifier[key] keyword[in] identifier[calls] :
keyword[raise] identifier[ValueError] (
literal[string]
%( identifier[main] . identifier[__name__] , identifier[key] ))
identifier[calls] [ identifier[key] ]= identifier[spec]
keyword[return] identifier[spec]
keyword[return] identifier[_register]
@ identifier[wraps] ( identifier[main] )
keyword[def] identifier[run] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[calls] . identifier[get] ( identifier[keyer] (* identifier[args] ,** identifier[kwargs] ), identifier[main] )(* identifier[args] ,** identifier[kwargs] )
identifier[run] . identifier[register] = identifier[register]
keyword[return] identifier[run]
keyword[return] identifier[_dispatk] | def dispatk(keyer):
"""This is the decorator for the generic function and it accepts
only one argument *keyer*, it'll be called with the same arguments
of the function call and it must return an hashable object
(int, tuple, etc.).
Rhe generic function has a *register* method used to decorate the
function for some specific keys.
*register* accepts one or more keys and returns the decorated
function.
"""
calls = {}
def _dispatk(main):
def register(*keys):
def _register(spec):
for key in keys:
if key in calls:
raise ValueError('function already registered for %r' % (main.__name__, key)) # depends on [control=['if'], data=['key']]
calls[key] = spec # depends on [control=['for'], data=['key']]
return spec
return _register
@wraps(main)
def run(*args, **kwargs):
return calls.get(keyer(*args, **kwargs), main)(*args, **kwargs)
run.register = register
return run
return _dispatk |
def querying_context(self, packet_type):
    """ Context manager for querying.
    Sets state to TDS_QUERYING, and reverts it to TDS_IDLE if exception happens inside managed block,
    and to TDS_PENDING if managed block succeeds and flushes buffer.
    """
    # set_state returns the state actually reached; anything other than
    # TDS_QUERYING means the transition was refused by the state machine.
    if self.set_state(tds_base.TDS_QUERYING) != tds_base.TDS_QUERYING:
        raise tds_base.Error("Couldn't switch to state")
    # Begin the outgoing request packet before handing control to the caller.
    self._writer.begin_packet(packet_type)
    try:
        yield
    except:
        # Bare except is deliberate: any exception raised inside the managed
        # block (including BaseException subclasses) must roll the connection
        # back to IDLE before being re-raised — unless it is already DEAD.
        if self.state != tds_base.TDS_DEAD:
            self.set_state(tds_base.TDS_IDLE)
        raise
    else:
        # Success path: responses are now pending from the server, so flush
        # the request packet only after the state transition.
        self.set_state(tds_base.TDS_PENDING)
        self._writer.flush()
constant[ Context manager for querying.
Sets state to TDS_QUERYING, and reverts it to TDS_IDLE if exception happens inside managed block,
and to TDS_PENDING if managed block succeeds and flushes buffer.
]
if compare[call[name[self].set_state, parameter[name[tds_base].TDS_QUERYING]] not_equal[!=] name[tds_base].TDS_QUERYING] begin[:]
<ast.Raise object at 0x7da1b0578580>
call[name[self]._writer.begin_packet, parameter[name[packet_type]]]
<ast.Try object at 0x7da1b05795a0> | keyword[def] identifier[querying_context] ( identifier[self] , identifier[packet_type] ):
literal[string]
keyword[if] identifier[self] . identifier[set_state] ( identifier[tds_base] . identifier[TDS_QUERYING] )!= identifier[tds_base] . identifier[TDS_QUERYING] :
keyword[raise] identifier[tds_base] . identifier[Error] ( literal[string] )
identifier[self] . identifier[_writer] . identifier[begin_packet] ( identifier[packet_type] )
keyword[try] :
keyword[yield]
keyword[except] :
keyword[if] identifier[self] . identifier[state] != identifier[tds_base] . identifier[TDS_DEAD] :
identifier[self] . identifier[set_state] ( identifier[tds_base] . identifier[TDS_IDLE] )
keyword[raise]
keyword[else] :
identifier[self] . identifier[set_state] ( identifier[tds_base] . identifier[TDS_PENDING] )
identifier[self] . identifier[_writer] . identifier[flush] () | def querying_context(self, packet_type):
""" Context manager for querying.
Sets state to TDS_QUERYING, and reverts it to TDS_IDLE if exception happens inside managed block,
and to TDS_PENDING if managed block succeeds and flushes buffer.
"""
if self.set_state(tds_base.TDS_QUERYING) != tds_base.TDS_QUERYING:
raise tds_base.Error("Couldn't switch to state") # depends on [control=['if'], data=[]]
self._writer.begin_packet(packet_type)
try:
yield # depends on [control=['try'], data=[]]
except:
if self.state != tds_base.TDS_DEAD:
self.set_state(tds_base.TDS_IDLE) # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=[]]
else:
self.set_state(tds_base.TDS_PENDING)
self._writer.flush() |
def count_scts_in_sct_extension(certificate: cryptography.x509.Certificate) -> Optional[int]:
    """Return the number of Signed Certificate Timestamps (SCTs) embedded in the certificate.

    Returns 0 when the certificate has no SCT extension, and None when the
    extension is present but the local OpenSSL is too old to parse it.
    """
    try:
        sct_ext = certificate.extensions.get_extension_for_oid(
            ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS
        )
    except ExtensionNotFound:
        # No SCT extension at all: the certificate embeds zero SCTs.
        return 0

    if isinstance(sct_ext.value, cryptography.x509.UnrecognizedExtension):
        # The version of OpenSSL on the system is too old and can't parse
        # the SCT extension.
        return None

    # Each entry in the parsed extension is one SCT.
    return len(sct_ext.value)
constant[Return the number of Signed Certificate Timestamps (SCTs) embedded in the certificate.
]
variable[scts_count] assign[=] constant[0]
<ast.Try object at 0x7da1b18ca9e0>
return[name[scts_count]] | keyword[def] identifier[count_scts_in_sct_extension] ( identifier[certificate] : identifier[cryptography] . identifier[x509] . identifier[Certificate] )-> identifier[Optional] [ identifier[int] ]:
literal[string]
identifier[scts_count] = literal[int]
keyword[try] :
identifier[sct_ext] = identifier[certificate] . identifier[extensions] . identifier[get_extension_for_oid] (
identifier[ExtensionOID] . identifier[PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS]
)
keyword[if] identifier[isinstance] ( identifier[sct_ext] . identifier[value] , identifier[cryptography] . identifier[x509] . identifier[UnrecognizedExtension] ):
keyword[return] keyword[None]
identifier[scts_count] = identifier[len] ( identifier[sct_ext] . identifier[value] )
keyword[except] identifier[ExtensionNotFound] :
keyword[pass]
keyword[return] identifier[scts_count] | def count_scts_in_sct_extension(certificate: cryptography.x509.Certificate) -> Optional[int]:
"""Return the number of Signed Certificate Timestamps (SCTs) embedded in the certificate.
"""
scts_count = 0
try:
# Look for the x509 extension
sct_ext = certificate.extensions.get_extension_for_oid(ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS)
if isinstance(sct_ext.value, cryptography.x509.UnrecognizedExtension):
# The version of OpenSSL on the system is too old and can't parse the SCT extension
return None # depends on [control=['if'], data=[]]
# Count the number of entries in the extension
scts_count = len(sct_ext.value) # depends on [control=['try'], data=[]]
except ExtensionNotFound:
pass # depends on [control=['except'], data=[]]
return scts_count |
def show_url(context, **kwargs):
    """Return the show feed URL with different protocol."""
    if len(kwargs) != 2:
        raise TemplateSyntaxError(_('"show_url" tag takes exactly two keyword arguments.'))
    # Build the absolute feed URL for the current site, then swap the scheme.
    site = get_current_site(context['request'])
    absolute_url = add_domain(site.domain, kwargs['url'])
    replacement = '{0}://'.format(kwargs['protocol'])
    return re.sub(r'https?:\/\/', replacement, absolute_url)
constant[Return the show feed URL with different protocol.]
if compare[call[name[len], parameter[name[kwargs]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da18c4cc7c0>
variable[request] assign[=] call[name[context]][constant[request]]
variable[current_site] assign[=] call[name[get_current_site], parameter[name[request]]]
variable[url] assign[=] call[name[add_domain], parameter[name[current_site].domain, call[name[kwargs]][constant[url]]]]
return[call[name[re].sub, parameter[constant[https?:\/\/], binary_operation[constant[%s://] <ast.Mod object at 0x7da2590d6920> call[name[kwargs]][constant[protocol]]], name[url]]]] | keyword[def] identifier[show_url] ( identifier[context] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ( identifier[kwargs] )!= literal[int] :
keyword[raise] identifier[TemplateSyntaxError] ( identifier[_] ( literal[string] ))
identifier[request] = identifier[context] [ literal[string] ]
identifier[current_site] = identifier[get_current_site] ( identifier[request] )
identifier[url] = identifier[add_domain] ( identifier[current_site] . identifier[domain] , identifier[kwargs] [ literal[string] ])
keyword[return] identifier[re] . identifier[sub] ( literal[string] , literal[string] % identifier[kwargs] [ literal[string] ], identifier[url] ) | def show_url(context, **kwargs):
"""Return the show feed URL with different protocol."""
if len(kwargs) != 2:
raise TemplateSyntaxError(_('"show_url" tag takes exactly two keyword arguments.')) # depends on [control=['if'], data=[]]
request = context['request']
current_site = get_current_site(request)
url = add_domain(current_site.domain, kwargs['url'])
return re.sub('https?:\\/\\/', '%s://' % kwargs['protocol'], url) |
def hist_bins(self, channels=None, nbins=None, scale='logicle', **kwargs):
    """
    Get histogram bin edges for the specified channel(s).
    These cover the range specified in ``FCSData.range(channels)`` with
    a number of bins `nbins`, with linear, logarithmic, or logicle
    spacing.
    Parameters
    ----------
    channels : int, str, list of int, list of str
        Channel(s) for which to generate histogram bins. If None,
        return a list with bins for all channels, in the order of
        ``FCSData.channels``.
    nbins : int or list of ints, optional
        The number of bins to calculate. If `channels` specifies a list
        of channels, `nbins` should be a list of integers. If `nbins`
        is None, use ``FCSData.resolution(channel)``.
    scale : str, optional
        Scale in which to generate bins. Can be either ``linear``,
        ``log``, or ``logicle``.
    kwargs : optional
        Keyword arguments specific to the selected bin scaling. Linear
        and logarithmic scaling do not use additional arguments.
        For logicle scaling, the following parameters can be provided:
        T : float, optional
            Maximum range of data. If not provided, use ``range[1]``.
        M : float, optional
            (Asymptotic) number of decades in scaled units. If not
            provided, calculate from the following::
                max(4.5, 4.5 / np.log10(262144) * np.log10(T))
        W : float, optional
            Width of linear range in scaled units. If not provided,
            calculate using the following relationship::
                W = (M - log10(T / abs(r))) / 2
            Where ``r`` is the minimum negative event. If no negative
            events are present, W is set to zero.
    Return
    ------
    array or list of arrays
        Histogram bin edges for the specified channel(s).
    Notes
    -----
    If ``range[0]`` is equal or less than zero and `scale` is ``log``,
    the lower limit of the range is replaced with one.
    Logicle scaling uses the LogicleTransform class in the plot module.
    References
    ----------
    .. [1] D.R. Parks, M. Roederer, W.A. Moore, "A New Logicle Display
        Method Avoids Deceptive Effects of Logarithmic Scaling for Low
        Signals and Compensated Data," Cytometry Part A 69A:541-551, 2006,
        PMID 16604519.
    """
    # Default: all channels
    if channels is None:
        channels = list(self._channels)
    # Get numerical indices of channels
    channels = self._name_to_index(channels)
    # Convert to list if necessary
    channel_list = channels
    if not isinstance(channel_list, list):
        channel_list = [channel_list]
    # Broadcast scalar `nbins`/`scale` so each channel gets its own value.
    if not isinstance(nbins, list):
        nbins = [nbins]*len(channel_list)
    if not isinstance(scale, list):
        scale = [scale]*len(channel_list)
    # Iterate
    bins = []
    for channel, nbins_channel, scale_channel in \
            zip(channel_list, nbins, scale):
        # Get channel resolution
        res_channel = self.resolution(channel)
        # Get default nbins
        if nbins_channel is None:
            nbins_channel = res_channel
        # Get range of channel
        # NOTE(review): for 'log' scale this list is mutated in place below;
        # assumes self.range() returns a fresh list each call — confirm.
        range_channel = self.range(channel)
        # Generate bins according to specified scale
        if scale_channel == 'linear':
            # We will now generate ``nbins`` uniformly spaced bins centered
            # at ``linspace(range_channel[0], range_channel[1], nbins)``. To
            # do so, we need to generate ``nbins + 1`` uniformly spaced
            # points.
            # Bin padding (delta_res) is derived from the channel's native
            # resolution, not from nbins_channel.
            delta_res = (range_channel[1] - range_channel[0]) / \
                (res_channel - 1)
            bins_channel = np.linspace(range_channel[0] - delta_res/2,
                                       range_channel[1] + delta_res/2,
                                       nbins_channel + 1)
        elif scale_channel == 'log':
            # Check if the lower limit is equal or less than zero. If so,
            # change the lower limit to one or some lower value, such that
            # the range covers at least five decades.
            if range_channel[0] <= 0:
                range_channel[0] = min(1., range_channel[1]/1e5)
            # Log range
            range_channel = [np.log10(range_channel[0]),
                             np.log10(range_channel[1])]
            # We will now generate ``nbins`` uniformly spaced bins centered
            # at ``linspace(range_channel[0], range_channel[1], nbins)``. To
            # do so, we need to generate ``nbins + 1`` uniformly spaced
            # points.
            delta_res = (range_channel[1] - range_channel[0]) / \
                (res_channel - 1)
            bins_channel = np.linspace(range_channel[0] - delta_res/2,
                                       range_channel[1] + delta_res/2,
                                       nbins_channel + 1)
            # Exponentiate bins
            bins_channel = 10**(bins_channel)
        elif scale_channel == 'logicle':
            # Create transform class
            # Use the LogicleTransform class from the plot module
            t = FlowCal.plot._LogicleTransform(data=self,
                                               channel=channel,
                                               **kwargs)
            # We now generate ``nbins`` uniformly spaced bins centered at
            # ``linspace(0, M, nbins)``. To do so, we need to generate
            # ``nbins + 1`` uniformly spaced points.
            delta_res = float(t.M) / (res_channel - 1)
            s = np.linspace(- delta_res/2.,
                            t.M + delta_res/2.,
                            nbins_channel + 1)
            # Finally, apply the logicle transformation to generate bins
            bins_channel = t.transform_non_affine(s)
        else:
            # Scale not supported
            raise ValueError('scale "{}" not supported'.format(
                scale_channel))
        # Accumulate
        bins.append(bins_channel)
    # Extract from list if channels was not a list
    if not isinstance(channels, list):
        bins = bins[0]
    return bins
constant[
Get histogram bin edges for the specified channel(s).
These cover the range specified in ``FCSData.range(channels)`` with
a number of bins `nbins`, with linear, logarithmic, or logicle
spacing.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to generate histogram bins. If None,
return a list with bins for all channels, in the order of
``FCSData.channels``.
nbins : int or list of ints, optional
The number of bins to calculate. If `channels` specifies a list
of channels, `nbins` should be a list of integers. If `nbins`
is None, use ``FCSData.resolution(channel)``.
scale : str, optional
Scale in which to generate bins. Can be either ``linear``,
``log``, or ``logicle``.
kwargs : optional
Keyword arguments specific to the selected bin scaling. Linear
and logarithmic scaling do not use additional arguments.
For logicle scaling, the following parameters can be provided:
T : float, optional
Maximum range of data. If not provided, use ``range[1]``.
M : float, optional
(Asymptotic) number of decades in scaled units. If not
provided, calculate from the following::
max(4.5, 4.5 / np.log10(262144) * np.log10(T))
W : float, optional
Width of linear range in scaled units. If not provided,
calculate using the following relationship::
W = (M - log10(T / abs(r))) / 2
Where ``r`` is the minimum negative event. If no negative
events are present, W is set to zero.
Return
------
array or list of arrays
Histogram bin edges for the specified channel(s).
Notes
-----
If ``range[0]`` is equal or less than zero and `scale` is ``log``,
the lower limit of the range is replaced with one.
Logicle scaling uses the LogicleTransform class in the plot module.
References
----------
.. [1] D.R. Parks, M. Roederer, W.A. Moore, "A New Logicle Display
Method Avoids Deceptive Effects of Logarithmic Scaling for Low
Signals and Compensated Data," Cytometry Part A 69A:541-551, 2006,
PMID 16604519.
]
if compare[name[channels] is constant[None]] begin[:]
variable[channels] assign[=] call[name[list], parameter[name[self]._channels]]
variable[channels] assign[=] call[name[self]._name_to_index, parameter[name[channels]]]
variable[channel_list] assign[=] name[channels]
if <ast.UnaryOp object at 0x7da1b1bf8820> begin[:]
variable[channel_list] assign[=] list[[<ast.Name object at 0x7da1b1bf8220>]]
if <ast.UnaryOp object at 0x7da1b1bf8040> begin[:]
variable[nbins] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b1bf83a0>]] * call[name[len], parameter[name[channel_list]]]]
if <ast.UnaryOp object at 0x7da1b1bf8370> begin[:]
variable[scale] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b1c91cf0>]] * call[name[len], parameter[name[channel_list]]]]
variable[bins] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1c92140>, <ast.Name object at 0x7da1b1c92050>, <ast.Name object at 0x7da1b1c91c30>]]] in starred[call[name[zip], parameter[name[channel_list], name[nbins], name[scale]]]] begin[:]
variable[res_channel] assign[=] call[name[self].resolution, parameter[name[channel]]]
if compare[name[nbins_channel] is constant[None]] begin[:]
variable[nbins_channel] assign[=] name[res_channel]
variable[range_channel] assign[=] call[name[self].range, parameter[name[channel]]]
if compare[name[scale_channel] equal[==] constant[linear]] begin[:]
variable[delta_res] assign[=] binary_operation[binary_operation[call[name[range_channel]][constant[1]] - call[name[range_channel]][constant[0]]] / binary_operation[name[res_channel] - constant[1]]]
variable[bins_channel] assign[=] call[name[np].linspace, parameter[binary_operation[call[name[range_channel]][constant[0]] - binary_operation[name[delta_res] / constant[2]]], binary_operation[call[name[range_channel]][constant[1]] + binary_operation[name[delta_res] / constant[2]]], binary_operation[name[nbins_channel] + constant[1]]]]
call[name[bins].append, parameter[name[bins_channel]]]
if <ast.UnaryOp object at 0x7da1b1bf9450> begin[:]
variable[bins] assign[=] call[name[bins]][constant[0]]
return[name[bins]] | keyword[def] identifier[hist_bins] ( identifier[self] , identifier[channels] = keyword[None] , identifier[nbins] = keyword[None] , identifier[scale] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[channels] keyword[is] keyword[None] :
identifier[channels] = identifier[list] ( identifier[self] . identifier[_channels] )
identifier[channels] = identifier[self] . identifier[_name_to_index] ( identifier[channels] )
identifier[channel_list] = identifier[channels]
keyword[if] keyword[not] identifier[isinstance] ( identifier[channel_list] , identifier[list] ):
identifier[channel_list] =[ identifier[channel_list] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[nbins] , identifier[list] ):
identifier[nbins] =[ identifier[nbins] ]* identifier[len] ( identifier[channel_list] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[scale] , identifier[list] ):
identifier[scale] =[ identifier[scale] ]* identifier[len] ( identifier[channel_list] )
identifier[bins] =[]
keyword[for] identifier[channel] , identifier[nbins_channel] , identifier[scale_channel] keyword[in] identifier[zip] ( identifier[channel_list] , identifier[nbins] , identifier[scale] ):
identifier[res_channel] = identifier[self] . identifier[resolution] ( identifier[channel] )
keyword[if] identifier[nbins_channel] keyword[is] keyword[None] :
identifier[nbins_channel] = identifier[res_channel]
identifier[range_channel] = identifier[self] . identifier[range] ( identifier[channel] )
keyword[if] identifier[scale_channel] == literal[string] :
identifier[delta_res] =( identifier[range_channel] [ literal[int] ]- identifier[range_channel] [ literal[int] ])/( identifier[res_channel] - literal[int] )
identifier[bins_channel] = identifier[np] . identifier[linspace] ( identifier[range_channel] [ literal[int] ]- identifier[delta_res] / literal[int] ,
identifier[range_channel] [ literal[int] ]+ identifier[delta_res] / literal[int] ,
identifier[nbins_channel] + literal[int] )
keyword[elif] identifier[scale_channel] == literal[string] :
keyword[if] identifier[range_channel] [ literal[int] ]<= literal[int] :
identifier[range_channel] [ literal[int] ]= identifier[min] ( literal[int] , identifier[range_channel] [ literal[int] ]/ literal[int] )
identifier[range_channel] =[ identifier[np] . identifier[log10] ( identifier[range_channel] [ literal[int] ]),
identifier[np] . identifier[log10] ( identifier[range_channel] [ literal[int] ])]
identifier[delta_res] =( identifier[range_channel] [ literal[int] ]- identifier[range_channel] [ literal[int] ])/( identifier[res_channel] - literal[int] )
identifier[bins_channel] = identifier[np] . identifier[linspace] ( identifier[range_channel] [ literal[int] ]- identifier[delta_res] / literal[int] ,
identifier[range_channel] [ literal[int] ]+ identifier[delta_res] / literal[int] ,
identifier[nbins_channel] + literal[int] )
identifier[bins_channel] = literal[int] **( identifier[bins_channel] )
keyword[elif] identifier[scale_channel] == literal[string] :
identifier[t] = identifier[FlowCal] . identifier[plot] . identifier[_LogicleTransform] ( identifier[data] = identifier[self] ,
identifier[channel] = identifier[channel] ,
** identifier[kwargs] )
identifier[delta_res] = identifier[float] ( identifier[t] . identifier[M] )/( identifier[res_channel] - literal[int] )
identifier[s] = identifier[np] . identifier[linspace] (- identifier[delta_res] / literal[int] ,
identifier[t] . identifier[M] + identifier[delta_res] / literal[int] ,
identifier[nbins_channel] + literal[int] )
identifier[bins_channel] = identifier[t] . identifier[transform_non_affine] ( identifier[s] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[scale_channel] ))
identifier[bins] . identifier[append] ( identifier[bins_channel] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[channels] , identifier[list] ):
identifier[bins] = identifier[bins] [ literal[int] ]
keyword[return] identifier[bins] | def hist_bins(self, channels=None, nbins=None, scale='logicle', **kwargs):
"""
Get histogram bin edges for the specified channel(s).
These cover the range specified in ``FCSData.range(channels)`` with
a number of bins `nbins`, with linear, logarithmic, or logicle
spacing.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to generate histogram bins. If None,
return a list with bins for all channels, in the order of
``FCSData.channels``.
nbins : int or list of ints, optional
The number of bins to calculate. If `channels` specifies a list
of channels, `nbins` should be a list of integers. If `nbins`
is None, use ``FCSData.resolution(channel)``.
scale : str, optional
Scale in which to generate bins. Can be either ``linear``,
``log``, or ``logicle``.
kwargs : optional
Keyword arguments specific to the selected bin scaling. Linear
and logarithmic scaling do not use additional arguments.
For logicle scaling, the following parameters can be provided:
T : float, optional
Maximum range of data. If not provided, use ``range[1]``.
M : float, optional
(Asymptotic) number of decades in scaled units. If not
provided, calculate from the following::
max(4.5, 4.5 / np.log10(262144) * np.log10(T))
W : float, optional
Width of linear range in scaled units. If not provided,
calculate using the following relationship::
W = (M - log10(T / abs(r))) / 2
Where ``r`` is the minimum negative event. If no negative
events are present, W is set to zero.
Return
------
array or list of arrays
Histogram bin edges for the specified channel(s).
Notes
-----
If ``range[0]`` is equal or less than zero and `scale` is ``log``,
the lower limit of the range is replaced with one.
Logicle scaling uses the LogicleTransform class in the plot module.
References
----------
.. [1] D.R. Parks, M. Roederer, W.A. Moore, "A New Logicle Display
Method Avoids Deceptive Effects of Logarithmic Scaling for Low
Signals and Compensated Data," Cytometry Part A 69A:541-551, 2006,
PMID 16604519.
"""
# Default: all channels
if channels is None:
channels = list(self._channels) # depends on [control=['if'], data=['channels']]
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Convert to list if necessary
channel_list = channels
if not isinstance(channel_list, list):
channel_list = [channel_list] # depends on [control=['if'], data=[]]
if not isinstance(nbins, list):
nbins = [nbins] * len(channel_list) # depends on [control=['if'], data=[]]
if not isinstance(scale, list):
scale = [scale] * len(channel_list) # depends on [control=['if'], data=[]]
# Iterate
bins = []
for (channel, nbins_channel, scale_channel) in zip(channel_list, nbins, scale):
# Get channel resolution
res_channel = self.resolution(channel)
# Get default nbins
if nbins_channel is None:
nbins_channel = res_channel # depends on [control=['if'], data=['nbins_channel']]
# Get range of channel
range_channel = self.range(channel)
# Generate bins according to specified scale
if scale_channel == 'linear':
# We will now generate ``nbins`` uniformly spaced bins centered
# at ``linspace(range_channel[0], range_channel[1], nbins)``. To
# do so, we need to generate ``nbins + 1`` uniformly spaced
# points.
delta_res = (range_channel[1] - range_channel[0]) / (res_channel - 1)
bins_channel = np.linspace(range_channel[0] - delta_res / 2, range_channel[1] + delta_res / 2, nbins_channel + 1) # depends on [control=['if'], data=[]]
elif scale_channel == 'log':
# Check if the lower limit is equal or less than zero. If so,
# change the lower limit to one or some lower value, such that
# the range covers at least five decades.
if range_channel[0] <= 0:
range_channel[0] = min(1.0, range_channel[1] / 100000.0) # depends on [control=['if'], data=[]]
# Log range
range_channel = [np.log10(range_channel[0]), np.log10(range_channel[1])]
# We will now generate ``nbins`` uniformly spaced bins centered
# at ``linspace(range_channel[0], range_channel[1], nbins)``. To
# do so, we need to generate ``nbins + 1`` uniformly spaced
# points.
delta_res = (range_channel[1] - range_channel[0]) / (res_channel - 1)
bins_channel = np.linspace(range_channel[0] - delta_res / 2, range_channel[1] + delta_res / 2, nbins_channel + 1)
# Exponentiate bins
bins_channel = 10 ** bins_channel # depends on [control=['if'], data=[]]
elif scale_channel == 'logicle':
# Create transform class
# Use the LogicleTransform class from the plot module
t = FlowCal.plot._LogicleTransform(data=self, channel=channel, **kwargs)
# We now generate ``nbins`` uniformly spaced bins centered at
# ``linspace(0, M, nbins)``. To do so, we need to generate
# ``nbins + 1`` uniformly spaced points.
delta_res = float(t.M) / (res_channel - 1)
s = np.linspace(-delta_res / 2.0, t.M + delta_res / 2.0, nbins_channel + 1)
# Finally, apply the logicle transformation to generate bins
bins_channel = t.transform_non_affine(s) # depends on [control=['if'], data=[]]
else:
# Scale not supported
raise ValueError('scale "{}" not supported'.format(scale_channel))
# Accumulate
bins.append(bins_channel) # depends on [control=['for'], data=[]]
# Extract from list if channels was not a list
if not isinstance(channels, list):
bins = bins[0] # depends on [control=['if'], data=[]]
return bins |
def setFlag(self, index, flag):
    """Set flag for field at index. Flags are special characters such as 'S' for
    sequence or 'T' for timestamp.

    Parameters:
    --------------------------------------------------------------------
    index: index of field whose flag is being set
    flag: special character

    Raises:
    --------------------------------------------------------------------
    IndexError: if index is not smaller than the number of fields
    """
    # Validate explicitly instead of with ``assert``: asserts are stripped
    # when Python runs with -O, which would silently skip the bounds check.
    if index >= len(self.fields):
        raise IndexError("field index %d out of range (have %d fields)"
                         % (index, len(self.fields)))
    self.fields[index].flag = flag
constant[Set flag for field at index. Flags are special characters such as 'S' for
sequence or 'T' for timestamp.
Parameters:
--------------------------------------------------------------------
index: index of field whose flag is being set
flag: special character
]
assert[compare[call[name[len], parameter[name[self].fields]] greater[>] name[index]]]
call[name[self].fields][name[index]].flag assign[=] name[flag] | keyword[def] identifier[setFlag] ( identifier[self] , identifier[index] , identifier[flag] ):
literal[string]
keyword[assert] identifier[len] ( identifier[self] . identifier[fields] )> identifier[index]
identifier[self] . identifier[fields] [ identifier[index] ]. identifier[flag] = identifier[flag] | def setFlag(self, index, flag):
"""Set flag for field at index. Flags are special characters such as 'S' for
sequence or 'T' for timestamp.
Parameters:
--------------------------------------------------------------------
index: index of field whose flag is being set
flag: special character
"""
assert len(self.fields) > index
self.fields[index].flag = flag |
def fsplit(file_to_split):
    """
    Split the file and return the list of filenames.

    The file is cut into roughly ``number_of_files`` pieces; each cut is
    deferred until the next ``separator`` line (or EOF) so that a record is
    never split in the middle.  Pieces are written to
    ``<file_to_split>_splitted/<n>`` and concatenating them in order
    reproduces the original file byte-for-byte.

    Parameters
    ----------
    file_to_split : str
        Path of the file to split.

    Returns
    -------
    list of str
        Paths of the generated part files, in order.
    """
    dirname = file_to_split + '_splitted'
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    # Integer division: f.seek() needs an int offset (the original true
    # division produced a float and broke under Python 3).
    part_file_size = os.path.getsize(file_to_split) // number_of_files + 1
    # Work in binary mode: Python 3 forbids nonzero cur-relative seeks on
    # text-mode files, and byte offsets are exact in binary mode.  Encode
    # the separator so line comparisons stay meaningful.
    sep = separator.encode() if isinstance(separator, str) else separator
    splitted_files = []
    with open(file_to_split, "rb") as f:
        number = 0
        actual = 0
        while True:
            prec = actual
            # Jump "part_file_size" bytes forward from the current position.
            f.seek(part_file_size, os.SEEK_CUR)
            # Finish the (possibly partial) current line, then advance to
            # the next separator line or EOF.
            s = f.readline()
            if len(s) == 0:
                s = f.readline()
            while len(s) != 0 and s != sep:
                s = f.readline()
            # Current position is the cut point for this part.
            actual = f.tell()
            new_file = os.path.join(dirname, str(number))
            # Copy the [prec, actual) byte range into the new part file.
            with open(file_to_split, "rb") as temp:
                temp.seek(prec)
                copy = temp.read(actual - prec)
            # "with" closes the part file deterministically; the original
            # leaked the handle via a bare open(...).write(...).
            with open(new_file, 'wb') as out:
                out.write(copy)
            splitted_files.append(new_file)
            number += 1
            # End of file reached.
            if len(s) == 0:
                break
    return splitted_files
return splitted_files | def function[fsplit, parameter[file_to_split]]:
constant[
Split the file and return the list of filenames.
]
variable[dirname] assign[=] binary_operation[name[file_to_split] + constant[_splitted]]
if <ast.UnaryOp object at 0x7da1b0089270> begin[:]
call[name[os].mkdir, parameter[name[dirname]]]
variable[part_file_size] assign[=] binary_operation[binary_operation[call[name[os].path.getsize, parameter[name[file_to_split]]] / name[number_of_files]] + constant[1]]
variable[splitted_files] assign[=] list[[]]
with call[name[open], parameter[name[file_to_split], constant[r]]] begin[:]
variable[number] assign[=] constant[0]
variable[actual] assign[=] constant[0]
while constant[1] begin[:]
variable[prec] assign[=] name[actual]
call[name[f].seek, parameter[name[part_file_size], name[os].SEEK_CUR]]
variable[s] assign[=] call[name[f].readline, parameter[]]
if compare[call[name[len], parameter[name[s]]] equal[==] constant[0]] begin[:]
variable[s] assign[=] call[name[f].readline, parameter[]]
while <ast.BoolOp object at 0x7da1b000f5b0> begin[:]
variable[s] assign[=] call[name[f].readline, parameter[]]
variable[actual] assign[=] call[name[f].tell, parameter[]]
variable[new_file] assign[=] call[name[os].path.join, parameter[name[dirname], call[name[str], parameter[name[number]]]]]
with call[name[open], parameter[name[file_to_split], constant[r]]] begin[:]
call[name[temp].seek, parameter[name[prec]]]
variable[copy] assign[=] call[name[temp].read, parameter[binary_operation[name[actual] - name[prec]]]]
call[call[name[open], parameter[name[new_file], constant[w]]].write, parameter[name[copy]]]
call[name[splitted_files].append, parameter[name[new_file]]]
<ast.AugAssign object at 0x7da1b0033760>
if compare[call[name[len], parameter[name[s]]] equal[==] constant[0]] begin[:]
break
return[name[splitted_files]] | keyword[def] identifier[fsplit] ( identifier[file_to_split] ):
literal[string]
identifier[dirname] = identifier[file_to_split] + literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dirname] ):
identifier[os] . identifier[mkdir] ( identifier[dirname] )
identifier[part_file_size] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[file_to_split] )/ identifier[number_of_files] + literal[int]
identifier[splitted_files] =[]
keyword[with] identifier[open] ( identifier[file_to_split] , literal[string] ) keyword[as] identifier[f] :
identifier[number] = literal[int]
identifier[actual] = literal[int]
keyword[while] literal[int] :
identifier[prec] = identifier[actual]
identifier[f] . identifier[seek] ( identifier[part_file_size] , identifier[os] . identifier[SEEK_CUR] )
identifier[s] = identifier[f] . identifier[readline] ()
keyword[if] identifier[len] ( identifier[s] )== literal[int] :
identifier[s] = identifier[f] . identifier[readline] ()
keyword[while] identifier[len] ( identifier[s] )!= literal[int] keyword[and] identifier[s] != identifier[separator] :
identifier[s] = identifier[f] . identifier[readline] ()
identifier[actual] = identifier[f] . identifier[tell] ()
identifier[new_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , identifier[str] ( identifier[number] ))
keyword[with] identifier[open] ( identifier[file_to_split] , literal[string] ) keyword[as] identifier[temp] :
identifier[temp] . identifier[seek] ( identifier[prec] )
identifier[copy] = identifier[temp] . identifier[read] ( identifier[actual] - identifier[prec] )
identifier[open] ( identifier[new_file] , literal[string] ). identifier[write] ( identifier[copy] )
identifier[splitted_files] . identifier[append] ( identifier[new_file] )
identifier[number] += literal[int]
keyword[if] identifier[len] ( identifier[s] )== literal[int] :
keyword[break]
keyword[return] identifier[splitted_files] | def fsplit(file_to_split):
"""
Split the file and return the list of filenames.
"""
dirname = file_to_split + '_splitted'
if not os.path.exists(dirname):
os.mkdir(dirname) # depends on [control=['if'], data=[]]
part_file_size = os.path.getsize(file_to_split) / number_of_files + 1
splitted_files = []
with open(file_to_split, 'r') as f:
number = 0
actual = 0
while 1:
prec = actual
# Jump of "size" from the current place in the file
f.seek(part_file_size, os.SEEK_CUR)
# find the next separator or EOF
s = f.readline()
if len(s) == 0:
s = f.readline() # depends on [control=['if'], data=[]]
while len(s) != 0 and s != separator:
s = f.readline() # depends on [control=['while'], data=[]]
# Get the current place
actual = f.tell()
new_file = os.path.join(dirname, str(number))
# Create the new file
with open(file_to_split, 'r') as temp:
temp.seek(prec)
# Get the text we want to put in the new file
copy = temp.read(actual - prec)
# Write the new file
open(new_file, 'w').write(copy) # depends on [control=['with'], data=['open', 'temp']]
splitted_files.append(new_file)
number += 1
# End of file
if len(s) == 0:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['open', 'f']]
return splitted_files |
def _BuildFindSpecsFromArtifact(self, definition, environment_variables):
"""Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications.
"""
find_specs = []
for source in definition.sources:
if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE:
for path_entry in set(source.paths):
specifications = self._BuildFindSpecsFromFileSourcePath(
path_entry, source.separator, environment_variables,
self._knowledge_base.user_accounts)
find_specs.extend(specifications)
self.file_system_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
for key_path in set(source.keys):
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
# TODO: Handle Registry Values Once Supported in dfwinreg.
# https://github.com/log2timeline/dfwinreg/issues/98
# Use set-comprehension to create a set of the source key paths.
key_paths = {
key_value['key'] for key_value in source.key_value_pairs}
key_paths_string = ', '.join(key_paths)
logger.warning((
'Windows Registry values are not supported, extracting keys: '
'"{0!s}"').format(key_paths_string))
for key_path in key_paths:
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):
for name in source.names:
specifications = self._BuildFindSpecsFromGroupName(
name, environment_variables)
find_specs.extend(specifications)
else:
logger.warning(
'Unsupported artifact definition source type: "{0:s}"'.format(
source.type_indicator))
return find_specs | def function[_BuildFindSpecsFromArtifact, parameter[self, definition, environment_variables]]:
constant[Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications.
]
variable[find_specs] assign[=] list[[]]
for taget[name[source]] in starred[name[definition].sources] begin[:]
if compare[name[source].type_indicator equal[==] name[artifact_types].TYPE_INDICATOR_FILE] begin[:]
for taget[name[path_entry]] in starred[call[name[set], parameter[name[source].paths]]] begin[:]
variable[specifications] assign[=] call[name[self]._BuildFindSpecsFromFileSourcePath, parameter[name[path_entry], name[source].separator, name[environment_variables], name[self]._knowledge_base.user_accounts]]
call[name[find_specs].extend, parameter[name[specifications]]]
call[name[self].file_system_artifact_names.add, parameter[name[definition].name]]
return[name[find_specs]] | keyword[def] identifier[_BuildFindSpecsFromArtifact] ( identifier[self] , identifier[definition] , identifier[environment_variables] ):
literal[string]
identifier[find_specs] =[]
keyword[for] identifier[source] keyword[in] identifier[definition] . identifier[sources] :
keyword[if] identifier[source] . identifier[type_indicator] == identifier[artifact_types] . identifier[TYPE_INDICATOR_FILE] :
keyword[for] identifier[path_entry] keyword[in] identifier[set] ( identifier[source] . identifier[paths] ):
identifier[specifications] = identifier[self] . identifier[_BuildFindSpecsFromFileSourcePath] (
identifier[path_entry] , identifier[source] . identifier[separator] , identifier[environment_variables] ,
identifier[self] . identifier[_knowledge_base] . identifier[user_accounts] )
identifier[find_specs] . identifier[extend] ( identifier[specifications] )
identifier[self] . identifier[file_system_artifact_names] . identifier[add] ( identifier[definition] . identifier[name] )
keyword[elif] ( identifier[source] . identifier[type_indicator] ==
identifier[artifact_types] . identifier[TYPE_INDICATOR_WINDOWS_REGISTRY_KEY] ):
keyword[for] identifier[key_path] keyword[in] identifier[set] ( identifier[source] . identifier[keys] ):
keyword[if] identifier[ArtifactDefinitionsFilterHelper] . identifier[CheckKeyCompatibility] ( identifier[key_path] ):
identifier[specifications] = identifier[self] . identifier[_BuildFindSpecsFromRegistrySourceKey] ( identifier[key_path] )
identifier[find_specs] . identifier[extend] ( identifier[specifications] )
identifier[self] . identifier[registry_artifact_names] . identifier[add] ( identifier[definition] . identifier[name] )
keyword[elif] ( identifier[source] . identifier[type_indicator] ==
identifier[artifact_types] . identifier[TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE] ):
identifier[key_paths] ={
identifier[key_value] [ literal[string] ] keyword[for] identifier[key_value] keyword[in] identifier[source] . identifier[key_value_pairs] }
identifier[key_paths_string] = literal[string] . identifier[join] ( identifier[key_paths] )
identifier[logger] . identifier[warning] ((
literal[string]
literal[string] ). identifier[format] ( identifier[key_paths_string] ))
keyword[for] identifier[key_path] keyword[in] identifier[key_paths] :
keyword[if] identifier[ArtifactDefinitionsFilterHelper] . identifier[CheckKeyCompatibility] ( identifier[key_path] ):
identifier[specifications] = identifier[self] . identifier[_BuildFindSpecsFromRegistrySourceKey] ( identifier[key_path] )
identifier[find_specs] . identifier[extend] ( identifier[specifications] )
identifier[self] . identifier[registry_artifact_names] . identifier[add] ( identifier[definition] . identifier[name] )
keyword[elif] ( identifier[source] . identifier[type_indicator] ==
identifier[artifact_types] . identifier[TYPE_INDICATOR_ARTIFACT_GROUP] ):
keyword[for] identifier[name] keyword[in] identifier[source] . identifier[names] :
identifier[specifications] = identifier[self] . identifier[_BuildFindSpecsFromGroupName] (
identifier[name] , identifier[environment_variables] )
identifier[find_specs] . identifier[extend] ( identifier[specifications] )
keyword[else] :
identifier[logger] . identifier[warning] (
literal[string] . identifier[format] (
identifier[source] . identifier[type_indicator] ))
keyword[return] identifier[find_specs] | def _BuildFindSpecsFromArtifact(self, definition, environment_variables):
"""Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications.
"""
find_specs = []
for source in definition.sources:
if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE:
for path_entry in set(source.paths):
specifications = self._BuildFindSpecsFromFileSourcePath(path_entry, source.separator, environment_variables, self._knowledge_base.user_accounts)
find_specs.extend(specifications)
self.file_system_artifact_names.add(definition.name) # depends on [control=['for'], data=['path_entry']] # depends on [control=['if'], data=[]]
elif source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
for key_path in set(source.keys):
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key_path']] # depends on [control=['if'], data=[]]
elif source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE:
# TODO: Handle Registry Values Once Supported in dfwinreg.
# https://github.com/log2timeline/dfwinreg/issues/98
# Use set-comprehension to create a set of the source key paths.
key_paths = {key_value['key'] for key_value in source.key_value_pairs}
key_paths_string = ', '.join(key_paths)
logger.warning('Windows Registry values are not supported, extracting keys: "{0!s}"'.format(key_paths_string))
for key_path in key_paths:
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key_path']] # depends on [control=['if'], data=[]]
elif source.type_indicator == artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP:
for name in source.names:
specifications = self._BuildFindSpecsFromGroupName(name, environment_variables)
find_specs.extend(specifications) # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]]
else:
logger.warning('Unsupported artifact definition source type: "{0:s}"'.format(source.type_indicator)) # depends on [control=['for'], data=['source']]
return find_specs |
def or_where_pivot(self, column, operator=None, value=None):
"""
Set an or where clause for a pivot table column.
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:return: self
:rtype: BelongsToMany
"""
return self.where_pivot(column, operator, value, "or") | def function[or_where_pivot, parameter[self, column, operator, value]]:
constant[
Set an or where clause for a pivot table column.
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:return: self
:rtype: BelongsToMany
]
return[call[name[self].where_pivot, parameter[name[column], name[operator], name[value], constant[or]]]] | keyword[def] identifier[or_where_pivot] ( identifier[self] , identifier[column] , identifier[operator] = keyword[None] , identifier[value] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[where_pivot] ( identifier[column] , identifier[operator] , identifier[value] , literal[string] ) | def or_where_pivot(self, column, operator=None, value=None):
"""
Set an or where clause for a pivot table column.
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:return: self
:rtype: BelongsToMany
"""
return self.where_pivot(column, operator, value, 'or') |
def extend(self, builder):
"""
Extend the query builder with the needed functions.
:param builder: The query builder
:type builder: eloquent.orm.builder.Builder
"""
for extension in self._extensions:
getattr(self, '_add_%s' % extension)(builder)
builder.on_delete(self._on_delete) | def function[extend, parameter[self, builder]]:
constant[
Extend the query builder with the needed functions.
:param builder: The query builder
:type builder: eloquent.orm.builder.Builder
]
for taget[name[extension]] in starred[name[self]._extensions] begin[:]
call[call[name[getattr], parameter[name[self], binary_operation[constant[_add_%s] <ast.Mod object at 0x7da2590d6920> name[extension]]]], parameter[name[builder]]]
call[name[builder].on_delete, parameter[name[self]._on_delete]] | keyword[def] identifier[extend] ( identifier[self] , identifier[builder] ):
literal[string]
keyword[for] identifier[extension] keyword[in] identifier[self] . identifier[_extensions] :
identifier[getattr] ( identifier[self] , literal[string] % identifier[extension] )( identifier[builder] )
identifier[builder] . identifier[on_delete] ( identifier[self] . identifier[_on_delete] ) | def extend(self, builder):
"""
Extend the query builder with the needed functions.
:param builder: The query builder
:type builder: eloquent.orm.builder.Builder
"""
for extension in self._extensions:
getattr(self, '_add_%s' % extension)(builder) # depends on [control=['for'], data=['extension']]
builder.on_delete(self._on_delete) |
def reload(self, reload_timeout, save_config):
"""Reload the device."""
MAX_BOOT_TIME = 1800 # 30 minutes - TODO(klstanie): move to config
RELOAD_PROMPT = re.compile(re.escape("Reload hardware module ? [no,yes]"))
START_TO_BACKUP = re.compile("Status report.*START TO BACKUP")
BACKUP_IN_PROGRESS = re.compile("Status report.*BACKUP INPROGRESS")
BACKUP_HAS_COMPLETED_SUCCESSFULLY = re.compile("Status report.*BACKUP HAS COMPLETED SUCCESSFULLY")
DONE = re.compile(re.escape("[Done]"))
STAND_BY = re.compile("Please stand by while rebooting the system")
CONSOLE = re.compile("con[0|1]/(?:RS?P)?[0-1]/CPU0 is now available")
CONSOLE_STBY = re.compile("con[0|1]/(?:RS?P)?[0-1]/CPU0 is in standby")
CONFIGURATION_COMPLETED = re.compile("SYSTEM CONFIGURATION COMPLETED")
CONFIGURATION_IN_PROCESS = re.compile("SYSTEM CONFIGURATION IN PROCESS")
BOOTING = re.compile("Booting IOS-XR 64 bit Boot previously installed image")
# 0 1 2 3 4 5
events = [RELOAD_PROMPT, START_TO_BACKUP, BACKUP_IN_PROGRESS, BACKUP_HAS_COMPLETED_SUCCESSFULLY, DONE, BOOTING,
# 6 7 8 9 10
CONSOLE, self.press_return_re, CONFIGURATION_COMPLETED, CONFIGURATION_IN_PROCESS, self.username_re,
# 11 12 13 14 15
EOF, pexpect.TIMEOUT, self.rommon_re, STAND_BY, CONSOLE_STBY]
transitions = [
# do I really need to clean the cmd
(RELOAD_PROMPT, [0], 1, partial(a_send_line, "yes"), MAX_BOOT_TIME),
(START_TO_BACKUP, [0, 1], 2, a_message_callback, 60),
(BACKUP_IN_PROGRESS, [0, 1, 2], 2, a_message_callback, 90),
(BACKUP_HAS_COMPLETED_SUCCESSFULLY, [0, 1, 2], 3, a_message_callback, 10),
(DONE, [1, 2, 3], 4, None, MAX_BOOT_TIME),
(STAND_BY, [2, 3, 4], 5, a_message_callback, MAX_BOOT_TIME),
(self.rommon_re, [0, 4], 5, partial(a_send_boot, "boot"), MAX_BOOT_TIME),
(BOOTING, [0, 1, 2, 3, 4], 5, a_message_callback, MAX_BOOT_TIME),
(CONSOLE, [0, 1, 2, 3, 4, 5], 6, None, 600),
(self.press_return_re, [6], 7, partial(a_send, "\r"), 300),
(CONFIGURATION_IN_PROCESS, [7], 8, None, 180),
(CONFIGURATION_COMPLETED, [8], -1, a_return_and_reconnect, 0),
(CONSOLE_STBY, [5], -1, ConnectionStandbyConsole("Standby Console"), 0),
(self.username_re, [9], -1, a_return_and_reconnect, 0),
(EOF, [0, 1, 2, 3, 4, 5], -1, ConnectionError("Device disconnected"), 0),
(pexpect.TIMEOUT, [7], 9, partial(a_send, "\r"), 180),
(pexpect.TIMEOUT, [1, 5, 8], -1, ConnectionError("Boot process took more than {}s".format(MAX_BOOT_TIME)), 0),
(pexpect.TIMEOUT, [9], -1, ConnectionAuthenticationError("Unable to reconnect after reloading"), 0)
]
fsm = FSM("RELOAD", self.device, events, transitions, timeout=600)
return fsm.run() | def function[reload, parameter[self, reload_timeout, save_config]]:
constant[Reload the device.]
variable[MAX_BOOT_TIME] assign[=] constant[1800]
variable[RELOAD_PROMPT] assign[=] call[name[re].compile, parameter[call[name[re].escape, parameter[constant[Reload hardware module ? [no,yes]]]]]]
variable[START_TO_BACKUP] assign[=] call[name[re].compile, parameter[constant[Status report.*START TO BACKUP]]]
variable[BACKUP_IN_PROGRESS] assign[=] call[name[re].compile, parameter[constant[Status report.*BACKUP INPROGRESS]]]
variable[BACKUP_HAS_COMPLETED_SUCCESSFULLY] assign[=] call[name[re].compile, parameter[constant[Status report.*BACKUP HAS COMPLETED SUCCESSFULLY]]]
variable[DONE] assign[=] call[name[re].compile, parameter[call[name[re].escape, parameter[constant[[Done]]]]]]
variable[STAND_BY] assign[=] call[name[re].compile, parameter[constant[Please stand by while rebooting the system]]]
variable[CONSOLE] assign[=] call[name[re].compile, parameter[constant[con[0|1]/(?:RS?P)?[0-1]/CPU0 is now available]]]
variable[CONSOLE_STBY] assign[=] call[name[re].compile, parameter[constant[con[0|1]/(?:RS?P)?[0-1]/CPU0 is in standby]]]
variable[CONFIGURATION_COMPLETED] assign[=] call[name[re].compile, parameter[constant[SYSTEM CONFIGURATION COMPLETED]]]
variable[CONFIGURATION_IN_PROCESS] assign[=] call[name[re].compile, parameter[constant[SYSTEM CONFIGURATION IN PROCESS]]]
variable[BOOTING] assign[=] call[name[re].compile, parameter[constant[Booting IOS-XR 64 bit Boot previously installed image]]]
variable[events] assign[=] list[[<ast.Name object at 0x7da1b2534bb0>, <ast.Name object at 0x7da1b2536380>, <ast.Name object at 0x7da1b2537a90>, <ast.Name object at 0x7da1b2537850>, <ast.Name object at 0x7da1b25351e0>, <ast.Name object at 0x7da1b2537b20>, <ast.Name object at 0x7da1b2535ab0>, <ast.Attribute object at 0x7da1b2534b20>, <ast.Name object at 0x7da1b2535a20>, <ast.Name object at 0x7da1b2536e90>, <ast.Attribute object at 0x7da1b2535cf0>, <ast.Name object at 0x7da1b2534580>, <ast.Attribute object at 0x7da1b25376a0>, <ast.Attribute object at 0x7da1b25355d0>, <ast.Name object at 0x7da1b25364a0>, <ast.Name object at 0x7da1b2535ba0>]]
variable[transitions] assign[=] list[[<ast.Tuple object at 0x7da1b2536470>, <ast.Tuple object at 0x7da1b2534130>, <ast.Tuple object at 0x7da1b2536920>, <ast.Tuple object at 0x7da1b2535e10>, <ast.Tuple object at 0x7da1b25348e0>, <ast.Tuple object at 0x7da1b2534970>, <ast.Tuple object at 0x7da1b2534c10>, <ast.Tuple object at 0x7da1b2537610>, <ast.Tuple object at 0x7da1b2536770>, <ast.Tuple object at 0x7da1b2537430>, <ast.Tuple object at 0x7da1b2534ac0>, <ast.Tuple object at 0x7da1b2535360>, <ast.Tuple object at 0x7da2041dbb80>, <ast.Tuple object at 0x7da1b25f6530>, <ast.Tuple object at 0x7da1b25f6440>, <ast.Tuple object at 0x7da1b2508220>, <ast.Tuple object at 0x7da1b2508250>, <ast.Tuple object at 0x7da1b256d930>]]
variable[fsm] assign[=] call[name[FSM], parameter[constant[RELOAD], name[self].device, name[events], name[transitions]]]
return[call[name[fsm].run, parameter[]]] | keyword[def] identifier[reload] ( identifier[self] , identifier[reload_timeout] , identifier[save_config] ):
literal[string]
identifier[MAX_BOOT_TIME] = literal[int]
identifier[RELOAD_PROMPT] = identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( literal[string] ))
identifier[START_TO_BACKUP] = identifier[re] . identifier[compile] ( literal[string] )
identifier[BACKUP_IN_PROGRESS] = identifier[re] . identifier[compile] ( literal[string] )
identifier[BACKUP_HAS_COMPLETED_SUCCESSFULLY] = identifier[re] . identifier[compile] ( literal[string] )
identifier[DONE] = identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( literal[string] ))
identifier[STAND_BY] = identifier[re] . identifier[compile] ( literal[string] )
identifier[CONSOLE] = identifier[re] . identifier[compile] ( literal[string] )
identifier[CONSOLE_STBY] = identifier[re] . identifier[compile] ( literal[string] )
identifier[CONFIGURATION_COMPLETED] = identifier[re] . identifier[compile] ( literal[string] )
identifier[CONFIGURATION_IN_PROCESS] = identifier[re] . identifier[compile] ( literal[string] )
identifier[BOOTING] = identifier[re] . identifier[compile] ( literal[string] )
identifier[events] =[ identifier[RELOAD_PROMPT] , identifier[START_TO_BACKUP] , identifier[BACKUP_IN_PROGRESS] , identifier[BACKUP_HAS_COMPLETED_SUCCESSFULLY] , identifier[DONE] , identifier[BOOTING] ,
identifier[CONSOLE] , identifier[self] . identifier[press_return_re] , identifier[CONFIGURATION_COMPLETED] , identifier[CONFIGURATION_IN_PROCESS] , identifier[self] . identifier[username_re] ,
identifier[EOF] , identifier[pexpect] . identifier[TIMEOUT] , identifier[self] . identifier[rommon_re] , identifier[STAND_BY] , identifier[CONSOLE_STBY] ]
identifier[transitions] =[
( identifier[RELOAD_PROMPT] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send_line] , literal[string] ), identifier[MAX_BOOT_TIME] ),
( identifier[START_TO_BACKUP] ,[ literal[int] , literal[int] ], literal[int] , identifier[a_message_callback] , literal[int] ),
( identifier[BACKUP_IN_PROGRESS] ,[ literal[int] , literal[int] , literal[int] ], literal[int] , identifier[a_message_callback] , literal[int] ),
( identifier[BACKUP_HAS_COMPLETED_SUCCESSFULLY] ,[ literal[int] , literal[int] , literal[int] ], literal[int] , identifier[a_message_callback] , literal[int] ),
( identifier[DONE] ,[ literal[int] , literal[int] , literal[int] ], literal[int] , keyword[None] , identifier[MAX_BOOT_TIME] ),
( identifier[STAND_BY] ,[ literal[int] , literal[int] , literal[int] ], literal[int] , identifier[a_message_callback] , identifier[MAX_BOOT_TIME] ),
( identifier[self] . identifier[rommon_re] ,[ literal[int] , literal[int] ], literal[int] , identifier[partial] ( identifier[a_send_boot] , literal[string] ), identifier[MAX_BOOT_TIME] ),
( identifier[BOOTING] ,[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ], literal[int] , identifier[a_message_callback] , identifier[MAX_BOOT_TIME] ),
( identifier[CONSOLE] ,[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ], literal[int] , keyword[None] , literal[int] ),
( identifier[self] . identifier[press_return_re] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send] , literal[string] ), literal[int] ),
( identifier[CONFIGURATION_IN_PROCESS] ,[ literal[int] ], literal[int] , keyword[None] , literal[int] ),
( identifier[CONFIGURATION_COMPLETED] ,[ literal[int] ],- literal[int] , identifier[a_return_and_reconnect] , literal[int] ),
( identifier[CONSOLE_STBY] ,[ literal[int] ],- literal[int] , identifier[ConnectionStandbyConsole] ( literal[string] ), literal[int] ),
( identifier[self] . identifier[username_re] ,[ literal[int] ],- literal[int] , identifier[a_return_and_reconnect] , literal[int] ),
( identifier[EOF] ,[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ],- literal[int] , identifier[ConnectionError] ( literal[string] ), literal[int] ),
( identifier[pexpect] . identifier[TIMEOUT] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send] , literal[string] ), literal[int] ),
( identifier[pexpect] . identifier[TIMEOUT] ,[ literal[int] , literal[int] , literal[int] ],- literal[int] , identifier[ConnectionError] ( literal[string] . identifier[format] ( identifier[MAX_BOOT_TIME] )), literal[int] ),
( identifier[pexpect] . identifier[TIMEOUT] ,[ literal[int] ],- literal[int] , identifier[ConnectionAuthenticationError] ( literal[string] ), literal[int] )
]
identifier[fsm] = identifier[FSM] ( literal[string] , identifier[self] . identifier[device] , identifier[events] , identifier[transitions] , identifier[timeout] = literal[int] )
keyword[return] identifier[fsm] . identifier[run] () | def reload(self, reload_timeout, save_config):
"""Reload the device."""
MAX_BOOT_TIME = 1800 # 30 minutes - TODO(klstanie): move to config
RELOAD_PROMPT = re.compile(re.escape('Reload hardware module ? [no,yes]'))
START_TO_BACKUP = re.compile('Status report.*START TO BACKUP')
BACKUP_IN_PROGRESS = re.compile('Status report.*BACKUP INPROGRESS')
BACKUP_HAS_COMPLETED_SUCCESSFULLY = re.compile('Status report.*BACKUP HAS COMPLETED SUCCESSFULLY')
DONE = re.compile(re.escape('[Done]'))
STAND_BY = re.compile('Please stand by while rebooting the system')
CONSOLE = re.compile('con[0|1]/(?:RS?P)?[0-1]/CPU0 is now available')
CONSOLE_STBY = re.compile('con[0|1]/(?:RS?P)?[0-1]/CPU0 is in standby')
CONFIGURATION_COMPLETED = re.compile('SYSTEM CONFIGURATION COMPLETED')
CONFIGURATION_IN_PROCESS = re.compile('SYSTEM CONFIGURATION IN PROCESS')
BOOTING = re.compile('Booting IOS-XR 64 bit Boot previously installed image')
# 0 1 2 3 4 5
# 6 7 8 9 10
# 11 12 13 14 15
events = [RELOAD_PROMPT, START_TO_BACKUP, BACKUP_IN_PROGRESS, BACKUP_HAS_COMPLETED_SUCCESSFULLY, DONE, BOOTING, CONSOLE, self.press_return_re, CONFIGURATION_COMPLETED, CONFIGURATION_IN_PROCESS, self.username_re, EOF, pexpect.TIMEOUT, self.rommon_re, STAND_BY, CONSOLE_STBY]
# do I really need to clean the cmd
transitions = [(RELOAD_PROMPT, [0], 1, partial(a_send_line, 'yes'), MAX_BOOT_TIME), (START_TO_BACKUP, [0, 1], 2, a_message_callback, 60), (BACKUP_IN_PROGRESS, [0, 1, 2], 2, a_message_callback, 90), (BACKUP_HAS_COMPLETED_SUCCESSFULLY, [0, 1, 2], 3, a_message_callback, 10), (DONE, [1, 2, 3], 4, None, MAX_BOOT_TIME), (STAND_BY, [2, 3, 4], 5, a_message_callback, MAX_BOOT_TIME), (self.rommon_re, [0, 4], 5, partial(a_send_boot, 'boot'), MAX_BOOT_TIME), (BOOTING, [0, 1, 2, 3, 4], 5, a_message_callback, MAX_BOOT_TIME), (CONSOLE, [0, 1, 2, 3, 4, 5], 6, None, 600), (self.press_return_re, [6], 7, partial(a_send, '\r'), 300), (CONFIGURATION_IN_PROCESS, [7], 8, None, 180), (CONFIGURATION_COMPLETED, [8], -1, a_return_and_reconnect, 0), (CONSOLE_STBY, [5], -1, ConnectionStandbyConsole('Standby Console'), 0), (self.username_re, [9], -1, a_return_and_reconnect, 0), (EOF, [0, 1, 2, 3, 4, 5], -1, ConnectionError('Device disconnected'), 0), (pexpect.TIMEOUT, [7], 9, partial(a_send, '\r'), 180), (pexpect.TIMEOUT, [1, 5, 8], -1, ConnectionError('Boot process took more than {}s'.format(MAX_BOOT_TIME)), 0), (pexpect.TIMEOUT, [9], -1, ConnectionAuthenticationError('Unable to reconnect after reloading'), 0)]
fsm = FSM('RELOAD', self.device, events, transitions, timeout=600)
return fsm.run() |
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font =wx.Font(int(size+0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font | def function[get_wx_font, parameter[self, s, prop]]:
constant[
Return a wx font. Cache instances in a font dictionary for
efficiency
]
call[name[DEBUG_MSG], parameter[constant[get_wx_font()], constant[1], name[self]]]
variable[key] assign[=] call[name[hash], parameter[name[prop]]]
variable[fontprop] assign[=] name[prop]
variable[fontname] assign[=] call[name[fontprop].get_name, parameter[]]
variable[font] assign[=] call[name[self].fontd.get, parameter[name[key]]]
if compare[name[font] is_not constant[None]] begin[:]
return[name[font]]
variable[wxFontname] assign[=] call[name[self].fontnames.get, parameter[name[fontname], name[wx].ROMAN]]
variable[wxFacename] assign[=] constant[]
variable[size] assign[=] call[name[self].points_to_pixels, parameter[call[name[fontprop].get_size_in_points, parameter[]]]]
variable[font] assign[=] call[name[wx].Font, parameter[call[name[int], parameter[binary_operation[name[size] + constant[0.5]]]], name[wxFontname], call[name[self].fontangles][call[name[fontprop].get_style, parameter[]]], call[name[self].fontweights][call[name[fontprop].get_weight, parameter[]]], constant[False], name[wxFacename]]]
call[name[self].fontd][name[key]] assign[=] name[font]
return[name[font]] | keyword[def] identifier[get_wx_font] ( identifier[self] , identifier[s] , identifier[prop] ):
literal[string]
identifier[DEBUG_MSG] ( literal[string] , literal[int] , identifier[self] )
identifier[key] = identifier[hash] ( identifier[prop] )
identifier[fontprop] = identifier[prop]
identifier[fontname] = identifier[fontprop] . identifier[get_name] ()
identifier[font] = identifier[self] . identifier[fontd] . identifier[get] ( identifier[key] )
keyword[if] identifier[font] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[font]
identifier[wxFontname] = identifier[self] . identifier[fontnames] . identifier[get] ( identifier[fontname] , identifier[wx] . identifier[ROMAN] )
identifier[wxFacename] = literal[string]
identifier[size] = identifier[self] . identifier[points_to_pixels] ( identifier[fontprop] . identifier[get_size_in_points] ())
identifier[font] = identifier[wx] . identifier[Font] ( identifier[int] ( identifier[size] + literal[int] ),
identifier[wxFontname] ,
identifier[self] . identifier[fontangles] [ identifier[fontprop] . identifier[get_style] ()],
identifier[self] . identifier[fontweights] [ identifier[fontprop] . identifier[get_weight] ()],
keyword[False] ,
identifier[wxFacename] )
identifier[self] . identifier[fontd] [ identifier[key] ]= identifier[font]
keyword[return] identifier[font] | def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG('get_wx_font()', 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font # depends on [control=['if'], data=['font']]
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points()) # Size
# 'Generic' name
# Angle
# Weight
# Underline
font = wx.Font(int(size + 0.5), wxFontname, self.fontangles[fontprop.get_style()], self.fontweights[fontprop.get_weight()], False, wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font |
def get_serializer_class(self):
"""Augment base serializer class.
Include permissions information with objects.
"""
base_class = super().get_serializer_class()
class SerializerWithPermissions(base_class):
"""Augment serializer class."""
def get_fields(serializer_self): # pylint: disable=no-self-argument
"""Return serializer's fields."""
fields = super().get_fields()
fields['current_user_permissions'] = CurrentUserPermissionsSerializer(read_only=True)
return fields
def to_representation(serializer_self, instance): # pylint: disable=no-self-argument
"""Object serializer."""
data = super().to_representation(instance)
if ('fields' not in self.request.query_params
or 'current_user_permissions' in self.request.query_params['fields']):
data['current_user_permissions'] = get_object_perms(instance, self.request.user)
return data
return SerializerWithPermissions | def function[get_serializer_class, parameter[self]]:
constant[Augment base serializer class.
Include permissions information with objects.
]
variable[base_class] assign[=] call[call[name[super], parameter[]].get_serializer_class, parameter[]]
class class[SerializerWithPermissions, parameter[]] begin[:]
constant[Augment serializer class.]
def function[get_fields, parameter[serializer_self]]:
constant[Return serializer's fields.]
variable[fields] assign[=] call[call[name[super], parameter[]].get_fields, parameter[]]
call[name[fields]][constant[current_user_permissions]] assign[=] call[name[CurrentUserPermissionsSerializer], parameter[]]
return[name[fields]]
def function[to_representation, parameter[serializer_self, instance]]:
constant[Object serializer.]
variable[data] assign[=] call[call[name[super], parameter[]].to_representation, parameter[name[instance]]]
if <ast.BoolOp object at 0x7da1b1adc1c0> begin[:]
call[name[data]][constant[current_user_permissions]] assign[=] call[name[get_object_perms], parameter[name[instance], name[self].request.user]]
return[name[data]]
return[name[SerializerWithPermissions]] | keyword[def] identifier[get_serializer_class] ( identifier[self] ):
literal[string]
identifier[base_class] = identifier[super] (). identifier[get_serializer_class] ()
keyword[class] identifier[SerializerWithPermissions] ( identifier[base_class] ):
literal[string]
keyword[def] identifier[get_fields] ( identifier[serializer_self] ):
literal[string]
identifier[fields] = identifier[super] (). identifier[get_fields] ()
identifier[fields] [ literal[string] ]= identifier[CurrentUserPermissionsSerializer] ( identifier[read_only] = keyword[True] )
keyword[return] identifier[fields]
keyword[def] identifier[to_representation] ( identifier[serializer_self] , identifier[instance] ):
literal[string]
identifier[data] = identifier[super] (). identifier[to_representation] ( identifier[instance] )
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[self] . identifier[request] . identifier[query_params]
keyword[or] literal[string] keyword[in] identifier[self] . identifier[request] . identifier[query_params] [ literal[string] ]):
identifier[data] [ literal[string] ]= identifier[get_object_perms] ( identifier[instance] , identifier[self] . identifier[request] . identifier[user] )
keyword[return] identifier[data]
keyword[return] identifier[SerializerWithPermissions] | def get_serializer_class(self):
"""Augment base serializer class.
Include permissions information with objects.
"""
base_class = super().get_serializer_class()
class SerializerWithPermissions(base_class):
"""Augment serializer class."""
def get_fields(serializer_self): # pylint: disable=no-self-argument
"Return serializer's fields."
fields = super().get_fields()
fields['current_user_permissions'] = CurrentUserPermissionsSerializer(read_only=True)
return fields
def to_representation(serializer_self, instance): # pylint: disable=no-self-argument
'Object serializer.'
data = super().to_representation(instance)
if 'fields' not in self.request.query_params or 'current_user_permissions' in self.request.query_params['fields']:
data['current_user_permissions'] = get_object_perms(instance, self.request.user) # depends on [control=['if'], data=[]]
return data
return SerializerWithPermissions |
def basicConfig(level=logging.WARNING, transient_level=logging.NOTSET):
"""Shortcut for setting up transient logging
I am a replica of ``logging.basicConfig`` which installs a
transient logging handler to stderr.
"""
fmt = "%(asctime)s [%(levelname)s] [%(name)s:%(lineno)d] %(message)s"
logging.root.setLevel(transient_level) # <--- IMPORTANT
hand = TransientStreamHandler(level=level)
hand.setFormatter(logging.Formatter(fmt))
logging.root.addHandler(hand) | def function[basicConfig, parameter[level, transient_level]]:
constant[Shortcut for setting up transient logging
I am a replica of ``logging.basicConfig`` which installs a
transient logging handler to stderr.
]
variable[fmt] assign[=] constant[%(asctime)s [%(levelname)s] [%(name)s:%(lineno)d] %(message)s]
call[name[logging].root.setLevel, parameter[name[transient_level]]]
variable[hand] assign[=] call[name[TransientStreamHandler], parameter[]]
call[name[hand].setFormatter, parameter[call[name[logging].Formatter, parameter[name[fmt]]]]]
call[name[logging].root.addHandler, parameter[name[hand]]] | keyword[def] identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[WARNING] , identifier[transient_level] = identifier[logging] . identifier[NOTSET] ):
literal[string]
identifier[fmt] = literal[string]
identifier[logging] . identifier[root] . identifier[setLevel] ( identifier[transient_level] )
identifier[hand] = identifier[TransientStreamHandler] ( identifier[level] = identifier[level] )
identifier[hand] . identifier[setFormatter] ( identifier[logging] . identifier[Formatter] ( identifier[fmt] ))
identifier[logging] . identifier[root] . identifier[addHandler] ( identifier[hand] ) | def basicConfig(level=logging.WARNING, transient_level=logging.NOTSET):
"""Shortcut for setting up transient logging
I am a replica of ``logging.basicConfig`` which installs a
transient logging handler to stderr.
"""
fmt = '%(asctime)s [%(levelname)s] [%(name)s:%(lineno)d] %(message)s'
logging.root.setLevel(transient_level) # <--- IMPORTANT
hand = TransientStreamHandler(level=level)
hand.setFormatter(logging.Formatter(fmt))
logging.root.addHandler(hand) |
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant()
if role == Qt.DisplayRole or role == Qt.EditRole:
column = index.column()
row = index.row()
value = self.get_value(row, column)
if isinstance(value, float):
try:
return to_qvariant(self._format % value)
except (ValueError, TypeError):
# may happen if format = '%d' and value = NaN;
# see issue 4139
return to_qvariant(DEFAULT_FORMAT % value)
elif is_type_text_string(value):
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
return value
else:
try:
return to_qvariant(to_text_string(value))
except Exception:
self.display_error_idxs.append(index)
return u'Display Error!'
elif role == Qt.BackgroundColorRole:
return to_qvariant(self.get_bgcolor(index))
elif role == Qt.FontRole:
return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
elif role == Qt.ToolTipRole:
if index in self.display_error_idxs:
return _("It is not possible to display this value because\n"
"an error ocurred while trying to do it")
return to_qvariant() | def function[data, parameter[self, index, role]]:
constant[Cell content]
if <ast.UnaryOp object at 0x7da18f00e9b0> begin[:]
return[call[name[to_qvariant], parameter[]]]
if <ast.BoolOp object at 0x7da18f00c5e0> begin[:]
variable[column] assign[=] call[name[index].column, parameter[]]
variable[row] assign[=] call[name[index].row, parameter[]]
variable[value] assign[=] call[name[self].get_value, parameter[name[row], name[column]]]
if call[name[isinstance], parameter[name[value], name[float]]] begin[:]
<ast.Try object at 0x7da18f00db70>
return[call[name[to_qvariant], parameter[]]] | keyword[def] identifier[data] ( identifier[self] , identifier[index] , identifier[role] = identifier[Qt] . identifier[DisplayRole] ):
literal[string]
keyword[if] keyword[not] identifier[index] . identifier[isValid] ():
keyword[return] identifier[to_qvariant] ()
keyword[if] identifier[role] == identifier[Qt] . identifier[DisplayRole] keyword[or] identifier[role] == identifier[Qt] . identifier[EditRole] :
identifier[column] = identifier[index] . identifier[column] ()
identifier[row] = identifier[index] . identifier[row] ()
identifier[value] = identifier[self] . identifier[get_value] ( identifier[row] , identifier[column] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[float] ):
keyword[try] :
keyword[return] identifier[to_qvariant] ( identifier[self] . identifier[_format] % identifier[value] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[return] identifier[to_qvariant] ( identifier[DEFAULT_FORMAT] % identifier[value] )
keyword[elif] identifier[is_type_text_string] ( identifier[value] ):
keyword[return] identifier[value]
keyword[else] :
keyword[try] :
keyword[return] identifier[to_qvariant] ( identifier[to_text_string] ( identifier[value] ))
keyword[except] identifier[Exception] :
identifier[self] . identifier[display_error_idxs] . identifier[append] ( identifier[index] )
keyword[return] literal[string]
keyword[elif] identifier[role] == identifier[Qt] . identifier[BackgroundColorRole] :
keyword[return] identifier[to_qvariant] ( identifier[self] . identifier[get_bgcolor] ( identifier[index] ))
keyword[elif] identifier[role] == identifier[Qt] . identifier[FontRole] :
keyword[return] identifier[to_qvariant] ( identifier[get_font] ( identifier[font_size_delta] = identifier[DEFAULT_SMALL_DELTA] ))
keyword[elif] identifier[role] == identifier[Qt] . identifier[ToolTipRole] :
keyword[if] identifier[index] keyword[in] identifier[self] . identifier[display_error_idxs] :
keyword[return] identifier[_] ( literal[string]
literal[string] )
keyword[return] identifier[to_qvariant] () | def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant() # depends on [control=['if'], data=[]]
if role == Qt.DisplayRole or role == Qt.EditRole:
column = index.column()
row = index.row()
value = self.get_value(row, column)
if isinstance(value, float):
try:
return to_qvariant(self._format % value) # depends on [control=['try'], data=[]]
except (ValueError, TypeError): # may happen if format = '%d' and value = NaN;
# see issue 4139
return to_qvariant(DEFAULT_FORMAT % value) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif is_type_text_string(value): # Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
return value # depends on [control=['if'], data=[]]
else:
try:
return to_qvariant(to_text_string(value)) # depends on [control=['try'], data=[]]
except Exception:
self.display_error_idxs.append(index)
return u'Display Error!' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif role == Qt.BackgroundColorRole:
return to_qvariant(self.get_bgcolor(index)) # depends on [control=['if'], data=[]]
elif role == Qt.FontRole:
return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA)) # depends on [control=['if'], data=[]]
elif role == Qt.ToolTipRole:
if index in self.display_error_idxs:
return _('It is not possible to display this value because\nan error ocurred while trying to do it') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return to_qvariant() |
def get_data_home(data_home=None):
"""Return the path of the msmbuilder data dir.
As of msmbuilder v3.6, this function will prefer data downloaded via
the msmb_data conda package (and located within the python installation
directory). If this package exists, we will use its data directory as
the data home. Otherwise, we use the old logic:
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'msmbuilder_data'
in the user's home folder.
Alternatively, it can be set by the 'MSMBUILDER_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is not None:
return _expand_and_makedir(data_home)
msmb_data = has_msmb_data()
if msmb_data is not None:
return _expand_and_makedir(msmb_data)
data_home = environ.get('MSMBUILDER_DATA', join('~', 'msmbuilder_data'))
return _expand_and_makedir(data_home) | def function[get_data_home, parameter[data_home]]:
constant[Return the path of the msmbuilder data dir.
As of msmbuilder v3.6, this function will prefer data downloaded via
the msmb_data conda package (and located within the python installation
directory). If this package exists, we will use its data directory as
the data home. Otherwise, we use the old logic:
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'msmbuilder_data'
in the user's home folder.
Alternatively, it can be set by the 'MSMBUILDER_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
]
if compare[name[data_home] is_not constant[None]] begin[:]
return[call[name[_expand_and_makedir], parameter[name[data_home]]]]
variable[msmb_data] assign[=] call[name[has_msmb_data], parameter[]]
if compare[name[msmb_data] is_not constant[None]] begin[:]
return[call[name[_expand_and_makedir], parameter[name[msmb_data]]]]
variable[data_home] assign[=] call[name[environ].get, parameter[constant[MSMBUILDER_DATA], call[name[join], parameter[constant[~], constant[msmbuilder_data]]]]]
return[call[name[_expand_and_makedir], parameter[name[data_home]]]] | keyword[def] identifier[get_data_home] ( identifier[data_home] = keyword[None] ):
literal[string]
keyword[if] identifier[data_home] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[_expand_and_makedir] ( identifier[data_home] )
identifier[msmb_data] = identifier[has_msmb_data] ()
keyword[if] identifier[msmb_data] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[_expand_and_makedir] ( identifier[msmb_data] )
identifier[data_home] = identifier[environ] . identifier[get] ( literal[string] , identifier[join] ( literal[string] , literal[string] ))
keyword[return] identifier[_expand_and_makedir] ( identifier[data_home] ) | def get_data_home(data_home=None):
"""Return the path of the msmbuilder data dir.
As of msmbuilder v3.6, this function will prefer data downloaded via
the msmb_data conda package (and located within the python installation
directory). If this package exists, we will use its data directory as
the data home. Otherwise, we use the old logic:
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'msmbuilder_data'
in the user's home folder.
Alternatively, it can be set by the 'MSMBUILDER_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is not None:
return _expand_and_makedir(data_home) # depends on [control=['if'], data=['data_home']]
msmb_data = has_msmb_data()
if msmb_data is not None:
return _expand_and_makedir(msmb_data) # depends on [control=['if'], data=['msmb_data']]
data_home = environ.get('MSMBUILDER_DATA', join('~', 'msmbuilder_data'))
return _expand_and_makedir(data_home) |
def close(self):
"""
Disconnect and close *Vim*.
"""
self._tempfile.close()
self._process.terminate()
if self._process.is_alive():
self._process.kill() | def function[close, parameter[self]]:
constant[
Disconnect and close *Vim*.
]
call[name[self]._tempfile.close, parameter[]]
call[name[self]._process.terminate, parameter[]]
if call[name[self]._process.is_alive, parameter[]] begin[:]
call[name[self]._process.kill, parameter[]] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_tempfile] . identifier[close] ()
identifier[self] . identifier[_process] . identifier[terminate] ()
keyword[if] identifier[self] . identifier[_process] . identifier[is_alive] ():
identifier[self] . identifier[_process] . identifier[kill] () | def close(self):
"""
Disconnect and close *Vim*.
"""
self._tempfile.close()
self._process.terminate()
if self._process.is_alive():
self._process.kill() # depends on [control=['if'], data=[]] |
def xml_row(row, lang):
'''
Generator for an XML row
'''
for elem in row:
name = elem.get('name')
child = elem[0]
ftype = re.sub(r'\{[^}]+\}', '', child.tag)
if ftype == 'literal':
ftype = '{}, {}'.format(ftype, child.attrib.get(XML_LANG, 'none'))
yield (name, (child.text, ftype)) | def function[xml_row, parameter[row, lang]]:
constant[
Generator for an XML row
]
for taget[name[elem]] in starred[name[row]] begin[:]
variable[name] assign[=] call[name[elem].get, parameter[constant[name]]]
variable[child] assign[=] call[name[elem]][constant[0]]
variable[ftype] assign[=] call[name[re].sub, parameter[constant[\{[^}]+\}], constant[], name[child].tag]]
if compare[name[ftype] equal[==] constant[literal]] begin[:]
variable[ftype] assign[=] call[constant[{}, {}].format, parameter[name[ftype], call[name[child].attrib.get, parameter[name[XML_LANG], constant[none]]]]]
<ast.Yield object at 0x7da18f09cd30> | keyword[def] identifier[xml_row] ( identifier[row] , identifier[lang] ):
literal[string]
keyword[for] identifier[elem] keyword[in] identifier[row] :
identifier[name] = identifier[elem] . identifier[get] ( literal[string] )
identifier[child] = identifier[elem] [ literal[int] ]
identifier[ftype] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[child] . identifier[tag] )
keyword[if] identifier[ftype] == literal[string] :
identifier[ftype] = literal[string] . identifier[format] ( identifier[ftype] , identifier[child] . identifier[attrib] . identifier[get] ( identifier[XML_LANG] , literal[string] ))
keyword[yield] ( identifier[name] ,( identifier[child] . identifier[text] , identifier[ftype] )) | def xml_row(row, lang):
"""
Generator for an XML row
"""
for elem in row:
name = elem.get('name')
child = elem[0]
ftype = re.sub('\\{[^}]+\\}', '', child.tag)
if ftype == 'literal':
ftype = '{}, {}'.format(ftype, child.attrib.get(XML_LANG, 'none')) # depends on [control=['if'], data=['ftype']]
yield (name, (child.text, ftype)) # depends on [control=['for'], data=['elem']] |
def name(self):
""" Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist. """
alias = getattr(self, 'alias', None)
if alias:
return alias
caption = getattr(self, 'caption', None)
if caption:
return caption
return self.id | def function[name, parameter[self]]:
constant[ Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist. ]
variable[alias] assign[=] call[name[getattr], parameter[name[self], constant[alias], constant[None]]]
if name[alias] begin[:]
return[name[alias]]
variable[caption] assign[=] call[name[getattr], parameter[name[self], constant[caption], constant[None]]]
if name[caption] begin[:]
return[name[caption]]
return[name[self].id] | keyword[def] identifier[name] ( identifier[self] ):
literal[string]
identifier[alias] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] )
keyword[if] identifier[alias] :
keyword[return] identifier[alias]
identifier[caption] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] )
keyword[if] identifier[caption] :
keyword[return] identifier[caption]
keyword[return] identifier[self] . identifier[id] | def name(self):
""" Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist. """
alias = getattr(self, 'alias', None)
if alias:
return alias # depends on [control=['if'], data=[]]
caption = getattr(self, 'caption', None)
if caption:
return caption # depends on [control=['if'], data=[]]
return self.id |
def varReplace(basedir, raw, vars, lookup_fatal=True, depth=0, expand_lists=False):
''' Perform variable replacement of $variables in string raw using vars dictionary '''
# this code originally from yum
if (depth > 20):
raise errors.AnsibleError("template recursion depth exceeded")
done = [] # Completed chunks to return
while raw:
m = _varFind(basedir, raw, vars, lookup_fatal, depth)
if not m:
done.append(raw)
break
# Determine replacement value (if unknown variable then preserve
# original)
replacement = m['replacement']
if expand_lists and isinstance(replacement, (list, tuple)):
replacement = ",".join(replacement)
if isinstance(replacement, (str, unicode)):
replacement = varReplace(basedir, replacement, vars, lookup_fatal, depth=depth+1, expand_lists=expand_lists)
if replacement is None:
replacement = raw[m['start']:m['end']]
start, end = m['start'], m['end']
done.append(raw[:start]) # Keep stuff leading up to token
done.append(unicode(replacement)) # Append replacement value
raw = raw[end:] # Continue with remainder of string
return ''.join(done) | def function[varReplace, parameter[basedir, raw, vars, lookup_fatal, depth, expand_lists]]:
constant[ Perform variable replacement of $variables in string raw using vars dictionary ]
if compare[name[depth] greater[>] constant[20]] begin[:]
<ast.Raise object at 0x7da1b1386cb0>
variable[done] assign[=] list[[]]
while name[raw] begin[:]
variable[m] assign[=] call[name[_varFind], parameter[name[basedir], name[raw], name[vars], name[lookup_fatal], name[depth]]]
if <ast.UnaryOp object at 0x7da1b14c7010> begin[:]
call[name[done].append, parameter[name[raw]]]
break
variable[replacement] assign[=] call[name[m]][constant[replacement]]
if <ast.BoolOp object at 0x7da1b14c5c90> begin[:]
variable[replacement] assign[=] call[constant[,].join, parameter[name[replacement]]]
if call[name[isinstance], parameter[name[replacement], tuple[[<ast.Name object at 0x7da1b14c5060>, <ast.Name object at 0x7da1b14c7700>]]]] begin[:]
variable[replacement] assign[=] call[name[varReplace], parameter[name[basedir], name[replacement], name[vars], name[lookup_fatal]]]
if compare[name[replacement] is constant[None]] begin[:]
variable[replacement] assign[=] call[name[raw]][<ast.Slice object at 0x7da1b14c5c00>]
<ast.Tuple object at 0x7da1b14c7c10> assign[=] tuple[[<ast.Subscript object at 0x7da1b14c5030>, <ast.Subscript object at 0x7da1b14c6ce0>]]
call[name[done].append, parameter[call[name[raw]][<ast.Slice object at 0x7da1b14c49a0>]]]
call[name[done].append, parameter[call[name[unicode], parameter[name[replacement]]]]]
variable[raw] assign[=] call[name[raw]][<ast.Slice object at 0x7da1b14c5cc0>]
return[call[constant[].join, parameter[name[done]]]] | keyword[def] identifier[varReplace] ( identifier[basedir] , identifier[raw] , identifier[vars] , identifier[lookup_fatal] = keyword[True] , identifier[depth] = literal[int] , identifier[expand_lists] = keyword[False] ):
literal[string]
keyword[if] ( identifier[depth] > literal[int] ):
keyword[raise] identifier[errors] . identifier[AnsibleError] ( literal[string] )
identifier[done] =[]
keyword[while] identifier[raw] :
identifier[m] = identifier[_varFind] ( identifier[basedir] , identifier[raw] , identifier[vars] , identifier[lookup_fatal] , identifier[depth] )
keyword[if] keyword[not] identifier[m] :
identifier[done] . identifier[append] ( identifier[raw] )
keyword[break]
identifier[replacement] = identifier[m] [ literal[string] ]
keyword[if] identifier[expand_lists] keyword[and] identifier[isinstance] ( identifier[replacement] ,( identifier[list] , identifier[tuple] )):
identifier[replacement] = literal[string] . identifier[join] ( identifier[replacement] )
keyword[if] identifier[isinstance] ( identifier[replacement] ,( identifier[str] , identifier[unicode] )):
identifier[replacement] = identifier[varReplace] ( identifier[basedir] , identifier[replacement] , identifier[vars] , identifier[lookup_fatal] , identifier[depth] = identifier[depth] + literal[int] , identifier[expand_lists] = identifier[expand_lists] )
keyword[if] identifier[replacement] keyword[is] keyword[None] :
identifier[replacement] = identifier[raw] [ identifier[m] [ literal[string] ]: identifier[m] [ literal[string] ]]
identifier[start] , identifier[end] = identifier[m] [ literal[string] ], identifier[m] [ literal[string] ]
identifier[done] . identifier[append] ( identifier[raw] [: identifier[start] ])
identifier[done] . identifier[append] ( identifier[unicode] ( identifier[replacement] ))
identifier[raw] = identifier[raw] [ identifier[end] :]
keyword[return] literal[string] . identifier[join] ( identifier[done] ) | def varReplace(basedir, raw, vars, lookup_fatal=True, depth=0, expand_lists=False):
""" Perform variable replacement of $variables in string raw using vars dictionary """
# this code originally from yum
if depth > 20:
raise errors.AnsibleError('template recursion depth exceeded') # depends on [control=['if'], data=[]]
done = [] # Completed chunks to return
while raw:
m = _varFind(basedir, raw, vars, lookup_fatal, depth)
if not m:
done.append(raw)
break # depends on [control=['if'], data=[]]
# Determine replacement value (if unknown variable then preserve
# original)
replacement = m['replacement']
if expand_lists and isinstance(replacement, (list, tuple)):
replacement = ','.join(replacement) # depends on [control=['if'], data=[]]
if isinstance(replacement, (str, unicode)):
replacement = varReplace(basedir, replacement, vars, lookup_fatal, depth=depth + 1, expand_lists=expand_lists) # depends on [control=['if'], data=[]]
if replacement is None:
replacement = raw[m['start']:m['end']] # depends on [control=['if'], data=['replacement']]
(start, end) = (m['start'], m['end'])
done.append(raw[:start]) # Keep stuff leading up to token
done.append(unicode(replacement)) # Append replacement value
raw = raw[end:] # Continue with remainder of string # depends on [control=['while'], data=[]]
return ''.join(done) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.