code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def to_json(self):
"""Convert the STAT object to a dictionary."""
def jsonify_dict(base_dict):
new_dict = {}
for key, val in base_dict.items():
if isinstance(val, list):
new_dict[key] = [v.to_json() for v in val]
else:
new_dict[key] = val.to_json()
return new_dict
return {
'location': self.location.to_json(),
'ashrae_climate_zone': self.ashrae_climate_zone,
'koppen_climate_zone': self.koppen_climate_zone,
'extreme_cold_week': self.extreme_cold_week.to_json()
if self.extreme_cold_week else None,
'extreme_hot_week': self.extreme_hot_week.to_json()
if self.extreme_cold_week else None,
'typical_weeks': jsonify_dict(self._typical_weeks),
'heating_dict': self._winter_des_day_dict,
'cooling_dict': self._summer_des_day_dict,
"monthly_db_50": self._monthly_db_50,
"monthly_wb_50": self._monthly_wb_50,
"monthly_db_range_50": self._monthly_db_range_50,
"monthly_wb_range_50": self._monthly_wb_range_50,
"monthly_db_100": self._monthly_db_100,
"monthly_wb_100": self._monthly_wb_100,
"monthly_db_20": self._monthly_db_20,
"monthly_wb_20": self._monthly_wb_20,
"monthly_db_04": self._monthly_db_04,
"monthly_wb_04": self._monthly_wb_04,
"monthly_wind": self._monthly_wind,
"monthly_wind_dirs": self._monthly_wind_dirs,
"standard_pressure_at_elev": self.standard_pressure_at_elev,
"monthly_tau_beam": self.monthly_tau_beam,
"monthly_tau_diffuse": self.monthly_tau_diffuse
} | def function[to_json, parameter[self]]:
constant[Convert the STAT object to a dictionary.]
def function[jsonify_dict, parameter[base_dict]]:
variable[new_dict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1295540>, <ast.Name object at 0x7da1b12965f0>]]] in starred[call[name[base_dict].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[val], name[list]]] begin[:]
call[name[new_dict]][name[key]] assign[=] <ast.ListComp object at 0x7da1b1296ec0>
return[name[new_dict]]
return[dictionary[[<ast.Constant object at 0x7da1b12949a0>, <ast.Constant object at 0x7da1b1296260>, <ast.Constant object at 0x7da1b1294ee0>, <ast.Constant object at 0x7da1b1294340>, <ast.Constant object at 0x7da1b1297a90>, <ast.Constant object at 0x7da1b12977f0>, <ast.Constant object at 0x7da1b1294430>, <ast.Constant object at 0x7da1b1294640>, <ast.Constant object at 0x7da1b12967d0>, <ast.Constant object at 0x7da1b1295bd0>, <ast.Constant object at 0x7da1b1295690>, <ast.Constant object at 0x7da1b1295150>, <ast.Constant object at 0x7da1b12944c0>, <ast.Constant object at 0x7da1b1296b30>, <ast.Constant object at 0x7da1b1294130>, <ast.Constant object at 0x7da1b1294220>, <ast.Constant object at 0x7da1b1294160>, <ast.Constant object at 0x7da1b1294b50>, <ast.Constant object at 0x7da1b12944f0>, <ast.Constant object at 0x7da1b1295fc0>, <ast.Constant object at 0x7da1b1295120>, <ast.Constant object at 0x7da1b1297880>, <ast.Constant object at 0x7da1b1297ca0>], [<ast.Call object at 0x7da1b12969e0>, <ast.Attribute object at 0x7da1b12951b0>, <ast.Attribute object at 0x7da1b1296980>, <ast.IfExp object at 0x7da1b1295300>, <ast.IfExp object at 0x7da1b12949d0>, <ast.Call object at 0x7da1b1295f00>, <ast.Attribute object at 0x7da1b1297a30>, <ast.Attribute object at 0x7da1b1297d30>, <ast.Attribute object at 0x7da1b1297580>, <ast.Attribute object at 0x7da1b1294100>, <ast.Attribute object at 0x7da1b1297550>, <ast.Attribute object at 0x7da1b12960e0>, <ast.Attribute object at 0x7da1b1294b80>, <ast.Attribute object at 0x7da1b1296c80>, <ast.Attribute object at 0x7da1b1294550>, <ast.Attribute object at 0x7da1b1297bb0>, <ast.Attribute object at 0x7da1b1297460>, <ast.Attribute object at 0x7da1b12978e0>, <ast.Attribute object at 0x7da1b12941f0>, <ast.Attribute object at 0x7da1b1295570>, <ast.Attribute object at 0x7da1b1295a80>, <ast.Attribute object at 0x7da1b1295f30>, <ast.Attribute object at 0x7da1b1295060>]]] | keyword[def] identifier[to_json] ( identifier[self] ):
literal[string]
keyword[def] identifier[jsonify_dict] ( identifier[base_dict] ):
identifier[new_dict] ={}
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[base_dict] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[val] , identifier[list] ):
identifier[new_dict] [ identifier[key] ]=[ identifier[v] . identifier[to_json] () keyword[for] identifier[v] keyword[in] identifier[val] ]
keyword[else] :
identifier[new_dict] [ identifier[key] ]= identifier[val] . identifier[to_json] ()
keyword[return] identifier[new_dict]
keyword[return] {
literal[string] : identifier[self] . identifier[location] . identifier[to_json] (),
literal[string] : identifier[self] . identifier[ashrae_climate_zone] ,
literal[string] : identifier[self] . identifier[koppen_climate_zone] ,
literal[string] : identifier[self] . identifier[extreme_cold_week] . identifier[to_json] ()
keyword[if] identifier[self] . identifier[extreme_cold_week] keyword[else] keyword[None] ,
literal[string] : identifier[self] . identifier[extreme_hot_week] . identifier[to_json] ()
keyword[if] identifier[self] . identifier[extreme_cold_week] keyword[else] keyword[None] ,
literal[string] : identifier[jsonify_dict] ( identifier[self] . identifier[_typical_weeks] ),
literal[string] : identifier[self] . identifier[_winter_des_day_dict] ,
literal[string] : identifier[self] . identifier[_summer_des_day_dict] ,
literal[string] : identifier[self] . identifier[_monthly_db_50] ,
literal[string] : identifier[self] . identifier[_monthly_wb_50] ,
literal[string] : identifier[self] . identifier[_monthly_db_range_50] ,
literal[string] : identifier[self] . identifier[_monthly_wb_range_50] ,
literal[string] : identifier[self] . identifier[_monthly_db_100] ,
literal[string] : identifier[self] . identifier[_monthly_wb_100] ,
literal[string] : identifier[self] . identifier[_monthly_db_20] ,
literal[string] : identifier[self] . identifier[_monthly_wb_20] ,
literal[string] : identifier[self] . identifier[_monthly_db_04] ,
literal[string] : identifier[self] . identifier[_monthly_wb_04] ,
literal[string] : identifier[self] . identifier[_monthly_wind] ,
literal[string] : identifier[self] . identifier[_monthly_wind_dirs] ,
literal[string] : identifier[self] . identifier[standard_pressure_at_elev] ,
literal[string] : identifier[self] . identifier[monthly_tau_beam] ,
literal[string] : identifier[self] . identifier[monthly_tau_diffuse]
} | def to_json(self):
"""Convert the STAT object to a dictionary."""
def jsonify_dict(base_dict):
new_dict = {}
for (key, val) in base_dict.items():
if isinstance(val, list):
new_dict[key] = [v.to_json() for v in val] # depends on [control=['if'], data=[]]
else:
new_dict[key] = val.to_json() # depends on [control=['for'], data=[]]
return new_dict
return {'location': self.location.to_json(), 'ashrae_climate_zone': self.ashrae_climate_zone, 'koppen_climate_zone': self.koppen_climate_zone, 'extreme_cold_week': self.extreme_cold_week.to_json() if self.extreme_cold_week else None, 'extreme_hot_week': self.extreme_hot_week.to_json() if self.extreme_cold_week else None, 'typical_weeks': jsonify_dict(self._typical_weeks), 'heating_dict': self._winter_des_day_dict, 'cooling_dict': self._summer_des_day_dict, 'monthly_db_50': self._monthly_db_50, 'monthly_wb_50': self._monthly_wb_50, 'monthly_db_range_50': self._monthly_db_range_50, 'monthly_wb_range_50': self._monthly_wb_range_50, 'monthly_db_100': self._monthly_db_100, 'monthly_wb_100': self._monthly_wb_100, 'monthly_db_20': self._monthly_db_20, 'monthly_wb_20': self._monthly_wb_20, 'monthly_db_04': self._monthly_db_04, 'monthly_wb_04': self._monthly_wb_04, 'monthly_wind': self._monthly_wind, 'monthly_wind_dirs': self._monthly_wind_dirs, 'standard_pressure_at_elev': self.standard_pressure_at_elev, 'monthly_tau_beam': self.monthly_tau_beam, 'monthly_tau_diffuse': self.monthly_tau_diffuse} |
def getdatetime(timedateformat='complete'):
    """
    Get the current date or time in a specific format.
    :type timedateformat: string
    :param timedateformat: The type of date to query for. Can be: day, month, year, hour, minute, second, millisecond, yearmonthday, daymonthyear, hourminutesecond, secondminutehour, complete, datetime or timedate.
    :raises ValueError: if ``timedateformat`` is not one of the names above.
    """
    timedateformat = timedateformat.lower()
    # BUG FIX: capture "now" exactly once. The original re-queried the
    # clock for every component, so compound formats (e.g. daymonthyear,
    # secondminutehour) could mix components from two different instants
    # when the call straddled a second/minute/day boundary.
    now = str(datetime.datetime.now())
    datepart, timepart = now.split(' ')
    year, month, day = datepart.split('-')
    # partition() is safe even when the microsecond field is absent
    # (the original indexed split('.')[1] and crashed in that case).
    timemain, _, millisecond = timepart.partition('.')
    hour, minute, second = timemain.split(':')
    formats = {
        'day': day,
        'month': month,
        'year': year,
        'hour': hour,
        'minute': minute,
        'second': second,
        'millisecond': millisecond,
        'yearmonthday': datepart,
        'daymonthyear': '-'.join((day, month, year)),
        'hourminutesecond': timemain,
        'secondminutehour': ':'.join((second, minute, hour)),
        'complete': now,
        'datetime': datepart + ' ' + timemain,
        'timedate': timemain + ' ' + datepart,
    }
    if timedateformat not in formats:
        raise ValueError("Invalid time date format used.")
    return formats[timedateformat]
constant[
Get the current date or time in a specific format.
:type timedateformat: string
:param timedateformat: The type of date to query for. Can be: day, month, year, hour, minute, second, millisecond, yearmonthday, daymonthyear, hourminutesecond, secondminutehour, complete, datetime or timedate.
]
variable[timedateformat] assign[=] call[name[timedateformat].lower, parameter[]]
if compare[name[timedateformat] equal[==] constant[day]] begin[:]
return[call[call[call[call[call[name[str], parameter[call[name[datetime].datetime.now, parameter[]]]].split, parameter[constant[ ]]]][constant[0]].split, parameter[constant[-]]]][constant[2]]] | keyword[def] identifier[getdatetime] ( identifier[timedateformat] = literal[string] ):
literal[string]
identifier[timedateformat] = identifier[timedateformat] . identifier[lower] ()
keyword[if] identifier[timedateformat] == literal[string] :
keyword[return] (( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] (( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] (( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] ((( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] ((( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] ((( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] ( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] ( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] (( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] (
literal[string] )[ literal[int] ]+ literal[string] +(( identifier[str] (
identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] +(
( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] (( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] ((( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
). identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] +((( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] (
literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] +(
(( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]
). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] ( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[timedateformat] == literal[string] :
keyword[return] (( identifier[str] (
identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] +(
( identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())). identifier[split] ( literal[string] )[ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def getdatetime(timedateformat='complete'):
"""
Get the current date or time in a specific format.
:type timedateformat: string
:param timedateformat: The type of date to query for. Can be: day, month, year, hour, minute, second, millisecond, yearmonthday, daymonthyear, hourminutesecond, secondminutehour, complete, datetime or timedate.
"""
timedateformat = timedateformat.lower()
if timedateformat == 'day':
return str(datetime.datetime.now()).split(' ')[0].split('-')[2] # depends on [control=['if'], data=[]]
elif timedateformat == 'month':
return str(datetime.datetime.now()).split(' ')[0].split('-')[1] # depends on [control=['if'], data=[]]
elif timedateformat == 'year':
return str(datetime.datetime.now()).split(' ')[0].split('-')[0] # depends on [control=['if'], data=[]]
elif timedateformat == 'hour':
return str(datetime.datetime.now()).split(' ')[1].split('.')[0].split(':')[0] # depends on [control=['if'], data=[]]
elif timedateformat == 'minute':
return str(datetime.datetime.now()).split(' ')[1].split('.')[0].split(':')[1] # depends on [control=['if'], data=[]]
elif timedateformat == 'second':
return str(datetime.datetime.now()).split(' ')[1].split('.')[0].split(':')[2] # depends on [control=['if'], data=[]]
elif timedateformat == 'millisecond':
return str(datetime.datetime.now()).split('.')[1] # depends on [control=['if'], data=[]]
elif timedateformat == 'yearmonthday':
return str(datetime.datetime.now()).split(' ')[0] # depends on [control=['if'], data=[]]
elif timedateformat == 'daymonthyear':
return str(datetime.datetime.now()).split(' ')[0].split('-')[2] + '-' + str(datetime.datetime.now()).split(' ')[0].split('-')[1] + '-' + str(datetime.datetime.now()).split(' ')[0].split('-')[0] # depends on [control=['if'], data=[]]
elif timedateformat == 'hourminutesecond':
return str(datetime.datetime.now()).split(' ')[1].split('.')[0] # depends on [control=['if'], data=[]]
elif timedateformat == 'secondminutehour':
return str(datetime.datetime.now()).split(' ')[1].split('.')[0].split(':')[2] + ':' + str(datetime.datetime.now()).split(' ')[1].split('.')[0].split(':')[1] + ':' + str(datetime.datetime.now()).split(' ')[1].split('.')[0].split(':')[0] # depends on [control=['if'], data=[]]
elif timedateformat == 'complete':
return str(datetime.datetime.now()) # depends on [control=['if'], data=[]]
elif timedateformat == 'datetime':
return str(datetime.datetime.now()).split('.')[0] # depends on [control=['if'], data=[]]
elif timedateformat == 'timedate':
return str(datetime.datetime.now()).split('.')[0].split(' ')[1] + ' ' + str(datetime.datetime.now()).split('.')[0].split(' ')[0] # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid time date format used.') |
def species_list(what_list):
    '''
    provide default lists of elements to plot.

    what_list : string
        String name of species lists provided.
        If what_list is "CNONe", then C, N, O and some other light
        elements.
        If what_list is "sprocess", then s-process indicators.
        Other available lists: "burn_stages" and "list_marco_1".

    Raises ValueError if what_list is not a known list name.
    '''
    # BUG FIX: the original compared strings with `is`, which only works
    # by accident of CPython literal interning and fails for equal strings
    # built at runtime; it also raised UnboundLocalError for unknown names.
    known_lists = {
        'CNONe': ['H-1', 'He-4', 'C-12', 'N-14', 'O-16', 'Ne-20'],
        'sprocess': ['Fe-56', 'Ge-70', 'Zn-70', 'Se-76', 'Kr-80', 'Kr-82',
                     'Kr-86', 'Sr-88', 'Ba-138', 'Pb-208'],
        'burn_stages': ['H-1', 'He-4', 'C-12', 'O-16', 'Ne-20', 'Si-28'],
        'list_marco_1': ['C-12', 'O-16', 'Ne-20', 'Ne-22', 'Na-23', 'Fe-54',
                         'Fe-56', 'Zn-70', 'Ge-70', 'Se-76', 'Kr-80', 'Kr-82',
                         'Sr-88', 'Y-89', 'Zr-96', 'Te-124', 'Xe-130',
                         'Xe-134', 'Ba-138'],
    }
    try:
        return known_lists[what_list]
    except KeyError:
        raise ValueError('Unknown species list: {}'.format(what_list))
constant[
provide default lists of elements to plot.
what_list : string
String name of species lists provided.
If what_list is "CNONe", then C, N, O and some other light
elements.
If what_list is "s-process", then s-process indicators.
]
if compare[name[what_list] is constant[CNONe]] begin[:]
variable[list_to_print] assign[=] list[[<ast.Constant object at 0x7da1b1a1b100>, <ast.Constant object at 0x7da1b1a1ba90>, <ast.Constant object at 0x7da1b1a19ff0>, <ast.Constant object at 0x7da1b1a1bac0>, <ast.Constant object at 0x7da1b1a1b490>, <ast.Constant object at 0x7da1b1a1baf0>]]
return[name[list_to_print]] | keyword[def] identifier[species_list] ( identifier[what_list] ):
literal[string]
keyword[if] identifier[what_list] keyword[is] literal[string] :
identifier[list_to_print] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[elif] identifier[what_list] keyword[is] literal[string] :
identifier[list_to_print] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[elif] identifier[what_list] keyword[is] literal[string] :
identifier[list_to_print] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[elif] identifier[what_list] keyword[is] literal[string] :
identifier[list_to_print] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[return] identifier[list_to_print] | def species_list(what_list):
"""
provide default lists of elements to plot.
what_list : string
String name of species lists provided.
If what_list is "CNONe", then C, N, O and some other light
elements.
If what_list is "s-process", then s-process indicators.
"""
if what_list is 'CNONe':
list_to_print = ['H-1', 'He-4', 'C-12', 'N-14', 'O-16', 'Ne-20'] # depends on [control=['if'], data=[]]
elif what_list is 'sprocess':
list_to_print = ['Fe-56', 'Ge-70', 'Zn-70', 'Se-76', 'Kr-80', 'Kr-82', 'Kr-86', 'Sr-88', 'Ba-138', 'Pb-208'] # depends on [control=['if'], data=[]]
elif what_list is 'burn_stages':
list_to_print = ['H-1', 'He-4', 'C-12', 'O-16', 'Ne-20', 'Si-28'] # depends on [control=['if'], data=[]]
elif what_list is 'list_marco_1':
list_to_print = ['C-12', 'O-16', 'Ne-20', 'Ne-22', 'Na-23', 'Fe-54', 'Fe-56', 'Zn-70', 'Ge-70', 'Se-76', 'Kr-80', 'Kr-82', 'Sr-88', 'Y-89', 'Zr-96', 'Te-124', 'Xe-130', 'Xe-134', 'Ba-138'] # depends on [control=['if'], data=[]]
return list_to_print |
async def async_set_state(self, data):
    """Recall scene to group.

    Sends ``data`` to the deCONZ state callback, addressed at this
    group's ``/recall`` endpoint.
    """
    recall_field = '{}/recall'.format(self._deconz_id)
    await self._async_set_state_callback(recall_field, data)
literal[string]
identifier[field] = identifier[self] . identifier[_deconz_id] + literal[string]
keyword[await] identifier[self] . identifier[_async_set_state_callback] ( identifier[field] , identifier[data] ) | async def async_set_state(self, data):
"""Recall scene to group."""
field = self._deconz_id + '/recall'
await self._async_set_state_callback(field, data) |
def from_pandas(cls, index):
    """Build a baloo Index out of a pandas Index.

    Parameters
    ----------
    index : pandas.base.Index
        The pandas index to convert.

    Returns
    -------
    Index
        A baloo Index carrying the same values, dtype and name.
    """
    # Imported lazily so pandas is only required when converting.
    from pandas import Index as PandasIndex

    check_type(index, PandasIndex)

    values, dtype, name = index.values, index.dtype, index.name
    return Index(values, dtype, name)
constant[Create baloo Index from pandas Index.
Parameters
----------
index : pandas.base.Index
Returns
-------
Index
]
from relative_module[pandas] import module[Index]
call[name[check_type], parameter[name[index], name[PandasIndex]]]
return[call[name[Index], parameter[name[index].values, name[index].dtype, name[index].name]]] | keyword[def] identifier[from_pandas] ( identifier[cls] , identifier[index] ):
literal[string]
keyword[from] identifier[pandas] keyword[import] identifier[Index] keyword[as] identifier[PandasIndex]
identifier[check_type] ( identifier[index] , identifier[PandasIndex] )
keyword[return] identifier[Index] ( identifier[index] . identifier[values] ,
identifier[index] . identifier[dtype] ,
identifier[index] . identifier[name] ) | def from_pandas(cls, index):
"""Create baloo Index from pandas Index.
Parameters
----------
index : pandas.base.Index
Returns
-------
Index
"""
from pandas import Index as PandasIndex
check_type(index, PandasIndex)
return Index(index.values, index.dtype, index.name) |
def midi_to_note(midi, octave=True, cents=False):
    '''Convert one or more MIDI numbers to note strings.

    MIDI numbers will be rounded to the nearest integer.

    Notes will be of the format 'C0', 'C#0', 'D0', ...

    Examples
    --------
    >>> librosa.midi_to_note(0)
    'C-1'
    >>> librosa.midi_to_note(37)
    'C#2'
    >>> librosa.midi_to_note(-2)
    'A#-2'
    >>> librosa.midi_to_note(104.7)
    'A7'
    >>> librosa.midi_to_note(104.7, cents=True)
    'A7-30'
    >>> librosa.midi_to_note(list(range(12, 24)))
    ['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']

    Parameters
    ----------
    midi : int or iterable of int
        Midi numbers to convert.

    octave: bool
        If True, include the octave number

    cents: bool
        If true, cent markers will be appended for fractional notes.
        Eg, `midi_to_note(69.3, cents=True)` == `A4+03`

    Returns
    -------
    notes : str or iterable of str
        Strings describing each midi note.

    Raises
    ------
    ParameterError
        if `cents` is True and `octave` is False

    See Also
    --------
    midi_to_hz
    note_to_midi
    hz_to_note
    '''

    if cents and not octave:
        raise ParameterError('Cannot encode cents without octave information.')

    # Vectorize over iterables by recursing on each element.
    if not np.isscalar(midi):
        return [midi_to_note(x, octave=octave, cents=cents) for x in midi]

    note_map = ['C', 'C#', 'D', 'D#',
                'E', 'F', 'F#', 'G',
                'G#', 'A', 'A#', 'B']

    note_num = int(np.round(midi))
    # Cent offset from the nearest semitone, rounded to whole cents.
    note_cents = int(100 * np.around(midi - note_num, 2))

    note = note_map[note_num % 12]

    if octave:
        # BUG FIX: use floor division. int(note_num / 12) truncates toward
        # zero in Python 3, so negative MIDI numbers landed one octave too
        # high (midi_to_note(-2) gave 'A#-1' instead of the documented
        # 'A#-2'). Floor division agrees with the % 12 pitch-class math.
        note = '{:s}{:0d}'.format(note, (note_num // 12) - 1)
    if cents:
        note = '{:s}{:+02d}'.format(note, note_cents)

    return note
constant[Convert one or more MIDI numbers to note strings.
MIDI numbers will be rounded to the nearest integer.
Notes will be of the format 'C0', 'C#0', 'D0', ...
Examples
--------
>>> librosa.midi_to_note(0)
'C-1'
>>> librosa.midi_to_note(37)
'C#2'
>>> librosa.midi_to_note(-2)
'A#-2'
>>> librosa.midi_to_note(104.7)
'A7'
>>> librosa.midi_to_note(104.7, cents=True)
'A7-30'
>>> librosa.midi_to_note(list(range(12, 24)))
['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']
Parameters
----------
midi : int or iterable of int
Midi numbers to convert.
octave: bool
If True, include the octave number
cents: bool
If true, cent markers will be appended for fractional notes.
Eg, `midi_to_note(69.3, cents=True)` == `A4+03`
Returns
-------
notes : str or iterable of str
Strings describing each midi note.
Raises
------
ParameterError
if `cents` is True and `octave` is False
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
]
if <ast.BoolOp object at 0x7da1b2344c10> begin[:]
<ast.Raise object at 0x7da1b2344250>
if <ast.UnaryOp object at 0x7da1b2345cc0> begin[:]
return[<ast.ListComp object at 0x7da1b2344f40>]
variable[note_map] assign[=] list[[<ast.Constant object at 0x7da1b2344370>, <ast.Constant object at 0x7da1b2344730>, <ast.Constant object at 0x7da1b2347dc0>, <ast.Constant object at 0x7da1b2345180>, <ast.Constant object at 0x7da1b2347730>, <ast.Constant object at 0x7da1b2344ac0>, <ast.Constant object at 0x7da1b2344520>, <ast.Constant object at 0x7da1b2345ba0>, <ast.Constant object at 0x7da1b23479a0>, <ast.Constant object at 0x7da1b2345420>, <ast.Constant object at 0x7da1b23458a0>, <ast.Constant object at 0x7da1b23471f0>]]
variable[note_num] assign[=] call[name[int], parameter[call[name[np].round, parameter[name[midi]]]]]
variable[note_cents] assign[=] call[name[int], parameter[binary_operation[constant[100] * call[name[np].around, parameter[binary_operation[name[midi] - name[note_num]], constant[2]]]]]]
variable[note] assign[=] call[name[note_map]][binary_operation[name[note_num] <ast.Mod object at 0x7da2590d6920> constant[12]]]
if name[octave] begin[:]
variable[note] assign[=] call[constant[{:s}{:0d}].format, parameter[name[note], binary_operation[call[name[int], parameter[binary_operation[name[note_num] / constant[12]]]] - constant[1]]]]
if name[cents] begin[:]
variable[note] assign[=] call[constant[{:s}{:+02d}].format, parameter[name[note], name[note_cents]]]
return[name[note]] | keyword[def] identifier[midi_to_note] ( identifier[midi] , identifier[octave] = keyword[True] , identifier[cents] = keyword[False] ):
literal[string]
keyword[if] identifier[cents] keyword[and] keyword[not] identifier[octave] :
keyword[raise] identifier[ParameterError] ( literal[string] )
keyword[if] keyword[not] identifier[np] . identifier[isscalar] ( identifier[midi] ):
keyword[return] [ identifier[midi_to_note] ( identifier[x] , identifier[octave] = identifier[octave] , identifier[cents] = identifier[cents] ) keyword[for] identifier[x] keyword[in] identifier[midi] ]
identifier[note_map] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[note_num] = identifier[int] ( identifier[np] . identifier[round] ( identifier[midi] ))
identifier[note_cents] = identifier[int] ( literal[int] * identifier[np] . identifier[around] ( identifier[midi] - identifier[note_num] , literal[int] ))
identifier[note] = identifier[note_map] [ identifier[note_num] % literal[int] ]
keyword[if] identifier[octave] :
identifier[note] = literal[string] . identifier[format] ( identifier[note] , identifier[int] ( identifier[note_num] / literal[int] )- literal[int] )
keyword[if] identifier[cents] :
identifier[note] = literal[string] . identifier[format] ( identifier[note] , identifier[note_cents] )
keyword[return] identifier[note] | def midi_to_note(midi, octave=True, cents=False):
"""Convert one or more MIDI numbers to note strings.
MIDI numbers will be rounded to the nearest integer.
Notes will be of the format 'C0', 'C#0', 'D0', ...
Examples
--------
>>> librosa.midi_to_note(0)
'C-1'
>>> librosa.midi_to_note(37)
'C#2'
>>> librosa.midi_to_note(-2)
'A#-2'
>>> librosa.midi_to_note(104.7)
'A7'
>>> librosa.midi_to_note(104.7, cents=True)
'A7-30'
>>> librosa.midi_to_note(list(range(12, 24)))
['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']
Parameters
----------
midi : int or iterable of int
Midi numbers to convert.
octave: bool
If True, include the octave number
cents: bool
If true, cent markers will be appended for fractional notes.
Eg, `midi_to_note(69.3, cents=True)` == `A4+03`
Returns
-------
notes : str or iterable of str
Strings describing each midi note.
Raises
------
ParameterError
if `cents` is True and `octave` is False
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
"""
if cents and (not octave):
raise ParameterError('Cannot encode cents without octave information.') # depends on [control=['if'], data=[]]
if not np.isscalar(midi):
return [midi_to_note(x, octave=octave, cents=cents) for x in midi] # depends on [control=['if'], data=[]]
note_map = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
note_num = int(np.round(midi))
note_cents = int(100 * np.around(midi - note_num, 2))
note = note_map[note_num % 12]
if octave:
note = '{:s}{:0d}'.format(note, int(note_num / 12) - 1) # depends on [control=['if'], data=[]]
if cents:
note = '{:s}{:+02d}'.format(note, note_cents) # depends on [control=['if'], data=[]]
return note |
def exec_prog(args):
    """Run a subprocess and verify its <program>.OK success sentinel.

    args: list of command-line arguments; the first element is the
        program to execute (and, presumably, the basename used for the
        .OK / .FAILED sentinel files -- TODO confirm against callers).

    Returns the combined stdout/stderr output of the subprocess.

    Raises subprocess.CalledProcessError if the subprocess exits
    non-zero, or if it did not leave a <program>.OK file behind
    (the convention used here to signal success).
    """
    program_name = args[0]
    logging.info(" ".join(args))
    # stderr is folded into stdout so the caller receives one stream.
    output = subprocess.check_output(args, stderr=subprocess.STDOUT)
    # Success is signalled by the tool touching <program>.OK; its absence
    # is treated as failure even when the exit status was 0.
    if not os.access(program_name+".OK", os.F_OK):
        logging.error("No {}.OK file?".format(program_name))
        raise subprocess.CalledProcessError(-1, ' '.join(args), output)
    os.unlink(program_name+".OK")
    # Remove a stale .FAILED marker left over from a previous run, if any.
    if os.access(program_name+".FAILED", os.F_OK):
        os.unlink(program_name+".FAILED")
return output | def function[exec_prog, parameter[args]]:
constant[Run a subprocess, check for .OK and raise error if does not exist.
args: list of arguments, for value is the command to execute.
]
variable[program_name] assign[=] call[name[args]][constant[0]]
call[name[logging].info, parameter[call[constant[ ].join, parameter[name[args]]]]]
variable[output] assign[=] call[name[subprocess].check_output, parameter[name[args]]]
if <ast.UnaryOp object at 0x7da1b198c040> begin[:]
call[name[logging].error, parameter[call[constant[No {}.OK file?].format, parameter[name[program_name]]]]]
<ast.Raise object at 0x7da1b1933a30>
call[name[os].unlink, parameter[binary_operation[name[program_name] + constant[.OK]]]]
if call[name[os].access, parameter[binary_operation[name[program_name] + constant[.FAILED]], name[os].F_OK]] begin[:]
call[name[os].unlink, parameter[binary_operation[name[program_name] + constant[.FAILED]]]]
return[name[output]] | keyword[def] identifier[exec_prog] ( identifier[args] ):
literal[string]
identifier[program_name] = identifier[args] [ literal[int] ]
identifier[logging] . identifier[info] ( literal[string] . identifier[join] ( identifier[args] ))
identifier[output] = identifier[subprocess] . identifier[check_output] ( identifier[args] , identifier[stderr] = identifier[subprocess] . identifier[STDOUT] )
keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[program_name] + literal[string] , identifier[os] . identifier[F_OK] ):
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[program_name] ))
keyword[raise] identifier[subprocess] . identifier[CalledProcessError] (- literal[int] , literal[string] . identifier[join] ( identifier[args] ), identifier[output] )
identifier[os] . identifier[unlink] ( identifier[program_name] + literal[string] )
keyword[if] identifier[os] . identifier[access] ( identifier[program_name] + literal[string] , identifier[os] . identifier[F_OK] ):
identifier[os] . identifier[unlink] ( identifier[program_name] + literal[string] )
keyword[return] identifier[output] | def exec_prog(args):
"""Run a subprocess, check for .OK and raise error if does not exist.
args: list of arguments, for value is the command to execute.
"""
program_name = args[0]
logging.info(' '.join(args))
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
if not os.access(program_name + '.OK', os.F_OK):
logging.error('No {}.OK file?'.format(program_name))
raise subprocess.CalledProcessError(-1, ' '.join(args), output) # depends on [control=['if'], data=[]]
os.unlink(program_name + '.OK')
if os.access(program_name + '.FAILED', os.F_OK):
os.unlink(program_name + '.FAILED') # depends on [control=['if'], data=[]]
return output |
def task(*args, **kwargs):
    '''Register a function as a task, as well as applying any attributes.

    Usable as a bare decorator (@task) or parameterized:
    positional flags -- 'default', 'setup', 'teardown', 'private',
    'method', 'consume'; keyword options -- reqs, gens, alias, namespace.
    '''
    # Bare-decorator form: @task applied directly to a callable.
    if args and hasattr(args[0], '__call__'):
        return _taskify(args[0])
    # Parameterized form: @task(), @task('default'), @task(reqs=...), etc.
    else:
        def wrapper(func):
            global DEFAULT, SETUP, TEARDOWN
            func = _taskify(func)
            if 'default' in args:
                # Task to run when no task name is given.
                DEFAULT = func
            if 'setup' in args:
                SETUP = func
            if 'teardown' in args:
                TEARDOWN = func
            if 'private' in args:
                # Remove from the public task list again (presumably
                # _taskify registered it in TASKS -- TODO confirm).
                TASKS.remove(func)
            if 'method' in args:
                # Treat the first declared argument as an implicit receiver.
                func.method = True
                func.args = func.args[1:]
            if 'consume' in args:
                # Task consumes all remaining command-line arguments.
                func.consume = True
            if 'reqs' in kwargs:
                # Requirements: strings are file prerequisites, anything
                # else is treated as a task prerequisite.
                func.reqs = _tuplify(kwargs['reqs'])
                func.file_reqs = [req for req in func.reqs if type(req) is str]
                func.task_reqs = [req for req in func.reqs if type(req) is not str]
            if 'gens' in kwargs:
                # Record which target this task generates.
                func.gens = kwargs['gens']
                GENERATES[kwargs['gens']] = func
            if 'alias' in kwargs:
                func.aliases = _tuplify(kwargs['alias'])
            if 'namespace' in kwargs:
                # Namespace prefix ('ns.'); empty when namespace is falsy.
                func.ns = kwargs['namespace'] + '.' if kwargs['namespace'] else ''
            return func
return wrapper | def function[task, parameter[]]:
constant[Register a function as a task, as well as applying any attributes. ]
if <ast.BoolOp object at 0x7da18dc05870> begin[:]
return[call[name[_taskify], parameter[call[name[args]][constant[0]]]]] | keyword[def] identifier[task] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[args] keyword[and] identifier[hasattr] ( identifier[args] [ literal[int] ], literal[string] ):
keyword[return] identifier[_taskify] ( identifier[args] [ literal[int] ])
keyword[else] :
keyword[def] identifier[wrapper] ( identifier[func] ):
keyword[global] identifier[DEFAULT] , identifier[SETUP] , identifier[TEARDOWN]
identifier[func] = identifier[_taskify] ( identifier[func] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[DEFAULT] = identifier[func]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[SETUP] = identifier[func]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[TEARDOWN] = identifier[func]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[TASKS] . identifier[remove] ( identifier[func] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[func] . identifier[method] = keyword[True]
identifier[func] . identifier[args] = identifier[func] . identifier[args] [ literal[int] :]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[func] . identifier[consume] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[func] . identifier[reqs] = identifier[_tuplify] ( identifier[kwargs] [ literal[string] ])
identifier[func] . identifier[file_reqs] =[ identifier[req] keyword[for] identifier[req] keyword[in] identifier[func] . identifier[reqs] keyword[if] identifier[type] ( identifier[req] ) keyword[is] identifier[str] ]
identifier[func] . identifier[task_reqs] =[ identifier[req] keyword[for] identifier[req] keyword[in] identifier[func] . identifier[reqs] keyword[if] identifier[type] ( identifier[req] ) keyword[is] keyword[not] identifier[str] ]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[func] . identifier[gens] = identifier[kwargs] [ literal[string] ]
identifier[GENERATES] [ identifier[kwargs] [ literal[string] ]]= identifier[func]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[func] . identifier[aliases] = identifier[_tuplify] ( identifier[kwargs] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[func] . identifier[ns] = identifier[kwargs] [ literal[string] ]+ literal[string] keyword[if] identifier[kwargs] [ literal[string] ] keyword[else] literal[string]
keyword[return] identifier[func]
keyword[return] identifier[wrapper] | def task(*args, **kwargs):
"""Register a function as a task, as well as applying any attributes. """ # support @task
if args and hasattr(args[0], '__call__'):
return _taskify(args[0]) # depends on [control=['if'], data=[]]
else: # as well as @task(), @task('default'), etc.
def wrapper(func):
global DEFAULT, SETUP, TEARDOWN
func = _taskify(func)
if 'default' in args:
DEFAULT = func # depends on [control=['if'], data=[]]
if 'setup' in args:
SETUP = func # depends on [control=['if'], data=[]]
if 'teardown' in args:
TEARDOWN = func # depends on [control=['if'], data=[]]
if 'private' in args:
TASKS.remove(func) # depends on [control=['if'], data=[]]
if 'method' in args:
func.method = True
func.args = func.args[1:] # depends on [control=['if'], data=[]]
if 'consume' in args:
func.consume = True # depends on [control=['if'], data=[]]
if 'reqs' in kwargs:
func.reqs = _tuplify(kwargs['reqs'])
func.file_reqs = [req for req in func.reqs if type(req) is str]
func.task_reqs = [req for req in func.reqs if type(req) is not str] # depends on [control=['if'], data=['kwargs']]
if 'gens' in kwargs:
func.gens = kwargs['gens']
GENERATES[kwargs['gens']] = func # depends on [control=['if'], data=['kwargs']]
if 'alias' in kwargs:
func.aliases = _tuplify(kwargs['alias']) # depends on [control=['if'], data=['kwargs']]
if 'namespace' in kwargs:
func.ns = kwargs['namespace'] + '.' if kwargs['namespace'] else '' # depends on [control=['if'], data=['kwargs']]
return func
return wrapper |
def cookies(self) -> Dict[str, http.cookies.Morsel]:
    """A dictionary of ``http.cookies.Morsel`` objects.

    Lazily parsed from the request's ``Cookie`` header on first access
    and cached on the instance as ``self._cookies``.
    """
    # Lazy initialization: parse the header at most once per instance.
    if not hasattr(self, "_cookies"):
        self._cookies = http.cookies.SimpleCookie()
        if "Cookie" in self.headers:
            try:
                parsed = parse_cookie(self.headers["Cookie"])
            except Exception:
                # Malformed header: fall through with an empty cookie jar.
                pass
            else:
                for k, v in parsed.items():
                    try:
                        self._cookies[k] = v
                    except Exception:
                        # SimpleCookie imposes some restrictions on keys;
                        # parse_cookie does not. Discard any cookies
                        # with disallowed keys.
                        pass
return self._cookies | def function[cookies, parameter[self]]:
constant[A dictionary of ``http.cookies.Morsel`` objects.]
if <ast.UnaryOp object at 0x7da1b1f2f460> begin[:]
name[self]._cookies assign[=] call[name[http].cookies.SimpleCookie, parameter[]]
if compare[constant[Cookie] in name[self].headers] begin[:]
<ast.Try object at 0x7da1b1f2de40>
return[name[self]._cookies] | keyword[def] identifier[cookies] ( identifier[self] )-> identifier[Dict] [ identifier[str] , identifier[http] . identifier[cookies] . identifier[Morsel] ]:
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_cookies] = identifier[http] . identifier[cookies] . identifier[SimpleCookie] ()
keyword[if] literal[string] keyword[in] identifier[self] . identifier[headers] :
keyword[try] :
identifier[parsed] = identifier[parse_cookie] ( identifier[self] . identifier[headers] [ literal[string] ])
keyword[except] identifier[Exception] :
keyword[pass]
keyword[else] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[parsed] . identifier[items] ():
keyword[try] :
identifier[self] . identifier[_cookies] [ identifier[k] ]= identifier[v]
keyword[except] identifier[Exception] :
keyword[pass]
keyword[return] identifier[self] . identifier[_cookies] | def cookies(self) -> Dict[str, http.cookies.Morsel]:
"""A dictionary of ``http.cookies.Morsel`` objects."""
if not hasattr(self, '_cookies'):
self._cookies = http.cookies.SimpleCookie()
if 'Cookie' in self.headers:
try:
parsed = parse_cookie(self.headers['Cookie']) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
else:
for (k, v) in parsed.items():
try:
self._cookies[k] = v # depends on [control=['try'], data=[]]
except Exception:
# SimpleCookie imposes some restrictions on keys;
# parse_cookie does not. Discard any cookies
# with disallowed keys.
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._cookies |
def get_meta(cls):
    """
    Collect all members of any contained :code:`Meta` class declarations from the given class or any of its base classes.
    (Sub class values take precedence.)
    :type cls: class
    :rtype: Struct
    """
    merged_attributes = Struct()
    # Walk the MRO from most-base to most-derived so that Meta members
    # declared on subclasses overwrite those inherited from base classes.
    for class_ in reversed(cls.mro()):
        if hasattr(class_, 'Meta'):
            # Copy every attribute declared directly on this Meta
            # (Meta.__dict__ includes dunders such as __module__ too).
            for key, value in class_.Meta.__dict__.items():
                merged_attributes[key] = value
return merged_attributes | def function[get_meta, parameter[cls]]:
constant[
Collect all members of any contained :code:`Meta` class declarations from the given class or any of its base classes.
(Sub class values take precedence.)
:type cls: class
:rtype: Struct
]
variable[merged_attributes] assign[=] call[name[Struct], parameter[]]
for taget[name[class_]] in starred[call[name[reversed], parameter[call[name[cls].mro, parameter[]]]]] begin[:]
if call[name[hasattr], parameter[name[class_], constant[Meta]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6e7f70>, <ast.Name object at 0x7da20c6e4a00>]]] in starred[call[name[class_].Meta.__dict__.items, parameter[]]] begin[:]
call[name[merged_attributes]][name[key]] assign[=] name[value]
return[name[merged_attributes]] | keyword[def] identifier[get_meta] ( identifier[cls] ):
literal[string]
identifier[merged_attributes] = identifier[Struct] ()
keyword[for] identifier[class_] keyword[in] identifier[reversed] ( identifier[cls] . identifier[mro] ()):
keyword[if] identifier[hasattr] ( identifier[class_] , literal[string] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[class_] . identifier[Meta] . identifier[__dict__] . identifier[items] ():
identifier[merged_attributes] [ identifier[key] ]= identifier[value]
keyword[return] identifier[merged_attributes] | def get_meta(cls):
"""
Collect all members of any contained :code:`Meta` class declarations from the given class or any of its base classes.
(Sub class values take precedence.)
:type cls: class
:rtype: Struct
"""
merged_attributes = Struct()
for class_ in reversed(cls.mro()):
if hasattr(class_, 'Meta'):
for (key, value) in class_.Meta.__dict__.items():
merged_attributes[key] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['class_']]
return merged_attributes |
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    # Normalize the path and pass the basename separately; the basename
    # presumably drives egg/source detection in distros_for_location --
    # verify against that helper's implementation.
    return distros_for_location(
        normalize_path(filename), os.path.basename(filename), metadata
) | def function[distros_for_filename, parameter[filename, metadata]]:
constant[Yield possible egg or source distribution objects based on a filename]
return[call[name[distros_for_location], parameter[call[name[normalize_path], parameter[name[filename]]], call[name[os].path.basename, parameter[name[filename]]], name[metadata]]]] | keyword[def] identifier[distros_for_filename] ( identifier[filename] , identifier[metadata] = keyword[None] ):
literal[string]
keyword[return] identifier[distros_for_location] (
identifier[normalize_path] ( identifier[filename] ), identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] ), identifier[metadata]
) | def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(normalize_path(filename), os.path.basename(filename), metadata) |
def get_vis_data_from_string(self, sess, input_string):
    """Constructs the data needed for visualizing attentions.
    Args:
      sess: A tf.Session object.
      input_string: The input sentence to be translated and visualized.
    Returns:
      Tuple of (
          output_string: The translated sentence.
          input_list: Tokenized input sentence.
          output_list: Tokenized translation.
          att_mats: Tuple of attention matrices; (
              enc_atts: Encoder self attention weights.
                A list of `num_layers` numpy arrays of size
                (batch_size, num_heads, inp_len, inp_len)
              dec_atts: Decoder self attention weights.
                A list of `num_layers` numpy arrays of size
                (batch_size, num_heads, out_len, out_len)
              encdec_atts: Encoder-Decoder attention weights.
                A list of `num_layers` numpy arrays of size
                (batch_size, num_heads, out_len, inp_len)
          )
    """
    encoded_inputs = self.encode(input_string)
    # Run inference graph to get the translation.
    out = sess.run(self.samples, {
        self.inputs: encoded_inputs,
    })
    # Run the decoded translation through the training graph to get the
    # attention tensors.
    # The sampled ids are fed back as targets, reshaped to
    # [batch=1, length, 1, 1] -- presumably the layout the training
    # graph's `targets` placeholder expects (TODO confirm).
    att_mats = sess.run(self.att_mats, {
        self.inputs: encoded_inputs,
        self.targets: np.reshape(out, [1, -1, 1, 1]),
    })
    # Decode ids back to human-readable strings / token lists.
    output_string = self.decode(out)
    input_list = self.decode_list(encoded_inputs)
    output_list = self.decode_list(out)
return output_string, input_list, output_list, att_mats | def function[get_vis_data_from_string, parameter[self, sess, input_string]]:
constant[Constructs the data needed for visualizing attentions.
Args:
sess: A tf.Session object.
input_string: The input sentence to be translated and visualized.
Returns:
Tuple of (
output_string: The translated sentence.
input_list: Tokenized input sentence.
output_list: Tokenized translation.
att_mats: Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
]
variable[encoded_inputs] assign[=] call[name[self].encode, parameter[name[input_string]]]
variable[out] assign[=] call[name[sess].run, parameter[name[self].samples, dictionary[[<ast.Attribute object at 0x7da1b20f9720>], [<ast.Name object at 0x7da1b20f8040>]]]]
variable[att_mats] assign[=] call[name[sess].run, parameter[name[self].att_mats, dictionary[[<ast.Attribute object at 0x7da1b20fba60>, <ast.Attribute object at 0x7da1b20f9ea0>], [<ast.Name object at 0x7da1b20fbe80>, <ast.Call object at 0x7da1b20f9750>]]]]
variable[output_string] assign[=] call[name[self].decode, parameter[name[out]]]
variable[input_list] assign[=] call[name[self].decode_list, parameter[name[encoded_inputs]]]
variable[output_list] assign[=] call[name[self].decode_list, parameter[name[out]]]
return[tuple[[<ast.Name object at 0x7da1b2098b50>, <ast.Name object at 0x7da1b2099f90>, <ast.Name object at 0x7da1b2098eb0>, <ast.Name object at 0x7da1b209a740>]]] | keyword[def] identifier[get_vis_data_from_string] ( identifier[self] , identifier[sess] , identifier[input_string] ):
literal[string]
identifier[encoded_inputs] = identifier[self] . identifier[encode] ( identifier[input_string] )
identifier[out] = identifier[sess] . identifier[run] ( identifier[self] . identifier[samples] ,{
identifier[self] . identifier[inputs] : identifier[encoded_inputs] ,
})
identifier[att_mats] = identifier[sess] . identifier[run] ( identifier[self] . identifier[att_mats] ,{
identifier[self] . identifier[inputs] : identifier[encoded_inputs] ,
identifier[self] . identifier[targets] : identifier[np] . identifier[reshape] ( identifier[out] ,[ literal[int] ,- literal[int] , literal[int] , literal[int] ]),
})
identifier[output_string] = identifier[self] . identifier[decode] ( identifier[out] )
identifier[input_list] = identifier[self] . identifier[decode_list] ( identifier[encoded_inputs] )
identifier[output_list] = identifier[self] . identifier[decode_list] ( identifier[out] )
keyword[return] identifier[output_string] , identifier[input_list] , identifier[output_list] , identifier[att_mats] | def get_vis_data_from_string(self, sess, input_string):
"""Constructs the data needed for visualizing attentions.
Args:
sess: A tf.Session object.
input_string: The input sentence to be translated and visualized.
Returns:
Tuple of (
output_string: The translated sentence.
input_list: Tokenized input sentence.
output_list: Tokenized translation.
att_mats: Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
encoded_inputs = self.encode(input_string)
# Run inference graph to get the translation.
out = sess.run(self.samples, {self.inputs: encoded_inputs})
# Run the decoded translation through the training graph to get the
# attention tensors.
att_mats = sess.run(self.att_mats, {self.inputs: encoded_inputs, self.targets: np.reshape(out, [1, -1, 1, 1])})
output_string = self.decode(out)
input_list = self.decode_list(encoded_inputs)
output_list = self.decode_list(out)
return (output_string, input_list, output_list, att_mats) |
def most_visited_pages_charts():
    """Chart for most visited pages.

    Builds a list of Highcharts option dicts: one horizontal bar chart
    per occurrence bucket in most_visited_pages_stats()['more_than_10'],
    plus a final pie chart summarizing the 'less_than_10' URLs by type.
    """
    stats = most_visited_pages_stats()
    charts = []
    # One bar chart per bucket of URLs visited more than ~10 times.
    for i, stat in enumerate(stats['more_than_10']):
        bound = stat['bound']
        subset = stat['subset']
        chart_options = {
            'chart': {
                'type': 'bar',
                # Scale chart height with the number of bars shown.
                'height': 15 * len(subset) + 100
            },
            'title': {
                # First bucket (i == 0) is open-ended ("More than N");
                # later buckets are ranges bounded above by the previous
                # bucket's bound.
                'text': {0: _('More than %d times') % bound}.get(
                    i, _('Between %d and %d times') % (
                        bound, stats['more_than_10'][i - 1]['bound']))
            },
            'xAxis': {
                # subset items are (url, count, url_type) triples.
                'categories': [u for (u, c, t) in subset],
                'title': {
                    'text': None
                }
            },
            'yAxis': {
                'title': {
                    'text': None
                }
            },
            'plotOptions': {
                'bar': {
                    'dataLabels': {
                        'enabled': True
                    }
                },
            },
            'tooltip': {
                'enabled': False
            },
            'legend': {
                'enabled': False
            },
            'credits': {
                'enabled': False
            },
        }
        # One data point per URL, colored by its URL type.
        series_data = []
        for index, (url, count, url_type) in enumerate(subset):
            data = {
                'x': index,
                'y': count
            }
            color = URL_TYPE_COLOR[url_type]
            data['color'] = color
            series_data.append(data)
        chart_options['series'] = [{
            'name': _('Requests'),
            'data': series_data
        }]
        charts.append(chart_options)
    # JS callback body for the pie tooltip. '%%' survives the Python
    # %-formatting below as a literal '%' in the emitted JavaScript.
    point_formatter_code = """
    return '<br>%s: <strong>' + this.dis + '</strong>(' +
        Highcharts.numberFormat(this.dis / this.total_dis * 100, 1) + '%%)' +
        '<br>%s: <strong>' + this.occ + '</strong> (' +
        Highcharts.numberFormat(this.occ / this.total_occ * 100, 1) + '%%)';
    """ % (_('Distinct URLs'), _('Occurrences'))
    # Pie chart: URLs seen fewer than 10 times, broken down by URL type.
    occurrences = stats['less_than_10']
    total_distinct = sum([v['distinct'] for k, v in occurrences.items()])
    total_occurrences = sum([v['total'] for k, v in occurrences.items()])
    charts.append({
        'chart': {
            'plotBackgroundColor': None,
            'plotBorderWidth': None,
            'plotShadow': False,
            'type': 'pie'
        },
        'title': {
            'text': _('Less than 10 (type repartition)')
        },
        'plotOptions': {
            'pie': {
                'allowPointSelect': True,
                'cursor': 'pointer',
                'dataLabels': {
                    'enabled': False
                },
                'showInLegend': True,
                'tooltip': {
                    'pointFormatter': point_formatter_code
                },
            }
        },
        # One slice per URL type; 'dis'/'occ'/'total_*' are custom point
        # fields read by the pointFormatter JS above.
        'series': [{
            'name': '',
            'colorByPoint': True,
            'data': [{
                'name': _('Valid project URL'),
                'dis': occurrences[PROJECT]['distinct'],
                'y': occurrences[PROJECT]['total'],
                'occ': occurrences[PROJECT]['total'],
                'total_dis': total_distinct,
                'total_occ': total_occurrences,
                'color': URL_TYPE_COLOR[PROJECT]
            }, {
                'name': _('Old project URL'),
                'dis': occurrences[OLD_PROJECT]['distinct'],
                'y': occurrences[OLD_PROJECT]['total'],
                'occ': occurrences[OLD_PROJECT]['total'],
                'total_dis': total_distinct,
                'total_occ': total_occurrences,
                'color': URL_TYPE_COLOR[OLD_PROJECT]
            }, {
                'name': _('Valid asset URL'),
                'dis': occurrences[ASSET]['distinct'],
                'y': occurrences[ASSET]['total'],
                'occ': occurrences[ASSET]['total'],
                'total_dis': total_distinct,
                'total_occ': total_occurrences,
                'color': URL_TYPE_COLOR[ASSET]
            }, {
                'name': _('Old asset URL'),
                'dis': occurrences[OLD_ASSET]['distinct'],
                'y': occurrences[OLD_ASSET]['total'],
                'occ': occurrences[OLD_ASSET]['total'],
                'total_dis': total_distinct,
                'total_occ': total_occurrences,
                'color': URL_TYPE_COLOR[OLD_ASSET]
            }, {
                'name': _('Common asset URL'),
                'dis': occurrences[COMMON_ASSET]['distinct'],
                'y': occurrences[COMMON_ASSET]['total'],
                'occ': occurrences[COMMON_ASSET]['total'],
                'total_dis': total_distinct,
                'total_occ': total_occurrences,
                'color': URL_TYPE_COLOR[COMMON_ASSET]
            }, {
                'name': _('False-negative project URL'),
                'dis': occurrences[FALSE_NEGATIVE]['distinct'],
                'y': occurrences[FALSE_NEGATIVE]['total'],
                'occ': occurrences[FALSE_NEGATIVE]['total'],
                'total_dis': total_distinct,
                'total_occ': total_occurrences,
                'color': URL_TYPE_COLOR[FALSE_NEGATIVE]
            }, {
                'name': _('Suspicious URL (potential attack)'),
                'dis': occurrences[SUSPICIOUS]['distinct'],
                'y': occurrences[SUSPICIOUS]['total'],
                'occ': occurrences[SUSPICIOUS]['total'],
                'total_dis': total_distinct,
                'total_occ': total_occurrences,
                'color': URL_TYPE_COLOR[SUSPICIOUS]
            }]
        }]
    })
return charts | def function[most_visited_pages_charts, parameter[]]:
constant[Chart for most visited pages.]
variable[stats] assign[=] call[name[most_visited_pages_stats], parameter[]]
variable[charts] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2438af0>, <ast.Name object at 0x7da1b2438a90>]]] in starred[call[name[enumerate], parameter[call[name[stats]][constant[more_than_10]]]]] begin[:]
variable[bound] assign[=] call[name[stat]][constant[bound]]
variable[subset] assign[=] call[name[stat]][constant[subset]]
variable[chart_options] assign[=] dictionary[[<ast.Constant object at 0x7da18bc721d0>, <ast.Constant object at 0x7da18bc700a0>, <ast.Constant object at 0x7da18bc73850>, <ast.Constant object at 0x7da18bc72920>, <ast.Constant object at 0x7da18bc70f70>, <ast.Constant object at 0x7da18bc71360>, <ast.Constant object at 0x7da18bc706a0>, <ast.Constant object at 0x7da18bc71fc0>], [<ast.Dict object at 0x7da18bc72e60>, <ast.Dict object at 0x7da18bc73970>, <ast.Dict object at 0x7da1b25db7f0>, <ast.Dict object at 0x7da1b25dabc0>, <ast.Dict object at 0x7da1b25db6d0>, <ast.Dict object at 0x7da1b25d84f0>, <ast.Dict object at 0x7da1b25d9c60>, <ast.Dict object at 0x7da1b25db490>]]
variable[series_data] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b25d8d60>, <ast.Tuple object at 0x7da1b25d93f0>]]] in starred[call[name[enumerate], parameter[name[subset]]]] begin[:]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b25d9660>, <ast.Constant object at 0x7da1b25da2f0>], [<ast.Name object at 0x7da1b25da6b0>, <ast.Name object at 0x7da1b25da800>]]
variable[color] assign[=] call[name[URL_TYPE_COLOR]][name[url_type]]
call[name[data]][constant[color]] assign[=] name[color]
call[name[series_data].append, parameter[name[data]]]
call[name[chart_options]][constant[series]] assign[=] list[[<ast.Dict object at 0x7da1b25db5b0>]]
call[name[charts].append, parameter[name[chart_options]]]
variable[point_formatter_code] assign[=] binary_operation[constant[
return '<br>%s: <strong>' + this.dis + '</strong>(' +
Highcharts.numberFormat(this.dis / this.total_dis * 100, 1) + '%%)' +
'<br>%s: <strong>' + this.occ + '</strong> (' +
Highcharts.numberFormat(this.occ / this.total_occ * 100, 1) + '%%)';
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b2589660>, <ast.Call object at 0x7da1b2589540>]]]
variable[occurrences] assign[=] call[name[stats]][constant[less_than_10]]
variable[total_distinct] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da20c6aa890>]]
variable[total_occurrences] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da20c6a8040>]]
call[name[charts].append, parameter[dictionary[[<ast.Constant object at 0x7da20c6aa050>, <ast.Constant object at 0x7da20c6a9ff0>, <ast.Constant object at 0x7da20c6ab6d0>, <ast.Constant object at 0x7da20c6a97e0>], [<ast.Dict object at 0x7da20c6ab340>, <ast.Dict object at 0x7da20c6aac20>, <ast.Dict object at 0x7da1b251d210>, <ast.List object at 0x7da1b251f2b0>]]]]
return[name[charts]] | keyword[def] identifier[most_visited_pages_charts] ():
literal[string]
identifier[stats] = identifier[most_visited_pages_stats] ()
identifier[charts] =[]
keyword[for] identifier[i] , identifier[stat] keyword[in] identifier[enumerate] ( identifier[stats] [ literal[string] ]):
identifier[bound] = identifier[stat] [ literal[string] ]
identifier[subset] = identifier[stat] [ literal[string] ]
identifier[chart_options] ={
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[int] * identifier[len] ( identifier[subset] )+ literal[int]
},
literal[string] :{
literal[string] :{ literal[int] : identifier[_] ( literal[string] )% identifier[bound] }. identifier[get] (
identifier[i] , identifier[_] ( literal[string] )%(
identifier[bound] , identifier[stats] [ literal[string] ][ identifier[i] - literal[int] ][ literal[string] ]))
},
literal[string] :{
literal[string] :[ identifier[u] keyword[for] ( identifier[u] , identifier[c] , identifier[t] ) keyword[in] identifier[subset] ],
literal[string] :{
literal[string] : keyword[None]
}
},
literal[string] :{
literal[string] :{
literal[string] : keyword[None]
}
},
literal[string] :{
literal[string] :{
literal[string] :{
literal[string] : keyword[True]
}
},
},
literal[string] :{
literal[string] : keyword[False]
},
literal[string] :{
literal[string] : keyword[False]
},
literal[string] :{
literal[string] : keyword[False]
},
}
identifier[series_data] =[]
keyword[for] identifier[index] ,( identifier[url] , identifier[count] , identifier[url_type] ) keyword[in] identifier[enumerate] ( identifier[subset] ):
identifier[data] ={
literal[string] : identifier[index] ,
literal[string] : identifier[count]
}
identifier[color] = identifier[URL_TYPE_COLOR] [ identifier[url_type] ]
identifier[data] [ literal[string] ]= identifier[color]
identifier[series_data] . identifier[append] ( identifier[data] )
identifier[chart_options] [ literal[string] ]=[{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[series_data]
}]
identifier[charts] . identifier[append] ( identifier[chart_options] )
identifier[point_formatter_code] = literal[string] %( identifier[_] ( literal[string] ), identifier[_] ( literal[string] ))
identifier[occurrences] = identifier[stats] [ literal[string] ]
identifier[total_distinct] = identifier[sum] ([ identifier[v] [ literal[string] ] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[occurrences] . identifier[items] ()])
identifier[total_occurrences] = identifier[sum] ([ identifier[v] [ literal[string] ] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[occurrences] . identifier[items] ()])
identifier[charts] . identifier[append] ({
literal[string] :{
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[False] ,
literal[string] : literal[string]
},
literal[string] :{
literal[string] : identifier[_] ( literal[string] )
},
literal[string] :{
literal[string] :{
literal[string] : keyword[True] ,
literal[string] : literal[string] ,
literal[string] :{
literal[string] : keyword[False]
},
literal[string] : keyword[True] ,
literal[string] :{
literal[string] : identifier[point_formatter_code]
},
}
},
literal[string] :[{
literal[string] : literal[string] ,
literal[string] : keyword[True] ,
literal[string] :[{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[occurrences] [ identifier[PROJECT] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[PROJECT] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[PROJECT] ][ literal[string] ],
literal[string] : identifier[total_distinct] ,
literal[string] : identifier[total_occurrences] ,
literal[string] : identifier[URL_TYPE_COLOR] [ identifier[PROJECT] ]
},{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[occurrences] [ identifier[OLD_PROJECT] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[OLD_PROJECT] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[OLD_PROJECT] ][ literal[string] ],
literal[string] : identifier[total_distinct] ,
literal[string] : identifier[total_occurrences] ,
literal[string] : identifier[URL_TYPE_COLOR] [ identifier[OLD_PROJECT] ]
},{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[occurrences] [ identifier[ASSET] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[ASSET] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[ASSET] ][ literal[string] ],
literal[string] : identifier[total_distinct] ,
literal[string] : identifier[total_occurrences] ,
literal[string] : identifier[URL_TYPE_COLOR] [ identifier[ASSET] ]
},{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[occurrences] [ identifier[OLD_ASSET] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[OLD_ASSET] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[OLD_ASSET] ][ literal[string] ],
literal[string] : identifier[total_distinct] ,
literal[string] : identifier[total_occurrences] ,
literal[string] : identifier[URL_TYPE_COLOR] [ identifier[OLD_ASSET] ]
},{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[occurrences] [ identifier[COMMON_ASSET] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[COMMON_ASSET] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[COMMON_ASSET] ][ literal[string] ],
literal[string] : identifier[total_distinct] ,
literal[string] : identifier[total_occurrences] ,
literal[string] : identifier[URL_TYPE_COLOR] [ identifier[COMMON_ASSET] ]
},{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[occurrences] [ identifier[FALSE_NEGATIVE] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[FALSE_NEGATIVE] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[FALSE_NEGATIVE] ][ literal[string] ],
literal[string] : identifier[total_distinct] ,
literal[string] : identifier[total_occurrences] ,
literal[string] : identifier[URL_TYPE_COLOR] [ identifier[FALSE_NEGATIVE] ]
},{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[occurrences] [ identifier[SUSPICIOUS] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[SUSPICIOUS] ][ literal[string] ],
literal[string] : identifier[occurrences] [ identifier[SUSPICIOUS] ][ literal[string] ],
literal[string] : identifier[total_distinct] ,
literal[string] : identifier[total_occurrences] ,
literal[string] : identifier[URL_TYPE_COLOR] [ identifier[SUSPICIOUS] ]
}]
}]
})
keyword[return] identifier[charts] | def most_visited_pages_charts():
"""Chart for most visited pages."""
stats = most_visited_pages_stats()
charts = []
for (i, stat) in enumerate(stats['more_than_10']):
bound = stat['bound']
subset = stat['subset']
chart_options = {'chart': {'type': 'bar', 'height': 15 * len(subset) + 100}, 'title': {'text': {0: _('More than %d times') % bound}.get(i, _('Between %d and %d times') % (bound, stats['more_than_10'][i - 1]['bound']))}, 'xAxis': {'categories': [u for (u, c, t) in subset], 'title': {'text': None}}, 'yAxis': {'title': {'text': None}}, 'plotOptions': {'bar': {'dataLabels': {'enabled': True}}}, 'tooltip': {'enabled': False}, 'legend': {'enabled': False}, 'credits': {'enabled': False}}
series_data = []
for (index, (url, count, url_type)) in enumerate(subset):
data = {'x': index, 'y': count}
color = URL_TYPE_COLOR[url_type]
data['color'] = color
series_data.append(data) # depends on [control=['for'], data=[]]
chart_options['series'] = [{'name': _('Requests'), 'data': series_data}]
charts.append(chart_options) # depends on [control=['for'], data=[]]
point_formatter_code = "\n return '<br>%s: <strong>' + this.dis + '</strong>(' +\n Highcharts.numberFormat(this.dis / this.total_dis * 100, 1) + '%%)' +\n '<br>%s: <strong>' + this.occ + '</strong> (' +\n Highcharts.numberFormat(this.occ / this.total_occ * 100, 1) + '%%)';\n " % (_('Distinct URLs'), _('Occurrences'))
occurrences = stats['less_than_10']
total_distinct = sum([v['distinct'] for (k, v) in occurrences.items()])
total_occurrences = sum([v['total'] for (k, v) in occurrences.items()])
charts.append({'chart': {'plotBackgroundColor': None, 'plotBorderWidth': None, 'plotShadow': False, 'type': 'pie'}, 'title': {'text': _('Less than 10 (type repartition)')}, 'plotOptions': {'pie': {'allowPointSelect': True, 'cursor': 'pointer', 'dataLabels': {'enabled': False}, 'showInLegend': True, 'tooltip': {'pointFormatter': point_formatter_code}}}, 'series': [{'name': '', 'colorByPoint': True, 'data': [{'name': _('Valid project URL'), 'dis': occurrences[PROJECT]['distinct'], 'y': occurrences[PROJECT]['total'], 'occ': occurrences[PROJECT]['total'], 'total_dis': total_distinct, 'total_occ': total_occurrences, 'color': URL_TYPE_COLOR[PROJECT]}, {'name': _('Old project URL'), 'dis': occurrences[OLD_PROJECT]['distinct'], 'y': occurrences[OLD_PROJECT]['total'], 'occ': occurrences[OLD_PROJECT]['total'], 'total_dis': total_distinct, 'total_occ': total_occurrences, 'color': URL_TYPE_COLOR[OLD_PROJECT]}, {'name': _('Valid asset URL'), 'dis': occurrences[ASSET]['distinct'], 'y': occurrences[ASSET]['total'], 'occ': occurrences[ASSET]['total'], 'total_dis': total_distinct, 'total_occ': total_occurrences, 'color': URL_TYPE_COLOR[ASSET]}, {'name': _('Old asset URL'), 'dis': occurrences[OLD_ASSET]['distinct'], 'y': occurrences[OLD_ASSET]['total'], 'occ': occurrences[OLD_ASSET]['total'], 'total_dis': total_distinct, 'total_occ': total_occurrences, 'color': URL_TYPE_COLOR[OLD_ASSET]}, {'name': _('Common asset URL'), 'dis': occurrences[COMMON_ASSET]['distinct'], 'y': occurrences[COMMON_ASSET]['total'], 'occ': occurrences[COMMON_ASSET]['total'], 'total_dis': total_distinct, 'total_occ': total_occurrences, 'color': URL_TYPE_COLOR[COMMON_ASSET]}, {'name': _('False-negative project URL'), 'dis': occurrences[FALSE_NEGATIVE]['distinct'], 'y': occurrences[FALSE_NEGATIVE]['total'], 'occ': occurrences[FALSE_NEGATIVE]['total'], 'total_dis': total_distinct, 'total_occ': total_occurrences, 'color': URL_TYPE_COLOR[FALSE_NEGATIVE]}, {'name': _('Suspicious URL (potential attack)'), 'dis': 
occurrences[SUSPICIOUS]['distinct'], 'y': occurrences[SUSPICIOUS]['total'], 'occ': occurrences[SUSPICIOUS]['total'], 'total_dis': total_distinct, 'total_occ': total_occurrences, 'color': URL_TYPE_COLOR[SUSPICIOUS]}]}]})
return charts |
def collect(self):
"""
Override collect to copy files concurrently. The tasks are populated by
Command.copy_file() which is called by super().collect().
"""
ret = super(Command, self).collect()
if settings.threads:
Pool(settings.threads).map(self.do_copy_file, self.tasks)
return ret | def function[collect, parameter[self]]:
constant[
Override collect to copy files concurrently. The tasks are populated by
Command.copy_file() which is called by super().collect().
]
variable[ret] assign[=] call[call[name[super], parameter[name[Command], name[self]]].collect, parameter[]]
if name[settings].threads begin[:]
call[call[name[Pool], parameter[name[settings].threads]].map, parameter[name[self].do_copy_file, name[self].tasks]]
return[name[ret]] | keyword[def] identifier[collect] ( identifier[self] ):
literal[string]
identifier[ret] = identifier[super] ( identifier[Command] , identifier[self] ). identifier[collect] ()
keyword[if] identifier[settings] . identifier[threads] :
identifier[Pool] ( identifier[settings] . identifier[threads] ). identifier[map] ( identifier[self] . identifier[do_copy_file] , identifier[self] . identifier[tasks] )
keyword[return] identifier[ret] | def collect(self):
"""
Override collect to copy files concurrently. The tasks are populated by
Command.copy_file() which is called by super().collect().
"""
ret = super(Command, self).collect()
if settings.threads:
Pool(settings.threads).map(self.do_copy_file, self.tasks) # depends on [control=['if'], data=[]]
return ret |
def contains_geometric_info(var):
""" Check whether the passed variable is a tuple with two floats or integers """
return isinstance(var, tuple) and len(var) == 2 and all(isinstance(val, (int, float)) for val in var) | def function[contains_geometric_info, parameter[var]]:
constant[ Check whether the passed variable is a tuple with two floats or integers ]
return[<ast.BoolOp object at 0x7da1b1c7c8e0>] | keyword[def] identifier[contains_geometric_info] ( identifier[var] ):
literal[string]
keyword[return] identifier[isinstance] ( identifier[var] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[var] )== literal[int] keyword[and] identifier[all] ( identifier[isinstance] ( identifier[val] ,( identifier[int] , identifier[float] )) keyword[for] identifier[val] keyword[in] identifier[var] ) | def contains_geometric_info(var):
""" Check whether the passed variable is a tuple with two floats or integers """
return isinstance(var, tuple) and len(var) == 2 and all((isinstance(val, (int, float)) for val in var)) |
def get_default(self):
"""
Returns the default value for this field.
The default implementation on models.Field calls force_unicode
on the default, which means you can't set arbitrary Python
objects as the default. To fix this, we just return the value
without calling force_unicode on it. Note that if you set a
callable as a default, the field will still call it. It will
*not* try to pickle and encode it.
"""
if self.has_default():
if callable(self.default):
return self.default()
return self.default
# If the field doesn't have a default, then we punt to models.Field.
return super(PickledObjectField, self).get_default() | def function[get_default, parameter[self]]:
constant[
Returns the default value for this field.
The default implementation on models.Field calls force_unicode
on the default, which means you can't set arbitrary Python
objects as the default. To fix this, we just return the value
without calling force_unicode on it. Note that if you set a
callable as a default, the field will still call it. It will
*not* try to pickle and encode it.
]
if call[name[self].has_default, parameter[]] begin[:]
if call[name[callable], parameter[name[self].default]] begin[:]
return[call[name[self].default, parameter[]]]
return[name[self].default]
return[call[call[name[super], parameter[name[PickledObjectField], name[self]]].get_default, parameter[]]] | keyword[def] identifier[get_default] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[has_default] ():
keyword[if] identifier[callable] ( identifier[self] . identifier[default] ):
keyword[return] identifier[self] . identifier[default] ()
keyword[return] identifier[self] . identifier[default]
keyword[return] identifier[super] ( identifier[PickledObjectField] , identifier[self] ). identifier[get_default] () | def get_default(self):
"""
Returns the default value for this field.
The default implementation on models.Field calls force_unicode
on the default, which means you can't set arbitrary Python
objects as the default. To fix this, we just return the value
without calling force_unicode on it. Note that if you set a
callable as a default, the field will still call it. It will
*not* try to pickle and encode it.
"""
if self.has_default():
if callable(self.default):
return self.default() # depends on [control=['if'], data=[]]
return self.default # depends on [control=['if'], data=[]]
# If the field doesn't have a default, then we punt to models.Field.
return super(PickledObjectField, self).get_default() |
def union_categoricals(to_union, sort_categories=False, ignore_order=False):
"""
Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
.. versionadded:: 0.19.0
Parameters
----------
to_union : list-like of Categorical, CategoricalIndex,
or Series with dtype='category'
sort_categories : boolean, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
ignore_order : boolean, default False
If true, the ordered attribute of the Categoricals will be ignored.
Results in an unordered categorical.
.. versionadded:: 0.20.0
Returns
-------
result : Categorical
Raises
------
TypeError
- all inputs do not have the same dtype
- all inputs do not have the same ordered property
- all inputs are ordered and their categories are not identical
- sort_categories=True and Categoricals are ordered
ValueError
Empty list of categoricals passed
Notes
-----
To learn more about categories, see `link
<http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__
Examples
--------
>>> from pandas.api.types import union_categoricals
If you want to combine categoricals that do not necessarily have
the same categories, `union_categoricals` will combine a list-like
of categoricals. The new categories will be the union of the
categories being combined.
>>> a = pd.Categorical(["b", "c"])
>>> b = pd.Categorical(["a", "b"])
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
By default, the resulting categories will be ordered as they appear
in the `categories` of the data. If you want the categories to be
lexsorted, use `sort_categories=True` argument.
>>> union_categoricals([a, b], sort_categories=True)
[b, c, a, b]
Categories (3, object): [a, b, c]
`union_categoricals` also works with the case of combining two
categoricals of the same categories and order information (e.g. what
you could also `append` for).
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "a"], ordered=True)
>>> union_categoricals([a, b])
[a, b, a, b, a]
Categories (2, object): [a < b]
Raises `TypeError` because the categories are ordered and not identical.
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "c"], ordered=True)
>>> union_categoricals([a, b])
TypeError: to union ordered Categoricals, all categories must be the same
New in version 0.20.0
Ordered categoricals with different categories or orderings can be
combined by using the `ignore_ordered=True` argument.
>>> a = pd.Categorical(["a", "b", "c"], ordered=True)
>>> b = pd.Categorical(["c", "b", "a"], ordered=True)
>>> union_categoricals([a, b], ignore_order=True)
[a, b, c, c, b, a]
Categories (3, object): [a, b, c]
`union_categoricals` also works with a `CategoricalIndex`, or `Series`
containing categorical data, but note that the resulting array will
always be a plain `Categorical`
>>> a = pd.Series(["b", "c"], dtype='category')
>>> b = pd.Series(["a", "b"], dtype='category')
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
"""
from pandas import Index, Categorical, CategoricalIndex, Series
from pandas.core.arrays.categorical import _recode_for_categories
if len(to_union) == 0:
raise ValueError('No Categoricals to union')
def _maybe_unwrap(x):
if isinstance(x, (CategoricalIndex, Series)):
return x.values
elif isinstance(x, Categorical):
return x
else:
raise TypeError("all components to combine must be Categorical")
to_union = [_maybe_unwrap(x) for x in to_union]
first = to_union[0]
if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype)
for other in to_union[1:]):
raise TypeError("dtype of categories must be the same")
ordered = False
if all(first.is_dtype_equal(other) for other in to_union[1:]):
# identical categories - fastpath
categories = first.categories
ordered = first.ordered
if all(first.categories.equals(other.categories)
for other in to_union[1:]):
new_codes = np.concatenate([c.codes for c in to_union])
else:
codes = [first.codes] + [_recode_for_categories(other.codes,
other.categories,
first.categories)
for other in to_union[1:]]
new_codes = np.concatenate(codes)
if sort_categories and not ignore_order and ordered:
raise TypeError("Cannot use sort_categories=True with "
"ordered Categoricals")
if sort_categories and not categories.is_monotonic_increasing:
categories = categories.sort_values()
indexer = categories.get_indexer(first.categories)
from pandas.core.algorithms import take_1d
new_codes = take_1d(indexer, new_codes, fill_value=-1)
elif ignore_order or all(not c.ordered for c in to_union):
# different categories - union and recode
cats = first.categories.append([c.categories for c in to_union[1:]])
categories = Index(cats.unique())
if sort_categories:
categories = categories.sort_values()
new_codes = [_recode_for_categories(c.codes, c.categories, categories)
for c in to_union]
new_codes = np.concatenate(new_codes)
else:
# ordered - to show a proper error message
if all(c.ordered for c in to_union):
msg = ("to union ordered Categoricals, "
"all categories must be the same")
raise TypeError(msg)
else:
raise TypeError('Categorical.ordered must be the same')
if ignore_order:
ordered = False
return Categorical(new_codes, categories=categories, ordered=ordered,
fastpath=True) | def function[union_categoricals, parameter[to_union, sort_categories, ignore_order]]:
constant[
Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
.. versionadded:: 0.19.0
Parameters
----------
to_union : list-like of Categorical, CategoricalIndex,
or Series with dtype='category'
sort_categories : boolean, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
ignore_order : boolean, default False
If true, the ordered attribute of the Categoricals will be ignored.
Results in an unordered categorical.
.. versionadded:: 0.20.0
Returns
-------
result : Categorical
Raises
------
TypeError
- all inputs do not have the same dtype
- all inputs do not have the same ordered property
- all inputs are ordered and their categories are not identical
- sort_categories=True and Categoricals are ordered
ValueError
Empty list of categoricals passed
Notes
-----
To learn more about categories, see `link
<http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__
Examples
--------
>>> from pandas.api.types import union_categoricals
If you want to combine categoricals that do not necessarily have
the same categories, `union_categoricals` will combine a list-like
of categoricals. The new categories will be the union of the
categories being combined.
>>> a = pd.Categorical(["b", "c"])
>>> b = pd.Categorical(["a", "b"])
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
By default, the resulting categories will be ordered as they appear
in the `categories` of the data. If you want the categories to be
lexsorted, use `sort_categories=True` argument.
>>> union_categoricals([a, b], sort_categories=True)
[b, c, a, b]
Categories (3, object): [a, b, c]
`union_categoricals` also works with the case of combining two
categoricals of the same categories and order information (e.g. what
you could also `append` for).
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "a"], ordered=True)
>>> union_categoricals([a, b])
[a, b, a, b, a]
Categories (2, object): [a < b]
Raises `TypeError` because the categories are ordered and not identical.
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "c"], ordered=True)
>>> union_categoricals([a, b])
TypeError: to union ordered Categoricals, all categories must be the same
New in version 0.20.0
Ordered categoricals with different categories or orderings can be
combined by using the `ignore_ordered=True` argument.
>>> a = pd.Categorical(["a", "b", "c"], ordered=True)
>>> b = pd.Categorical(["c", "b", "a"], ordered=True)
>>> union_categoricals([a, b], ignore_order=True)
[a, b, c, c, b, a]
Categories (3, object): [a, b, c]
`union_categoricals` also works with a `CategoricalIndex`, or `Series`
containing categorical data, but note that the resulting array will
always be a plain `Categorical`
>>> a = pd.Series(["b", "c"], dtype='category')
>>> b = pd.Series(["a", "b"], dtype='category')
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
]
from relative_module[pandas] import module[Index], module[Categorical], module[CategoricalIndex], module[Series]
from relative_module[pandas.core.arrays.categorical] import module[_recode_for_categories]
if compare[call[name[len], parameter[name[to_union]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18ede7b50>
def function[_maybe_unwrap, parameter[x]]:
if call[name[isinstance], parameter[name[x], tuple[[<ast.Name object at 0x7da18ede70d0>, <ast.Name object at 0x7da18ede7c70>]]]] begin[:]
return[name[x].values]
variable[to_union] assign[=] <ast.ListComp object at 0x7da18ede5810>
variable[first] assign[=] call[name[to_union]][constant[0]]
if <ast.UnaryOp object at 0x7da18ede5d80> begin[:]
<ast.Raise object at 0x7da18ede4460>
variable[ordered] assign[=] constant[False]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da18ede7310>]] begin[:]
variable[categories] assign[=] name[first].categories
variable[ordered] assign[=] name[first].ordered
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da18ede44f0>]] begin[:]
variable[new_codes] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18ede4850>]]
if <ast.BoolOp object at 0x7da18ede6470> begin[:]
<ast.Raise object at 0x7da18ede5780>
if <ast.BoolOp object at 0x7da18ede5c00> begin[:]
variable[categories] assign[=] call[name[categories].sort_values, parameter[]]
variable[indexer] assign[=] call[name[categories].get_indexer, parameter[name[first].categories]]
from relative_module[pandas.core.algorithms] import module[take_1d]
variable[new_codes] assign[=] call[name[take_1d], parameter[name[indexer], name[new_codes]]]
if name[ignore_order] begin[:]
variable[ordered] assign[=] constant[False]
return[call[name[Categorical], parameter[name[new_codes]]]] | keyword[def] identifier[union_categoricals] ( identifier[to_union] , identifier[sort_categories] = keyword[False] , identifier[ignore_order] = keyword[False] ):
literal[string]
keyword[from] identifier[pandas] keyword[import] identifier[Index] , identifier[Categorical] , identifier[CategoricalIndex] , identifier[Series]
keyword[from] identifier[pandas] . identifier[core] . identifier[arrays] . identifier[categorical] keyword[import] identifier[_recode_for_categories]
keyword[if] identifier[len] ( identifier[to_union] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[def] identifier[_maybe_unwrap] ( identifier[x] ):
keyword[if] identifier[isinstance] ( identifier[x] ,( identifier[CategoricalIndex] , identifier[Series] )):
keyword[return] identifier[x] . identifier[values]
keyword[elif] identifier[isinstance] ( identifier[x] , identifier[Categorical] ):
keyword[return] identifier[x]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[to_union] =[ identifier[_maybe_unwrap] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[to_union] ]
identifier[first] = identifier[to_union] [ literal[int] ]
keyword[if] keyword[not] identifier[all] ( identifier[is_dtype_equal] ( identifier[other] . identifier[categories] . identifier[dtype] , identifier[first] . identifier[categories] . identifier[dtype] )
keyword[for] identifier[other] keyword[in] identifier[to_union] [ literal[int] :]):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[ordered] = keyword[False]
keyword[if] identifier[all] ( identifier[first] . identifier[is_dtype_equal] ( identifier[other] ) keyword[for] identifier[other] keyword[in] identifier[to_union] [ literal[int] :]):
identifier[categories] = identifier[first] . identifier[categories]
identifier[ordered] = identifier[first] . identifier[ordered]
keyword[if] identifier[all] ( identifier[first] . identifier[categories] . identifier[equals] ( identifier[other] . identifier[categories] )
keyword[for] identifier[other] keyword[in] identifier[to_union] [ literal[int] :]):
identifier[new_codes] = identifier[np] . identifier[concatenate] ([ identifier[c] . identifier[codes] keyword[for] identifier[c] keyword[in] identifier[to_union] ])
keyword[else] :
identifier[codes] =[ identifier[first] . identifier[codes] ]+[ identifier[_recode_for_categories] ( identifier[other] . identifier[codes] ,
identifier[other] . identifier[categories] ,
identifier[first] . identifier[categories] )
keyword[for] identifier[other] keyword[in] identifier[to_union] [ literal[int] :]]
identifier[new_codes] = identifier[np] . identifier[concatenate] ( identifier[codes] )
keyword[if] identifier[sort_categories] keyword[and] keyword[not] identifier[ignore_order] keyword[and] identifier[ordered] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[if] identifier[sort_categories] keyword[and] keyword[not] identifier[categories] . identifier[is_monotonic_increasing] :
identifier[categories] = identifier[categories] . identifier[sort_values] ()
identifier[indexer] = identifier[categories] . identifier[get_indexer] ( identifier[first] . identifier[categories] )
keyword[from] identifier[pandas] . identifier[core] . identifier[algorithms] keyword[import] identifier[take_1d]
identifier[new_codes] = identifier[take_1d] ( identifier[indexer] , identifier[new_codes] , identifier[fill_value] =- literal[int] )
keyword[elif] identifier[ignore_order] keyword[or] identifier[all] ( keyword[not] identifier[c] . identifier[ordered] keyword[for] identifier[c] keyword[in] identifier[to_union] ):
identifier[cats] = identifier[first] . identifier[categories] . identifier[append] ([ identifier[c] . identifier[categories] keyword[for] identifier[c] keyword[in] identifier[to_union] [ literal[int] :]])
identifier[categories] = identifier[Index] ( identifier[cats] . identifier[unique] ())
keyword[if] identifier[sort_categories] :
identifier[categories] = identifier[categories] . identifier[sort_values] ()
identifier[new_codes] =[ identifier[_recode_for_categories] ( identifier[c] . identifier[codes] , identifier[c] . identifier[categories] , identifier[categories] )
keyword[for] identifier[c] keyword[in] identifier[to_union] ]
identifier[new_codes] = identifier[np] . identifier[concatenate] ( identifier[new_codes] )
keyword[else] :
keyword[if] identifier[all] ( identifier[c] . identifier[ordered] keyword[for] identifier[c] keyword[in] identifier[to_union] ):
identifier[msg] =( literal[string]
literal[string] )
keyword[raise] identifier[TypeError] ( identifier[msg] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[ignore_order] :
identifier[ordered] = keyword[False]
keyword[return] identifier[Categorical] ( identifier[new_codes] , identifier[categories] = identifier[categories] , identifier[ordered] = identifier[ordered] ,
identifier[fastpath] = keyword[True] ) | def union_categoricals(to_union, sort_categories=False, ignore_order=False):
"""
Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
.. versionadded:: 0.19.0
Parameters
----------
to_union : list-like of Categorical, CategoricalIndex,
or Series with dtype='category'
sort_categories : boolean, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
ignore_order : boolean, default False
If true, the ordered attribute of the Categoricals will be ignored.
Results in an unordered categorical.
.. versionadded:: 0.20.0
Returns
-------
result : Categorical
Raises
------
TypeError
- all inputs do not have the same dtype
- all inputs do not have the same ordered property
- all inputs are ordered and their categories are not identical
- sort_categories=True and Categoricals are ordered
ValueError
Empty list of categoricals passed
Notes
-----
To learn more about categories, see `link
<http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__
Examples
--------
>>> from pandas.api.types import union_categoricals
If you want to combine categoricals that do not necessarily have
the same categories, `union_categoricals` will combine a list-like
of categoricals. The new categories will be the union of the
categories being combined.
>>> a = pd.Categorical(["b", "c"])
>>> b = pd.Categorical(["a", "b"])
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
By default, the resulting categories will be ordered as they appear
in the `categories` of the data. If you want the categories to be
lexsorted, use `sort_categories=True` argument.
>>> union_categoricals([a, b], sort_categories=True)
[b, c, a, b]
Categories (3, object): [a, b, c]
`union_categoricals` also works with the case of combining two
categoricals of the same categories and order information (e.g. what
you could also `append` for).
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "a"], ordered=True)
>>> union_categoricals([a, b])
[a, b, a, b, a]
Categories (2, object): [a < b]
Raises `TypeError` because the categories are ordered and not identical.
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "c"], ordered=True)
>>> union_categoricals([a, b])
TypeError: to union ordered Categoricals, all categories must be the same
New in version 0.20.0
Ordered categoricals with different categories or orderings can be
combined by using the `ignore_ordered=True` argument.
>>> a = pd.Categorical(["a", "b", "c"], ordered=True)
>>> b = pd.Categorical(["c", "b", "a"], ordered=True)
>>> union_categoricals([a, b], ignore_order=True)
[a, b, c, c, b, a]
Categories (3, object): [a, b, c]
`union_categoricals` also works with a `CategoricalIndex`, or `Series`
containing categorical data, but note that the resulting array will
always be a plain `Categorical`
>>> a = pd.Series(["b", "c"], dtype='category')
>>> b = pd.Series(["a", "b"], dtype='category')
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
"""
from pandas import Index, Categorical, CategoricalIndex, Series
from pandas.core.arrays.categorical import _recode_for_categories
if len(to_union) == 0:
raise ValueError('No Categoricals to union') # depends on [control=['if'], data=[]]
def _maybe_unwrap(x):
if isinstance(x, (CategoricalIndex, Series)):
return x.values # depends on [control=['if'], data=[]]
elif isinstance(x, Categorical):
return x # depends on [control=['if'], data=[]]
else:
raise TypeError('all components to combine must be Categorical')
to_union = [_maybe_unwrap(x) for x in to_union]
first = to_union[0]
if not all((is_dtype_equal(other.categories.dtype, first.categories.dtype) for other in to_union[1:])):
raise TypeError('dtype of categories must be the same') # depends on [control=['if'], data=[]]
ordered = False
if all((first.is_dtype_equal(other) for other in to_union[1:])):
# identical categories - fastpath
categories = first.categories
ordered = first.ordered
if all((first.categories.equals(other.categories) for other in to_union[1:])):
new_codes = np.concatenate([c.codes for c in to_union]) # depends on [control=['if'], data=[]]
else:
codes = [first.codes] + [_recode_for_categories(other.codes, other.categories, first.categories) for other in to_union[1:]]
new_codes = np.concatenate(codes)
if sort_categories and (not ignore_order) and ordered:
raise TypeError('Cannot use sort_categories=True with ordered Categoricals') # depends on [control=['if'], data=[]]
if sort_categories and (not categories.is_monotonic_increasing):
categories = categories.sort_values()
indexer = categories.get_indexer(first.categories)
from pandas.core.algorithms import take_1d
new_codes = take_1d(indexer, new_codes, fill_value=-1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif ignore_order or all((not c.ordered for c in to_union)):
# different categories - union and recode
cats = first.categories.append([c.categories for c in to_union[1:]])
categories = Index(cats.unique())
if sort_categories:
categories = categories.sort_values() # depends on [control=['if'], data=[]]
new_codes = [_recode_for_categories(c.codes, c.categories, categories) for c in to_union]
new_codes = np.concatenate(new_codes) # depends on [control=['if'], data=[]]
# ordered - to show a proper error message
elif all((c.ordered for c in to_union)):
msg = 'to union ordered Categoricals, all categories must be the same'
raise TypeError(msg) # depends on [control=['if'], data=[]]
else:
raise TypeError('Categorical.ordered must be the same')
if ignore_order:
ordered = False # depends on [control=['if'], data=[]]
return Categorical(new_codes, categories=categories, ordered=ordered, fastpath=True) |
def maybe_decode_payload(payload, content_type='application/nanoservice-tlv', decode_b64=True):
    """If the payload is tlv, decode it, otherwise passthrough

    :param payload: some data
    :param content_type: http content type
    :param decode_b64: by default, payload is assumed to be b64 encoded
    :return: decoded python object for tlv content, otherwise the raw bytes,
        or None when the payload is empty
    """
    if not payload:
        return None
    if decode_b64:
        binary = b64decoder(payload)
    else:
        binary = payload
    # Only content types mentioning "tlv" get decoded; anything else passes through.
    is_tlv = bool(content_type) and 'tlv' in content_type.lower()
    if is_tlv:
        return binary_tlv_to_python(bytearray(binary))
    return binary
constant[If the payload is tlv, decode it, otherwise passthrough
:param payload: some data
:param content_type: http content type
:param decode_b64: by default, payload is assumed to be b64 encoded
:return:
]
if <ast.UnaryOp object at 0x7da1b04a5060> begin[:]
return[constant[None]]
variable[binary] assign[=] <ast.IfExp object at 0x7da1b04a7880>
if <ast.BoolOp object at 0x7da1b04a4700> begin[:]
return[call[name[binary_tlv_to_python], parameter[call[name[bytearray], parameter[name[binary]]]]]]
return[name[binary]] | keyword[def] identifier[maybe_decode_payload] ( identifier[payload] , identifier[content_type] = literal[string] , identifier[decode_b64] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[payload] :
keyword[return] keyword[None]
identifier[binary] = identifier[b64decoder] ( identifier[payload] ) keyword[if] identifier[decode_b64] keyword[else] identifier[payload]
keyword[if] identifier[content_type] keyword[and] literal[string] keyword[in] identifier[content_type] . identifier[lower] ():
keyword[return] identifier[binary_tlv_to_python] ( identifier[bytearray] ( identifier[binary] ))
keyword[return] identifier[binary] | def maybe_decode_payload(payload, content_type='application/nanoservice-tlv', decode_b64=True):
"""If the payload is tlv, decode it, otherwise passthrough
:param payload: some data
:param content_type: http content type
:param decode_b64: by default, payload is assumed to be b64 encoded
:return:
"""
if not payload:
return None # depends on [control=['if'], data=[]]
binary = b64decoder(payload) if decode_b64 else payload
if content_type and 'tlv' in content_type.lower():
return binary_tlv_to_python(bytearray(binary)) # depends on [control=['if'], data=[]]
return binary |
def _receiverThread(session):
    """Receiving messages from cjdns admin server"""
    # Timestamps driving the keepalive logic: when the last ping was sent
    # and when the last reply of any kind arrived.
    timeOfLastSend = time.time()
    timeOfLastRecv = time.time()
    try:
        while True:
            if timeOfLastSend + KEEPALIVE_INTERVAL_SECONDS < time.time():
                # A keepalive is due; if nothing has been received for more
                # than 10 seconds, consider the connection dead.
                if timeOfLastRecv + 10 < time.time():
                    raise exceptions.PingTimeout()
                # Bencoded Admin_asyncEnabled query. Note the "8:" length
                # prefix: the txid value is the 8-byte string "keepaliv",
                # and the trailing "e" closes the bencoded dict.
                session.socket.send(
                    b'd1:q18:Admin_asyncEnabled4:txid8:keepalive')
                timeOfLastSend = time.time()
            try:
                data = session.socket.recv(BUFFER_SIZE)
            except socket.timeout:
                # No data within the socket timeout; loop again so the
                # keepalive check above still runs.
                continue
            try:
                benc = bdecode(data)
            except (KeyError, ValueError):
                # Malformed bencoded payload: log it and keep listening.
                logger.error("error decoding [%s]", data)
                continue
            if benc['txid'] == 'keepaliv':
                # Reply to our own keepalive ping; asyncEnabled == 0 means
                # the server no longer has our async session.
                if benc['asyncEnabled'] == 0:
                    raise exceptions.SessionLost()
                timeOfLastRecv = time.time()
            else:
                # Any other message is an async event for the consumer.
                session.queue.put(benc)
    except KeyboardInterrupt:
        logger.exception("interrupted")
        # NOTE(review): `thread` is the Python 2 module name (`_thread` on
        # Python 3) — this handler only works on Python 2 as written.
        import thread
        thread.interrupt_main()
constant[Receiving messages from cjdns admin server]
variable[timeOfLastSend] assign[=] call[name[time].time, parameter[]]
variable[timeOfLastRecv] assign[=] call[name[time].time, parameter[]]
<ast.Try object at 0x7da1b1a76ef0> | keyword[def] identifier[_receiverThread] ( identifier[session] ):
literal[string]
identifier[timeOfLastSend] = identifier[time] . identifier[time] ()
identifier[timeOfLastRecv] = identifier[time] . identifier[time] ()
keyword[try] :
keyword[while] keyword[True] :
keyword[if] identifier[timeOfLastSend] + identifier[KEEPALIVE_INTERVAL_SECONDS] < identifier[time] . identifier[time] ():
keyword[if] identifier[timeOfLastRecv] + literal[int] < identifier[time] . identifier[time] ():
keyword[raise] identifier[exceptions] . identifier[PingTimeout] ()
identifier[session] . identifier[socket] . identifier[send] (
literal[string] )
identifier[timeOfLastSend] = identifier[time] . identifier[time] ()
keyword[try] :
identifier[data] = identifier[session] . identifier[socket] . identifier[recv] ( identifier[BUFFER_SIZE] )
keyword[except] identifier[socket] . identifier[timeout] :
keyword[continue]
keyword[try] :
identifier[benc] = identifier[bdecode] ( identifier[data] )
keyword[except] ( identifier[KeyError] , identifier[ValueError] ):
identifier[logger] . identifier[error] ( literal[string] , identifier[data] )
keyword[continue]
keyword[if] identifier[benc] [ literal[string] ]== literal[string] :
keyword[if] identifier[benc] [ literal[string] ]== literal[int] :
keyword[raise] identifier[exceptions] . identifier[SessionLost] ()
identifier[timeOfLastRecv] = identifier[time] . identifier[time] ()
keyword[else] :
identifier[session] . identifier[queue] . identifier[put] ( identifier[benc] )
keyword[except] identifier[KeyboardInterrupt] :
identifier[logger] . identifier[exception] ( literal[string] )
keyword[import] identifier[thread]
identifier[thread] . identifier[interrupt_main] () | def _receiverThread(session):
"""Receiving messages from cjdns admin server"""
timeOfLastSend = time.time()
timeOfLastRecv = time.time()
try:
while True:
if timeOfLastSend + KEEPALIVE_INTERVAL_SECONDS < time.time():
if timeOfLastRecv + 10 < time.time():
raise exceptions.PingTimeout() # depends on [control=['if'], data=[]]
session.socket.send(b'd1:q18:Admin_asyncEnabled4:txid8:keepalive')
timeOfLastSend = time.time() # depends on [control=['if'], data=[]]
try:
data = session.socket.recv(BUFFER_SIZE) # depends on [control=['try'], data=[]]
except socket.timeout:
continue # depends on [control=['except'], data=[]]
try:
benc = bdecode(data) # depends on [control=['try'], data=[]]
except (KeyError, ValueError):
logger.error('error decoding [%s]', data)
continue # depends on [control=['except'], data=[]]
if benc['txid'] == 'keepaliv':
if benc['asyncEnabled'] == 0:
raise exceptions.SessionLost() # depends on [control=['if'], data=[]]
timeOfLastRecv = time.time() # depends on [control=['if'], data=[]]
else:
session.queue.put(benc) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
logger.exception('interrupted')
import thread
thread.interrupt_main() # depends on [control=['except'], data=[]] |
def thread_lock(lock):
    """Return the thread lock for *lock*."""
    # Wrapped locks expose the underlying lock as `_lock`; prefer it.
    try:
        return lock._lock
    except AttributeError:
        pass
    # A plain Lock/RLock-like object is identified by its `acquire` method.
    if hasattr(lock, 'acquire'):
        return lock
    raise TypeError('expecting Lock/RLock')
constant[Return the thread lock for *lock*.]
if call[name[hasattr], parameter[name[lock], constant[_lock]]] begin[:]
return[name[lock]._lock] | keyword[def] identifier[thread_lock] ( identifier[lock] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[lock] , literal[string] ):
keyword[return] identifier[lock] . identifier[_lock]
keyword[elif] identifier[hasattr] ( identifier[lock] , literal[string] ):
keyword[return] identifier[lock]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def thread_lock(lock):
"""Return the thread lock for *lock*."""
if hasattr(lock, '_lock'):
return lock._lock # depends on [control=['if'], data=[]]
elif hasattr(lock, 'acquire'):
return lock # depends on [control=['if'], data=[]]
else:
raise TypeError('expecting Lock/RLock') |
def f_i18n_iso(isocode, lang="eng"):
    """ Replace isocode by its language equivalent

    :param isocode: Three character long language code
    :param lang: Lang in which to return the language name
    :return: Full Text Language Name
    """
    # Fall back to English when the requested translation is unavailable.
    effective_lang = lang
    if effective_lang not in flask_nemo._data.AVAILABLE_TRANSLATIONS:
        effective_lang = "eng"
    try:
        return flask_nemo._data.ISOCODES[isocode][effective_lang]
    except KeyError:
        # Unknown isocode (or missing translation entry).
        return "Unknown"
constant[ Replace isocode by its language equivalent
:param isocode: Three character long language code
:param lang: Lang in which to return the language name
:return: Full Text Language Name
]
if compare[name[lang] <ast.NotIn object at 0x7da2590d7190> name[flask_nemo]._data.AVAILABLE_TRANSLATIONS] begin[:]
variable[lang] assign[=] constant[eng]
<ast.Try object at 0x7da1b00994e0> | keyword[def] identifier[f_i18n_iso] ( identifier[isocode] , identifier[lang] = literal[string] ):
literal[string]
keyword[if] identifier[lang] keyword[not] keyword[in] identifier[flask_nemo] . identifier[_data] . identifier[AVAILABLE_TRANSLATIONS] :
identifier[lang] = literal[string]
keyword[try] :
keyword[return] identifier[flask_nemo] . identifier[_data] . identifier[ISOCODES] [ identifier[isocode] ][ identifier[lang] ]
keyword[except] identifier[KeyError] :
keyword[return] literal[string] | def f_i18n_iso(isocode, lang='eng'):
""" Replace isocode by its language equivalent
:param isocode: Three character long language code
:param lang: Lang in which to return the language name
:return: Full Text Language Name
"""
if lang not in flask_nemo._data.AVAILABLE_TRANSLATIONS:
lang = 'eng' # depends on [control=['if'], data=['lang']]
try:
return flask_nemo._data.ISOCODES[isocode][lang] # depends on [control=['try'], data=[]]
except KeyError:
return 'Unknown' # depends on [control=['except'], data=[]] |
def save(self, filepath):
    # type: (str) -> None
    """Save current configuration to file.

    :param str filepath: Path to file where settings will be saved.
    :raises: ValueError if supplied filepath cannot be written to.
    """
    # Sections are added fresh on every call; _clear_config() in the
    # finally block removes them again so save() can be called repeatedly
    # without configparser raising DuplicateSectionError.
    sections = [
        "Connection",
        "Proxies",
        "RedirectPolicy"]
    for section in sections:
        self._config.add_section(section)
    # Mirror the in-memory settings objects into the parser.
    self._config.set("Connection", "timeout", self.connection.timeout)
    self._config.set("Connection", "verify", self.connection.verify)
    self._config.set("Connection", "cert", self.connection.cert)
    self._config.set("Proxies", "proxies", self.proxies.proxies)
    self._config.set("Proxies", "env_settings",
                     self.proxies.use_env_settings)
    self._config.set("RedirectPolicy", "allow", self.redirect_policy.allow)
    self._config.set("RedirectPolicy", "max_redirects",
                     self.redirect_policy.max_redirects)
    try:
        with open(filepath, 'w') as configfile:
            self._config.write(configfile)
    except (KeyError, EnvironmentError):
        # Any filesystem failure (or missing section/option) is surfaced
        # as ValueError, keeping the original traceback attached.
        error = "Supplied config filepath invalid."
        raise_with_traceback(ValueError, error)
    finally:
        # Always drop the sections added above, even on failure.
        self._clear_config()
constant[Save current configuration to file.
:param str filepath: Path to file where settings will be saved.
:raises: ValueError if supplied filepath cannot be written to.
]
variable[sections] assign[=] list[[<ast.Constant object at 0x7da18f58dc90>, <ast.Constant object at 0x7da18f58c430>, <ast.Constant object at 0x7da18f58fe20>]]
for taget[name[section]] in starred[name[sections]] begin[:]
call[name[self]._config.add_section, parameter[name[section]]]
call[name[self]._config.set, parameter[constant[Connection], constant[timeout], name[self].connection.timeout]]
call[name[self]._config.set, parameter[constant[Connection], constant[verify], name[self].connection.verify]]
call[name[self]._config.set, parameter[constant[Connection], constant[cert], name[self].connection.cert]]
call[name[self]._config.set, parameter[constant[Proxies], constant[proxies], name[self].proxies.proxies]]
call[name[self]._config.set, parameter[constant[Proxies], constant[env_settings], name[self].proxies.use_env_settings]]
call[name[self]._config.set, parameter[constant[RedirectPolicy], constant[allow], name[self].redirect_policy.allow]]
call[name[self]._config.set, parameter[constant[RedirectPolicy], constant[max_redirects], name[self].redirect_policy.max_redirects]]
<ast.Try object at 0x7da18dc06ec0> | keyword[def] identifier[save] ( identifier[self] , identifier[filepath] ):
literal[string]
identifier[sections] =[
literal[string] ,
literal[string] ,
literal[string] ]
keyword[for] identifier[section] keyword[in] identifier[sections] :
identifier[self] . identifier[_config] . identifier[add_section] ( identifier[section] )
identifier[self] . identifier[_config] . identifier[set] ( literal[string] , literal[string] , identifier[self] . identifier[connection] . identifier[timeout] )
identifier[self] . identifier[_config] . identifier[set] ( literal[string] , literal[string] , identifier[self] . identifier[connection] . identifier[verify] )
identifier[self] . identifier[_config] . identifier[set] ( literal[string] , literal[string] , identifier[self] . identifier[connection] . identifier[cert] )
identifier[self] . identifier[_config] . identifier[set] ( literal[string] , literal[string] , identifier[self] . identifier[proxies] . identifier[proxies] )
identifier[self] . identifier[_config] . identifier[set] ( literal[string] , literal[string] ,
identifier[self] . identifier[proxies] . identifier[use_env_settings] )
identifier[self] . identifier[_config] . identifier[set] ( literal[string] , literal[string] , identifier[self] . identifier[redirect_policy] . identifier[allow] )
identifier[self] . identifier[_config] . identifier[set] ( literal[string] , literal[string] ,
identifier[self] . identifier[redirect_policy] . identifier[max_redirects] )
keyword[try] :
keyword[with] identifier[open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[configfile] :
identifier[self] . identifier[_config] . identifier[write] ( identifier[configfile] )
keyword[except] ( identifier[KeyError] , identifier[EnvironmentError] ):
identifier[error] = literal[string]
identifier[raise_with_traceback] ( identifier[ValueError] , identifier[error] )
keyword[finally] :
identifier[self] . identifier[_clear_config] () | def save(self, filepath):
# type: (str) -> None
'Save current configuration to file.\n\n :param str filepath: Path to file where settings will be saved.\n :raises: ValueError if supplied filepath cannot be written to.\n '
sections = ['Connection', 'Proxies', 'RedirectPolicy']
for section in sections:
self._config.add_section(section) # depends on [control=['for'], data=['section']]
self._config.set('Connection', 'timeout', self.connection.timeout)
self._config.set('Connection', 'verify', self.connection.verify)
self._config.set('Connection', 'cert', self.connection.cert)
self._config.set('Proxies', 'proxies', self.proxies.proxies)
self._config.set('Proxies', 'env_settings', self.proxies.use_env_settings)
self._config.set('RedirectPolicy', 'allow', self.redirect_policy.allow)
self._config.set('RedirectPolicy', 'max_redirects', self.redirect_policy.max_redirects)
try:
with open(filepath, 'w') as configfile:
self._config.write(configfile) # depends on [control=['with'], data=['configfile']] # depends on [control=['try'], data=[]]
except (KeyError, EnvironmentError):
error = 'Supplied config filepath invalid.'
raise_with_traceback(ValueError, error) # depends on [control=['except'], data=[]]
finally:
self._clear_config() |
def write_implied_format(self, path, jpeg_quality=0, jpeg_progressive=0):
    """Write pix to the filename, with the extension indicating format.

    jpeg_quality -- quality (iff JPEG; 1 - 100, 0 for default)
    jpeg_progressive -- (iff JPEG; 0 for baseline seq., 1 for progressive)
    """
    # Leptonica takes a C byte string for the filename.
    encoded_path = os.fsencode(fspath(path))
    with _LeptonicaErrorTrap():
        lept.pixWriteImpliedFormat(
            encoded_path, self._cdata, jpeg_quality, jpeg_progressive
        )
constant[Write pix to the filename, with the extension indicating format.
jpeg_quality -- quality (iff JPEG; 1 - 100, 0 for default)
jpeg_progressive -- (iff JPEG; 0 for baseline seq., 1 for progressive)
]
variable[filename] assign[=] call[name[fspath], parameter[name[path]]]
with call[name[_LeptonicaErrorTrap], parameter[]] begin[:]
call[name[lept].pixWriteImpliedFormat, parameter[call[name[os].fsencode, parameter[name[filename]]], name[self]._cdata, name[jpeg_quality], name[jpeg_progressive]]] | keyword[def] identifier[write_implied_format] ( identifier[self] , identifier[path] , identifier[jpeg_quality] = literal[int] , identifier[jpeg_progressive] = literal[int] ):
literal[string]
identifier[filename] = identifier[fspath] ( identifier[path] )
keyword[with] identifier[_LeptonicaErrorTrap] ():
identifier[lept] . identifier[pixWriteImpliedFormat] (
identifier[os] . identifier[fsencode] ( identifier[filename] ), identifier[self] . identifier[_cdata] , identifier[jpeg_quality] , identifier[jpeg_progressive]
) | def write_implied_format(self, path, jpeg_quality=0, jpeg_progressive=0):
"""Write pix to the filename, with the extension indicating format.
jpeg_quality -- quality (iff JPEG; 1 - 100, 0 for default)
jpeg_progressive -- (iff JPEG; 0 for baseline seq., 1 for progressive)
"""
filename = fspath(path)
with _LeptonicaErrorTrap():
lept.pixWriteImpliedFormat(os.fsencode(filename), self._cdata, jpeg_quality, jpeg_progressive) # depends on [control=['with'], data=[]] |
def payload(self, value):
    """
    Sets the payload of the message and eventually the Content-Type
    :param value: the payload
    """
    if not isinstance(value, tuple):
        # Plain payload: content type is left untouched.
        self._payload = value
        return
    # A (content_type, payload) pair sets both attributes at once.
    self.content_type, self._payload = value
constant[
Sets the payload of the message and eventually the Content-Type
:param value: the payload
]
if call[name[isinstance], parameter[name[value], name[tuple]]] begin[:]
<ast.Tuple object at 0x7da207f00970> assign[=] name[value]
name[self].content_type assign[=] name[content_type]
name[self]._payload assign[=] name[payload] | keyword[def] identifier[payload] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[tuple] ):
identifier[content_type] , identifier[payload] = identifier[value]
identifier[self] . identifier[content_type] = identifier[content_type]
identifier[self] . identifier[_payload] = identifier[payload]
keyword[else] :
identifier[self] . identifier[_payload] = identifier[value] | def payload(self, value):
"""
Sets the payload of the message and eventually the Content-Type
:param value: the payload
"""
if isinstance(value, tuple):
(content_type, payload) = value
self.content_type = content_type
self._payload = payload # depends on [control=['if'], data=[]]
else:
self._payload = value |
def after_batch(self, stream_name: str, batch_data) -> None:
    """
    If initialized to check after each batch, stop the training once the batch data contains a monitored
    variable equal to NaN.

    :param stream_name: name of the stream to be checked
    :param batch_data: batch data to be checked
    """
    if not self._after_batch:
        return
    # Wrap the batch in a {stream: data} mapping, the shape _check_nan expects.
    self._check_nan({stream_name: batch_data})
constant[
If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked
]
if name[self]._after_batch begin[:]
call[name[self]._check_nan, parameter[dictionary[[<ast.Name object at 0x7da207f9b8e0>], [<ast.Name object at 0x7da207f9a830>]]]] | keyword[def] identifier[after_batch] ( identifier[self] , identifier[stream_name] : identifier[str] , identifier[batch_data] )-> keyword[None] :
literal[string]
keyword[if] identifier[self] . identifier[_after_batch] :
identifier[self] . identifier[_check_nan] ({ identifier[stream_name] : identifier[batch_data] }) | def after_batch(self, stream_name: str, batch_data) -> None:
"""
If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked
"""
if self._after_batch:
self._check_nan({stream_name: batch_data}) # depends on [control=['if'], data=[]] |
def angle_wrap(angle, radians=False):
    '''Wraps the input angle to 360.0 degrees.

    Parameters
    ----------
    angle : float
        The angle to wrap around 360.0 deg.
    radians : bool
        If True, will assume that the input is in radians. The output will then
        also be in radians.

    Returns
    -------
    float
        Wrapped angle. If radians is True: input is assumed to be in radians,
        output is also in radians.
    '''
    # One full turn in the requested unit (pi_value only touched when needed).
    full_turn = 2.0 * pi_value if radians else 360.0
    wrapped = angle % full_turn
    if wrapped < 0.0:
        wrapped += full_turn
    return wrapped
constant[Wraps the input angle to 360.0 degrees.
Parameters
----------
angle : float
The angle to wrap around 360.0 deg.
radians : bool
If True, will assume that the input is in radians. The output will then
also be in radians.
Returns
-------
float
Wrapped angle. If radians is True: input is assumed to be in radians,
output is also in radians.
]
if name[radians] begin[:]
variable[wrapped] assign[=] binary_operation[name[angle] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[2.0] * name[pi_value]]]
if compare[name[wrapped] less[<] constant[0.0]] begin[:]
variable[wrapped] assign[=] binary_operation[binary_operation[constant[2.0] * name[pi_value]] + name[wrapped]]
return[name[wrapped]] | keyword[def] identifier[angle_wrap] ( identifier[angle] , identifier[radians] = keyword[False] ):
literal[string]
keyword[if] identifier[radians] :
identifier[wrapped] = identifier[angle] %( literal[int] * identifier[pi_value] )
keyword[if] identifier[wrapped] < literal[int] :
identifier[wrapped] = literal[int] * identifier[pi_value] + identifier[wrapped]
keyword[else] :
identifier[wrapped] = identifier[angle] % literal[int]
keyword[if] identifier[wrapped] < literal[int] :
identifier[wrapped] = literal[int] + identifier[wrapped]
keyword[return] identifier[wrapped] | def angle_wrap(angle, radians=False):
"""Wraps the input angle to 360.0 degrees.
Parameters
----------
angle : float
The angle to wrap around 360.0 deg.
radians : bool
If True, will assume that the input is in radians. The output will then
also be in radians.
Returns
-------
float
Wrapped angle. If radians is True: input is assumed to be in radians,
output is also in radians.
"""
if radians:
wrapped = angle % (2.0 * pi_value)
if wrapped < 0.0:
wrapped = 2.0 * pi_value + wrapped # depends on [control=['if'], data=['wrapped']] # depends on [control=['if'], data=[]]
else:
wrapped = angle % 360.0
if wrapped < 0.0:
wrapped = 360.0 + wrapped # depends on [control=['if'], data=['wrapped']]
return wrapped |
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    # Title column rendered as a link to the object's view.
    title = obj.Title()
    description = obj.Description()
    url = obj.absolute_url()
    item["Description"] = description
    item["replace"]["Title"] = get_link(url, value=title)
    # Human-readable retention period, blank when not configured.
    retention_period = obj.getRetentionPeriod()
    if retention_period:
        hours = retention_period["hours"]
        minutes = retention_period["minutes"]
        days = retention_period["days"]
        # NOTE(review): the string is .format()-ed *before* being passed to
        # the _() translator, so the msgid contains concrete values — confirm
        # this is intended, as it defeats message-catalog lookup.
        item["RetentionPeriod"] = _("hours: {} minutes: {} days: {}"
                                    .format(hours, minutes, days))
    else:
        item["RetentionPeriod"] = ""
    # Sample matrix column: plain title plus a linked replacement.
    sample_matrix = obj.getSampleMatrix()
    if sample_matrix:
        title = sample_matrix.Title()
        url = sample_matrix.absolute_url()
        item["SampleMatrix"] = title
        item["replace"]["SampleMatrix"] = get_link(url, value=title)
    else:
        item["SampleMatrix"] = ""
    # Container type column, same pattern as the sample matrix above.
    container_type = obj.getContainerType()
    if container_type:
        title = container_type.Title()
        url = container_type.absolute_url()
        item["ContainerType"] = title
        item["replace"]["ContainerType"] = get_link(url, value=title)
    else:
        item["ContainerType"] = ""
    # Comma-separated links, one per assigned sample point.
    sample_points = obj.getSamplePoints()
    if sample_points:
        links = map(
            lambda sp: get_link(sp.absolute_url(),
                                value=sp.Title(),
                                css_class="link"),
            sample_points)
        item["replace"]["getSamplePoints"] = ", ".join(links)
    else:
        item["getSamplePoints"] = ""
    return item
constant[Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
]
variable[title] assign[=] call[name[obj].Title, parameter[]]
variable[description] assign[=] call[name[obj].Description, parameter[]]
variable[url] assign[=] call[name[obj].absolute_url, parameter[]]
call[name[item]][constant[Description]] assign[=] name[description]
call[call[name[item]][constant[replace]]][constant[Title]] assign[=] call[name[get_link], parameter[name[url]]]
variable[retention_period] assign[=] call[name[obj].getRetentionPeriod, parameter[]]
if name[retention_period] begin[:]
variable[hours] assign[=] call[name[retention_period]][constant[hours]]
variable[minutes] assign[=] call[name[retention_period]][constant[minutes]]
variable[days] assign[=] call[name[retention_period]][constant[days]]
call[name[item]][constant[RetentionPeriod]] assign[=] call[name[_], parameter[call[constant[hours: {} minutes: {} days: {}].format, parameter[name[hours], name[minutes], name[days]]]]]
variable[sample_matrix] assign[=] call[name[obj].getSampleMatrix, parameter[]]
if name[sample_matrix] begin[:]
variable[title] assign[=] call[name[sample_matrix].Title, parameter[]]
variable[url] assign[=] call[name[sample_matrix].absolute_url, parameter[]]
call[name[item]][constant[SampleMatrix]] assign[=] name[title]
call[call[name[item]][constant[replace]]][constant[SampleMatrix]] assign[=] call[name[get_link], parameter[name[url]]]
variable[container_type] assign[=] call[name[obj].getContainerType, parameter[]]
if name[container_type] begin[:]
variable[title] assign[=] call[name[container_type].Title, parameter[]]
variable[url] assign[=] call[name[container_type].absolute_url, parameter[]]
call[name[item]][constant[ContainerType]] assign[=] name[title]
call[call[name[item]][constant[replace]]][constant[ContainerType]] assign[=] call[name[get_link], parameter[name[url]]]
variable[sample_points] assign[=] call[name[obj].getSamplePoints, parameter[]]
if name[sample_points] begin[:]
variable[links] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b1d65ff0>, name[sample_points]]]
call[call[name[item]][constant[replace]]][constant[getSamplePoints]] assign[=] call[constant[, ].join, parameter[name[links]]]
return[name[item]] | keyword[def] identifier[folderitem] ( identifier[self] , identifier[obj] , identifier[item] , identifier[index] ):
literal[string]
identifier[title] = identifier[obj] . identifier[Title] ()
identifier[description] = identifier[obj] . identifier[Description] ()
identifier[url] = identifier[obj] . identifier[absolute_url] ()
identifier[item] [ literal[string] ]= identifier[description]
identifier[item] [ literal[string] ][ literal[string] ]= identifier[get_link] ( identifier[url] , identifier[value] = identifier[title] )
identifier[retention_period] = identifier[obj] . identifier[getRetentionPeriod] ()
keyword[if] identifier[retention_period] :
identifier[hours] = identifier[retention_period] [ literal[string] ]
identifier[minutes] = identifier[retention_period] [ literal[string] ]
identifier[days] = identifier[retention_period] [ literal[string] ]
identifier[item] [ literal[string] ]= identifier[_] ( literal[string]
. identifier[format] ( identifier[hours] , identifier[minutes] , identifier[days] ))
keyword[else] :
identifier[item] [ literal[string] ]= literal[string]
identifier[sample_matrix] = identifier[obj] . identifier[getSampleMatrix] ()
keyword[if] identifier[sample_matrix] :
identifier[title] = identifier[sample_matrix] . identifier[Title] ()
identifier[url] = identifier[sample_matrix] . identifier[absolute_url] ()
identifier[item] [ literal[string] ]= identifier[title]
identifier[item] [ literal[string] ][ literal[string] ]= identifier[get_link] ( identifier[url] , identifier[value] = identifier[title] )
keyword[else] :
identifier[item] [ literal[string] ]= literal[string]
identifier[container_type] = identifier[obj] . identifier[getContainerType] ()
keyword[if] identifier[container_type] :
identifier[title] = identifier[container_type] . identifier[Title] ()
identifier[url] = identifier[container_type] . identifier[absolute_url] ()
identifier[item] [ literal[string] ]= identifier[title]
identifier[item] [ literal[string] ][ literal[string] ]= identifier[get_link] ( identifier[url] , identifier[value] = identifier[title] )
keyword[else] :
identifier[item] [ literal[string] ]= literal[string]
identifier[sample_points] = identifier[obj] . identifier[getSamplePoints] ()
keyword[if] identifier[sample_points] :
identifier[links] = identifier[map] (
keyword[lambda] identifier[sp] : identifier[get_link] ( identifier[sp] . identifier[absolute_url] (),
identifier[value] = identifier[sp] . identifier[Title] (),
identifier[css_class] = literal[string] ),
identifier[sample_points] )
identifier[item] [ literal[string] ][ literal[string] ]= literal[string] . identifier[join] ( identifier[links] )
keyword[else] :
identifier[item] [ literal[string] ]= literal[string]
keyword[return] identifier[item] | def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
title = obj.Title()
description = obj.Description()
url = obj.absolute_url()
item['Description'] = description
item['replace']['Title'] = get_link(url, value=title)
retention_period = obj.getRetentionPeriod()
if retention_period:
hours = retention_period['hours']
minutes = retention_period['minutes']
days = retention_period['days']
item['RetentionPeriod'] = _('hours: {} minutes: {} days: {}'.format(hours, minutes, days)) # depends on [control=['if'], data=[]]
else:
item['RetentionPeriod'] = ''
sample_matrix = obj.getSampleMatrix()
if sample_matrix:
title = sample_matrix.Title()
url = sample_matrix.absolute_url()
item['SampleMatrix'] = title
item['replace']['SampleMatrix'] = get_link(url, value=title) # depends on [control=['if'], data=[]]
else:
item['SampleMatrix'] = ''
container_type = obj.getContainerType()
if container_type:
title = container_type.Title()
url = container_type.absolute_url()
item['ContainerType'] = title
item['replace']['ContainerType'] = get_link(url, value=title) # depends on [control=['if'], data=[]]
else:
item['ContainerType'] = ''
sample_points = obj.getSamplePoints()
if sample_points:
links = map(lambda sp: get_link(sp.absolute_url(), value=sp.Title(), css_class='link'), sample_points)
item['replace']['getSamplePoints'] = ', '.join(links) # depends on [control=['if'], data=[]]
else:
item['getSamplePoints'] = ''
return item |
def send_command_response(self,
                          source: list,
                          command: str,
                          *args,
                          **kwargs):
    """
    Serialize a command reply and send it over the command socket.

    Used in bot observer `on_next` method.  ``args`` and ``kwargs`` are
    JSON-encoded as separate frames; when ``source`` is a routing-id list
    it is prepended to the outgoing multipart message.
    """
    encoded_args = _json.dumps(args).encode('utf8')
    encoded_kwargs = _json.dumps(kwargs).encode('utf8')
    tail = (b'', command.encode('utf8'), encoded_args, encoded_kwargs)
    frame = (*source, *tail) if isinstance(source, list) else tail
    if self._run_control_loop:
        # Hand the send off via the callback queue instead of sending inline.
        self.add_callback(self.command_socket.send_multipart, frame)
    else:
        self.command_socket.send_multipart(frame)
constant[
Used in bot observer `on_next` method
]
variable[args] assign[=] call[call[name[_json].dumps, parameter[name[args]]].encode, parameter[constant[utf8]]]
variable[kwargs] assign[=] call[call[name[_json].dumps, parameter[name[kwargs]]].encode, parameter[constant[utf8]]]
if call[name[isinstance], parameter[name[source], name[list]]] begin[:]
variable[frame] assign[=] tuple[[<ast.Starred object at 0x7da1b0e39870>, <ast.Constant object at 0x7da1b0e3ab90>, <ast.Call object at 0x7da1b0e3a950>, <ast.Name object at 0x7da1b0e386a0>, <ast.Name object at 0x7da1b0e397b0>]]
if name[self]._run_control_loop begin[:]
call[name[self].add_callback, parameter[name[self].command_socket.send_multipart, name[frame]]] | keyword[def] identifier[send_command_response] ( identifier[self] ,
identifier[source] : identifier[list] ,
identifier[command] : identifier[str] ,
* identifier[args] ,
** identifier[kwargs] ):
literal[string]
identifier[args] = identifier[_json] . identifier[dumps] ( identifier[args] ). identifier[encode] ( literal[string] )
identifier[kwargs] = identifier[_json] . identifier[dumps] ( identifier[kwargs] ). identifier[encode] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[source] , identifier[list] ):
identifier[frame] =(* identifier[source] , literal[string] , identifier[command] . identifier[encode] ( literal[string] ), identifier[args] , identifier[kwargs] )
keyword[else] :
identifier[frame] =( literal[string] , identifier[command] . identifier[encode] ( literal[string] ), identifier[args] , identifier[kwargs] )
keyword[if] identifier[self] . identifier[_run_control_loop] :
identifier[self] . identifier[add_callback] ( identifier[self] . identifier[command_socket] . identifier[send_multipart] , identifier[frame] )
keyword[else] :
identifier[self] . identifier[command_socket] . identifier[send_multipart] ( identifier[frame] ) | def send_command_response(self, source: list, command: str, *args, **kwargs):
"""
Used in bot observer `on_next` method
"""
args = _json.dumps(args).encode('utf8')
kwargs = _json.dumps(kwargs).encode('utf8')
if isinstance(source, list):
frame = (*source, b'', command.encode('utf8'), args, kwargs) # depends on [control=['if'], data=[]]
else:
frame = (b'', command.encode('utf8'), args, kwargs)
if self._run_control_loop:
self.add_callback(self.command_socket.send_multipart, frame) # depends on [control=['if'], data=[]]
else:
self.command_socket.send_multipart(frame) |
def check_event_loop():
    """
    Ensure a usable asyncio event loop is installed.

    If the currently configured loop has been closed, replace it with a
    freshly created loop so subsequent asyncio calls do not fail.
    """
    current = asyncio.get_event_loop()
    if not current.is_closed():
        return
    asyncio.set_event_loop(asyncio.new_event_loop())
constant[
Check if event loop is closed and
create a new event loop
]
variable[loop] assign[=] call[name[asyncio].get_event_loop, parameter[]]
if call[name[loop].is_closed, parameter[]] begin[:]
call[name[asyncio].set_event_loop, parameter[call[name[asyncio].new_event_loop, parameter[]]]] | keyword[def] identifier[check_event_loop] ():
literal[string]
identifier[loop] = identifier[asyncio] . identifier[get_event_loop] ()
keyword[if] identifier[loop] . identifier[is_closed] ():
identifier[asyncio] . identifier[set_event_loop] ( identifier[asyncio] . identifier[new_event_loop] ()) | def check_event_loop():
"""
Check if event loop is closed and
create a new event loop
"""
loop = asyncio.get_event_loop()
if loop.is_closed():
asyncio.set_event_loop(asyncio.new_event_loop()) # depends on [control=['if'], data=[]] |
def rsp_process(rsp, sampling_rate=1000):
    """
    Automated processing of RSP signals.

    Parameters
    ----------
    rsp : list or array
        Respiratory (RSP) signal array.
    sampling_rate : int
        Sampling rate (samples/second).

    Returns
    ----------
    processed_rsp : dict
        Dict containing processed RSP features: the raw signal, the filtered
        signal, the respiratory rate, respiratory cycle onsets and phases
        (inspirations and expirations), and respiratory variability indices.

    Example
    ----------
    >>> import neurokit as nk
    >>>
    >>> processed_rsp = nk.rsp_process(rsp_signal)

    Notes
    ----------
    *Authors*

    - Dominique Makowski (https://github.com/DominiqueMakowski)

    *Dependencies*

    - biosppy
    - numpy
    - pandas

    *See Also*

    - BioSPPY: https://github.com/PIA-Group/BioSPPy
    """
    output = {"df": pd.DataFrame({"RSP_Raw": np.array(rsp)})}

    # Filter the raw signal with biosppy's respiration pipeline.
    biosppy_rsp = dict(biosppy.signals.resp.resp(rsp, sampling_rate=sampling_rate, show=False))
    filtered = biosppy_rsp["filtered"]
    output["df"]["RSP_Filtered"] = filtered

    # Respiration rate: convert to cycles/minute, locate each rate sample in
    # time (as sample indices), then interpolate to the signal's full length.
    rate_values = biosppy_rsp["resp_rate"] * 60
    rate_indices = np.round(biosppy_rsp["resp_rate_ts"] * sampling_rate).astype(int)
    try:
        output["df"]["RSP_Rate"] = interpolate(rate_values, rate_indices, sampling_rate)
    except TypeError:
        print("NeuroKit Warning: rsp_process(): Sequence too short to compute respiratory rate.")
        output["df"]["RSP_Rate"] = np.nan

    # Respiratory cycles (onsets and inspiration/expiration phases).
    cycles = rsp_find_cycles(filtered)
    output["df"]["RSP_Inspiration"] = cycles["RSP_Inspiration"]
    output["RSP"] = {
        "Cycles_Onsets": cycles["RSP_Cycles_Onsets"],
        "Expiration_Onsets": cycles["RSP_Expiration_Onsets"],
        "Cycles_Length": cycles["RSP_Cycles_Length"] / sampling_rate,
    }

    # Respiratory variability indices derived from cycle lengths (seconds).
    cycle_lengths = output["RSP"]["Cycles_Length"]
    rmssd = np.sqrt(np.mean(cycle_lengths ** 2))
    output["RSP"]["Respiratory_Variability"] = {
        "RSPV_SD": np.std(cycle_lengths),
        "RSPV_RMSSD": rmssd,
        "RSPV_RMSSD_Log": np.log(rmssd),
    }
    return output
constant[
Automated processing of RSP signals.
Parameters
----------
rsp : list or array
Respiratory (RSP) signal array.
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
processed_rsp : dict
Dict containing processed RSP features.
Contains the RSP raw signal, the filtered signal, the respiratory cycles onsets, and respiratory phases (inspirations and expirations).
Example
----------
>>> import neurokit as nk
>>>
>>> processed_rsp = nk.rsp_process(rsp_signal)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
]
variable[processed_rsp] assign[=] dictionary[[<ast.Constant object at 0x7da204345030>], [<ast.Call object at 0x7da204347580>]]
variable[biosppy_rsp] assign[=] call[name[dict], parameter[call[name[biosppy].signals.resp.resp, parameter[name[rsp]]]]]
call[call[name[processed_rsp]][constant[df]]][constant[RSP_Filtered]] assign[=] call[name[biosppy_rsp]][constant[filtered]]
variable[rsp_rate] assign[=] binary_operation[call[name[biosppy_rsp]][constant[resp_rate]] * constant[60]]
variable[rsp_times] assign[=] call[name[biosppy_rsp]][constant[resp_rate_ts]]
variable[rsp_times] assign[=] call[call[name[np].round, parameter[binary_operation[name[rsp_times] * name[sampling_rate]]]].astype, parameter[name[int]]]
<ast.Try object at 0x7da20e9b3f70>
variable[rsp_cycles] assign[=] call[name[rsp_find_cycles], parameter[call[name[biosppy_rsp]][constant[filtered]]]]
call[call[name[processed_rsp]][constant[df]]][constant[RSP_Inspiration]] assign[=] call[name[rsp_cycles]][constant[RSP_Inspiration]]
call[name[processed_rsp]][constant[RSP]] assign[=] dictionary[[], []]
call[call[name[processed_rsp]][constant[RSP]]][constant[Cycles_Onsets]] assign[=] call[name[rsp_cycles]][constant[RSP_Cycles_Onsets]]
call[call[name[processed_rsp]][constant[RSP]]][constant[Expiration_Onsets]] assign[=] call[name[rsp_cycles]][constant[RSP_Expiration_Onsets]]
call[call[name[processed_rsp]][constant[RSP]]][constant[Cycles_Length]] assign[=] binary_operation[call[name[rsp_cycles]][constant[RSP_Cycles_Length]] / name[sampling_rate]]
variable[rsp_diff] assign[=] call[call[name[processed_rsp]][constant[RSP]]][constant[Cycles_Length]]
call[call[name[processed_rsp]][constant[RSP]]][constant[Respiratory_Variability]] assign[=] dictionary[[], []]
call[call[call[name[processed_rsp]][constant[RSP]]][constant[Respiratory_Variability]]][constant[RSPV_SD]] assign[=] call[name[np].std, parameter[name[rsp_diff]]]
call[call[call[name[processed_rsp]][constant[RSP]]][constant[Respiratory_Variability]]][constant[RSPV_RMSSD]] assign[=] call[name[np].sqrt, parameter[call[name[np].mean, parameter[binary_operation[name[rsp_diff] ** constant[2]]]]]]
call[call[call[name[processed_rsp]][constant[RSP]]][constant[Respiratory_Variability]]][constant[RSPV_RMSSD_Log]] assign[=] call[name[np].log, parameter[call[call[call[name[processed_rsp]][constant[RSP]]][constant[Respiratory_Variability]]][constant[RSPV_RMSSD]]]]
return[name[processed_rsp]] | keyword[def] identifier[rsp_process] ( identifier[rsp] , identifier[sampling_rate] = literal[int] ):
literal[string]
identifier[processed_rsp] ={ literal[string] : identifier[pd] . identifier[DataFrame] ({ literal[string] : identifier[np] . identifier[array] ( identifier[rsp] )})}
identifier[biosppy_rsp] = identifier[dict] ( identifier[biosppy] . identifier[signals] . identifier[resp] . identifier[resp] ( identifier[rsp] , identifier[sampling_rate] = identifier[sampling_rate] , identifier[show] = keyword[False] ))
identifier[processed_rsp] [ literal[string] ][ literal[string] ]= identifier[biosppy_rsp] [ literal[string] ]
identifier[rsp_rate] = identifier[biosppy_rsp] [ literal[string] ]* literal[int]
identifier[rsp_times] = identifier[biosppy_rsp] [ literal[string] ]
identifier[rsp_times] = identifier[np] . identifier[round] ( identifier[rsp_times] * identifier[sampling_rate] ). identifier[astype] ( identifier[int] )
keyword[try] :
identifier[rsp_rate] = identifier[interpolate] ( identifier[rsp_rate] , identifier[rsp_times] , identifier[sampling_rate] )
identifier[processed_rsp] [ literal[string] ][ literal[string] ]= identifier[rsp_rate]
keyword[except] identifier[TypeError] :
identifier[print] ( literal[string] )
identifier[processed_rsp] [ literal[string] ][ literal[string] ]= identifier[np] . identifier[nan]
identifier[rsp_cycles] = identifier[rsp_find_cycles] ( identifier[biosppy_rsp] [ literal[string] ])
identifier[processed_rsp] [ literal[string] ][ literal[string] ]= identifier[rsp_cycles] [ literal[string] ]
identifier[processed_rsp] [ literal[string] ]={}
identifier[processed_rsp] [ literal[string] ][ literal[string] ]= identifier[rsp_cycles] [ literal[string] ]
identifier[processed_rsp] [ literal[string] ][ literal[string] ]= identifier[rsp_cycles] [ literal[string] ]
identifier[processed_rsp] [ literal[string] ][ literal[string] ]= identifier[rsp_cycles] [ literal[string] ]/ identifier[sampling_rate]
identifier[rsp_diff] = identifier[processed_rsp] [ literal[string] ][ literal[string] ]
identifier[processed_rsp] [ literal[string] ][ literal[string] ]={}
identifier[processed_rsp] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[np] . identifier[std] ( identifier[rsp_diff] )
identifier[processed_rsp] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[np] . identifier[sqrt] ( identifier[np] . identifier[mean] ( identifier[rsp_diff] ** literal[int] ))
identifier[processed_rsp] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[np] . identifier[log] ( identifier[processed_rsp] [ literal[string] ][ literal[string] ][ literal[string] ])
keyword[return] ( identifier[processed_rsp] ) | def rsp_process(rsp, sampling_rate=1000):
"""
Automated processing of RSP signals.
Parameters
----------
rsp : list or array
Respiratory (RSP) signal array.
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
processed_rsp : dict
Dict containing processed RSP features.
Contains the RSP raw signal, the filtered signal, the respiratory cycles onsets, and respiratory phases (inspirations and expirations).
Example
----------
>>> import neurokit as nk
>>>
>>> processed_rsp = nk.rsp_process(rsp_signal)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
"""
processed_rsp = {'df': pd.DataFrame({'RSP_Raw': np.array(rsp)})}
biosppy_rsp = dict(biosppy.signals.resp.resp(rsp, sampling_rate=sampling_rate, show=False))
processed_rsp['df']['RSP_Filtered'] = biosppy_rsp['filtered']
# RSP Rate
# ============
rsp_rate = biosppy_rsp['resp_rate'] * 60 # Get RSP rate value (in cycles per minute)
rsp_times = biosppy_rsp['resp_rate_ts'] # the time (in sec) of each rsp rate value
rsp_times = np.round(rsp_times * sampling_rate).astype(int) # Convert to timepoints
try:
rsp_rate = interpolate(rsp_rate, rsp_times, sampling_rate) # Interpolation using 3rd order spline
processed_rsp['df']['RSP_Rate'] = rsp_rate # depends on [control=['try'], data=[]]
except TypeError:
print('NeuroKit Warning: rsp_process(): Sequence too short to compute respiratory rate.')
processed_rsp['df']['RSP_Rate'] = np.nan # depends on [control=['except'], data=[]]
# RSP Cycles
# ===========================
rsp_cycles = rsp_find_cycles(biosppy_rsp['filtered'])
processed_rsp['df']['RSP_Inspiration'] = rsp_cycles['RSP_Inspiration']
processed_rsp['RSP'] = {}
processed_rsp['RSP']['Cycles_Onsets'] = rsp_cycles['RSP_Cycles_Onsets']
processed_rsp['RSP']['Expiration_Onsets'] = rsp_cycles['RSP_Expiration_Onsets']
processed_rsp['RSP']['Cycles_Length'] = rsp_cycles['RSP_Cycles_Length'] / sampling_rate
# RSP Variability
# ===========================
rsp_diff = processed_rsp['RSP']['Cycles_Length']
processed_rsp['RSP']['Respiratory_Variability'] = {}
processed_rsp['RSP']['Respiratory_Variability']['RSPV_SD'] = np.std(rsp_diff)
processed_rsp['RSP']['Respiratory_Variability']['RSPV_RMSSD'] = np.sqrt(np.mean(rsp_diff ** 2))
processed_rsp['RSP']['Respiratory_Variability']['RSPV_RMSSD_Log'] = np.log(processed_rsp['RSP']['Respiratory_Variability']['RSPV_RMSSD'])
return processed_rsp |
def _dfs_edges(graph, source, max_steps=None):
    """
    Perform a depth-first search on the given DiGraph, with a limit on maximum steps.

    :param networkx.DiGraph graph: The graph to traverse.
    :param Any source: The source to begin traversal.
    :param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
    :return: An iterator of (src, dst) edge tuples.
    """
    if max_steps is None:
        # Bug fix: the original ``yield``-ed the generator object itself,
        # producing a one-element iterator containing a generator instead of
        # edges.  ``yield from`` delegates so callers receive edge tuples in
        # both branches.
        yield from networkx.dfs_edges(graph, source)
    else:
        steps_map = defaultdict(int)
        traversed = {source}
        stack = [source]
        while stack:
            src = stack.pop()
            for dst in graph.successors(src):
                if dst in traversed:
                    continue
                # NOTE(review): nodes first reached beyond max_steps are still
                # marked traversed and will not be revisited via a shorter
                # path later -- preserved from the original behavior.
                traversed.add(dst)
                # A node's depth is at least one more than the node it was
                # reached from.
                dst_steps = max(steps_map[src] + 1, steps_map[dst])
                if dst_steps > max_steps:
                    continue
                yield src, dst
                steps_map[dst] = dst_steps
                stack.append(dst)
constant[
Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
:param networkx.DiGraph graph: The graph to traverse.
:param Any source: The source to begin traversal.
:param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
:return: An iterator of edges.
]
if compare[name[max_steps] is constant[None]] begin[:]
<ast.Yield object at 0x7da20c795000> | keyword[def] identifier[_dfs_edges] ( identifier[graph] , identifier[source] , identifier[max_steps] = keyword[None] ):
literal[string]
keyword[if] identifier[max_steps] keyword[is] keyword[None] :
keyword[yield] identifier[networkx] . identifier[dfs_edges] ( identifier[graph] , identifier[source] )
keyword[else] :
identifier[steps_map] = identifier[defaultdict] ( identifier[int] )
identifier[traversed] ={ identifier[source] }
identifier[stack] =[ identifier[source] ]
keyword[while] identifier[stack] :
identifier[src] = identifier[stack] . identifier[pop] ()
keyword[for] identifier[dst] keyword[in] identifier[graph] . identifier[successors] ( identifier[src] ):
keyword[if] identifier[dst] keyword[in] identifier[traversed] :
keyword[continue]
identifier[traversed] . identifier[add] ( identifier[dst] )
identifier[dst_steps] = identifier[max] ( identifier[steps_map] [ identifier[src] ]+ literal[int] , identifier[steps_map] [ identifier[dst] ])
keyword[if] identifier[dst_steps] > identifier[max_steps] :
keyword[continue]
keyword[yield] identifier[src] , identifier[dst]
identifier[steps_map] [ identifier[dst] ]= identifier[dst_steps]
identifier[stack] . identifier[append] ( identifier[dst] ) | def _dfs_edges(graph, source, max_steps=None):
"""
Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
:param networkx.DiGraph graph: The graph to traverse.
:param Any source: The source to begin traversal.
:param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
:return: An iterator of edges.
"""
if max_steps is None:
yield networkx.dfs_edges(graph, source) # depends on [control=['if'], data=[]]
else:
steps_map = defaultdict(int)
traversed = {source}
stack = [source]
while stack:
src = stack.pop()
for dst in graph.successors(src):
if dst in traversed:
continue # depends on [control=['if'], data=[]]
traversed.add(dst)
dst_steps = max(steps_map[src] + 1, steps_map[dst])
if dst_steps > max_steps:
continue # depends on [control=['if'], data=[]]
yield (src, dst)
steps_map[dst] = dst_steps
stack.append(dst) # depends on [control=['for'], data=['dst']] # depends on [control=['while'], data=[]] |
def save_figures(block, block_vars, gallery_conf):
    """Save all open figures of the example code-block.

    Parameters
    ----------
    block : tuple
        A tuple containing the (label, content, line_number) of the block.
    block_vars : dict
        Dict of block variables.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    images_rst : str
        rst code to embed the images in the document.
    """
    image_path_iterator = block_vars['image_path_iterator']
    images_rst = u''
    # Number of image paths already consumed before any scraper runs.
    n_before = len(image_path_iterator)
    for scraper in gallery_conf['image_scrapers']:
        rst = scraper(block, block_vars, gallery_conf)
        if not isinstance(rst, basestring):
            raise TypeError('rst from scraper %r was not a string, '
                            'got type %s:\n%r'
                            % (scraper, type(rst), rst))
        # Verify every image produced since the start actually exists on disk.
        n_new = len(image_path_iterator) - n_before
        for offset in range(n_new):
            image_path, _ = _find_image_ext(
                image_path_iterator.paths[n_before + offset])
            if not os.path.isfile(image_path):
                raise RuntimeError('Scraper %s did not produce expected image:'
                                   '\n%s' % (scraper, image_path))
        images_rst += rst
    return images_rst
constant[Save all open figures of the example code-block.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
Returns
-------
images_rst : str
rst code to embed the images in the document.
]
variable[image_path_iterator] assign[=] call[name[block_vars]][constant[image_path_iterator]]
variable[all_rst] assign[=] constant[]
variable[prev_count] assign[=] call[name[len], parameter[name[image_path_iterator]]]
for taget[name[scraper]] in starred[call[name[gallery_conf]][constant[image_scrapers]]] begin[:]
variable[rst] assign[=] call[name[scraper], parameter[name[block], name[block_vars], name[gallery_conf]]]
if <ast.UnaryOp object at 0x7da1b26ac160> begin[:]
<ast.Raise object at 0x7da18f00ea40>
variable[n_new] assign[=] binary_operation[call[name[len], parameter[name[image_path_iterator]]] - name[prev_count]]
for taget[name[ii]] in starred[call[name[range], parameter[name[n_new]]]] begin[:]
<ast.Tuple object at 0x7da18eb56e00> assign[=] call[name[_find_image_ext], parameter[call[name[image_path_iterator].paths][binary_operation[name[prev_count] + name[ii]]]]]
if <ast.UnaryOp object at 0x7da18eb54dc0> begin[:]
<ast.Raise object at 0x7da18eb540d0>
<ast.AugAssign object at 0x7da18eb55ab0>
return[name[all_rst]] | keyword[def] identifier[save_figures] ( identifier[block] , identifier[block_vars] , identifier[gallery_conf] ):
literal[string]
identifier[image_path_iterator] = identifier[block_vars] [ literal[string] ]
identifier[all_rst] = literal[string]
identifier[prev_count] = identifier[len] ( identifier[image_path_iterator] )
keyword[for] identifier[scraper] keyword[in] identifier[gallery_conf] [ literal[string] ]:
identifier[rst] = identifier[scraper] ( identifier[block] , identifier[block_vars] , identifier[gallery_conf] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[rst] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
%( identifier[scraper] , identifier[type] ( identifier[rst] ), identifier[rst] ))
identifier[n_new] = identifier[len] ( identifier[image_path_iterator] )- identifier[prev_count]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[n_new] ):
identifier[current_path] , identifier[_] = identifier[_find_image_ext] (
identifier[image_path_iterator] . identifier[paths] [ identifier[prev_count] + identifier[ii] ])
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[current_path] ):
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] %( identifier[scraper] , identifier[current_path] ))
identifier[all_rst] += identifier[rst]
keyword[return] identifier[all_rst] | def save_figures(block, block_vars, gallery_conf):
"""Save all open figures of the example code-block.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
Returns
-------
images_rst : str
rst code to embed the images in the document.
"""
image_path_iterator = block_vars['image_path_iterator']
all_rst = u''
prev_count = len(image_path_iterator)
for scraper in gallery_conf['image_scrapers']:
rst = scraper(block, block_vars, gallery_conf)
if not isinstance(rst, basestring):
raise TypeError('rst from scraper %r was not a string, got type %s:\n%r' % (scraper, type(rst), rst)) # depends on [control=['if'], data=[]]
n_new = len(image_path_iterator) - prev_count
for ii in range(n_new):
(current_path, _) = _find_image_ext(image_path_iterator.paths[prev_count + ii])
if not os.path.isfile(current_path):
raise RuntimeError('Scraper %s did not produce expected image:\n%s' % (scraper, current_path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ii']]
all_rst += rst # depends on [control=['for'], data=['scraper']]
return all_rst |
def Arrow(start=(0., 0., 0.), direction=(1., 0., 0.), tip_length=0.25,
          tip_radius=0.1, shaft_radius=0.05, shaft_resolution=20):
    """
    Create a vtk Arrow

    Parameters
    ----------
    start : np.ndarray
        Start location in [x, y, z]

    direction : list or np.ndarray
        Direction the arrow points to in [x, y, z]

    tip_length : float, optional
        Length of the tip.

    tip_radius : float, optional
        Radius of the tip.

    shaft_radius : float, optional
        Radius of the shaft.

    shaft_resolution : int, optional
        Number of faces around the shaft

    Returns
    -------
    arrow : vtki.PolyData
        Arrow surface.
    """
    # Build the parametric arrow geometry.
    arrow_source = vtk.vtkArrowSource()
    arrow_source.SetTipLength(tip_length)
    arrow_source.SetTipRadius(tip_radius)
    arrow_source.SetShaftRadius(shaft_radius)
    arrow_source.SetShaftResolution(shaft_resolution)
    arrow_source.Update()
    surface = PolyData(arrow_source.GetOutput())
    # Position the unit arrow at ``start`` and orient it along ``direction``.
    translate(surface, start, direction)
    return surface
constant[
Create a vtk Arrow
Parameters
----------
start : np.ndarray
Start location in [x, y, z]
direction : list or np.ndarray
Direction the arrow points to in [x, y, z]
tip_length : float, optional
Length of the tip.
tip_radius : float, optional
Radius of the tip.
shaft_radius : float, optional
Radius of the shaft.
shaft_resolution : int, optional
Number of faces around the shaft
Returns
-------
arrow : vtki.PolyData
Arrow surface.
]
variable[arrow] assign[=] call[name[vtk].vtkArrowSource, parameter[]]
call[name[arrow].SetTipLength, parameter[name[tip_length]]]
call[name[arrow].SetTipRadius, parameter[name[tip_radius]]]
call[name[arrow].SetShaftRadius, parameter[name[shaft_radius]]]
call[name[arrow].SetShaftResolution, parameter[name[shaft_resolution]]]
call[name[arrow].Update, parameter[]]
variable[surf] assign[=] call[name[PolyData], parameter[call[name[arrow].GetOutput, parameter[]]]]
call[name[translate], parameter[name[surf], name[start], name[direction]]]
return[name[surf]] | keyword[def] identifier[Arrow] ( identifier[start] =( literal[int] , literal[int] , literal[int] ), identifier[direction] =( literal[int] , literal[int] , literal[int] ), identifier[tip_length] = literal[int] ,
identifier[tip_radius] = literal[int] , identifier[shaft_radius] = literal[int] , identifier[shaft_resolution] = literal[int] ):
literal[string]
identifier[arrow] = identifier[vtk] . identifier[vtkArrowSource] ()
identifier[arrow] . identifier[SetTipLength] ( identifier[tip_length] )
identifier[arrow] . identifier[SetTipRadius] ( identifier[tip_radius] )
identifier[arrow] . identifier[SetShaftRadius] ( identifier[shaft_radius] )
identifier[arrow] . identifier[SetShaftResolution] ( identifier[shaft_resolution] )
identifier[arrow] . identifier[Update] ()
identifier[surf] = identifier[PolyData] ( identifier[arrow] . identifier[GetOutput] ())
identifier[translate] ( identifier[surf] , identifier[start] , identifier[direction] )
keyword[return] identifier[surf] | def Arrow(start=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0), tip_length=0.25, tip_radius=0.1, shaft_radius=0.05, shaft_resolution=20):
"""
Create a vtk Arrow
Parameters
----------
start : np.ndarray
Start location in [x, y, z]
direction : list or np.ndarray
Direction the arrow points to in [x, y, z]
tip_length : float, optional
Length of the tip.
tip_radius : float, optional
Radius of the tip.
shaft_radius : float, optional
Radius of the shaft.
shaft_resolution : int, optional
Number of faces around the shaft
Returns
-------
arrow : vtki.PolyData
Arrow surface.
"""
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.SetTipLength(tip_length)
arrow.SetTipRadius(tip_radius)
arrow.SetShaftRadius(shaft_radius)
arrow.SetShaftResolution(shaft_resolution)
arrow.Update()
surf = PolyData(arrow.GetOutput())
translate(surf, start, direction)
return surf |
def create_instance(self, collection_or_class, attributes, lazy=False, call_hook=True, deserialize=True, db_loader=None):
    """
    Creates an instance of a `Document` class corresponding to the given collection name or class.

    :param collection_or_class: The name of the collection or a reference to the class for which to create an instance.
    :param attributes: The attributes of the instance to be created
    :param lazy: Whether to create a `lazy` object or not.

    :returns: An instance of the requested Document class with the given attributes.
    """
    # Resolve the target class: accept either a registered class directly or
    # a collection name mapped to a class.
    if collection_or_class in self.classes:
        cls = collection_or_class
    elif collection_or_class in self.collections:
        cls = self.collections[collection_or_class]
    else:
        raise AttributeError("Unknown collection or class: %s!" % str(collection_or_class))

    # Deserialize incoming attributes unless the caller opted out.
    attrs = self.deserialize(attributes, create_instance=False) if deserialize else attributes

    init_kwargs = {
        'backend': self,
        'autoload': self._autoload_embedded,
        'lazy': lazy,
        'db_loader': db_loader,
    }
    # A class may register a custom constructor; otherwise instantiate directly.
    ctor = self.classes[cls]['constructor'] if 'constructor' in self.classes[cls] else cls
    obj = ctor(attrs, **init_kwargs)

    if call_hook:
        self.call_hook('after_load', obj)
    return obj
constant[
Creates an instance of a `Document` class corresponding to the given collection name or class.
:param collection_or_class: The name of the collection or a reference to the class for which to create an instance.
:param attributes: The attributes of the instance to be created
:param lazy: Whether to create a `lazy` object or not.
:returns: An instance of the requested Document class with the given attributes.
]
variable[creation_args] assign[=] dictionary[[<ast.Constant object at 0x7da1b190ffa0>, <ast.Constant object at 0x7da1b190cd60>, <ast.Constant object at 0x7da1b190f5e0>, <ast.Constant object at 0x7da1b190db40>], [<ast.Name object at 0x7da1b190dc30>, <ast.Attribute object at 0x7da1b190dc90>, <ast.Name object at 0x7da1b190dbd0>, <ast.Name object at 0x7da1b190ceb0>]]
if compare[name[collection_or_class] in name[self].classes] begin[:]
variable[cls] assign[=] name[collection_or_class]
if name[deserialize] begin[:]
variable[deserialized_attributes] assign[=] call[name[self].deserialize, parameter[name[attributes]]]
if compare[constant[constructor] in call[name[self].classes][name[cls]]] begin[:]
variable[obj] assign[=] call[call[call[name[self].classes][name[cls]]][constant[constructor]], parameter[name[deserialized_attributes]]]
if name[call_hook] begin[:]
call[name[self].call_hook, parameter[constant[after_load], name[obj]]]
return[name[obj]] | keyword[def] identifier[create_instance] ( identifier[self] , identifier[collection_or_class] , identifier[attributes] , identifier[lazy] = keyword[False] , identifier[call_hook] = keyword[True] , identifier[deserialize] = keyword[True] , identifier[db_loader] = keyword[None] ):
literal[string]
identifier[creation_args] ={
literal[string] : identifier[self] ,
literal[string] : identifier[self] . identifier[_autoload_embedded] ,
literal[string] : identifier[lazy] ,
literal[string] : identifier[db_loader]
}
keyword[if] identifier[collection_or_class] keyword[in] identifier[self] . identifier[classes] :
identifier[cls] = identifier[collection_or_class]
keyword[elif] identifier[collection_or_class] keyword[in] identifier[self] . identifier[collections] :
identifier[cls] = identifier[self] . identifier[collections] [ identifier[collection_or_class] ]
keyword[else] :
keyword[raise] identifier[AttributeError] ( literal[string] % identifier[str] ( identifier[collection_or_class] ))
keyword[if] identifier[deserialize] :
identifier[deserialized_attributes] = identifier[self] . identifier[deserialize] ( identifier[attributes] , identifier[create_instance] = keyword[False] )
keyword[else] :
identifier[deserialized_attributes] = identifier[attributes]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[classes] [ identifier[cls] ]:
identifier[obj] = identifier[self] . identifier[classes] [ identifier[cls] ][ literal[string] ]( identifier[deserialized_attributes] ,** identifier[creation_args] )
keyword[else] :
identifier[obj] = identifier[cls] ( identifier[deserialized_attributes] ,** identifier[creation_args] )
keyword[if] identifier[call_hook] :
identifier[self] . identifier[call_hook] ( literal[string] , identifier[obj] )
keyword[return] identifier[obj] | def create_instance(self, collection_or_class, attributes, lazy=False, call_hook=True, deserialize=True, db_loader=None):
"""
Creates an instance of a `Document` class corresponding to the given collection name or class.
:param collection_or_class: The name of the collection or a reference to the class for which to create an instance.
:param attributes: The attributes of the instance to be created
:param lazy: Whether to create a `lazy` object or not.
:returns: An instance of the requested Document class with the given attributes.
"""
creation_args = {'backend': self, 'autoload': self._autoload_embedded, 'lazy': lazy, 'db_loader': db_loader}
if collection_or_class in self.classes:
cls = collection_or_class # depends on [control=['if'], data=['collection_or_class']]
elif collection_or_class in self.collections:
cls = self.collections[collection_or_class] # depends on [control=['if'], data=['collection_or_class']]
else:
raise AttributeError('Unknown collection or class: %s!' % str(collection_or_class))
#we deserialize the attributes that we receive
if deserialize:
deserialized_attributes = self.deserialize(attributes, create_instance=False) # depends on [control=['if'], data=[]]
else:
deserialized_attributes = attributes
if 'constructor' in self.classes[cls]:
obj = self.classes[cls]['constructor'](deserialized_attributes, **creation_args) # depends on [control=['if'], data=[]]
else:
obj = cls(deserialized_attributes, **creation_args)
if call_hook:
self.call_hook('after_load', obj) # depends on [control=['if'], data=[]]
return obj |
def stop_traffic(self, *ports):
""" Stop traffic on list of ports.
:param ports: list of ports to stop traffic on. Default - all session ports.
"""
for chassis, chassis_ports in self._per_chassis_ports(*self._get_operation_ports(*ports)).items():
chassis.stop_traffic(*chassis_ports) | def function[stop_traffic, parameter[self]]:
constant[ Stop traffic on list of ports.
:param ports: list of ports to stop traffic on. Default - all session ports.
]
for taget[tuple[[<ast.Name object at 0x7da20c6a96f0>, <ast.Name object at 0x7da20c6abe50>]]] in starred[call[call[name[self]._per_chassis_ports, parameter[<ast.Starred object at 0x7da20c6a9060>]].items, parameter[]]] begin[:]
call[name[chassis].stop_traffic, parameter[<ast.Starred object at 0x7da20c6aa920>]] | keyword[def] identifier[stop_traffic] ( identifier[self] ,* identifier[ports] ):
literal[string]
keyword[for] identifier[chassis] , identifier[chassis_ports] keyword[in] identifier[self] . identifier[_per_chassis_ports] (* identifier[self] . identifier[_get_operation_ports] (* identifier[ports] )). identifier[items] ():
identifier[chassis] . identifier[stop_traffic] (* identifier[chassis_ports] ) | def stop_traffic(self, *ports):
""" Stop traffic on list of ports.
:param ports: list of ports to stop traffic on. Default - all session ports.
"""
for (chassis, chassis_ports) in self._per_chassis_ports(*self._get_operation_ports(*ports)).items():
chassis.stop_traffic(*chassis_ports) # depends on [control=['for'], data=[]] |
def pad_to_same(self):
"""Pad shorter pianorolls with zeros at the end along the time axis to
make the resulting pianoroll lengths the same as the maximum pianoroll
length among all the tracks."""
max_length = self.get_max_length()
for track in self.tracks:
if track.pianoroll.shape[0] < max_length:
track.pad(max_length - track.pianoroll.shape[0]) | def function[pad_to_same, parameter[self]]:
constant[Pad shorter pianorolls with zeros at the end along the time axis to
make the resulting pianoroll lengths the same as the maximum pianoroll
length among all the tracks.]
variable[max_length] assign[=] call[name[self].get_max_length, parameter[]]
for taget[name[track]] in starred[name[self].tracks] begin[:]
if compare[call[name[track].pianoroll.shape][constant[0]] less[<] name[max_length]] begin[:]
call[name[track].pad, parameter[binary_operation[name[max_length] - call[name[track].pianoroll.shape][constant[0]]]]] | keyword[def] identifier[pad_to_same] ( identifier[self] ):
literal[string]
identifier[max_length] = identifier[self] . identifier[get_max_length] ()
keyword[for] identifier[track] keyword[in] identifier[self] . identifier[tracks] :
keyword[if] identifier[track] . identifier[pianoroll] . identifier[shape] [ literal[int] ]< identifier[max_length] :
identifier[track] . identifier[pad] ( identifier[max_length] - identifier[track] . identifier[pianoroll] . identifier[shape] [ literal[int] ]) | def pad_to_same(self):
"""Pad shorter pianorolls with zeros at the end along the time axis to
make the resulting pianoroll lengths the same as the maximum pianoroll
length among all the tracks."""
max_length = self.get_max_length()
for track in self.tracks:
if track.pianoroll.shape[0] < max_length:
track.pad(max_length - track.pianoroll.shape[0]) # depends on [control=['if'], data=['max_length']] # depends on [control=['for'], data=['track']] |
def add_homogeneous_model(self, magnitude, phase=0, frequency=None):
"""Add homogeneous models to one or all tomodirs. Register those as
forward models
Parameters
----------
magnitude : float
Value of homogeneous magnitude model
phase : float, optional
Value of homogeneous phase model. Default 0
frequency : float, optional
Frequency of of the tomodir to use. If None, then apply to all
tomodirs. Default is None.
"""
if frequency is None:
frequencies = self.frequencies
else:
assert isinstance(frequency, Number)
frequencies = [frequency, ]
for freq in frequencies:
pidm, pidp = self.tds[freq].add_homogeneous_model(magnitude, phase)
self.a['forward_rmag'][freq] = pidm
self.a['forward_rpha'][freq] = pidp | def function[add_homogeneous_model, parameter[self, magnitude, phase, frequency]]:
constant[Add homogeneous models to one or all tomodirs. Register those as
forward models
Parameters
----------
magnitude : float
Value of homogeneous magnitude model
phase : float, optional
Value of homogeneous phase model. Default 0
frequency : float, optional
Frequency of of the tomodir to use. If None, then apply to all
tomodirs. Default is None.
]
if compare[name[frequency] is constant[None]] begin[:]
variable[frequencies] assign[=] name[self].frequencies
for taget[name[freq]] in starred[name[frequencies]] begin[:]
<ast.Tuple object at 0x7da1b2347880> assign[=] call[call[name[self].tds][name[freq]].add_homogeneous_model, parameter[name[magnitude], name[phase]]]
call[call[name[self].a][constant[forward_rmag]]][name[freq]] assign[=] name[pidm]
call[call[name[self].a][constant[forward_rpha]]][name[freq]] assign[=] name[pidp] | keyword[def] identifier[add_homogeneous_model] ( identifier[self] , identifier[magnitude] , identifier[phase] = literal[int] , identifier[frequency] = keyword[None] ):
literal[string]
keyword[if] identifier[frequency] keyword[is] keyword[None] :
identifier[frequencies] = identifier[self] . identifier[frequencies]
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[frequency] , identifier[Number] )
identifier[frequencies] =[ identifier[frequency] ,]
keyword[for] identifier[freq] keyword[in] identifier[frequencies] :
identifier[pidm] , identifier[pidp] = identifier[self] . identifier[tds] [ identifier[freq] ]. identifier[add_homogeneous_model] ( identifier[magnitude] , identifier[phase] )
identifier[self] . identifier[a] [ literal[string] ][ identifier[freq] ]= identifier[pidm]
identifier[self] . identifier[a] [ literal[string] ][ identifier[freq] ]= identifier[pidp] | def add_homogeneous_model(self, magnitude, phase=0, frequency=None):
"""Add homogeneous models to one or all tomodirs. Register those as
forward models
Parameters
----------
magnitude : float
Value of homogeneous magnitude model
phase : float, optional
Value of homogeneous phase model. Default 0
frequency : float, optional
Frequency of of the tomodir to use. If None, then apply to all
tomodirs. Default is None.
"""
if frequency is None:
frequencies = self.frequencies # depends on [control=['if'], data=[]]
else:
assert isinstance(frequency, Number)
frequencies = [frequency]
for freq in frequencies:
(pidm, pidp) = self.tds[freq].add_homogeneous_model(magnitude, phase)
self.a['forward_rmag'][freq] = pidm
self.a['forward_rpha'][freq] = pidp # depends on [control=['for'], data=['freq']] |
def _get_minutes(self, duration):
"""
Calculate the number of minutes with the given duration.
:param duration: The duration
:type duration: int or datetime
:rtype: int or None
"""
if isinstance(duration, datetime.datetime):
from_now = (duration - datetime.datetime.now()).total_seconds()
from_now = math.ceil(from_now / 60)
if from_now > 0:
return from_now
return
return duration | def function[_get_minutes, parameter[self, duration]]:
constant[
Calculate the number of minutes with the given duration.
:param duration: The duration
:type duration: int or datetime
:rtype: int or None
]
if call[name[isinstance], parameter[name[duration], name[datetime].datetime]] begin[:]
variable[from_now] assign[=] call[binary_operation[name[duration] - call[name[datetime].datetime.now, parameter[]]].total_seconds, parameter[]]
variable[from_now] assign[=] call[name[math].ceil, parameter[binary_operation[name[from_now] / constant[60]]]]
if compare[name[from_now] greater[>] constant[0]] begin[:]
return[name[from_now]]
return[None]
return[name[duration]] | keyword[def] identifier[_get_minutes] ( identifier[self] , identifier[duration] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[duration] , identifier[datetime] . identifier[datetime] ):
identifier[from_now] =( identifier[duration] - identifier[datetime] . identifier[datetime] . identifier[now] ()). identifier[total_seconds] ()
identifier[from_now] = identifier[math] . identifier[ceil] ( identifier[from_now] / literal[int] )
keyword[if] identifier[from_now] > literal[int] :
keyword[return] identifier[from_now]
keyword[return]
keyword[return] identifier[duration] | def _get_minutes(self, duration):
"""
Calculate the number of minutes with the given duration.
:param duration: The duration
:type duration: int or datetime
:rtype: int or None
"""
if isinstance(duration, datetime.datetime):
from_now = (duration - datetime.datetime.now()).total_seconds()
from_now = math.ceil(from_now / 60)
if from_now > 0:
return from_now # depends on [control=['if'], data=['from_now']]
return # depends on [control=['if'], data=[]]
return duration |
def chests_per_chunk(chunk):
"""Find chests and get contents in a given chunk."""
chests = []
for entity in chunk['Entities']:
eid = entity["id"].value
if eid == "Minecart" and entity["type"].value == 1 or eid == "minecraft:chest_minecart":
x,y,z = entity["Pos"]
x,y,z = x.value,y.value,z.value
# Treasures are empty upon first opening
try:
items = items_from_nbt(entity["Items"])
except KeyError:
items = {}
chests.append(Chest("Minecart with chest",(x,y,z),items))
for entity in chunk['TileEntities']:
eid = entity["id"].value
if eid == "Chest" or eid == "minecraft:chest":
x,y,z = entity["x"].value,entity["y"].value,entity["z"].value
# Treasures are empty upon first opening
try:
items = items_from_nbt(entity["Items"])
except KeyError:
items = {}
chests.append(Chest("Chest",(x,y,z),items))
return chests | def function[chests_per_chunk, parameter[chunk]]:
constant[Find chests and get contents in a given chunk.]
variable[chests] assign[=] list[[]]
for taget[name[entity]] in starred[call[name[chunk]][constant[Entities]]] begin[:]
variable[eid] assign[=] call[name[entity]][constant[id]].value
if <ast.BoolOp object at 0x7da18c4cef80> begin[:]
<ast.Tuple object at 0x7da18c4ce6b0> assign[=] call[name[entity]][constant[Pos]]
<ast.Tuple object at 0x7da18c4cd0c0> assign[=] tuple[[<ast.Attribute object at 0x7da18c4cd4b0>, <ast.Attribute object at 0x7da18c4cf370>, <ast.Attribute object at 0x7da18c4cc580>]]
<ast.Try object at 0x7da18c4ce2c0>
call[name[chests].append, parameter[call[name[Chest], parameter[constant[Minecart with chest], tuple[[<ast.Name object at 0x7da18c4cf610>, <ast.Name object at 0x7da18c4cc880>, <ast.Name object at 0x7da18c4cd150>]], name[items]]]]]
for taget[name[entity]] in starred[call[name[chunk]][constant[TileEntities]]] begin[:]
variable[eid] assign[=] call[name[entity]][constant[id]].value
if <ast.BoolOp object at 0x7da18c4ceda0> begin[:]
<ast.Tuple object at 0x7da18c4cc220> assign[=] tuple[[<ast.Attribute object at 0x7da18c4cc0a0>, <ast.Attribute object at 0x7da18c4cdf30>, <ast.Attribute object at 0x7da18c4ce080>]]
<ast.Try object at 0x7da18c4cdb70>
call[name[chests].append, parameter[call[name[Chest], parameter[constant[Chest], tuple[[<ast.Name object at 0x7da18eb55630>, <ast.Name object at 0x7da18eb54190>, <ast.Name object at 0x7da18eb57a30>]], name[items]]]]]
return[name[chests]] | keyword[def] identifier[chests_per_chunk] ( identifier[chunk] ):
literal[string]
identifier[chests] =[]
keyword[for] identifier[entity] keyword[in] identifier[chunk] [ literal[string] ]:
identifier[eid] = identifier[entity] [ literal[string] ]. identifier[value]
keyword[if] identifier[eid] == literal[string] keyword[and] identifier[entity] [ literal[string] ]. identifier[value] == literal[int] keyword[or] identifier[eid] == literal[string] :
identifier[x] , identifier[y] , identifier[z] = identifier[entity] [ literal[string] ]
identifier[x] , identifier[y] , identifier[z] = identifier[x] . identifier[value] , identifier[y] . identifier[value] , identifier[z] . identifier[value]
keyword[try] :
identifier[items] = identifier[items_from_nbt] ( identifier[entity] [ literal[string] ])
keyword[except] identifier[KeyError] :
identifier[items] ={}
identifier[chests] . identifier[append] ( identifier[Chest] ( literal[string] ,( identifier[x] , identifier[y] , identifier[z] ), identifier[items] ))
keyword[for] identifier[entity] keyword[in] identifier[chunk] [ literal[string] ]:
identifier[eid] = identifier[entity] [ literal[string] ]. identifier[value]
keyword[if] identifier[eid] == literal[string] keyword[or] identifier[eid] == literal[string] :
identifier[x] , identifier[y] , identifier[z] = identifier[entity] [ literal[string] ]. identifier[value] , identifier[entity] [ literal[string] ]. identifier[value] , identifier[entity] [ literal[string] ]. identifier[value]
keyword[try] :
identifier[items] = identifier[items_from_nbt] ( identifier[entity] [ literal[string] ])
keyword[except] identifier[KeyError] :
identifier[items] ={}
identifier[chests] . identifier[append] ( identifier[Chest] ( literal[string] ,( identifier[x] , identifier[y] , identifier[z] ), identifier[items] ))
keyword[return] identifier[chests] | def chests_per_chunk(chunk):
"""Find chests and get contents in a given chunk."""
chests = []
for entity in chunk['Entities']:
eid = entity['id'].value
if eid == 'Minecart' and entity['type'].value == 1 or eid == 'minecraft:chest_minecart':
(x, y, z) = entity['Pos']
(x, y, z) = (x.value, y.value, z.value)
# Treasures are empty upon first opening
try:
items = items_from_nbt(entity['Items']) # depends on [control=['try'], data=[]]
except KeyError:
items = {} # depends on [control=['except'], data=[]]
chests.append(Chest('Minecart with chest', (x, y, z), items)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entity']]
for entity in chunk['TileEntities']:
eid = entity['id'].value
if eid == 'Chest' or eid == 'minecraft:chest':
(x, y, z) = (entity['x'].value, entity['y'].value, entity['z'].value)
# Treasures are empty upon first opening
try:
items = items_from_nbt(entity['Items']) # depends on [control=['try'], data=[]]
except KeyError:
items = {} # depends on [control=['except'], data=[]]
chests.append(Chest('Chest', (x, y, z), items)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entity']]
return chests |
def delete_records(server, token, data):
"""Delete records of specific domain.
Arguments:
server: TonicDNS API server
token: TonicDNS API authentication token
data: Delete records
ContentType: application/json
x-authentication-token: token
"""
method = 'DELETE'
uri = 'https://' + server + '/zone'
for i in data:
connect.tonicdns_client(uri, method, token, i) | def function[delete_records, parameter[server, token, data]]:
constant[Delete records of specific domain.
Arguments:
server: TonicDNS API server
token: TonicDNS API authentication token
data: Delete records
ContentType: application/json
x-authentication-token: token
]
variable[method] assign[=] constant[DELETE]
variable[uri] assign[=] binary_operation[binary_operation[constant[https://] + name[server]] + constant[/zone]]
for taget[name[i]] in starred[name[data]] begin[:]
call[name[connect].tonicdns_client, parameter[name[uri], name[method], name[token], name[i]]] | keyword[def] identifier[delete_records] ( identifier[server] , identifier[token] , identifier[data] ):
literal[string]
identifier[method] = literal[string]
identifier[uri] = literal[string] + identifier[server] + literal[string]
keyword[for] identifier[i] keyword[in] identifier[data] :
identifier[connect] . identifier[tonicdns_client] ( identifier[uri] , identifier[method] , identifier[token] , identifier[i] ) | def delete_records(server, token, data):
"""Delete records of specific domain.
Arguments:
server: TonicDNS API server
token: TonicDNS API authentication token
data: Delete records
ContentType: application/json
x-authentication-token: token
"""
method = 'DELETE'
uri = 'https://' + server + '/zone'
for i in data:
connect.tonicdns_client(uri, method, token, i) # depends on [control=['for'], data=['i']] |
def classmethod(self, encoding):
"""Function decorator for class methods."""
# Add encodings for hidden self and cmd arguments.
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
def objc_class_method(objc_cls, objc_cmd, *args):
py_cls = ObjCClass(objc_cls)
py_cls.objc_cmd = objc_cmd
args = convert_method_arguments(encoding, args)
result = f(py_cls, *args)
if isinstance(result, ObjCClass):
result = result.ptr.value
elif isinstance(result, ObjCInstance):
result = result.ptr.value
return result
name = f.__name__.replace('_', ':')
self.add_class_method(objc_class_method, name, encoding)
return objc_class_method
return decorator | def function[classmethod, parameter[self, encoding]]:
constant[Function decorator for class methods.]
variable[encoding] assign[=] call[name[ensure_bytes], parameter[name[encoding]]]
variable[typecodes] assign[=] call[name[parse_type_encoding], parameter[name[encoding]]]
call[name[typecodes].insert, parameter[constant[1], constant[b'@:']]]
variable[encoding] assign[=] call[constant[b''].join, parameter[name[typecodes]]]
def function[decorator, parameter[f]]:
def function[objc_class_method, parameter[objc_cls, objc_cmd]]:
variable[py_cls] assign[=] call[name[ObjCClass], parameter[name[objc_cls]]]
name[py_cls].objc_cmd assign[=] name[objc_cmd]
variable[args] assign[=] call[name[convert_method_arguments], parameter[name[encoding], name[args]]]
variable[result] assign[=] call[name[f], parameter[name[py_cls], <ast.Starred object at 0x7da1b0ea0070>]]
if call[name[isinstance], parameter[name[result], name[ObjCClass]]] begin[:]
variable[result] assign[=] name[result].ptr.value
return[name[result]]
variable[name] assign[=] call[name[f].__name__.replace, parameter[constant[_], constant[:]]]
call[name[self].add_class_method, parameter[name[objc_class_method], name[name], name[encoding]]]
return[name[objc_class_method]]
return[name[decorator]] | keyword[def] identifier[classmethod] ( identifier[self] , identifier[encoding] ):
literal[string]
identifier[encoding] = identifier[ensure_bytes] ( identifier[encoding] )
identifier[typecodes] = identifier[parse_type_encoding] ( identifier[encoding] )
identifier[typecodes] . identifier[insert] ( literal[int] , literal[string] )
identifier[encoding] = literal[string] . identifier[join] ( identifier[typecodes] )
keyword[def] identifier[decorator] ( identifier[f] ):
keyword[def] identifier[objc_class_method] ( identifier[objc_cls] , identifier[objc_cmd] ,* identifier[args] ):
identifier[py_cls] = identifier[ObjCClass] ( identifier[objc_cls] )
identifier[py_cls] . identifier[objc_cmd] = identifier[objc_cmd]
identifier[args] = identifier[convert_method_arguments] ( identifier[encoding] , identifier[args] )
identifier[result] = identifier[f] ( identifier[py_cls] ,* identifier[args] )
keyword[if] identifier[isinstance] ( identifier[result] , identifier[ObjCClass] ):
identifier[result] = identifier[result] . identifier[ptr] . identifier[value]
keyword[elif] identifier[isinstance] ( identifier[result] , identifier[ObjCInstance] ):
identifier[result] = identifier[result] . identifier[ptr] . identifier[value]
keyword[return] identifier[result]
identifier[name] = identifier[f] . identifier[__name__] . identifier[replace] ( literal[string] , literal[string] )
identifier[self] . identifier[add_class_method] ( identifier[objc_class_method] , identifier[name] , identifier[encoding] )
keyword[return] identifier[objc_class_method]
keyword[return] identifier[decorator] | def classmethod(self, encoding):
"""Function decorator for class methods."""
# Add encodings for hidden self and cmd arguments.
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
def objc_class_method(objc_cls, objc_cmd, *args):
py_cls = ObjCClass(objc_cls)
py_cls.objc_cmd = objc_cmd
args = convert_method_arguments(encoding, args)
result = f(py_cls, *args)
if isinstance(result, ObjCClass):
result = result.ptr.value # depends on [control=['if'], data=[]]
elif isinstance(result, ObjCInstance):
result = result.ptr.value # depends on [control=['if'], data=[]]
return result
name = f.__name__.replace('_', ':')
self.add_class_method(objc_class_method, name, encoding)
return objc_class_method
return decorator |
def remove_legend(self):
""" Removes legend actor """
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self._render() | def function[remove_legend, parameter[self]]:
constant[ Removes legend actor ]
if call[name[hasattr], parameter[name[self], constant[legend]]] begin[:]
call[name[self].remove_actor, parameter[name[self].legend]]
call[name[self]._render, parameter[]] | keyword[def] identifier[remove_legend] ( identifier[self] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[remove_actor] ( identifier[self] . identifier[legend] , identifier[reset_camera] = keyword[False] )
identifier[self] . identifier[_render] () | def remove_legend(self):
""" Removes legend actor """
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self._render() # depends on [control=['if'], data=[]] |
def location_nearbystops(self, origin_coord_lat, origin_coord_long):
""" location.nearbystops """
response = self._request(
'location.nearbystops',
originCoordLat=origin_coord_lat,
originCoordLong=origin_coord_long)
return _get_node(response, 'LocationList', 'StopLocation') | def function[location_nearbystops, parameter[self, origin_coord_lat, origin_coord_long]]:
constant[ location.nearbystops ]
variable[response] assign[=] call[name[self]._request, parameter[constant[location.nearbystops]]]
return[call[name[_get_node], parameter[name[response], constant[LocationList], constant[StopLocation]]]] | keyword[def] identifier[location_nearbystops] ( identifier[self] , identifier[origin_coord_lat] , identifier[origin_coord_long] ):
literal[string]
identifier[response] = identifier[self] . identifier[_request] (
literal[string] ,
identifier[originCoordLat] = identifier[origin_coord_lat] ,
identifier[originCoordLong] = identifier[origin_coord_long] )
keyword[return] identifier[_get_node] ( identifier[response] , literal[string] , literal[string] ) | def location_nearbystops(self, origin_coord_lat, origin_coord_long):
""" location.nearbystops """
response = self._request('location.nearbystops', originCoordLat=origin_coord_lat, originCoordLong=origin_coord_long)
return _get_node(response, 'LocationList', 'StopLocation') |
def smooth_cuboid(space, min_pt=None, max_pt=None, axis=0):
"""Cuboid with smooth variations.
Parameters
----------
space : `DiscreteLp`
Discretized space in which the phantom is supposed to be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
axis : int or sequence of int
Dimension(s) along which the smooth variation should happen.
Returns
-------
phantom : ``space``-element
The generated cuboid phantom in ``space``. Values have range [0, 1].
"""
dom_min_pt = space.domain.min()
dom_max_pt = space.domain.max()
if min_pt is None:
min_pt = dom_min_pt * 0.75 + dom_max_pt * 0.25
if max_pt is None:
max_pt = dom_min_pt * 0.25 + dom_max_pt * 0.75
min_pt = np.atleast_1d(min_pt)
max_pt = np.atleast_1d(max_pt)
axis = np.array(axis, dtype=int, ndmin=1)
if min_pt.shape != (space.ndim,):
raise ValueError('shape of `min_pt` must be {}, got {}'
''.format((space.ndim,), min_pt.shape))
if max_pt.shape != (space.ndim,):
raise ValueError('shape of `max_pt` must be {}, got {}'
''.format((space.ndim,), max_pt.shape))
sign = 0
for i, coord in enumerate(space.meshgrid):
sign = sign | (coord < min_pt[i]) | (coord > max_pt[i])
values = 0
for i in axis:
coord = space.meshgrid[i]
extent = (dom_max_pt[i] - dom_min_pt[i])
values = values + 2 * (coord - dom_min_pt[i]) / extent - 1
# Properly scale using sign
sign = (3 * sign - 2) / axis.size
# Fit in [0, 1]
values = values * sign
values = (values - np.min(values)) / (np.max(values) - np.min(values))
return space.element(values) | def function[smooth_cuboid, parameter[space, min_pt, max_pt, axis]]:
constant[Cuboid with smooth variations.
Parameters
----------
space : `DiscreteLp`
Discretized space in which the phantom is supposed to be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
axis : int or sequence of int
Dimension(s) along which the smooth variation should happen.
Returns
-------
phantom : ``space``-element
The generated cuboid phantom in ``space``. Values have range [0, 1].
]
variable[dom_min_pt] assign[=] call[name[space].domain.min, parameter[]]
variable[dom_max_pt] assign[=] call[name[space].domain.max, parameter[]]
if compare[name[min_pt] is constant[None]] begin[:]
variable[min_pt] assign[=] binary_operation[binary_operation[name[dom_min_pt] * constant[0.75]] + binary_operation[name[dom_max_pt] * constant[0.25]]]
if compare[name[max_pt] is constant[None]] begin[:]
variable[max_pt] assign[=] binary_operation[binary_operation[name[dom_min_pt] * constant[0.25]] + binary_operation[name[dom_max_pt] * constant[0.75]]]
variable[min_pt] assign[=] call[name[np].atleast_1d, parameter[name[min_pt]]]
variable[max_pt] assign[=] call[name[np].atleast_1d, parameter[name[max_pt]]]
variable[axis] assign[=] call[name[np].array, parameter[name[axis]]]
if compare[name[min_pt].shape not_equal[!=] tuple[[<ast.Attribute object at 0x7da1b1e97040>]]] begin[:]
<ast.Raise object at 0x7da1b1e97010>
if compare[name[max_pt].shape not_equal[!=] tuple[[<ast.Attribute object at 0x7da1b1e96d10>]]] begin[:]
<ast.Raise object at 0x7da1b1e96dd0>
variable[sign] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b1e96530>, <ast.Name object at 0x7da1b1e96590>]]] in starred[call[name[enumerate], parameter[name[space].meshgrid]]] begin[:]
variable[sign] assign[=] binary_operation[binary_operation[name[sign] <ast.BitOr object at 0x7da2590d6aa0> compare[name[coord] less[<] call[name[min_pt]][name[i]]]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[coord] greater[>] call[name[max_pt]][name[i]]]]
variable[values] assign[=] constant[0]
for taget[name[i]] in starred[name[axis]] begin[:]
variable[coord] assign[=] call[name[space].meshgrid][name[i]]
variable[extent] assign[=] binary_operation[call[name[dom_max_pt]][name[i]] - call[name[dom_min_pt]][name[i]]]
variable[values] assign[=] binary_operation[binary_operation[name[values] + binary_operation[binary_operation[constant[2] * binary_operation[name[coord] - call[name[dom_min_pt]][name[i]]]] / name[extent]]] - constant[1]]
variable[sign] assign[=] binary_operation[binary_operation[binary_operation[constant[3] * name[sign]] - constant[2]] / name[axis].size]
variable[values] assign[=] binary_operation[name[values] * name[sign]]
variable[values] assign[=] binary_operation[binary_operation[name[values] - call[name[np].min, parameter[name[values]]]] / binary_operation[call[name[np].max, parameter[name[values]]] - call[name[np].min, parameter[name[values]]]]]
return[call[name[space].element, parameter[name[values]]]] | keyword[def] identifier[smooth_cuboid] ( identifier[space] , identifier[min_pt] = keyword[None] , identifier[max_pt] = keyword[None] , identifier[axis] = literal[int] ):
literal[string]
identifier[dom_min_pt] = identifier[space] . identifier[domain] . identifier[min] ()
identifier[dom_max_pt] = identifier[space] . identifier[domain] . identifier[max] ()
keyword[if] identifier[min_pt] keyword[is] keyword[None] :
identifier[min_pt] = identifier[dom_min_pt] * literal[int] + identifier[dom_max_pt] * literal[int]
keyword[if] identifier[max_pt] keyword[is] keyword[None] :
identifier[max_pt] = identifier[dom_min_pt] * literal[int] + identifier[dom_max_pt] * literal[int]
identifier[min_pt] = identifier[np] . identifier[atleast_1d] ( identifier[min_pt] )
identifier[max_pt] = identifier[np] . identifier[atleast_1d] ( identifier[max_pt] )
identifier[axis] = identifier[np] . identifier[array] ( identifier[axis] , identifier[dtype] = identifier[int] , identifier[ndmin] = literal[int] )
keyword[if] identifier[min_pt] . identifier[shape] !=( identifier[space] . identifier[ndim] ,):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] (( identifier[space] . identifier[ndim] ,), identifier[min_pt] . identifier[shape] ))
keyword[if] identifier[max_pt] . identifier[shape] !=( identifier[space] . identifier[ndim] ,):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] (( identifier[space] . identifier[ndim] ,), identifier[max_pt] . identifier[shape] ))
identifier[sign] = literal[int]
keyword[for] identifier[i] , identifier[coord] keyword[in] identifier[enumerate] ( identifier[space] . identifier[meshgrid] ):
identifier[sign] = identifier[sign] |( identifier[coord] < identifier[min_pt] [ identifier[i] ])|( identifier[coord] > identifier[max_pt] [ identifier[i] ])
identifier[values] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[axis] :
identifier[coord] = identifier[space] . identifier[meshgrid] [ identifier[i] ]
identifier[extent] =( identifier[dom_max_pt] [ identifier[i] ]- identifier[dom_min_pt] [ identifier[i] ])
identifier[values] = identifier[values] + literal[int] *( identifier[coord] - identifier[dom_min_pt] [ identifier[i] ])/ identifier[extent] - literal[int]
identifier[sign] =( literal[int] * identifier[sign] - literal[int] )/ identifier[axis] . identifier[size]
identifier[values] = identifier[values] * identifier[sign]
identifier[values] =( identifier[values] - identifier[np] . identifier[min] ( identifier[values] ))/( identifier[np] . identifier[max] ( identifier[values] )- identifier[np] . identifier[min] ( identifier[values] ))
keyword[return] identifier[space] . identifier[element] ( identifier[values] ) | def smooth_cuboid(space, min_pt=None, max_pt=None, axis=0):
"""Cuboid with smooth variations.
Parameters
----------
space : `DiscreteLp`
Discretized space in which the phantom is supposed to be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
axis : int or sequence of int
Dimension(s) along which the smooth variation should happen.
Returns
-------
phantom : ``space``-element
The generated cuboid phantom in ``space``. Values have range [0, 1].
"""
dom_min_pt = space.domain.min()
dom_max_pt = space.domain.max()
if min_pt is None:
min_pt = dom_min_pt * 0.75 + dom_max_pt * 0.25 # depends on [control=['if'], data=['min_pt']]
if max_pt is None:
max_pt = dom_min_pt * 0.25 + dom_max_pt * 0.75 # depends on [control=['if'], data=['max_pt']]
min_pt = np.atleast_1d(min_pt)
max_pt = np.atleast_1d(max_pt)
axis = np.array(axis, dtype=int, ndmin=1)
if min_pt.shape != (space.ndim,):
raise ValueError('shape of `min_pt` must be {}, got {}'.format((space.ndim,), min_pt.shape)) # depends on [control=['if'], data=[]]
if max_pt.shape != (space.ndim,):
raise ValueError('shape of `max_pt` must be {}, got {}'.format((space.ndim,), max_pt.shape)) # depends on [control=['if'], data=[]]
sign = 0
for (i, coord) in enumerate(space.meshgrid):
sign = sign | (coord < min_pt[i]) | (coord > max_pt[i]) # depends on [control=['for'], data=[]]
values = 0
for i in axis:
coord = space.meshgrid[i]
extent = dom_max_pt[i] - dom_min_pt[i]
values = values + 2 * (coord - dom_min_pt[i]) / extent - 1 # depends on [control=['for'], data=['i']]
# Properly scale using sign
sign = (3 * sign - 2) / axis.size
# Fit in [0, 1]
values = values * sign
values = (values - np.min(values)) / (np.max(values) - np.min(values))
return space.element(values) |
def btemp_threshold(img, min_in, max_in, threshold, threshold_out=None, **kwargs):
    """Scale brightness-temperature data linearly in two separate regions.

    The input range is split at *threshold*: values from *min_in* up to the
    threshold map linearly onto [1, threshold_out], while values from the
    threshold up to *max_in* map onto [threshold_out, 0].  The data is
    therefore "flipped" around the threshold.  The default *threshold_out*
    of 176/255 matches the behavior of the US National Weather Service's
    forecasting tool called AWIPS.

    Args:
        img (XRImage): Image object to be scaled
        min_in (float): Minimum input value to scale
        max_in (float): Maximum input value to scale
        threshold (float): Input value where to split data in to two regions
        threshold_out (float): Output value to map the input `threshold`
            to. Optional, defaults to 176.0 / 255.0.
    """
    if threshold_out is None:
        threshold_out = 176 / 255.0
    # Slope and intercept of the linear map for each of the two regions.
    low_factor = (threshold_out - 1.) / (min_in - threshold)
    low_offset = 1. + (low_factor * min_in)
    high_factor = threshold_out / (max_in - threshold)
    high_offset = high_factor * max_in

    def _bt_threshold(band_data):
        # Receives a dask array; choose the region mapping per element.
        above = high_offset - high_factor * band_data
        below = low_offset - low_factor * band_data
        return da.where(band_data >= threshold, above, below)

    return apply_enhancement(img.data, _bt_threshold, pass_dask=True)
constant[Scale data linearly in two separate regions.
This enhancement scales the input data linearly by splitting the data
into two regions; min_in to threshold and threshold to max_in. These
regions are mapped to 1 to threshold_out and threshold_out to 0
respectively, resulting in the data being "flipped" around the
threshold. A default threshold_out is set to `176.0 / 255.0` to
match the behavior of the US National Weather Service's forecasting
tool called AWIPS.
Args:
img (XRImage): Image object to be scaled
min_in (float): Minimum input value to scale
max_in (float): Maximum input value to scale
threshold (float): Input value where to split data in to two regions
threshold_out (float): Output value to map the input `threshold`
to. Optional, defaults to 176.0 / 255.0.
]
variable[threshold_out] assign[=] <ast.IfExp object at 0x7da18fe912d0>
variable[low_factor] assign[=] binary_operation[binary_operation[name[threshold_out] - constant[1.0]] / binary_operation[name[min_in] - name[threshold]]]
variable[low_offset] assign[=] binary_operation[constant[1.0] + binary_operation[name[low_factor] * name[min_in]]]
variable[high_factor] assign[=] binary_operation[name[threshold_out] / binary_operation[name[max_in] - name[threshold]]]
variable[high_offset] assign[=] binary_operation[name[high_factor] * name[max_in]]
def function[_bt_threshold, parameter[band_data]]:
return[call[name[da].where, parameter[compare[name[band_data] greater_or_equal[>=] name[threshold]], binary_operation[name[high_offset] - binary_operation[name[high_factor] * name[band_data]]], binary_operation[name[low_offset] - binary_operation[name[low_factor] * name[band_data]]]]]]
return[call[name[apply_enhancement], parameter[name[img].data, name[_bt_threshold]]]] | keyword[def] identifier[btemp_threshold] ( identifier[img] , identifier[min_in] , identifier[max_in] , identifier[threshold] , identifier[threshold_out] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[threshold_out] = identifier[threshold_out] keyword[if] identifier[threshold_out] keyword[is] keyword[not] keyword[None] keyword[else] ( literal[int] / literal[int] )
identifier[low_factor] =( identifier[threshold_out] - literal[int] )/( identifier[min_in] - identifier[threshold] )
identifier[low_offset] = literal[int] +( identifier[low_factor] * identifier[min_in] )
identifier[high_factor] = identifier[threshold_out] /( identifier[max_in] - identifier[threshold] )
identifier[high_offset] = identifier[high_factor] * identifier[max_in]
keyword[def] identifier[_bt_threshold] ( identifier[band_data] ):
keyword[return] identifier[da] . identifier[where] ( identifier[band_data] >= identifier[threshold] ,
identifier[high_offset] - identifier[high_factor] * identifier[band_data] ,
identifier[low_offset] - identifier[low_factor] * identifier[band_data] )
keyword[return] identifier[apply_enhancement] ( identifier[img] . identifier[data] , identifier[_bt_threshold] , identifier[pass_dask] = keyword[True] ) | def btemp_threshold(img, min_in, max_in, threshold, threshold_out=None, **kwargs):
"""Scale data linearly in two separate regions.
This enhancement scales the input data linearly by splitting the data
into two regions; min_in to threshold and threshold to max_in. These
regions are mapped to 1 to threshold_out and threshold_out to 0
respectively, resulting in the data being "flipped" around the
threshold. A default threshold_out is set to `176.0 / 255.0` to
match the behavior of the US National Weather Service's forecasting
tool called AWIPS.
Args:
img (XRImage): Image object to be scaled
min_in (float): Minimum input value to scale
max_in (float): Maximum input value to scale
threshold (float): Input value where to split data in to two regions
threshold_out (float): Output value to map the input `threshold`
to. Optional, defaults to 176.0 / 255.0.
"""
threshold_out = threshold_out if threshold_out is not None else 176 / 255.0
low_factor = (threshold_out - 1.0) / (min_in - threshold)
low_offset = 1.0 + low_factor * min_in
high_factor = threshold_out / (max_in - threshold)
high_offset = high_factor * max_in
def _bt_threshold(band_data):
# expects dask array to be passed
return da.where(band_data >= threshold, high_offset - high_factor * band_data, low_offset - low_factor * band_data)
return apply_enhancement(img.data, _bt_threshold, pass_dask=True) |
def logical_interconnect_groups(self):
    """
    Gets the LogicalInterconnectGroups API client, creating it lazily.

    Returns:
        LogicalInterconnectGroups: cached client bound to this connection.
    """
    client = self.__logical_interconnect_groups
    if not client:
        # First access: build the client once and cache it.
        client = LogicalInterconnectGroups(self.__connection)
        self.__logical_interconnect_groups = client
    return client
constant[
Gets the LogicalInterconnectGroups API client.
Returns:
LogicalInterconnectGroups:
]
if <ast.UnaryOp object at 0x7da20c76e020> begin[:]
name[self].__logical_interconnect_groups assign[=] call[name[LogicalInterconnectGroups], parameter[name[self].__connection]]
return[name[self].__logical_interconnect_groups] | keyword[def] identifier[logical_interconnect_groups] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__logical_interconnect_groups] :
identifier[self] . identifier[__logical_interconnect_groups] = identifier[LogicalInterconnectGroups] (
identifier[self] . identifier[__connection] )
keyword[return] identifier[self] . identifier[__logical_interconnect_groups] | def logical_interconnect_groups(self):
"""
Gets the LogicalInterconnectGroups API client.
Returns:
LogicalInterconnectGroups:
"""
if not self.__logical_interconnect_groups:
self.__logical_interconnect_groups = LogicalInterconnectGroups(self.__connection) # depends on [control=['if'], data=[]]
return self.__logical_interconnect_groups |
def get_versioned_files(cls):
    """List all files versioned in Bazaar in the current directory."""
    # Decode bzr's output with the terminal's encoding, then split on lines.
    listing = run(['bzr', 'ls', '-VR'],
                  encoding=cls._get_terminal_encoding())
    return listing.splitlines()
constant[List all files versioned in Bazaar in the current directory.]
variable[encoding] assign[=] call[name[cls]._get_terminal_encoding, parameter[]]
variable[output] assign[=] call[name[run], parameter[list[[<ast.Constant object at 0x7da18f00ec50>, <ast.Constant object at 0x7da18f00fa60>, <ast.Constant object at 0x7da18f00f8b0>]]]]
return[call[name[output].splitlines, parameter[]]] | keyword[def] identifier[get_versioned_files] ( identifier[cls] ):
literal[string]
identifier[encoding] = identifier[cls] . identifier[_get_terminal_encoding] ()
identifier[output] = identifier[run] ([ literal[string] , literal[string] , literal[string] ], identifier[encoding] = identifier[encoding] )
keyword[return] identifier[output] . identifier[splitlines] () | def get_versioned_files(cls):
"""List all files versioned in Bazaar in the current directory."""
encoding = cls._get_terminal_encoding()
output = run(['bzr', 'ls', '-VR'], encoding=encoding)
return output.splitlines() |
def _list(api_list_class, arg_namespace, **extra):
""" A common function for building methods of the "list showing".
"""
if arg_namespace.starting_point:
ordering_field = (arg_namespace.ordering or '').lstrip('-')
if ordering_field in ('', 'datetime_uploaded', 'datetime_created'):
arg_namespace.starting_point = parser.parse(
arg_namespace.starting_point)
items = api_list_class(
starting_point=arg_namespace.starting_point,
ordering=arg_namespace.ordering,
limit=arg_namespace.limit,
request_limit=arg_namespace.request_limit,
**extra
)
items.constructor = lambda x: x
try:
pprint(list(items))
except ValueError as e:
print(e) | def function[_list, parameter[api_list_class, arg_namespace]]:
constant[ A common function for building methods of the "list showing".
]
if name[arg_namespace].starting_point begin[:]
variable[ordering_field] assign[=] call[<ast.BoolOp object at 0x7da1b0217790>.lstrip, parameter[constant[-]]]
if compare[name[ordering_field] in tuple[[<ast.Constant object at 0x7da1b0214be0>, <ast.Constant object at 0x7da1b02157e0>, <ast.Constant object at 0x7da1b0215f90>]]] begin[:]
name[arg_namespace].starting_point assign[=] call[name[parser].parse, parameter[name[arg_namespace].starting_point]]
variable[items] assign[=] call[name[api_list_class], parameter[]]
name[items].constructor assign[=] <ast.Lambda object at 0x7da1b0216500>
<ast.Try object at 0x7da1b0214370> | keyword[def] identifier[_list] ( identifier[api_list_class] , identifier[arg_namespace] ,** identifier[extra] ):
literal[string]
keyword[if] identifier[arg_namespace] . identifier[starting_point] :
identifier[ordering_field] =( identifier[arg_namespace] . identifier[ordering] keyword[or] literal[string] ). identifier[lstrip] ( literal[string] )
keyword[if] identifier[ordering_field] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[arg_namespace] . identifier[starting_point] = identifier[parser] . identifier[parse] (
identifier[arg_namespace] . identifier[starting_point] )
identifier[items] = identifier[api_list_class] (
identifier[starting_point] = identifier[arg_namespace] . identifier[starting_point] ,
identifier[ordering] = identifier[arg_namespace] . identifier[ordering] ,
identifier[limit] = identifier[arg_namespace] . identifier[limit] ,
identifier[request_limit] = identifier[arg_namespace] . identifier[request_limit] ,
** identifier[extra]
)
identifier[items] . identifier[constructor] = keyword[lambda] identifier[x] : identifier[x]
keyword[try] :
identifier[pprint] ( identifier[list] ( identifier[items] ))
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[print] ( identifier[e] ) | def _list(api_list_class, arg_namespace, **extra):
""" A common function for building methods of the "list showing".
"""
if arg_namespace.starting_point:
ordering_field = (arg_namespace.ordering or '').lstrip('-')
if ordering_field in ('', 'datetime_uploaded', 'datetime_created'):
arg_namespace.starting_point = parser.parse(arg_namespace.starting_point) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
items = api_list_class(starting_point=arg_namespace.starting_point, ordering=arg_namespace.ordering, limit=arg_namespace.limit, request_limit=arg_namespace.request_limit, **extra)
items.constructor = lambda x: x
try:
pprint(list(items)) # depends on [control=['try'], data=[]]
except ValueError as e:
print(e) # depends on [control=['except'], data=['e']] |
def _periodically_flush_profile_events(self):
"""Drivers run this as a thread to flush profile data in the
background."""
# Note(rkn): This is run on a background thread in the driver. It uses
# the raylet client. This should be ok because it doesn't read
# from the raylet client and we have the GIL here. However,
# if either of those things changes, then we could run into issues.
while True:
# Sleep for 1 second. This will be interrupted if
# self.threads_stopped is set.
self.threads_stopped.wait(timeout=1)
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
self.flush_profile_data() | def function[_periodically_flush_profile_events, parameter[self]]:
constant[Drivers run this as a thread to flush profile data in the
background.]
while constant[True] begin[:]
call[name[self].threads_stopped.wait, parameter[]]
if call[name[self].threads_stopped.is_set, parameter[]] begin[:]
return[None]
call[name[self].flush_profile_data, parameter[]] | keyword[def] identifier[_periodically_flush_profile_events] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
identifier[self] . identifier[threads_stopped] . identifier[wait] ( identifier[timeout] = literal[int] )
keyword[if] identifier[self] . identifier[threads_stopped] . identifier[is_set] ():
keyword[return]
identifier[self] . identifier[flush_profile_data] () | def _periodically_flush_profile_events(self):
"""Drivers run this as a thread to flush profile data in the
background."""
# Note(rkn): This is run on a background thread in the driver. It uses
# the raylet client. This should be ok because it doesn't read
# from the raylet client and we have the GIL here. However,
# if either of those things changes, then we could run into issues.
while True:
# Sleep for 1 second. This will be interrupted if
# self.threads_stopped is set.
self.threads_stopped.wait(timeout=1)
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return # depends on [control=['if'], data=[]]
self.flush_profile_data() # depends on [control=['while'], data=[]] |
def singular_subresource(raml_resource, route_name):
    """ Determine if :raml_resource: is a singular subresource.

    :param raml_resource: Instance of ramlfications.raml.ResourceNode.
    :param route_name: Name of the :raml_resource:.
    """
    static_parent = get_static_parent(raml_resource, method='POST')
    if static_parent is None:
        return False
    schema = resource_schema(static_parent) or {}
    properties = schema.get('properties', {})
    if route_name not in properties:
        return False
    settings = properties[route_name].get('_db_settings', {})
    if settings.get('type') != 'relationship':
        return False
    # Singular means the relationship holds one object, i.e. uselist=False.
    return not settings.get('uselist', True)
constant[ Determine if :raml_resource: is a singular subresource.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
:param route_name: Name of the :raml_resource:.
]
variable[static_parent] assign[=] call[name[get_static_parent], parameter[name[raml_resource]]]
if compare[name[static_parent] is constant[None]] begin[:]
return[constant[False]]
variable[schema] assign[=] <ast.BoolOp object at 0x7da18f722020>
variable[properties] assign[=] call[name[schema].get, parameter[constant[properties], dictionary[[], []]]]
if compare[name[route_name] <ast.NotIn object at 0x7da2590d7190> name[properties]] begin[:]
return[constant[False]]
variable[db_settings] assign[=] call[call[name[properties]][name[route_name]].get, parameter[constant[_db_settings], dictionary[[], []]]]
variable[is_obj] assign[=] compare[call[name[db_settings].get, parameter[constant[type]]] equal[==] constant[relationship]]
variable[single_obj] assign[=] <ast.UnaryOp object at 0x7da18f720d00>
return[<ast.BoolOp object at 0x7da18f7237f0>] | keyword[def] identifier[singular_subresource] ( identifier[raml_resource] , identifier[route_name] ):
literal[string]
identifier[static_parent] = identifier[get_static_parent] ( identifier[raml_resource] , identifier[method] = literal[string] )
keyword[if] identifier[static_parent] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[schema] = identifier[resource_schema] ( identifier[static_parent] ) keyword[or] {}
identifier[properties] = identifier[schema] . identifier[get] ( literal[string] ,{})
keyword[if] identifier[route_name] keyword[not] keyword[in] identifier[properties] :
keyword[return] keyword[False]
identifier[db_settings] = identifier[properties] [ identifier[route_name] ]. identifier[get] ( literal[string] ,{})
identifier[is_obj] = identifier[db_settings] . identifier[get] ( literal[string] )== literal[string]
identifier[single_obj] = keyword[not] identifier[db_settings] . identifier[get] ( literal[string] , keyword[True] )
keyword[return] identifier[is_obj] keyword[and] identifier[single_obj] | def singular_subresource(raml_resource, route_name):
""" Determine if :raml_resource: is a singular subresource.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
:param route_name: Name of the :raml_resource:.
"""
static_parent = get_static_parent(raml_resource, method='POST')
if static_parent is None:
return False # depends on [control=['if'], data=[]]
schema = resource_schema(static_parent) or {}
properties = schema.get('properties', {})
if route_name not in properties:
return False # depends on [control=['if'], data=[]]
db_settings = properties[route_name].get('_db_settings', {})
is_obj = db_settings.get('type') == 'relationship'
single_obj = not db_settings.get('uselist', True)
return is_obj and single_obj |
def render_registration(self):
    '''
    Render pinned points on video frame as red rectangle.

    Returns the cairo surface for the current frame; when no canvas or no
    corner points are available the surface is returned untouched.
    '''
    surface = self.get_surface()
    if self.canvas is None or self.df_canvas_corners.shape[0] == 0:
        # Nothing registered to draw.
        return surface

    # Append w=1 so the corner points become homogeneous coordinates and
    # the affine shapes-to-canvas transform can be applied in one product.
    corners = self.df_canvas_corners.copy()
    corners['w'] = 1
    transform = self.canvas.shapes_to_canvas_transform
    projected = corners.values.dot(transform.T.values).T
    xs = projected[0]
    ys = projected[1]

    context = cairo.Context(surface)
    context.move_to(xs[0], ys[0])
    for px, py in zip(xs[1:], ys[1:]):
        context.line_to(px, py)
    # Close the outline back at the starting corner.
    context.line_to(xs[0], ys[0])
    context.set_source_rgb(1, 0, 0)  # red
    context.stroke()
    return surface
constant[
Render pinned points on video frame as red rectangle.
]
variable[surface] assign[=] call[name[self].get_surface, parameter[]]
if <ast.BoolOp object at 0x7da1b277fd90> begin[:]
return[name[surface]]
variable[corners] assign[=] call[name[self].df_canvas_corners.copy, parameter[]]
call[name[corners]][constant[w]] assign[=] constant[1]
variable[transform] assign[=] name[self].canvas.shapes_to_canvas_transform
variable[canvas_corners] assign[=] call[name[corners].values.dot, parameter[name[transform].T.values]].T
variable[points_x] assign[=] call[name[canvas_corners]][constant[0]]
variable[points_y] assign[=] call[name[canvas_corners]][constant[1]]
variable[cairo_context] assign[=] call[name[cairo].Context, parameter[name[surface]]]
call[name[cairo_context].move_to, parameter[call[name[points_x]][constant[0]], call[name[points_y]][constant[0]]]]
for taget[tuple[[<ast.Name object at 0x7da1b27a4730>, <ast.Name object at 0x7da1b27a4880>]]] in starred[call[name[zip], parameter[call[name[points_x]][<ast.Slice object at 0x7da1b27a6890>], call[name[points_y]][<ast.Slice object at 0x7da1b27a4610>]]]] begin[:]
call[name[cairo_context].line_to, parameter[name[x], name[y]]]
call[name[cairo_context].line_to, parameter[call[name[points_x]][constant[0]], call[name[points_y]][constant[0]]]]
call[name[cairo_context].set_source_rgb, parameter[constant[1], constant[0], constant[0]]]
call[name[cairo_context].stroke, parameter[]]
return[name[surface]] | keyword[def] identifier[render_registration] ( identifier[self] ):
literal[string]
identifier[surface] = identifier[self] . identifier[get_surface] ()
keyword[if] identifier[self] . identifier[canvas] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[df_canvas_corners] . identifier[shape] [ literal[int] ]== literal[int] :
keyword[return] identifier[surface]
identifier[corners] = identifier[self] . identifier[df_canvas_corners] . identifier[copy] ()
identifier[corners] [ literal[string] ]= literal[int]
identifier[transform] = identifier[self] . identifier[canvas] . identifier[shapes_to_canvas_transform]
identifier[canvas_corners] = identifier[corners] . identifier[values] . identifier[dot] ( identifier[transform] . identifier[T] . identifier[values] ). identifier[T]
identifier[points_x] = identifier[canvas_corners] [ literal[int] ]
identifier[points_y] = identifier[canvas_corners] [ literal[int] ]
identifier[cairo_context] = identifier[cairo] . identifier[Context] ( identifier[surface] )
identifier[cairo_context] . identifier[move_to] ( identifier[points_x] [ literal[int] ], identifier[points_y] [ literal[int] ])
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[points_x] [ literal[int] :], identifier[points_y] [ literal[int] :]):
identifier[cairo_context] . identifier[line_to] ( identifier[x] , identifier[y] )
identifier[cairo_context] . identifier[line_to] ( identifier[points_x] [ literal[int] ], identifier[points_y] [ literal[int] ])
identifier[cairo_context] . identifier[set_source_rgb] ( literal[int] , literal[int] , literal[int] )
identifier[cairo_context] . identifier[stroke] ()
keyword[return] identifier[surface] | def render_registration(self):
"""
Render pinned points on video frame as red rectangle.
"""
surface = self.get_surface()
if self.canvas is None or self.df_canvas_corners.shape[0] == 0:
return surface # depends on [control=['if'], data=[]]
corners = self.df_canvas_corners.copy()
corners['w'] = 1
transform = self.canvas.shapes_to_canvas_transform
canvas_corners = corners.values.dot(transform.T.values).T
points_x = canvas_corners[0]
points_y = canvas_corners[1]
cairo_context = cairo.Context(surface)
cairo_context.move_to(points_x[0], points_y[0])
for (x, y) in zip(points_x[1:], points_y[1:]):
cairo_context.line_to(x, y) # depends on [control=['for'], data=[]]
cairo_context.line_to(points_x[0], points_y[0])
cairo_context.set_source_rgb(1, 0, 0)
cairo_context.stroke()
return surface |
def download_url_regex(inputs, outdir, regex=".*"):
    """
    Downloads http(s) urls to a local files
    :param str inputs: Required, the seed url
    :param str outdir: Required. the local directory to put the downloadedfiles.
    :param str regex: Optional, a regex string. If not given, then all urls will be valid
    :return: A list of local full path names (downloaded from inputs),
        or None when the call parameters are invalid.
    """
    # Validate parameters up front; keep the original contract of logging
    # and returning None (not raising) on bad input.  isinstance() replaces
    # the non-idiomatic type(x) != str comparison.
    if not inputs or not isinstance(inputs, str) \
            or not outdir or not isinstance(outdir, str):
        logging.error("The call parameters are invalid.")
        return

    # exist_ok avoids the check-then-create race of os.path.exists+makedirs.
    os.makedirs(outdir, exist_ok=True)

    # Compile once instead of re-compiling the pattern for every url.
    pattern = re.compile(regex)
    output_files = []
    for url in get_urls_from_seed(inputs):
        if pattern.match(url):
            output_files.append(handle_single_url(url, outdir))
    return output_files
constant[
Downloads http(s) urls to a local files
:param str inputs: Required, the seed url
:param str outdir: Required. the local directory to put the downloadedfiles.
:param str regex: Optional, a regex string. If not given, then all urls will be valid
:return: A list of local full path names (downloaded from inputs)
]
if <ast.BoolOp object at 0x7da1b003f9a0> begin[:]
call[name[logging].error, parameter[constant[The call parameters are invalid.]]]
return[None]
variable[output_files] assign[=] list[[]]
variable[files] assign[=] call[name[get_urls_from_seed], parameter[name[inputs]]]
for taget[name[f]] in starred[name[files]] begin[:]
if call[call[name[re].compile, parameter[name[regex]]].match, parameter[name[f]]] begin[:]
variable[output_file] assign[=] call[name[handle_single_url], parameter[name[f], name[outdir]]]
call[name[output_files].append, parameter[name[output_file]]]
return[name[output_files]] | keyword[def] identifier[download_url_regex] ( identifier[inputs] , identifier[outdir] , identifier[regex] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[inputs] keyword[or] identifier[type] ( identifier[inputs] )!= identifier[str] keyword[or] keyword[not] identifier[outdir] keyword[or] identifier[type] ( identifier[outdir] )!= identifier[str] :
identifier[logging] . identifier[error] ( literal[string] )
keyword[return]
keyword[else] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[outdir] ):
identifier[os] . identifier[makedirs] ( identifier[outdir] )
identifier[output_files] =[]
identifier[files] = identifier[get_urls_from_seed] ( identifier[inputs] )
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[if] identifier[re] . identifier[compile] ( identifier[regex] ). identifier[match] ( identifier[f] ):
identifier[output_file] = identifier[handle_single_url] ( identifier[f] , identifier[outdir] )
identifier[output_files] . identifier[append] ( identifier[output_file] )
keyword[return] identifier[output_files] | def download_url_regex(inputs, outdir, regex='.*'):
"""
Downloads http(s) urls to a local files
:param str inputs: Required, the seed url
:param str outdir: Required. the local directory to put the downloadedfiles.
:param str regex: Optional, a regex string. If not given, then all urls will be valid
:return: A list of local full path names (downloaded from inputs)
"""
if not inputs or type(inputs) != str or (not outdir) or (type(outdir) != str):
logging.error('The call parameters are invalid.')
return # depends on [control=['if'], data=[]]
elif not os.path.exists(outdir):
os.makedirs(outdir) # depends on [control=['if'], data=[]]
output_files = []
files = get_urls_from_seed(inputs)
for f in files:
if re.compile(regex).match(f):
output_file = handle_single_url(f, outdir)
output_files.append(output_file) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return output_files |
def save_content(self, file_path, encoding='utf-8'):
    """ Save the content of the .txt file in a text file.

    Parameters
    ----------
    file_path: str
        Path to the output file.
    encoding: str, optional
        Text encoding used when writing; defaults to 'utf-8'.

    Raises
    ------
    ValueError
        If the template content has not been filled yet.
    Exception
        If writing the content to disk fails; the original error is
        chained as the cause.
    """
    # Rendering requires the template to have been filled first.
    if self.file_content_ is None:
        msg = ('Template content has not been updated. '
               'Please fill the template before rendering it.')
        log.exception(msg)
        raise ValueError(msg)
    try:
        write_to_file(file_path, content=self.file_content_,
                      encoding=encoding)
    except Exception as exc:
        # Re-raise with document-type context, keeping the original cause.
        msg = ('Document of type {} got an error when '
               'writing content.'.format(self.__class__))
        log.exception(msg)
        raise Exception(msg) from exc
constant[ Save the content of the .txt file in a text file.
Parameters
----------
file_path: str
Path to the output file.
]
if compare[name[self].file_content_ is constant[None]] begin[:]
variable[msg] assign[=] constant[Template content has not been updated. Please fill the template before rendering it.]
call[name[log].exception, parameter[name[msg]]]
<ast.Raise object at 0x7da1b021ccd0>
<ast.Try object at 0x7da1b021cac0> | keyword[def] identifier[save_content] ( identifier[self] , identifier[file_path] , identifier[encoding] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[file_content_] keyword[is] keyword[None] :
identifier[msg] = literal[string]
identifier[log] . identifier[exception] ( identifier[msg] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[try] :
identifier[write_to_file] ( identifier[file_path] , identifier[content] = identifier[self] . identifier[file_content_] ,
identifier[encoding] = identifier[encoding] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[msg] = literal[string] . identifier[format] ( identifier[self] . identifier[__class__] )
identifier[log] . identifier[exception] ( identifier[msg] )
keyword[raise] identifier[Exception] ( identifier[msg] ) keyword[from] identifier[exc] | def save_content(self, file_path, encoding='utf-8'):
""" Save the content of the .txt file in a text file.
Parameters
----------
file_path: str
Path to the output file.
"""
if self.file_content_ is None:
msg = 'Template content has not been updated. Please fill the template before rendering it.'
log.exception(msg)
raise ValueError(msg) # depends on [control=['if'], data=[]]
try:
write_to_file(file_path, content=self.file_content_, encoding=encoding) # depends on [control=['try'], data=[]]
except Exception as exc:
msg = 'Document of type {} got an error when writing content.'.format(self.__class__)
log.exception(msg)
raise Exception(msg) from exc # depends on [control=['except'], data=['exc']] |
def lca(root, p, q):
    """Return the lowest common ancestor of nodes *p* and *q*.

    :type root: TreeNode
    :type p: TreeNode
    :type q: TreeNode
    :rtype: TreeNode
    """
    # Base cases: empty subtree, or the root itself is one of the targets.
    if root is None:
        return root
    if root is p or root is q:
        return root
    found_left = lca(root.left, p, q)
    found_right = lca(root.right, p, q)
    # Targets found on both sides: this node is their lowest common ancestor.
    if found_left is not None and found_right is not None:
        return root
    # Otherwise propagate whichever side (if any) contained a target.
    return found_left if found_left else found_right
constant[
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
]
if <ast.BoolOp object at 0x7da1b20fb370> begin[:]
return[name[root]]
variable[left] assign[=] call[name[lca], parameter[name[root].left, name[p], name[q]]]
variable[right] assign[=] call[name[lca], parameter[name[root].right, name[p], name[q]]]
if <ast.BoolOp object at 0x7da1b20faec0> begin[:]
return[name[root]]
return[<ast.IfExp object at 0x7da1b20f9900>] | keyword[def] identifier[lca] ( identifier[root] , identifier[p] , identifier[q] ):
literal[string]
keyword[if] identifier[root] keyword[is] keyword[None] keyword[or] identifier[root] keyword[is] identifier[p] keyword[or] identifier[root] keyword[is] identifier[q] :
keyword[return] identifier[root]
identifier[left] = identifier[lca] ( identifier[root] . identifier[left] , identifier[p] , identifier[q] )
identifier[right] = identifier[lca] ( identifier[root] . identifier[right] , identifier[p] , identifier[q] )
keyword[if] identifier[left] keyword[is] keyword[not] keyword[None] keyword[and] identifier[right] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[root]
keyword[return] identifier[left] keyword[if] identifier[left] keyword[else] identifier[right] | def lca(root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root is None or root is p or root is q:
return root # depends on [control=['if'], data=[]]
left = lca(root.left, p, q)
right = lca(root.right, p, q)
if left is not None and right is not None:
return root # depends on [control=['if'], data=[]]
return left if left else right |
def get_chempot_range_stability_phase(self, target_comp, open_elt):
    """
    returns a set of chemical potentials corresponding to the max and min
    chemical potential of the open element for a given composition. It is
    quite common to have for instance a ternary oxide (e.g., ABO3) for
    which you want to know what are the A and B chemical potential leading
    to the highest and lowest oxygen chemical potential (reducing and
    oxidizing conditions). This is useful for defect computations.
    Args:
        target_comp: A Composition object
        open_elt: Element that you want to constrain to be max or min
    Returns:
        {Element:(mu_min,mu_max)}: Chemical potentials are given in
        "absolute" values (i.e., not referenced to 0)
    """
    # Elemental reference energies for every element except the open one;
    # ordering matches the vertex coordinates returned by the chempot map.
    muref = np.array([self.el_refs[e].energy_per_atom
                      for e in self.elements if e != open_elt])
    chempot_ranges = self.get_chempot_range_map(
        [e for e in self.elements if e != open_elt])
    # Pad the target composition with zero amounts so every element of
    # the phase diagram is indexable below.
    for e in self.elements:
        if e not in target_comp.elements:
            target_comp = target_comp + Composition({e: 0.0})
    coeff = [-target_comp[e] for e in self.elements if e != open_elt]
    n_open = target_comp[open_elt]
    max_open = -float('inf')
    min_open = float('inf')
    max_mus = None
    min_mus = None
    for e in chempot_ranges.keys():
        if e.composition.reduced_composition == \
                target_comp.reduced_composition:
            multiplicator = e.composition[open_elt] / n_open
            ef = e.energy / multiplicator
            # Scan every vertex of the chemical-potential domain and
            # track the extrema of the open element's chemical potential.
            # (The expression was previously recomputed up to four times
            # per vertex; compute it once.)
            for s in chempot_ranges[e]:
                for v in s._coords:
                    chempot = (np.dot(v + muref, coeff) + ef) / n_open
                    if chempot > max_open:
                        max_open = chempot
                        max_mus = v
                    if chempot < min_open:
                        min_open = chempot
                        min_mus = v
    if max_mus is None or min_mus is None:
        # Without this guard a missing entry surfaced later as an opaque
        # "NoneType is not subscriptable" TypeError.
        raise ValueError(
            "No entry matching the target composition was found in the "
            "chemical potential map")
    elts = [e for e in self.elements if e != open_elt]
    res = {}
    for i, elt in enumerate(elts):
        res[elt] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
    res[open_elt] = (min_open, max_open)
    return res
constant[
returns a set of chemical potentials corresponding to the max and min
chemical potential of the open element for a given composition. It is
quite common to have for instance a ternary oxide (e.g., ABO3) for
which you want to know what are the A and B chemical potential leading
to the highest and lowest oxygen chemical potential (reducing and
oxidizing conditions). This is useful for defect computations.
Args:
target_comp: A Composition object
open_elt: Element that you want to constrain to be max or min
Returns:
{Element:(mu_min,mu_max)}: Chemical potentials are given in
"absolute" values (i.e., not referenced to 0)
]
variable[muref] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18dc9a740>]]
variable[chempot_ranges] assign[=] call[name[self].get_chempot_range_map, parameter[<ast.ListComp object at 0x7da18dc9aec0>]]
for taget[name[e]] in starred[name[self].elements] begin[:]
if <ast.UnaryOp object at 0x7da18dc9b9d0> begin[:]
variable[target_comp] assign[=] binary_operation[name[target_comp] + call[name[Composition], parameter[dictionary[[<ast.Name object at 0x7da18dc9a530>], [<ast.Constant object at 0x7da18dc9b610>]]]]]
variable[coeff] assign[=] <ast.ListComp object at 0x7da18dc985b0>
variable[max_open] assign[=] <ast.UnaryOp object at 0x7da18dc99240>
variable[min_open] assign[=] call[name[float], parameter[constant[inf]]]
variable[max_mus] assign[=] constant[None]
variable[min_mus] assign[=] constant[None]
for taget[name[e]] in starred[call[name[chempot_ranges].keys, parameter[]]] begin[:]
if compare[name[e].composition.reduced_composition equal[==] name[target_comp].reduced_composition] begin[:]
variable[multiplicator] assign[=] binary_operation[call[name[e].composition][name[open_elt]] / call[name[target_comp]][name[open_elt]]]
variable[ef] assign[=] binary_operation[name[e].energy / name[multiplicator]]
variable[all_coords] assign[=] list[[]]
for taget[name[s]] in starred[call[name[chempot_ranges]][name[e]]] begin[:]
for taget[name[v]] in starred[name[s]._coords] begin[:]
call[name[all_coords].append, parameter[name[v]]]
if compare[binary_operation[binary_operation[call[name[np].dot, parameter[binary_operation[name[v] + name[muref]], name[coeff]]] + name[ef]] / call[name[target_comp]][name[open_elt]]] greater[>] name[max_open]] begin[:]
variable[max_open] assign[=] binary_operation[binary_operation[call[name[np].dot, parameter[binary_operation[name[v] + name[muref]], name[coeff]]] + name[ef]] / call[name[target_comp]][name[open_elt]]]
variable[max_mus] assign[=] name[v]
if compare[binary_operation[binary_operation[call[name[np].dot, parameter[binary_operation[name[v] + name[muref]], name[coeff]]] + name[ef]] / call[name[target_comp]][name[open_elt]]] less[<] name[min_open]] begin[:]
variable[min_open] assign[=] binary_operation[binary_operation[call[name[np].dot, parameter[binary_operation[name[v] + name[muref]], name[coeff]]] + name[ef]] / call[name[target_comp]][name[open_elt]]]
variable[min_mus] assign[=] name[v]
variable[elts] assign[=] <ast.ListComp object at 0x7da18dc9ab30>
variable[res] assign[=] dictionary[[], []]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[elts]]]]]] begin[:]
call[name[res]][call[name[elts]][name[i]]] assign[=] tuple[[<ast.BinOp object at 0x7da18dc98430>, <ast.BinOp object at 0x7da18dc994b0>]]
call[name[res]][name[open_elt]] assign[=] tuple[[<ast.Name object at 0x7da18dc9baf0>, <ast.Name object at 0x7da18dc9a260>]]
return[name[res]] | keyword[def] identifier[get_chempot_range_stability_phase] ( identifier[self] , identifier[target_comp] , identifier[open_elt] ):
literal[string]
identifier[muref] = identifier[np] . identifier[array] ([ identifier[self] . identifier[el_refs] [ identifier[e] ]. identifier[energy_per_atom]
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[elements] keyword[if] identifier[e] != identifier[open_elt] ])
identifier[chempot_ranges] = identifier[self] . identifier[get_chempot_range_map] (
[ identifier[e] keyword[for] identifier[e] keyword[in] identifier[self] . identifier[elements] keyword[if] identifier[e] != identifier[open_elt] ])
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[elements] :
keyword[if] keyword[not] identifier[e] keyword[in] identifier[target_comp] . identifier[elements] :
identifier[target_comp] = identifier[target_comp] + identifier[Composition] ({ identifier[e] : literal[int] })
identifier[coeff] =[- identifier[target_comp] [ identifier[e] ] keyword[for] identifier[e] keyword[in] identifier[self] . identifier[elements] keyword[if] identifier[e] != identifier[open_elt] ]
identifier[max_open] =- identifier[float] ( literal[string] )
identifier[min_open] = identifier[float] ( literal[string] )
identifier[max_mus] = keyword[None]
identifier[min_mus] = keyword[None]
keyword[for] identifier[e] keyword[in] identifier[chempot_ranges] . identifier[keys] ():
keyword[if] identifier[e] . identifier[composition] . identifier[reduced_composition] == identifier[target_comp] . identifier[reduced_composition] :
identifier[multiplicator] = identifier[e] . identifier[composition] [ identifier[open_elt] ]/ identifier[target_comp] [ identifier[open_elt] ]
identifier[ef] = identifier[e] . identifier[energy] / identifier[multiplicator]
identifier[all_coords] =[]
keyword[for] identifier[s] keyword[in] identifier[chempot_ranges] [ identifier[e] ]:
keyword[for] identifier[v] keyword[in] identifier[s] . identifier[_coords] :
identifier[all_coords] . identifier[append] ( identifier[v] )
keyword[if] ( identifier[np] . identifier[dot] ( identifier[v] + identifier[muref] , identifier[coeff] )+ identifier[ef] )/ identifier[target_comp] [
identifier[open_elt] ]> identifier[max_open] :
identifier[max_open] =( identifier[np] . identifier[dot] ( identifier[v] + identifier[muref] , identifier[coeff] )+ identifier[ef] )/ identifier[target_comp] [ identifier[open_elt] ]
identifier[max_mus] = identifier[v]
keyword[if] ( identifier[np] . identifier[dot] ( identifier[v] + identifier[muref] , identifier[coeff] )+ identifier[ef] )/ identifier[target_comp] [
identifier[open_elt] ]< identifier[min_open] :
identifier[min_open] =( identifier[np] . identifier[dot] ( identifier[v] + identifier[muref] , identifier[coeff] )+ identifier[ef] )/ identifier[target_comp] [ identifier[open_elt] ]
identifier[min_mus] = identifier[v]
identifier[elts] =[ identifier[e] keyword[for] identifier[e] keyword[in] identifier[self] . identifier[elements] keyword[if] identifier[e] != identifier[open_elt] ]
identifier[res] ={}
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[elts] )):
identifier[res] [ identifier[elts] [ identifier[i] ]]=( identifier[min_mus] [ identifier[i] ]+ identifier[muref] [ identifier[i] ], identifier[max_mus] [ identifier[i] ]+ identifier[muref] [ identifier[i] ])
identifier[res] [ identifier[open_elt] ]=( identifier[min_open] , identifier[max_open] )
keyword[return] identifier[res] | def get_chempot_range_stability_phase(self, target_comp, open_elt):
"""
returns a set of chemical potentials corresponding to the max and min
chemical potential of the open element for a given composition. It is
quite common to have for instance a ternary oxide (e.g., ABO3) for
which you want to know what are the A and B chemical potential leading
to the highest and lowest oxygen chemical potential (reducing and
oxidizing conditions). This is useful for defect computations.
Args:
target_comp: A Composition object
open_elt: Element that you want to constrain to be max or min
Returns:
{Element:(mu_min,mu_max)}: Chemical potentials are given in
"absolute" values (i.e., not referenced to 0)
"""
muref = np.array([self.el_refs[e].energy_per_atom for e in self.elements if e != open_elt])
chempot_ranges = self.get_chempot_range_map([e for e in self.elements if e != open_elt])
for e in self.elements:
if not e in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
coeff = [-target_comp[e] for e in self.elements if e != open_elt]
max_open = -float('inf')
min_open = float('inf')
max_mus = None
min_mus = None
for e in chempot_ranges.keys():
if e.composition.reduced_composition == target_comp.reduced_composition:
multiplicator = e.composition[open_elt] / target_comp[open_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
all_coords.append(v)
if (np.dot(v + muref, coeff) + ef) / target_comp[open_elt] > max_open:
max_open = (np.dot(v + muref, coeff) + ef) / target_comp[open_elt]
max_mus = v # depends on [control=['if'], data=['max_open']]
if (np.dot(v + muref, coeff) + ef) / target_comp[open_elt] < min_open:
min_open = (np.dot(v + muref, coeff) + ef) / target_comp[open_elt]
min_mus = v # depends on [control=['if'], data=['min_open']] # depends on [control=['for'], data=['v']] # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
elts = [e for e in self.elements if e != open_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i]) # depends on [control=['for'], data=['i']]
res[open_elt] = (min_open, max_open)
return res |
def match_color_index(self, color):
    """Takes an "R,G,B" string or wx.Color and returns a matching xlwt
    color.

    Integers are assumed to already be xlwt color indices and are
    returned unchanged.
    """
    from jcvi.utils.webcolors import color_diff
    if isinstance(color, int):
        return color
    if color:
        if isinstance(color, six.string_types):
            # Materialize the RGB triple: a bare ``map`` iterator would
            # be exhausted after the first color_diff() call below
            # (Python 3), leaving all later distances wrong.
            rgb = tuple(int(c) for c in color.split(','))
        else:
            rgb = color.Get()
    # NOTE(review): if ``color`` is falsy (None / empty string), ``rgb``
    # is never bound and the comprehension below raises NameError —
    # confirm callers always pass a non-empty color here.
    logging.disable(logging.DEBUG)
    distances = [color_diff(rgb, x) for x in self.xlwt_colors]
    logging.disable(logging.NOTSET)
    result = distances.index(min(distances))
    self.unused_colors.discard(self.xlwt_colors[result])
    return result
constant[Takes an "R,G,B" string or wx.Color and returns a matching xlwt
color.
]
from relative_module[jcvi.utils.webcolors] import module[color_diff]
if call[name[isinstance], parameter[name[color], name[int]]] begin[:]
return[name[color]]
if name[color] begin[:]
if call[name[isinstance], parameter[name[color], name[six].string_types]] begin[:]
variable[rgb] assign[=] call[name[map], parameter[name[int], call[name[color].split, parameter[constant[,]]]]]
call[name[logging].disable, parameter[name[logging].DEBUG]]
variable[distances] assign[=] <ast.ListComp object at 0x7da1b09bfe80>
call[name[logging].disable, parameter[name[logging].NOTSET]]
variable[result] assign[=] call[name[distances].index, parameter[call[name[min], parameter[name[distances]]]]]
call[name[self].unused_colors.discard, parameter[call[name[self].xlwt_colors][name[result]]]]
return[name[result]] | keyword[def] identifier[match_color_index] ( identifier[self] , identifier[color] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[webcolors] keyword[import] identifier[color_diff]
keyword[if] identifier[isinstance] ( identifier[color] , identifier[int] ):
keyword[return] identifier[color]
keyword[if] identifier[color] :
keyword[if] identifier[isinstance] ( identifier[color] , identifier[six] . identifier[string_types] ):
identifier[rgb] = identifier[map] ( identifier[int] , identifier[color] . identifier[split] ( literal[string] ))
keyword[else] :
identifier[rgb] = identifier[color] . identifier[Get] ()
identifier[logging] . identifier[disable] ( identifier[logging] . identifier[DEBUG] )
identifier[distances] =[ identifier[color_diff] ( identifier[rgb] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[xlwt_colors] ]
identifier[logging] . identifier[disable] ( identifier[logging] . identifier[NOTSET] )
identifier[result] = identifier[distances] . identifier[index] ( identifier[min] ( identifier[distances] ))
identifier[self] . identifier[unused_colors] . identifier[discard] ( identifier[self] . identifier[xlwt_colors] [ identifier[result] ])
keyword[return] identifier[result] | def match_color_index(self, color):
"""Takes an "R,G,B" string or wx.Color and returns a matching xlwt
color.
"""
from jcvi.utils.webcolors import color_diff
if isinstance(color, int):
return color # depends on [control=['if'], data=[]]
if color:
if isinstance(color, six.string_types):
rgb = map(int, color.split(',')) # depends on [control=['if'], data=[]]
else:
rgb = color.Get()
logging.disable(logging.DEBUG)
distances = [color_diff(rgb, x) for x in self.xlwt_colors]
logging.disable(logging.NOTSET)
result = distances.index(min(distances))
self.unused_colors.discard(self.xlwt_colors[result])
return result # depends on [control=['if'], data=[]] |
def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False):
    """MLE for Linear Regression Model

    Parameters:
    -----------
    y : ndarray
        target variable with N observations
    X : ndarray
        The <N x C> design matrix with C independent
        variables, features, factors, etc.
    algorithm : str
        Optional. Default 'Nelder-Mead' (Simplex).
        The algorithm used in scipy.optimize.minimize
    debug : bool
        Optional.

    Returns:
    --------
    beta : ndarray
        Estimated regression coefficients.
    results : scipy.optimize.optimize.OptimizeResult
        Optional. If debug=True then only scipy's
        optimization result variable is returned.
    """
    import numpy as np
    import scipy.stats as sstat
    import scipy.optimize as sopt

    def neg_loglik(params, y, X):
        # params = (beta_0..beta_{C-1}, sigma): negative Gaussian
        # log-likelihood of the residuals under scale sigma.
        prediction = np.dot(X, params[:-1])
        return -sstat.norm.logpdf(y, loc=prediction, scale=params[-1]).sum()

    # Only gradient-free / first-order solvers are supported here.
    if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'):
        raise Exception('Optimization Algorithm not supported.')

    # Start all coefficients and the noise scale at one.
    start = np.ones(X.shape[1] + 1)
    fit = sopt.minimize(
        neg_loglik, start, args=(y, X),
        method=algorithm, options={'disp': False})

    # In debug mode hand back scipy's full result object; otherwise
    # strip the trailing scale parameter and return the coefficients.
    return fit if debug else fit.x[:-1]
constant[MLE for Linear Regression Model
Parameters:
-----------
y : ndarray
target variable with N observations
X : ndarray
The <N x C> design matrix with C independent
variables, features, factors, etc.
algorithm : str
Optional. Default 'Nelder-Mead' (Simplex).
The algorithm used in scipy.optimize.minimize
debug : bool
Optional.
Returns:
--------
beta : ndarray
Estimated regression coefficients.
results : scipy.optimize.optimize.OptimizeResult
Optional. If debug=True then only scipy's
optimization result variable is returned.
]
import module[numpy] as alias[np]
import module[scipy.stats] as alias[sstat]
import module[scipy.optimize] as alias[sopt]
def function[objective_nll_linreg, parameter[theta, y, X]]:
variable[yhat] assign[=] call[name[np].dot, parameter[name[X], call[name[theta]][<ast.Slice object at 0x7da1b14e6e60>]]]
return[binary_operation[<ast.UnaryOp object at 0x7da1b14e7f70> * call[call[name[sstat].norm.logpdf, parameter[name[y]]].sum, parameter[]]]]
if compare[name[algorithm] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b14e5780>, <ast.Constant object at 0x7da1b14e5960>, <ast.Constant object at 0x7da1b14e4d90>]]] begin[:]
<ast.Raise object at 0x7da1b14e62f0>
variable[theta0] assign[=] call[name[np].ones, parameter[tuple[[<ast.BinOp object at 0x7da18f810280>]]]]
variable[results] assign[=] call[name[sopt].minimize, parameter[name[objective_nll_linreg], name[theta0]]]
if name[debug] begin[:]
return[name[results]]
return[call[name[results].x][<ast.Slice object at 0x7da18f811090>]] | keyword[def] identifier[linreg_mle] ( identifier[y] , identifier[X] , identifier[algorithm] = literal[string] , identifier[debug] = keyword[False] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[import] identifier[scipy] . identifier[stats] keyword[as] identifier[sstat]
keyword[import] identifier[scipy] . identifier[optimize] keyword[as] identifier[sopt]
keyword[def] identifier[objective_nll_linreg] ( identifier[theta] , identifier[y] , identifier[X] ):
identifier[yhat] = identifier[np] . identifier[dot] ( identifier[X] , identifier[theta] [:- literal[int] ])
keyword[return] - literal[int] * identifier[sstat] . identifier[norm] . identifier[logpdf] ( identifier[y] , identifier[loc] = identifier[yhat] , identifier[scale] = identifier[theta] [- literal[int] ]). identifier[sum] ()
keyword[if] identifier[algorithm] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[Exception] ( literal[string] )
identifier[theta0] = identifier[np] . identifier[ones] (( identifier[X] . identifier[shape] [ literal[int] ]+ literal[int] ,))
identifier[results] = identifier[sopt] . identifier[minimize] (
identifier[objective_nll_linreg] ,
identifier[theta0] ,
identifier[args] =( identifier[y] , identifier[X] ),
identifier[method] = identifier[algorithm] ,
identifier[options] ={ literal[string] : keyword[False] })
keyword[if] identifier[debug] :
keyword[return] identifier[results]
keyword[return] identifier[results] . identifier[x] [:- literal[int] ] | def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False):
"""MLE for Linear Regression Model
Parameters:
-----------
y : ndarray
target variable with N observations
X : ndarray
The <N x C> design matrix with C independent
variables, features, factors, etc.
algorithm : str
Optional. Default 'Nelder-Mead' (Simplex).
The algorithm used in scipy.optimize.minimize
debug : bool
Optional.
Returns:
--------
beta : ndarray
Estimated regression coefficients.
results : scipy.optimize.optimize.OptimizeResult
Optional. If debug=True then only scipy's
optimization result variable is returned.
"""
import numpy as np
import scipy.stats as sstat
import scipy.optimize as sopt
def objective_nll_linreg(theta, y, X):
yhat = np.dot(X, theta[:-1]) # =X*beta
return -1.0 * sstat.norm.logpdf(y, loc=yhat, scale=theta[-1]).sum()
# check eligible algorithm
if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'):
raise Exception('Optimization Algorithm not supported.') # depends on [control=['if'], data=[]]
# set start values
theta0 = np.ones((X.shape[1] + 1,))
# run solver
results = sopt.minimize(objective_nll_linreg, theta0, args=(y, X), method=algorithm, options={'disp': False})
# debug?
if debug:
return results # depends on [control=['if'], data=[]]
# done
return results.x[:-1] |
def check_qt():
    """Check Qt binding requirements"""
    qt_infos = dict(pyqt5=("PyQt5", "5.6"))
    try:
        import qtpy
        package_name, required_ver = qt_infos[qtpy.API]
        actual_ver = qtpy.PYQT_VERSION
        # Warn when the installed binding is older than the minimum.
        if LooseVersion(actual_ver) < LooseVersion(required_ver):
            message = ("Please check Spyder installation requirements:\n"
                       "%s %s+ is required (found v%s)."
                       % (package_name, required_ver, actual_ver))
            show_warning(message)
    except ImportError:
        # qtpy itself could not be imported at all.
        show_warning("Failed to import qtpy.\n"
                     "Please check Spyder installation requirements:\n\n"
                     "qtpy 1.2.0+ and\n"
                     "%s %s+\n\n"
                     "are required to run Spyder."
                     % (qt_infos['pyqt5']))
constant[Check Qt binding requirements]
variable[qt_infos] assign[=] call[name[dict], parameter[]]
<ast.Try object at 0x7da20e9b0070> | keyword[def] identifier[check_qt] ():
literal[string]
identifier[qt_infos] = identifier[dict] ( identifier[pyqt5] =( literal[string] , literal[string] ))
keyword[try] :
keyword[import] identifier[qtpy]
identifier[package_name] , identifier[required_ver] = identifier[qt_infos] [ identifier[qtpy] . identifier[API] ]
identifier[actual_ver] = identifier[qtpy] . identifier[PYQT_VERSION]
keyword[if] identifier[LooseVersion] ( identifier[actual_ver] )< identifier[LooseVersion] ( identifier[required_ver] ):
identifier[show_warning] ( literal[string]
literal[string]
%( identifier[package_name] , identifier[required_ver] , identifier[actual_ver] ))
keyword[except] identifier[ImportError] :
identifier[show_warning] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
%( identifier[qt_infos] [ literal[string] ])) | def check_qt():
"""Check Qt binding requirements"""
qt_infos = dict(pyqt5=('PyQt5', '5.6'))
try:
import qtpy
(package_name, required_ver) = qt_infos[qtpy.API]
actual_ver = qtpy.PYQT_VERSION
if LooseVersion(actual_ver) < LooseVersion(required_ver):
show_warning('Please check Spyder installation requirements:\n%s %s+ is required (found v%s).' % (package_name, required_ver, actual_ver)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ImportError:
show_warning('Failed to import qtpy.\nPlease check Spyder installation requirements:\n\nqtpy 1.2.0+ and\n%s %s+\n\nare required to run Spyder.' % qt_infos['pyqt5']) # depends on [control=['except'], data=[]] |
def param_particle_rad(self, ind):
    """Return the radius ('a') parameter name for one or more particles."""
    particles = self._vps(listify(ind))
    return [self._i2p(particle, 'a') for particle in particles]
constant[ Get radius of one or more particles ]
variable[ind] assign[=] call[name[self]._vps, parameter[call[name[listify], parameter[name[ind]]]]]
return[<ast.ListComp object at 0x7da18f58e800>] | keyword[def] identifier[param_particle_rad] ( identifier[self] , identifier[ind] ):
literal[string]
identifier[ind] = identifier[self] . identifier[_vps] ( identifier[listify] ( identifier[ind] ))
keyword[return] [ identifier[self] . identifier[_i2p] ( identifier[i] , literal[string] ) keyword[for] identifier[i] keyword[in] identifier[ind] ] | def param_particle_rad(self, ind):
""" Get radius of one or more particles """
ind = self._vps(listify(ind))
return [self._i2p(i, 'a') for i in ind] |
def mom(self, K, **kws):
    """
    Raw statistical moments.

    Creates non-centralized raw moments from the random variable. If
    analytical options can not be utilized, Monte Carlo integration
    will be used.

    Args:
        K (numpy.ndarray):
            Index of the raw moments. k.shape must be compatible with
            distribution shape. Sampling scheme when performing Monte
            Carlo
        rule (str):
            rule for estimating the moment if the analytical method fails.
        composite (numpy.ndarray):
            If provided, composit quadrature will be used. Ignored in the
            case if gaussian=True. If int provided, determines number of
            even domain splits. If array of ints, determines number of even
            domain splits along each axis. If array of arrays/floats,
            determines location of splits.
        antithetic (numpy.ndarray):
            List of bool. Represents the axes to mirror using antithetic
            variable during MCI.

    Returns:
        (numpy.ndarray):
            Shapes are related through the identity
            ``k.shape == dist.shape+k.shape``.
    """
    exponents = numpy.asarray(K, dtype=int)
    out_shape = exponents.shape
    dim = len(self)
    if dim > 1:
        # The leading axis indexes the distribution's dimensions, so it
        # is folded away; flatten the remaining axes for evaluation.
        out_shape = out_shape[1:]
        exponents = exponents.reshape(dim, int(exponents.size / dim))
    cache = {}
    moments = numpy.array(
        [evaluation.evaluate_moment(self, kdata, cache)
         for kdata in exponents.T])
    return moments.reshape(out_shape)
constant[
Raw statistical moments.
Creates non-centralized raw moments from the random variable. If
analytical options can not be utilized, Monte Carlo integration
will be used.
Args:
K (numpy.ndarray):
Index of the raw moments. k.shape must be compatible with
distribution shape. Sampling scheme when performing Monte
Carlo
rule (str):
rule for estimating the moment if the analytical method fails.
composite (numpy.ndarray):
If provided, composit quadrature will be used. Ignored in the
case if gaussian=True. If int provided, determines number of
even domain splits. If array of ints, determines number of even
domain splits along each axis. If array of arrays/floats,
determines location of splits.
antithetic (numpy.ndarray):
List of bool. Represents the axes to mirror using antithetic
variable during MCI.
Returns:
(numpy.ndarray):
Shapes are related through the identity
``k.shape == dist.shape+k.shape``.
]
variable[K] assign[=] call[name[numpy].asarray, parameter[name[K]]]
variable[shape] assign[=] name[K].shape
variable[dim] assign[=] call[name[len], parameter[name[self]]]
if compare[name[dim] greater[>] constant[1]] begin[:]
variable[shape] assign[=] call[name[shape]][<ast.Slice object at 0x7da18f09dc90>]
variable[size] assign[=] call[name[int], parameter[binary_operation[name[K].size / name[dim]]]]
variable[K] assign[=] call[name[K].reshape, parameter[name[dim], name[size]]]
variable[cache] assign[=] dictionary[[], []]
variable[out] assign[=] <ast.ListComp object at 0x7da18f09c640>
variable[out] assign[=] call[name[numpy].array, parameter[name[out]]]
return[call[name[out].reshape, parameter[name[shape]]]] | keyword[def] identifier[mom] ( identifier[self] , identifier[K] ,** identifier[kws] ):
literal[string]
identifier[K] = identifier[numpy] . identifier[asarray] ( identifier[K] , identifier[dtype] = identifier[int] )
identifier[shape] = identifier[K] . identifier[shape]
identifier[dim] = identifier[len] ( identifier[self] )
keyword[if] identifier[dim] > literal[int] :
identifier[shape] = identifier[shape] [ literal[int] :]
identifier[size] = identifier[int] ( identifier[K] . identifier[size] / identifier[dim] )
identifier[K] = identifier[K] . identifier[reshape] ( identifier[dim] , identifier[size] )
identifier[cache] ={}
identifier[out] =[ identifier[evaluation] . identifier[evaluate_moment] ( identifier[self] , identifier[kdata] , identifier[cache] ) keyword[for] identifier[kdata] keyword[in] identifier[K] . identifier[T] ]
identifier[out] = identifier[numpy] . identifier[array] ( identifier[out] )
keyword[return] identifier[out] . identifier[reshape] ( identifier[shape] ) | def mom(self, K, **kws):
"""
Raw statistical moments.
Creates non-centralized raw moments from the random variable. If
analytical options can not be utilized, Monte Carlo integration
will be used.
Args:
K (numpy.ndarray):
Index of the raw moments. k.shape must be compatible with
distribution shape. Sampling scheme when performing Monte
Carlo
rule (str):
rule for estimating the moment if the analytical method fails.
composite (numpy.ndarray):
If provided, composit quadrature will be used. Ignored in the
case if gaussian=True. If int provided, determines number of
even domain splits. If array of ints, determines number of even
domain splits along each axis. If array of arrays/floats,
determines location of splits.
antithetic (numpy.ndarray):
List of bool. Represents the axes to mirror using antithetic
variable during MCI.
Returns:
(numpy.ndarray):
Shapes are related through the identity
``k.shape == dist.shape+k.shape``.
"""
K = numpy.asarray(K, dtype=int)
shape = K.shape
dim = len(self)
if dim > 1:
shape = shape[1:] # depends on [control=['if'], data=[]]
size = int(K.size / dim)
K = K.reshape(dim, size)
cache = {}
out = [evaluation.evaluate_moment(self, kdata, cache) for kdata in K.T]
out = numpy.array(out)
return out.reshape(shape) |
def int_to_varbyte(self, value):
    """Convert an integer into a variable length byte.

    How it works: the bytes are stored in big-endian (significant bit
    first), the highest bit of the byte (mask 0x80) is set when there
    are more bytes following. The remaining 7 bits (mask 0x7F) are used
    to store the value.
    """
    # Number of 7-bit groups needed; pure integer arithmetic avoids the
    # floating-point rounding risk of the previous log(value, 0x80)
    # computation near exact powers of 0x80. A value of 0 still takes
    # one byte.
    length = max(1, (value.bit_length() + 6) // 7)
    # Big-endian 7-bit groups (renamed to avoid shadowing the builtin
    # ``bytes``).
    septets = [(value >> (7 * i)) & 0x7F for i in reversed(range(length))]
    # Set the continuation bit on every group but the last.
    septets = [b | 0x80 for b in septets[:-1]] + septets[-1:]
    return pack('%sB' % len(septets), *septets)
constant[Convert an integer into a variable length byte.
How it works: the bytes are stored in big-endian (significant bit
first), the highest bit of the byte (mask 0x80) is set when there
are more bytes following. The remaining 7 bits (mask 0x7F) are used
to store the value.
]
variable[length] assign[=] binary_operation[call[name[int], parameter[call[name[log], parameter[call[name[max], parameter[name[value], constant[1]]], constant[128]]]]] + constant[1]]
variable[bytes] assign[=] <ast.ListComp object at 0x7da18f00df60>
call[name[bytes].reverse, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[bytes]]] - constant[1]]]]] begin[:]
call[name[bytes]][name[i]] assign[=] binary_operation[call[name[bytes]][name[i]] <ast.BitOr object at 0x7da2590d6aa0> constant[128]]
return[call[name[pack], parameter[binary_operation[constant[%sB] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[bytes]]]], <ast.Starred object at 0x7da18dc98910>]]] | keyword[def] identifier[int_to_varbyte] ( identifier[self] , identifier[value] ):
literal[string]
identifier[length] = identifier[int] ( identifier[log] ( identifier[max] ( identifier[value] , literal[int] ), literal[int] ))+ literal[int]
identifier[bytes] =[ identifier[value] >> identifier[i] * literal[int] & literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[length] )]
identifier[bytes] . identifier[reverse] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[bytes] )- literal[int] ):
identifier[bytes] [ identifier[i] ]= identifier[bytes] [ identifier[i] ]| literal[int]
keyword[return] identifier[pack] ( literal[string] % identifier[len] ( identifier[bytes] ),* identifier[bytes] ) | def int_to_varbyte(self, value):
"""Convert an integer into a variable length byte.
How it works: the bytes are stored in big-endian (significant bit
first), the highest bit of the byte (mask 0x80) is set when there
are more bytes following. The remaining 7 bits (mask 0x7F) are used
to store the value.
"""
# Warning: bit kung-fu ahead. The length of the integer in bytes
length = int(log(max(value, 1), 128)) + 1
# Remove the highest bit and move the bits to the right if length > 1
bytes = [value >> i * 7 & 127 for i in range(length)]
bytes.reverse()
# Set the first bit on every one but the last bit.
for i in range(len(bytes) - 1):
bytes[i] = bytes[i] | 128 # depends on [control=['for'], data=['i']]
return pack('%sB' % len(bytes), *bytes) |
def build(self, docs=None, filename=None):
"""Build FM-index
Params:
<iterator> | <generator> docs
<str> filename
"""
if docs:
if hasattr(docs, 'items'):
for (idx, doc) in sorted(getattr(docs, 'items')(),
key=lambda x: x[0]):
self.fm.push_back(doc)
else:
for doc in filter(bool, docs):
self.fm.push_back(doc)
self.fm.build()
if filename:
self.fm.write(filename) | def function[build, parameter[self, docs, filename]]:
constant[Build FM-index
Params:
<iterator> | <generator> docs
<str> filename
]
if name[docs] begin[:]
if call[name[hasattr], parameter[name[docs], constant[items]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0ce49a0>, <ast.Name object at 0x7da1b0ce49d0>]]] in starred[call[name[sorted], parameter[call[call[name[getattr], parameter[name[docs], constant[items]]], parameter[]]]]] begin[:]
call[name[self].fm.push_back, parameter[name[doc]]]
call[name[self].fm.build, parameter[]]
if name[filename] begin[:]
call[name[self].fm.write, parameter[name[filename]]] | keyword[def] identifier[build] ( identifier[self] , identifier[docs] = keyword[None] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] identifier[docs] :
keyword[if] identifier[hasattr] ( identifier[docs] , literal[string] ):
keyword[for] ( identifier[idx] , identifier[doc] ) keyword[in] identifier[sorted] ( identifier[getattr] ( identifier[docs] , literal[string] )(),
identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]):
identifier[self] . identifier[fm] . identifier[push_back] ( identifier[doc] )
keyword[else] :
keyword[for] identifier[doc] keyword[in] identifier[filter] ( identifier[bool] , identifier[docs] ):
identifier[self] . identifier[fm] . identifier[push_back] ( identifier[doc] )
identifier[self] . identifier[fm] . identifier[build] ()
keyword[if] identifier[filename] :
identifier[self] . identifier[fm] . identifier[write] ( identifier[filename] ) | def build(self, docs=None, filename=None):
"""Build FM-index
Params:
<iterator> | <generator> docs
<str> filename
"""
if docs:
if hasattr(docs, 'items'):
for (idx, doc) in sorted(getattr(docs, 'items')(), key=lambda x: x[0]):
self.fm.push_back(doc) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
for doc in filter(bool, docs):
self.fm.push_back(doc) # depends on [control=['for'], data=['doc']] # depends on [control=['if'], data=[]]
self.fm.build()
if filename:
self.fm.write(filename) # depends on [control=['if'], data=[]] |
def import_rsa_key(pem_data):
"""
Extract an RSA key from a PEM-encoded X.509 certificate
:param pem_data: RSA key encoded in standard form
:return: rsa.RSAPublicKey instance
"""
if not pem_data.startswith(PREFIX):
pem_data = bytes('{}\n{}\n{}'.format(PREFIX, pem_data, POSTFIX),
'utf-8')
else:
pem_data = bytes(pem_data, 'utf-8')
cert = x509.load_pem_x509_certificate(pem_data, default_backend())
return cert.public_key() | def function[import_rsa_key, parameter[pem_data]]:
constant[
Extract an RSA key from a PEM-encoded X.509 certificate
:param pem_data: RSA key encoded in standard form
:return: rsa.RSAPublicKey instance
]
if <ast.UnaryOp object at 0x7da1b05bde10> begin[:]
variable[pem_data] assign[=] call[name[bytes], parameter[call[constant[{}
{}
{}].format, parameter[name[PREFIX], name[pem_data], name[POSTFIX]]], constant[utf-8]]]
variable[cert] assign[=] call[name[x509].load_pem_x509_certificate, parameter[name[pem_data], call[name[default_backend], parameter[]]]]
return[call[name[cert].public_key, parameter[]]] | keyword[def] identifier[import_rsa_key] ( identifier[pem_data] ):
literal[string]
keyword[if] keyword[not] identifier[pem_data] . identifier[startswith] ( identifier[PREFIX] ):
identifier[pem_data] = identifier[bytes] ( literal[string] . identifier[format] ( identifier[PREFIX] , identifier[pem_data] , identifier[POSTFIX] ),
literal[string] )
keyword[else] :
identifier[pem_data] = identifier[bytes] ( identifier[pem_data] , literal[string] )
identifier[cert] = identifier[x509] . identifier[load_pem_x509_certificate] ( identifier[pem_data] , identifier[default_backend] ())
keyword[return] identifier[cert] . identifier[public_key] () | def import_rsa_key(pem_data):
"""
Extract an RSA key from a PEM-encoded X.509 certificate
:param pem_data: RSA key encoded in standard form
:return: rsa.RSAPublicKey instance
"""
if not pem_data.startswith(PREFIX):
pem_data = bytes('{}\n{}\n{}'.format(PREFIX, pem_data, POSTFIX), 'utf-8') # depends on [control=['if'], data=[]]
else:
pem_data = bytes(pem_data, 'utf-8')
cert = x509.load_pem_x509_certificate(pem_data, default_backend())
return cert.public_key() |
def run_in_greenlet(callable):
"""Decorator to run a ``callable`` on a new greenlet.
A ``callable`` decorated with this decorator returns a coroutine
"""
@wraps(callable)
async def _(*args, **kwargs):
green = greenlet(callable)
# switch to the new greenlet
result = green.switch(*args, **kwargs)
# back to the parent
while isawaitable(result):
# keep on switching back to the greenlet if we get an awaitable
try:
result = green.switch((await result))
except Exception:
exc_info = sys.exc_info()
result = green.throw(*exc_info)
return green.switch(result)
return _ | def function[run_in_greenlet, parameter[callable]]:
constant[Decorator to run a ``callable`` on a new greenlet.
A ``callable`` decorated with this decorator returns a coroutine
]
<ast.AsyncFunctionDef object at 0x7da20e9b0040>
return[name[_]] | keyword[def] identifier[run_in_greenlet] ( identifier[callable] ):
literal[string]
@ identifier[wraps] ( identifier[callable] )
keyword[async] keyword[def] identifier[_] (* identifier[args] ,** identifier[kwargs] ):
identifier[green] = identifier[greenlet] ( identifier[callable] )
identifier[result] = identifier[green] . identifier[switch] (* identifier[args] ,** identifier[kwargs] )
keyword[while] identifier[isawaitable] ( identifier[result] ):
keyword[try] :
identifier[result] = identifier[green] . identifier[switch] (( keyword[await] identifier[result] ))
keyword[except] identifier[Exception] :
identifier[exc_info] = identifier[sys] . identifier[exc_info] ()
identifier[result] = identifier[green] . identifier[throw] (* identifier[exc_info] )
keyword[return] identifier[green] . identifier[switch] ( identifier[result] )
keyword[return] identifier[_] | def run_in_greenlet(callable):
"""Decorator to run a ``callable`` on a new greenlet.
A ``callable`` decorated with this decorator returns a coroutine
"""
@wraps(callable)
async def _(*args, **kwargs):
green = greenlet(callable)
# switch to the new greenlet
result = green.switch(*args, **kwargs)
# back to the parent
while isawaitable(result):
# keep on switching back to the greenlet if we get an awaitable
try:
result = green.switch(await result) # depends on [control=['try'], data=[]]
except Exception:
exc_info = sys.exc_info()
result = green.throw(*exc_info) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
return green.switch(result)
return _ |
def is_indexed(self, identifier):
""" Returns True if identifier is already indexed. Otherwise returns False. """
with self.index.searcher() as searcher:
result = searcher.search(Term('identifier', identifier['identifier']))
return bool(result) | def function[is_indexed, parameter[self, identifier]]:
constant[ Returns True if identifier is already indexed. Otherwise returns False. ]
with call[name[self].index.searcher, parameter[]] begin[:]
variable[result] assign[=] call[name[searcher].search, parameter[call[name[Term], parameter[constant[identifier], call[name[identifier]][constant[identifier]]]]]]
return[call[name[bool], parameter[name[result]]]] | keyword[def] identifier[is_indexed] ( identifier[self] , identifier[identifier] ):
literal[string]
keyword[with] identifier[self] . identifier[index] . identifier[searcher] () keyword[as] identifier[searcher] :
identifier[result] = identifier[searcher] . identifier[search] ( identifier[Term] ( literal[string] , identifier[identifier] [ literal[string] ]))
keyword[return] identifier[bool] ( identifier[result] ) | def is_indexed(self, identifier):
""" Returns True if identifier is already indexed. Otherwise returns False. """
with self.index.searcher() as searcher:
result = searcher.search(Term('identifier', identifier['identifier']))
return bool(result) # depends on [control=['with'], data=['searcher']] |
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features | def function[convert_examples_to_features, parameter[examples, label_list, max_seq_length, tokenizer, output_mode]]:
constant[Loads a data file into a list of `InputBatch`s.]
variable[label_map] assign[=] <ast.DictComp object at 0x7da18f723310>
variable[features] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f721930>, <ast.Name object at 0x7da18f720250>]]] in starred[call[name[enumerate], parameter[name[examples]]]] begin[:]
if compare[binary_operation[name[ex_index] <ast.Mod object at 0x7da2590d6920> constant[10000]] equal[==] constant[0]] begin[:]
call[name[logger].info, parameter[binary_operation[constant[Writing example %d of %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f7239d0>, <ast.Call object at 0x7da18f7233a0>]]]]]
variable[tokens_a] assign[=] call[name[tokenizer].tokenize, parameter[name[example].text_a]]
variable[tokens_b] assign[=] constant[None]
if name[example].text_b begin[:]
variable[tokens_b] assign[=] call[name[tokenizer].tokenize, parameter[name[example].text_b]]
call[name[_truncate_seq_pair], parameter[name[tokens_a], name[tokens_b], binary_operation[name[max_seq_length] - constant[3]]]]
variable[tokens] assign[=] binary_operation[binary_operation[list[[<ast.Constant object at 0x7da18f7214b0>]] + name[tokens_a]] + list[[<ast.Constant object at 0x7da18f720e20>]]]
variable[segment_ids] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f7218a0>]] * call[name[len], parameter[name[tokens]]]]
if name[tokens_b] begin[:]
<ast.AugAssign object at 0x7da18f723130>
<ast.AugAssign object at 0x7da18f720d00>
variable[input_ids] assign[=] call[name[tokenizer].convert_tokens_to_ids, parameter[name[tokens]]]
variable[input_mask] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f722080>]] * call[name[len], parameter[name[input_ids]]]]
variable[padding] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f722e00>]] * binary_operation[name[max_seq_length] - call[name[len], parameter[name[input_ids]]]]]
<ast.AugAssign object at 0x7da18f723d00>
<ast.AugAssign object at 0x7da18f7226b0>
<ast.AugAssign object at 0x7da18f7223b0>
assert[compare[call[name[len], parameter[name[input_ids]]] equal[==] name[max_seq_length]]]
assert[compare[call[name[len], parameter[name[input_mask]]] equal[==] name[max_seq_length]]]
assert[compare[call[name[len], parameter[name[segment_ids]]] equal[==] name[max_seq_length]]]
if compare[name[output_mode] equal[==] constant[classification]] begin[:]
variable[label_id] assign[=] call[name[label_map]][name[example].label]
if compare[name[ex_index] less[<] constant[5]] begin[:]
call[name[logger].info, parameter[constant[*** Example ***]]]
call[name[logger].info, parameter[binary_operation[constant[guid: %s] <ast.Mod object at 0x7da2590d6920> name[example].guid]]]
call[name[logger].info, parameter[binary_operation[constant[tokens: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da18f721780>]]]]]
call[name[logger].info, parameter[binary_operation[constant[input_ids: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da18f7207c0>]]]]]
call[name[logger].info, parameter[binary_operation[constant[input_mask: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da18f723f10>]]]]]
call[name[logger].info, parameter[binary_operation[constant[segment_ids: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da18fe92fe0>]]]]]
call[name[logger].info, parameter[binary_operation[constant[label: %s (id = %d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18fe92110>, <ast.Name object at 0x7da18fe93e50>]]]]]
call[name[features].append, parameter[call[name[InputFeatures], parameter[]]]]
return[name[features]] | keyword[def] identifier[convert_examples_to_features] ( identifier[examples] , identifier[label_list] , identifier[max_seq_length] ,
identifier[tokenizer] , identifier[output_mode] ):
literal[string]
identifier[label_map] ={ identifier[label] : identifier[i] keyword[for] identifier[i] , identifier[label] keyword[in] identifier[enumerate] ( identifier[label_list] )}
identifier[features] =[]
keyword[for] ( identifier[ex_index] , identifier[example] ) keyword[in] identifier[enumerate] ( identifier[examples] ):
keyword[if] identifier[ex_index] % literal[int] == literal[int] :
identifier[logger] . identifier[info] ( literal[string] %( identifier[ex_index] , identifier[len] ( identifier[examples] )))
identifier[tokens_a] = identifier[tokenizer] . identifier[tokenize] ( identifier[example] . identifier[text_a] )
identifier[tokens_b] = keyword[None]
keyword[if] identifier[example] . identifier[text_b] :
identifier[tokens_b] = identifier[tokenizer] . identifier[tokenize] ( identifier[example] . identifier[text_b] )
identifier[_truncate_seq_pair] ( identifier[tokens_a] , identifier[tokens_b] , identifier[max_seq_length] - literal[int] )
keyword[else] :
keyword[if] identifier[len] ( identifier[tokens_a] )> identifier[max_seq_length] - literal[int] :
identifier[tokens_a] = identifier[tokens_a] [:( identifier[max_seq_length] - literal[int] )]
identifier[tokens] =[ literal[string] ]+ identifier[tokens_a] +[ literal[string] ]
identifier[segment_ids] =[ literal[int] ]* identifier[len] ( identifier[tokens] )
keyword[if] identifier[tokens_b] :
identifier[tokens] += identifier[tokens_b] +[ literal[string] ]
identifier[segment_ids] +=[ literal[int] ]*( identifier[len] ( identifier[tokens_b] )+ literal[int] )
identifier[input_ids] = identifier[tokenizer] . identifier[convert_tokens_to_ids] ( identifier[tokens] )
identifier[input_mask] =[ literal[int] ]* identifier[len] ( identifier[input_ids] )
identifier[padding] =[ literal[int] ]*( identifier[max_seq_length] - identifier[len] ( identifier[input_ids] ))
identifier[input_ids] += identifier[padding]
identifier[input_mask] += identifier[padding]
identifier[segment_ids] += identifier[padding]
keyword[assert] identifier[len] ( identifier[input_ids] )== identifier[max_seq_length]
keyword[assert] identifier[len] ( identifier[input_mask] )== identifier[max_seq_length]
keyword[assert] identifier[len] ( identifier[segment_ids] )== identifier[max_seq_length]
keyword[if] identifier[output_mode] == literal[string] :
identifier[label_id] = identifier[label_map] [ identifier[example] . identifier[label] ]
keyword[elif] identifier[output_mode] == literal[string] :
identifier[label_id] = identifier[float] ( identifier[example] . identifier[label] )
keyword[else] :
keyword[raise] identifier[KeyError] ( identifier[output_mode] )
keyword[if] identifier[ex_index] < literal[int] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] %( identifier[example] . identifier[guid] ))
identifier[logger] . identifier[info] ( literal[string] % literal[string] . identifier[join] (
[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[tokens] ]))
identifier[logger] . identifier[info] ( literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[input_ids] ]))
identifier[logger] . identifier[info] ( literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[input_mask] ]))
identifier[logger] . identifier[info] (
literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[segment_ids] ]))
identifier[logger] . identifier[info] ( literal[string] %( identifier[example] . identifier[label] , identifier[label_id] ))
identifier[features] . identifier[append] (
identifier[InputFeatures] ( identifier[input_ids] = identifier[input_ids] ,
identifier[input_mask] = identifier[input_mask] ,
identifier[segment_ids] = identifier[segment_ids] ,
identifier[label_id] = identifier[label_id] ))
keyword[return] identifier[features] | def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for (i, label) in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(examples))) # depends on [control=['if'], data=[]]
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) # depends on [control=['if'], data=[]]
# Account for [CLS] and [SEP] with "- 2"
elif len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:max_seq_length - 2] # depends on [control=['if'], data=[]]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ['[CLS]'] + tokens_a + ['[SEP]']
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ['[SEP]']
segment_ids += [1] * (len(tokens_b) + 1) # depends on [control=['if'], data=[]]
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == 'classification':
label_id = label_map[example.label] # depends on [control=['if'], data=[]]
elif output_mode == 'regression':
label_id = float(example.label) # depends on [control=['if'], data=[]]
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info('*** Example ***')
logger.info('guid: %s' % example.guid)
logger.info('tokens: %s' % ' '.join([str(x) for x in tokens]))
logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
logger.info('label: %s (id = %d)' % (example.label, label_id)) # depends on [control=['if'], data=[]]
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) # depends on [control=['for'], data=[]]
return features |
def variable(self):
"""
variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
variable : variable(arg_list)
"""
var = Var(self.cur_token)
self.eat(TokenTypes.VAR)
if Features.TYPE_ARRAY in self.features:
while self.cur_token.type == TokenTypes.LBRACKET:
self.eat(TokenTypes.LBRACKET)
# Start passed the logical ops.
expr = self.operator_expression(level=2)
self.eat(TokenTypes.RBRACKET)
var = GetArrayItem(left=var, right=expr)
if Features.FUNC in self.features:
if self.cur_token.type == TokenTypes.LPAREN:
self.eat(TokenTypes.LPAREN)
args = self.arg_list()
self.eat(TokenTypes.RPAREN)
var = Call(var, args)
return var | def function[variable, parameter[self]]:
constant[
variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
variable : variable(arg_list)
]
variable[var] assign[=] call[name[Var], parameter[name[self].cur_token]]
call[name[self].eat, parameter[name[TokenTypes].VAR]]
if compare[name[Features].TYPE_ARRAY in name[self].features] begin[:]
while compare[name[self].cur_token.type equal[==] name[TokenTypes].LBRACKET] begin[:]
call[name[self].eat, parameter[name[TokenTypes].LBRACKET]]
variable[expr] assign[=] call[name[self].operator_expression, parameter[]]
call[name[self].eat, parameter[name[TokenTypes].RBRACKET]]
variable[var] assign[=] call[name[GetArrayItem], parameter[]]
if compare[name[Features].FUNC in name[self].features] begin[:]
if compare[name[self].cur_token.type equal[==] name[TokenTypes].LPAREN] begin[:]
call[name[self].eat, parameter[name[TokenTypes].LPAREN]]
variable[args] assign[=] call[name[self].arg_list, parameter[]]
call[name[self].eat, parameter[name[TokenTypes].RPAREN]]
variable[var] assign[=] call[name[Call], parameter[name[var], name[args]]]
return[name[var]] | keyword[def] identifier[variable] ( identifier[self] ):
literal[string]
identifier[var] = identifier[Var] ( identifier[self] . identifier[cur_token] )
identifier[self] . identifier[eat] ( identifier[TokenTypes] . identifier[VAR] )
keyword[if] identifier[Features] . identifier[TYPE_ARRAY] keyword[in] identifier[self] . identifier[features] :
keyword[while] identifier[self] . identifier[cur_token] . identifier[type] == identifier[TokenTypes] . identifier[LBRACKET] :
identifier[self] . identifier[eat] ( identifier[TokenTypes] . identifier[LBRACKET] )
identifier[expr] = identifier[self] . identifier[operator_expression] ( identifier[level] = literal[int] )
identifier[self] . identifier[eat] ( identifier[TokenTypes] . identifier[RBRACKET] )
identifier[var] = identifier[GetArrayItem] ( identifier[left] = identifier[var] , identifier[right] = identifier[expr] )
keyword[if] identifier[Features] . identifier[FUNC] keyword[in] identifier[self] . identifier[features] :
keyword[if] identifier[self] . identifier[cur_token] . identifier[type] == identifier[TokenTypes] . identifier[LPAREN] :
identifier[self] . identifier[eat] ( identifier[TokenTypes] . identifier[LPAREN] )
identifier[args] = identifier[self] . identifier[arg_list] ()
identifier[self] . identifier[eat] ( identifier[TokenTypes] . identifier[RPAREN] )
identifier[var] = identifier[Call] ( identifier[var] , identifier[args] )
keyword[return] identifier[var] | def variable(self):
"""
variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
variable : variable(arg_list)
"""
var = Var(self.cur_token)
self.eat(TokenTypes.VAR)
if Features.TYPE_ARRAY in self.features:
while self.cur_token.type == TokenTypes.LBRACKET:
self.eat(TokenTypes.LBRACKET)
# Start passed the logical ops.
expr = self.operator_expression(level=2)
self.eat(TokenTypes.RBRACKET)
var = GetArrayItem(left=var, right=expr) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
if Features.FUNC in self.features:
if self.cur_token.type == TokenTypes.LPAREN:
self.eat(TokenTypes.LPAREN)
args = self.arg_list()
self.eat(TokenTypes.RPAREN)
var = Call(var, args) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return var |
def as_opcode(cls: Type[T],
logic_fn: Callable[..., Any],
mnemonic: str,
gas_cost: int) -> Type[T]:
"""
Class factory method for turning vanilla functions into Opcode classes.
"""
if gas_cost:
@functools.wraps(logic_fn)
def wrapped_logic_fn(computation: 'BaseComputation') -> Any:
"""
Wrapper functionf or the logic function which consumes the base
opcode gas cost prior to execution.
"""
computation.consume_gas(
gas_cost,
mnemonic,
)
return logic_fn(computation)
else:
wrapped_logic_fn = logic_fn
props = {
'__call__': staticmethod(wrapped_logic_fn),
'mnemonic': mnemonic,
'gas_cost': gas_cost,
}
opcode_cls = type("opcode:{0}".format(mnemonic), (cls,), props)
return opcode_cls() | def function[as_opcode, parameter[cls, logic_fn, mnemonic, gas_cost]]:
constant[
Class factory method for turning vanilla functions into Opcode classes.
]
if name[gas_cost] begin[:]
def function[wrapped_logic_fn, parameter[computation]]:
constant[
Wrapper functionf or the logic function which consumes the base
opcode gas cost prior to execution.
]
call[name[computation].consume_gas, parameter[name[gas_cost], name[mnemonic]]]
return[call[name[logic_fn], parameter[name[computation]]]]
variable[props] assign[=] dictionary[[<ast.Constant object at 0x7da1b16474f0>, <ast.Constant object at 0x7da1b1647640>, <ast.Constant object at 0x7da1b1645d50>], [<ast.Call object at 0x7da1b1646950>, <ast.Name object at 0x7da1b1647580>, <ast.Name object at 0x7da1b1645c00>]]
variable[opcode_cls] assign[=] call[name[type], parameter[call[constant[opcode:{0}].format, parameter[name[mnemonic]]], tuple[[<ast.Name object at 0x7da1b1647220>]], name[props]]]
return[call[name[opcode_cls], parameter[]]] | keyword[def] identifier[as_opcode] ( identifier[cls] : identifier[Type] [ identifier[T] ],
identifier[logic_fn] : identifier[Callable] [..., identifier[Any] ],
identifier[mnemonic] : identifier[str] ,
identifier[gas_cost] : identifier[int] )-> identifier[Type] [ identifier[T] ]:
literal[string]
keyword[if] identifier[gas_cost] :
@ identifier[functools] . identifier[wraps] ( identifier[logic_fn] )
keyword[def] identifier[wrapped_logic_fn] ( identifier[computation] : literal[string] )-> identifier[Any] :
literal[string]
identifier[computation] . identifier[consume_gas] (
identifier[gas_cost] ,
identifier[mnemonic] ,
)
keyword[return] identifier[logic_fn] ( identifier[computation] )
keyword[else] :
identifier[wrapped_logic_fn] = identifier[logic_fn]
identifier[props] ={
literal[string] : identifier[staticmethod] ( identifier[wrapped_logic_fn] ),
literal[string] : identifier[mnemonic] ,
literal[string] : identifier[gas_cost] ,
}
identifier[opcode_cls] = identifier[type] ( literal[string] . identifier[format] ( identifier[mnemonic] ),( identifier[cls] ,), identifier[props] )
keyword[return] identifier[opcode_cls] () | def as_opcode(cls: Type[T], logic_fn: Callable[..., Any], mnemonic: str, gas_cost: int) -> Type[T]:
"""
Class factory method for turning vanilla functions into Opcode classes.
"""
if gas_cost:
@functools.wraps(logic_fn)
def wrapped_logic_fn(computation: 'BaseComputation') -> Any:
"""
Wrapper functionf or the logic function which consumes the base
opcode gas cost prior to execution.
"""
computation.consume_gas(gas_cost, mnemonic)
return logic_fn(computation) # depends on [control=['if'], data=[]]
else:
wrapped_logic_fn = logic_fn
props = {'__call__': staticmethod(wrapped_logic_fn), 'mnemonic': mnemonic, 'gas_cost': gas_cost}
opcode_cls = type('opcode:{0}'.format(mnemonic), (cls,), props)
return opcode_cls() |
def customize_ruleset(self, custom_ruleset_file=None):
"""
Updates the ruleset to include a set of custom rules. These rules will
be _added_ to the existing ruleset or replace the existing rule with
the same ID.
Args:
custom_ruleset_file (optional): The filepath to the custom rules.
Defaults to `None`. If `custom_ruleset_file` isn't passed, the
environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be
checked. If a filepath isn't specified by either of these
methods, the ruleset will not be updated.
Raises:
`IOError` if the specified file does not exist.
Examples:
To include the rules defined in `axe-core-custom-rules.js`::
page.a11y_audit.config.customize_ruleset(
"axe-core-custom-rules.js"
)
Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE`
to specify the path to the file containing the custom rules.
Documentation for how to write rules:
https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md
An example of a custom rules file can be found at
https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
"""
custom_file = custom_ruleset_file or os.environ.get(
"BOKCHOY_A11Y_CUSTOM_RULES_FILE"
)
if not custom_file:
return
with open(custom_file, "r") as additional_rules:
custom_rules = additional_rules.read()
if "var customRules" not in custom_rules:
raise A11yAuditConfigError(
"Custom rules file must include \"var customRules\""
)
self.custom_rules = custom_rules | def function[customize_ruleset, parameter[self, custom_ruleset_file]]:
constant[
Updates the ruleset to include a set of custom rules. These rules will
be _added_ to the existing ruleset or replace the existing rule with
the same ID.
Args:
custom_ruleset_file (optional): The filepath to the custom rules.
Defaults to `None`. If `custom_ruleset_file` isn't passed, the
environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be
checked. If a filepath isn't specified by either of these
methods, the ruleset will not be updated.
Raises:
`IOError` if the specified file does not exist.
Examples:
To include the rules defined in `axe-core-custom-rules.js`::
page.a11y_audit.config.customize_ruleset(
"axe-core-custom-rules.js"
)
Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE`
to specify the path to the file containing the custom rules.
Documentation for how to write rules:
https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md
An example of a custom rules file can be found at
https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
]
variable[custom_file] assign[=] <ast.BoolOp object at 0x7da20e74ae30>
if <ast.UnaryOp object at 0x7da18fe933a0> begin[:]
return[None]
with call[name[open], parameter[name[custom_file], constant[r]]] begin[:]
variable[custom_rules] assign[=] call[name[additional_rules].read, parameter[]]
if compare[constant[var customRules] <ast.NotIn object at 0x7da2590d7190> name[custom_rules]] begin[:]
<ast.Raise object at 0x7da18fe93100>
name[self].custom_rules assign[=] name[custom_rules] | keyword[def] identifier[customize_ruleset] ( identifier[self] , identifier[custom_ruleset_file] = keyword[None] ):
literal[string]
identifier[custom_file] = identifier[custom_ruleset_file] keyword[or] identifier[os] . identifier[environ] . identifier[get] (
literal[string]
)
keyword[if] keyword[not] identifier[custom_file] :
keyword[return]
keyword[with] identifier[open] ( identifier[custom_file] , literal[string] ) keyword[as] identifier[additional_rules] :
identifier[custom_rules] = identifier[additional_rules] . identifier[read] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[custom_rules] :
keyword[raise] identifier[A11yAuditConfigError] (
literal[string]
)
identifier[self] . identifier[custom_rules] = identifier[custom_rules] | def customize_ruleset(self, custom_ruleset_file=None):
"""
Updates the ruleset to include a set of custom rules. These rules will
be _added_ to the existing ruleset or replace the existing rule with
the same ID.
Args:
custom_ruleset_file (optional): The filepath to the custom rules.
Defaults to `None`. If `custom_ruleset_file` isn't passed, the
environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be
checked. If a filepath isn't specified by either of these
methods, the ruleset will not be updated.
Raises:
`IOError` if the specified file does not exist.
Examples:
To include the rules defined in `axe-core-custom-rules.js`::
page.a11y_audit.config.customize_ruleset(
"axe-core-custom-rules.js"
)
Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE`
to specify the path to the file containing the custom rules.
Documentation for how to write rules:
https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md
An example of a custom rules file can be found at
https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
"""
custom_file = custom_ruleset_file or os.environ.get('BOKCHOY_A11Y_CUSTOM_RULES_FILE')
if not custom_file:
return # depends on [control=['if'], data=[]]
with open(custom_file, 'r') as additional_rules:
custom_rules = additional_rules.read() # depends on [control=['with'], data=['additional_rules']]
if 'var customRules' not in custom_rules:
raise A11yAuditConfigError('Custom rules file must include "var customRules"') # depends on [control=['if'], data=[]]
self.custom_rules = custom_rules |
def _is_at_ref_end(self, nucmer_hit):
'''Returns True iff the hit is "close enough" to the end of the reference sequence'''
hit_coords = nucmer_hit.ref_coords()
return hit_coords.end >= nucmer_hit.ref_length - self.ref_end_tolerance | def function[_is_at_ref_end, parameter[self, nucmer_hit]]:
constant[Returns True iff the hit is "close enough" to the end of the reference sequence]
variable[hit_coords] assign[=] call[name[nucmer_hit].ref_coords, parameter[]]
return[compare[name[hit_coords].end greater_or_equal[>=] binary_operation[name[nucmer_hit].ref_length - name[self].ref_end_tolerance]]] | keyword[def] identifier[_is_at_ref_end] ( identifier[self] , identifier[nucmer_hit] ):
literal[string]
identifier[hit_coords] = identifier[nucmer_hit] . identifier[ref_coords] ()
keyword[return] identifier[hit_coords] . identifier[end] >= identifier[nucmer_hit] . identifier[ref_length] - identifier[self] . identifier[ref_end_tolerance] | def _is_at_ref_end(self, nucmer_hit):
"""Returns True iff the hit is "close enough" to the end of the reference sequence"""
hit_coords = nucmer_hit.ref_coords()
return hit_coords.end >= nucmer_hit.ref_length - self.ref_end_tolerance |
def load_asdf(self, asdf_obj, **kwargs):
"""
Load from an ASDF object.
See :func:`ginga.util.io_asdf.load_asdf` for more info.
"""
self.clear_metadata()
data, wcs, ahdr = io_asdf.load_asdf(asdf_obj, **kwargs)
self.setup_data(data, naxispath=None)
wcsinfo = wcsmod.get_wcs_class('astropy_ape14')
self.wcs = wcsinfo.wrapper_class(logger=self.logger)
self.wcs.wcs = wcs
if wcs is not None:
self.wcs.coordsys = wcs.output_frame.name | def function[load_asdf, parameter[self, asdf_obj]]:
constant[
Load from an ASDF object.
See :func:`ginga.util.io_asdf.load_asdf` for more info.
]
call[name[self].clear_metadata, parameter[]]
<ast.Tuple object at 0x7da1b0dbcb50> assign[=] call[name[io_asdf].load_asdf, parameter[name[asdf_obj]]]
call[name[self].setup_data, parameter[name[data]]]
variable[wcsinfo] assign[=] call[name[wcsmod].get_wcs_class, parameter[constant[astropy_ape14]]]
name[self].wcs assign[=] call[name[wcsinfo].wrapper_class, parameter[]]
name[self].wcs.wcs assign[=] name[wcs]
if compare[name[wcs] is_not constant[None]] begin[:]
name[self].wcs.coordsys assign[=] name[wcs].output_frame.name | keyword[def] identifier[load_asdf] ( identifier[self] , identifier[asdf_obj] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[clear_metadata] ()
identifier[data] , identifier[wcs] , identifier[ahdr] = identifier[io_asdf] . identifier[load_asdf] ( identifier[asdf_obj] ,** identifier[kwargs] )
identifier[self] . identifier[setup_data] ( identifier[data] , identifier[naxispath] = keyword[None] )
identifier[wcsinfo] = identifier[wcsmod] . identifier[get_wcs_class] ( literal[string] )
identifier[self] . identifier[wcs] = identifier[wcsinfo] . identifier[wrapper_class] ( identifier[logger] = identifier[self] . identifier[logger] )
identifier[self] . identifier[wcs] . identifier[wcs] = identifier[wcs]
keyword[if] identifier[wcs] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[wcs] . identifier[coordsys] = identifier[wcs] . identifier[output_frame] . identifier[name] | def load_asdf(self, asdf_obj, **kwargs):
"""
Load from an ASDF object.
See :func:`ginga.util.io_asdf.load_asdf` for more info.
"""
self.clear_metadata()
(data, wcs, ahdr) = io_asdf.load_asdf(asdf_obj, **kwargs)
self.setup_data(data, naxispath=None)
wcsinfo = wcsmod.get_wcs_class('astropy_ape14')
self.wcs = wcsinfo.wrapper_class(logger=self.logger)
self.wcs.wcs = wcs
if wcs is not None:
self.wcs.coordsys = wcs.output_frame.name # depends on [control=['if'], data=['wcs']] |
def fix_html(x:str) -> str:
"List of replacements from html strings in `x`."
re1 = re.compile(r' +')
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace(
'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace(
'<br />', "\n").replace('\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(
' @-@ ','-').replace(' @,@ ',',').replace('\\', ' \\ ')
return re1.sub(' ', html.unescape(x)) | def function[fix_html, parameter[x]]:
constant[List of replacements from html strings in `x`.]
variable[re1] assign[=] call[name[re].compile, parameter[constant[ +]]]
variable[x] assign[=] call[call[call[call[call[call[call[call[call[call[call[call[call[call[name[x].replace, parameter[constant[#39;], constant[']]].replace, parameter[constant[amp;], constant[&]]].replace, parameter[constant[#146;], constant[']]].replace, parameter[constant[nbsp;], constant[ ]]].replace, parameter[constant[#36;], constant[$]]].replace, parameter[constant[\n], constant[
]]].replace, parameter[constant[quot;], constant[']]].replace, parameter[constant[<br />], constant[
]]].replace, parameter[constant[\"], constant["]]].replace, parameter[constant[<unk>], name[UNK]]].replace, parameter[constant[ @.@ ], constant[.]]].replace, parameter[constant[ @-@ ], constant[-]]].replace, parameter[constant[ @,@ ], constant[,]]].replace, parameter[constant[\], constant[ \ ]]]
return[call[name[re1].sub, parameter[constant[ ], call[name[html].unescape, parameter[name[x]]]]]] | keyword[def] identifier[fix_html] ( identifier[x] : identifier[str] )-> identifier[str] :
literal[string]
identifier[re1] = identifier[re] . identifier[compile] ( literal[string] )
identifier[x] = identifier[x] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] (
literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] (
literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , identifier[UNK] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] (
literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[re1] . identifier[sub] ( literal[string] , identifier[html] . identifier[unescape] ( identifier[x] )) | def fix_html(x: str) -> str:
"""List of replacements from html strings in `x`."""
re1 = re.compile(' +')
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace('#36;', '$').replace('\\n', '\n').replace('quot;', "'").replace('<br />', '\n').replace('\\"', '"').replace('<unk>', UNK).replace(' @.@ ', '.').replace(' @-@ ', '-').replace(' @,@ ', ',').replace('\\', ' \\ ')
return re1.sub(' ', html.unescape(x)) |
def from_entry_dict(cls, entry_dict):
"""
This is a "constructor" for the LogEntry class.
:param entry_dict: A dict we get from the REST API
:return: An instance of LogEntry.
"""
# Debug helper
# https://circleci.com/gh/andresriancho/w3af-api-docker/30
try:
_type = entry_dict['type']
_id = entry_dict['id']
_time = entry_dict['time']
message = entry_dict['message']
severity = entry_dict['severity']
except KeyError:
msg = ('Missing expected log entry attribute. Log entry'
' object is:\n\n%s')
raise APIException(msg % json.dumps(entry_dict, indent=4))
return cls(_type, message, _time, severity, _id) | def function[from_entry_dict, parameter[cls, entry_dict]]:
constant[
This is a "constructor" for the LogEntry class.
:param entry_dict: A dict we get from the REST API
:return: An instance of LogEntry.
]
<ast.Try object at 0x7da20c6a9ab0>
return[call[name[cls], parameter[name[_type], name[message], name[_time], name[severity], name[_id]]]] | keyword[def] identifier[from_entry_dict] ( identifier[cls] , identifier[entry_dict] ):
literal[string]
keyword[try] :
identifier[_type] = identifier[entry_dict] [ literal[string] ]
identifier[_id] = identifier[entry_dict] [ literal[string] ]
identifier[_time] = identifier[entry_dict] [ literal[string] ]
identifier[message] = identifier[entry_dict] [ literal[string] ]
identifier[severity] = identifier[entry_dict] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[msg] =( literal[string]
literal[string] )
keyword[raise] identifier[APIException] ( identifier[msg] % identifier[json] . identifier[dumps] ( identifier[entry_dict] , identifier[indent] = literal[int] ))
keyword[return] identifier[cls] ( identifier[_type] , identifier[message] , identifier[_time] , identifier[severity] , identifier[_id] ) | def from_entry_dict(cls, entry_dict):
"""
This is a "constructor" for the LogEntry class.
:param entry_dict: A dict we get from the REST API
:return: An instance of LogEntry.
"""
# Debug helper
# https://circleci.com/gh/andresriancho/w3af-api-docker/30
try:
_type = entry_dict['type']
_id = entry_dict['id']
_time = entry_dict['time']
message = entry_dict['message']
severity = entry_dict['severity'] # depends on [control=['try'], data=[]]
except KeyError:
msg = 'Missing expected log entry attribute. Log entry object is:\n\n%s'
raise APIException(msg % json.dumps(entry_dict, indent=4)) # depends on [control=['except'], data=[]]
return cls(_type, message, _time, severity, _id) |
def modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
"""
Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth .
See also: AWS API Documentation
Examples
This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.
Expected Output:
:example: response = client.modify_target_group(
TargetGroupArn='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type TargetGroupArn: string
:param TargetGroupArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target group.
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol to use to connect with the target.
:type HealthCheckPort: string
:param HealthCheckPort: The port to use to connect with the target.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination for the health check request.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health checks successes required before considering an unhealthy target healthy.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass | def function[modify_target_group, parameter[TargetGroupArn, HealthCheckProtocol, HealthCheckPort, HealthCheckPath, HealthCheckIntervalSeconds, HealthCheckTimeoutSeconds, HealthyThresholdCount, UnhealthyThresholdCount, Matcher]]:
constant[
Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth .
See also: AWS API Documentation
Examples
This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.
Expected Output:
:example: response = client.modify_target_group(
TargetGroupArn='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type TargetGroupArn: string
:param TargetGroupArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target group.
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol to use to connect with the target.
:type HealthCheckPort: string
:param HealthCheckPort: The port to use to connect with the target.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination for the health check request.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health checks successes required before considering an unhealthy target healthy.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) --
]
pass | keyword[def] identifier[modify_target_group] ( identifier[TargetGroupArn] = keyword[None] , identifier[HealthCheckProtocol] = keyword[None] , identifier[HealthCheckPort] = keyword[None] , identifier[HealthCheckPath] = keyword[None] , identifier[HealthCheckIntervalSeconds] = keyword[None] , identifier[HealthCheckTimeoutSeconds] = keyword[None] , identifier[HealthyThresholdCount] = keyword[None] , identifier[UnhealthyThresholdCount] = keyword[None] , identifier[Matcher] = keyword[None] ):
literal[string]
keyword[pass] | def modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
"""
Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth .
See also: AWS API Documentation
Examples
This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.
Expected Output:
:example: response = client.modify_target_group(
TargetGroupArn='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type TargetGroupArn: string
:param TargetGroupArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target group.
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol to use to connect with the target.
:type HealthCheckPort: string
:param HealthCheckPort: The port to use to connect with the target.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination for the health check request.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health checks successes required before considering an unhealthy target healthy.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass |
def get_index_models(index):
"""Return list of models configured for a named index.
Args:
index: string, the name of the index to look up.
"""
models = []
for app_model in get_index_config(index).get("models"):
app, model = app_model.split(".")
models.append(apps.get_model(app, model))
return models | def function[get_index_models, parameter[index]]:
constant[Return list of models configured for a named index.
Args:
index: string, the name of the index to look up.
]
variable[models] assign[=] list[[]]
for taget[name[app_model]] in starred[call[call[name[get_index_config], parameter[name[index]]].get, parameter[constant[models]]]] begin[:]
<ast.Tuple object at 0x7da1b1042fb0> assign[=] call[name[app_model].split, parameter[constant[.]]]
call[name[models].append, parameter[call[name[apps].get_model, parameter[name[app], name[model]]]]]
return[name[models]] | keyword[def] identifier[get_index_models] ( identifier[index] ):
literal[string]
identifier[models] =[]
keyword[for] identifier[app_model] keyword[in] identifier[get_index_config] ( identifier[index] ). identifier[get] ( literal[string] ):
identifier[app] , identifier[model] = identifier[app_model] . identifier[split] ( literal[string] )
identifier[models] . identifier[append] ( identifier[apps] . identifier[get_model] ( identifier[app] , identifier[model] ))
keyword[return] identifier[models] | def get_index_models(index):
"""Return list of models configured for a named index.
Args:
index: string, the name of the index to look up.
"""
models = []
for app_model in get_index_config(index).get('models'):
(app, model) = app_model.split('.')
models.append(apps.get_model(app, model)) # depends on [control=['for'], data=['app_model']]
return models |
def is_hidden(path):
"""
Check whether a file is presumed hidden, either because
the pathname starts with dot or because the platform
indicates such.
"""
full_path = os.path.abspath(path)
name = os.path.basename(full_path)
def no(path):
return False
platform_hidden = globals().get('is_hidden_' + platform.system(), no)
return name.startswith('.') or platform_hidden(full_path) | def function[is_hidden, parameter[path]]:
constant[
Check whether a file is presumed hidden, either because
the pathname starts with dot or because the platform
indicates such.
]
variable[full_path] assign[=] call[name[os].path.abspath, parameter[name[path]]]
variable[name] assign[=] call[name[os].path.basename, parameter[name[full_path]]]
def function[no, parameter[path]]:
return[constant[False]]
variable[platform_hidden] assign[=] call[call[name[globals], parameter[]].get, parameter[binary_operation[constant[is_hidden_] + call[name[platform].system, parameter[]]], name[no]]]
return[<ast.BoolOp object at 0x7da18bccab60>] | keyword[def] identifier[is_hidden] ( identifier[path] ):
literal[string]
identifier[full_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[path] )
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[full_path] )
keyword[def] identifier[no] ( identifier[path] ):
keyword[return] keyword[False]
identifier[platform_hidden] = identifier[globals] (). identifier[get] ( literal[string] + identifier[platform] . identifier[system] (), identifier[no] )
keyword[return] identifier[name] . identifier[startswith] ( literal[string] ) keyword[or] identifier[platform_hidden] ( identifier[full_path] ) | def is_hidden(path):
"""
Check whether a file is presumed hidden, either because
the pathname starts with dot or because the platform
indicates such.
"""
full_path = os.path.abspath(path)
name = os.path.basename(full_path)
def no(path):
return False
platform_hidden = globals().get('is_hidden_' + platform.system(), no)
return name.startswith('.') or platform_hidden(full_path) |
def fate(name):
"""Download and return a path to a sample from the FFmpeg test suite.
Data is handled by :func:`cached_download`.
See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_
"""
return cached_download('http://fate.ffmpeg.org/fate-suite/' + name,
os.path.join('fate-suite', name.replace('/', os.path.sep))) | def function[fate, parameter[name]]:
constant[Download and return a path to a sample from the FFmpeg test suite.
Data is handled by :func:`cached_download`.
See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_
]
return[call[name[cached_download], parameter[binary_operation[constant[http://fate.ffmpeg.org/fate-suite/] + name[name]], call[name[os].path.join, parameter[constant[fate-suite], call[name[name].replace, parameter[constant[/], name[os].path.sep]]]]]]] | keyword[def] identifier[fate] ( identifier[name] ):
literal[string]
keyword[return] identifier[cached_download] ( literal[string] + identifier[name] ,
identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[name] . identifier[replace] ( literal[string] , identifier[os] . identifier[path] . identifier[sep] ))) | def fate(name):
"""Download and return a path to a sample from the FFmpeg test suite.
Data is handled by :func:`cached_download`.
See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_
"""
return cached_download('http://fate.ffmpeg.org/fate-suite/' + name, os.path.join('fate-suite', name.replace('/', os.path.sep))) |
def save_model(model_folder: str, model_name: str, model: Model,
               compiler_options: Dict[str, str]) -> None:
    """
    Saves a CasADi model to disk.
    :param model_folder: Folder where the precompiled CasADi model will be stored.
    :param model_name: Name of the model.
    :param model: Model instance.
    :param compiler_options: Dictionary of compiler options.
    """
    # Gather the four CasADi functions that make up the model.  With the
    # 'codegen' option they are compiled into shared libraries; otherwise the
    # function objects themselves are kept and pickled below.
    objects = {'dae_residual': None, 'initial_residual': None, 'variable_metadata': None, 'delay_arguments': None}
    for o in objects.keys():
        f = getattr(model, o + '_function')
        if compiler_options.get('codegen', False):
            objects[o] = _codegen_model(model_folder, f, '{}_{}'.format(model_name, o))
        else:
            objects[o] = f
    # Output metadata
    db_file = os.path.join(model_folder, model_name + ".pymoca_cache")
    # NOTE: rebinds 'f' (previously a CasADi function above) as the file handle.
    with open(db_file, 'wb') as f:
        db = {}
        # Store version
        db['version'] = __version__
        # Include references to the shared libraries (codegen) or pickled functions (cache)
        db.update(objects)
        db['library_os'] = os.name
        db['options'] = compiler_options
        # Describe variables per category
        for key in ['states', 'der_states', 'alg_states', 'inputs', 'parameters', 'constants']:
            db[key] = [e.to_dict() for e in getattr(model, key)]
        # Caching using CasADi functions will lead to constants seemingly
        # depending on MX variables. Figuring out that they do not is slow,
        # especially when doing it on a lazy function call, as would be the
        # case when reading from cache. So instead, we do the dependency
        # check once when saving the model.
        # Metadata dependency checking
        parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])
        for k, key in enumerate(['states', 'alg_states', 'inputs', 'parameters', 'constants']):
            # Boolean matrix: one row per variable of this category, one
            # column per CasADi attribute; True marks an attribute that is a
            # non-constant MX expression depending on the model parameters.
            metadata_shape = (len(getattr(model, key)), len(CASADI_ATTRIBUTES))
            m = db[key + "__metadata_dependent"] = np.zeros(metadata_shape, dtype=bool)
            for i, v in enumerate(getattr(model, key)):
                for j, tmp in enumerate(CASADI_ATTRIBUTES):
                    attr = getattr(v, tmp)
                    if (isinstance(attr, ca.MX) and not attr.is_constant()
                            and ca.depends_on(attr, parameter_vector)):
                        m[i, j] = True
        # Delay dependency checking
        if model.delay_states:
            all_symbols = [model.time,
                           *model._symbols(model.states),
                           *model._symbols(model.der_states),
                           *model._symbols(model.alg_states),
                           *model._symbols(model.inputs),
                           *model._symbols(model.constants),
                           *model._symbols(model.parameters)]
            symbol_to_index = {x: i for i, x in enumerate(all_symbols)}
            # Only the delay durations are inspected below; the delay
            # expressions are unpacked but not used here.
            expressions, durations = zip(*model.delay_arguments)
            duration_dependencies = []
            for dur in durations:
                duration_dependencies.append(
                    [symbol_to_index[var] for var in ca.symvar(dur) if ca.depends_on(dur, var)])
            db['__delay_duration_dependent'] = duration_dependencies
        db['outputs'] = model.outputs
        db['delay_states'] = model.delay_states
        db['alias_relation'] = model.alias_relation
        # protocol=-1 selects the highest pickle protocol available.
        pickle.dump(db, f, protocol=-1) | def function[save_model, parameter[model_folder, model_name, model, compiler_options]]:
constant[
Saves a CasADi model to disk.
:param model_folder: Folder where the precompiled CasADi model will be stored.
:param model_name: Name of the model.
:param model: Model instance.
:param compiler_options: Dictionary of compiler options.
]
variable[objects] assign[=] dictionary[[<ast.Constant object at 0x7da1b257c460>, <ast.Constant object at 0x7da1b257c490>, <ast.Constant object at 0x7da1b257c4c0>, <ast.Constant object at 0x7da1b257c4f0>], [<ast.Constant object at 0x7da1b257c520>, <ast.Constant object at 0x7da1b257c550>, <ast.Constant object at 0x7da1b257c580>, <ast.Constant object at 0x7da1b257c5b0>]]
for taget[name[o]] in starred[call[name[objects].keys, parameter[]]] begin[:]
variable[f] assign[=] call[name[getattr], parameter[name[model], binary_operation[name[o] + constant[_function]]]]
if call[name[compiler_options].get, parameter[constant[codegen], constant[False]]] begin[:]
call[name[objects]][name[o]] assign[=] call[name[_codegen_model], parameter[name[model_folder], name[f], call[constant[{}_{}].format, parameter[name[model_name], name[o]]]]]
variable[db_file] assign[=] call[name[os].path.join, parameter[name[model_folder], binary_operation[name[model_name] + constant[.pymoca_cache]]]]
with call[name[open], parameter[name[db_file], constant[wb]]] begin[:]
variable[db] assign[=] dictionary[[], []]
call[name[db]][constant[version]] assign[=] name[__version__]
call[name[db].update, parameter[name[objects]]]
call[name[db]][constant[library_os]] assign[=] name[os].name
call[name[db]][constant[options]] assign[=] name[compiler_options]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da1b257d4e0>, <ast.Constant object at 0x7da1b257d510>, <ast.Constant object at 0x7da1b257d540>, <ast.Constant object at 0x7da1b257d570>, <ast.Constant object at 0x7da1b257d5a0>, <ast.Constant object at 0x7da1b257d5d0>]]] begin[:]
call[name[db]][name[key]] assign[=] <ast.ListComp object at 0x7da1b257d6c0>
variable[parameter_vector] assign[=] call[name[ca].veccat, parameter[<ast.Starred object at 0x7da1b257d9c0>]]
for taget[tuple[[<ast.Name object at 0x7da1b257dbd0>, <ast.Name object at 0x7da1b257dc00>]]] in starred[call[name[enumerate], parameter[list[[<ast.Constant object at 0x7da1b257dcf0>, <ast.Constant object at 0x7da1b257dd20>, <ast.Constant object at 0x7da1b257dd50>, <ast.Constant object at 0x7da1b257dd80>, <ast.Constant object at 0x7da1b257ddb0>]]]]] begin[:]
variable[metadata_shape] assign[=] tuple[[<ast.Call object at 0x7da1b257de70>, <ast.Call object at 0x7da1b257df90>]]
variable[m] assign[=] call[name[np].zeros, parameter[name[metadata_shape]]]
for taget[tuple[[<ast.Name object at 0x7da1b257f400>, <ast.Name object at 0x7da1b257f430>]]] in starred[call[name[enumerate], parameter[call[name[getattr], parameter[name[model], name[key]]]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b257f5e0>, <ast.Name object at 0x7da1b257f610>]]] in starred[call[name[enumerate], parameter[name[CASADI_ATTRIBUTES]]]] begin[:]
variable[attr] assign[=] call[name[getattr], parameter[name[v], name[tmp]]]
if <ast.BoolOp object at 0x7da1b257f820> begin[:]
call[name[m]][tuple[[<ast.Name object at 0x7da1b257fbb0>, <ast.Name object at 0x7da1b257fbe0>]]] assign[=] constant[True]
if name[model].delay_states begin[:]
variable[all_symbols] assign[=] list[[<ast.Attribute object at 0x7da1b257fd90>, <ast.Starred object at 0x7da1b257fdf0>, <ast.Starred object at 0x7da1b257ff10>, <ast.Starred object at 0x7da1b2490e50>, <ast.Starred object at 0x7da1b24904f0>, <ast.Starred object at 0x7da1b2490cd0>, <ast.Starred object at 0x7da1b24924a0>]]
variable[symbol_to_index] assign[=] <ast.DictComp object at 0x7da1b2490b20>
<ast.Tuple object at 0x7da1b24901c0> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b2491e10>]]
variable[duration_dependencies] assign[=] list[[]]
for taget[name[dur]] in starred[name[durations]] begin[:]
call[name[duration_dependencies].append, parameter[<ast.ListComp object at 0x7da1b2490130>]]
call[name[db]][constant[__delay_duration_dependent]] assign[=] name[duration_dependencies]
call[name[db]][constant[outputs]] assign[=] name[model].outputs
call[name[db]][constant[delay_states]] assign[=] name[model].delay_states
call[name[db]][constant[alias_relation]] assign[=] name[model].alias_relation
call[name[pickle].dump, parameter[name[db], name[f]]] | keyword[def] identifier[save_model] ( identifier[model_folder] : identifier[str] , identifier[model_name] : identifier[str] , identifier[model] : identifier[Model] ,
identifier[compiler_options] : identifier[Dict] [ identifier[str] , identifier[str] ])-> keyword[None] :
literal[string]
identifier[objects] ={ literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] }
keyword[for] identifier[o] keyword[in] identifier[objects] . identifier[keys] ():
identifier[f] = identifier[getattr] ( identifier[model] , identifier[o] + literal[string] )
keyword[if] identifier[compiler_options] . identifier[get] ( literal[string] , keyword[False] ):
identifier[objects] [ identifier[o] ]= identifier[_codegen_model] ( identifier[model_folder] , identifier[f] , literal[string] . identifier[format] ( identifier[model_name] , identifier[o] ))
keyword[else] :
identifier[objects] [ identifier[o] ]= identifier[f]
identifier[db_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_folder] , identifier[model_name] + literal[string] )
keyword[with] identifier[open] ( identifier[db_file] , literal[string] ) keyword[as] identifier[f] :
identifier[db] ={}
identifier[db] [ literal[string] ]= identifier[__version__]
identifier[db] . identifier[update] ( identifier[objects] )
identifier[db] [ literal[string] ]= identifier[os] . identifier[name]
identifier[db] [ literal[string] ]= identifier[compiler_options]
keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[db] [ identifier[key] ]=[ identifier[e] . identifier[to_dict] () keyword[for] identifier[e] keyword[in] identifier[getattr] ( identifier[model] , identifier[key] )]
identifier[parameter_vector] = identifier[ca] . identifier[veccat] (*[ identifier[v] . identifier[symbol] keyword[for] identifier[v] keyword[in] identifier[model] . identifier[parameters] ])
keyword[for] identifier[k] , identifier[key] keyword[in] identifier[enumerate] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]):
identifier[metadata_shape] =( identifier[len] ( identifier[getattr] ( identifier[model] , identifier[key] )), identifier[len] ( identifier[CASADI_ATTRIBUTES] ))
identifier[m] = identifier[db] [ identifier[key] + literal[string] ]= identifier[np] . identifier[zeros] ( identifier[metadata_shape] , identifier[dtype] = identifier[bool] )
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[getattr] ( identifier[model] , identifier[key] )):
keyword[for] identifier[j] , identifier[tmp] keyword[in] identifier[enumerate] ( identifier[CASADI_ATTRIBUTES] ):
identifier[attr] = identifier[getattr] ( identifier[v] , identifier[tmp] )
keyword[if] ( identifier[isinstance] ( identifier[attr] , identifier[ca] . identifier[MX] ) keyword[and] keyword[not] identifier[attr] . identifier[is_constant] ()
keyword[and] identifier[ca] . identifier[depends_on] ( identifier[attr] , identifier[parameter_vector] )):
identifier[m] [ identifier[i] , identifier[j] ]= keyword[True]
keyword[if] identifier[model] . identifier[delay_states] :
identifier[all_symbols] =[ identifier[model] . identifier[time] ,
* identifier[model] . identifier[_symbols] ( identifier[model] . identifier[states] ),
* identifier[model] . identifier[_symbols] ( identifier[model] . identifier[der_states] ),
* identifier[model] . identifier[_symbols] ( identifier[model] . identifier[alg_states] ),
* identifier[model] . identifier[_symbols] ( identifier[model] . identifier[inputs] ),
* identifier[model] . identifier[_symbols] ( identifier[model] . identifier[constants] ),
* identifier[model] . identifier[_symbols] ( identifier[model] . identifier[parameters] )]
identifier[symbol_to_index] ={ identifier[x] : identifier[i] keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[all_symbols] )}
identifier[expressions] , identifier[durations] = identifier[zip] (* identifier[model] . identifier[delay_arguments] )
identifier[duration_dependencies] =[]
keyword[for] identifier[dur] keyword[in] identifier[durations] :
identifier[duration_dependencies] . identifier[append] (
[ identifier[symbol_to_index] [ identifier[var] ] keyword[for] identifier[var] keyword[in] identifier[ca] . identifier[symvar] ( identifier[dur] ) keyword[if] identifier[ca] . identifier[depends_on] ( identifier[dur] , identifier[var] )])
identifier[db] [ literal[string] ]= identifier[duration_dependencies]
identifier[db] [ literal[string] ]= identifier[model] . identifier[outputs]
identifier[db] [ literal[string] ]= identifier[model] . identifier[delay_states]
identifier[db] [ literal[string] ]= identifier[model] . identifier[alias_relation]
identifier[pickle] . identifier[dump] ( identifier[db] , identifier[f] , identifier[protocol] =- literal[int] ) | def save_model(model_folder: str, model_name: str, model: Model, compiler_options: Dict[str, str]) -> None:
"""
Saves a CasADi model to disk.
:param model_folder: Folder where the precompiled CasADi model will be stored.
:param model_name: Name of the model.
:param model: Model instance.
:param compiler_options: Dictionary of compiler options.
"""
objects = {'dae_residual': None, 'initial_residual': None, 'variable_metadata': None, 'delay_arguments': None}
for o in objects.keys():
f = getattr(model, o + '_function')
if compiler_options.get('codegen', False):
objects[o] = _codegen_model(model_folder, f, '{}_{}'.format(model_name, o)) # depends on [control=['if'], data=[]]
else:
objects[o] = f # depends on [control=['for'], data=['o']]
# Output metadata
db_file = os.path.join(model_folder, model_name + '.pymoca_cache')
with open(db_file, 'wb') as f:
db = {}
# Store version
db['version'] = __version__
# Include references to the shared libraries (codegen) or pickled functions (cache)
db.update(objects)
db['library_os'] = os.name
db['options'] = compiler_options
# Describe variables per category
for key in ['states', 'der_states', 'alg_states', 'inputs', 'parameters', 'constants']:
db[key] = [e.to_dict() for e in getattr(model, key)] # depends on [control=['for'], data=['key']]
# Caching using CasADi functions will lead to constants seemingly
# depending on MX variables. Figuring out that they do not is slow,
# especially when doing it on a lazy function call, as would be the
# case when reading from cache. So instead, we do the depency check
# once when saving the model.
# Metadata dependency checking
parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])
for (k, key) in enumerate(['states', 'alg_states', 'inputs', 'parameters', 'constants']):
metadata_shape = (len(getattr(model, key)), len(CASADI_ATTRIBUTES))
m = db[key + '__metadata_dependent'] = np.zeros(metadata_shape, dtype=bool)
for (i, v) in enumerate(getattr(model, key)):
for (j, tmp) in enumerate(CASADI_ATTRIBUTES):
attr = getattr(v, tmp)
if isinstance(attr, ca.MX) and (not attr.is_constant()) and ca.depends_on(attr, parameter_vector):
m[i, j] = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# Delay dependency checking
if model.delay_states:
all_symbols = [model.time, *model._symbols(model.states), *model._symbols(model.der_states), *model._symbols(model.alg_states), *model._symbols(model.inputs), *model._symbols(model.constants), *model._symbols(model.parameters)]
symbol_to_index = {x: i for (i, x) in enumerate(all_symbols)}
(expressions, durations) = zip(*model.delay_arguments)
duration_dependencies = []
for dur in durations:
duration_dependencies.append([symbol_to_index[var] for var in ca.symvar(dur) if ca.depends_on(dur, var)]) # depends on [control=['for'], data=['dur']]
db['__delay_duration_dependent'] = duration_dependencies # depends on [control=['if'], data=[]]
db['outputs'] = model.outputs
db['delay_states'] = model.delay_states
db['alias_relation'] = model.alias_relation
pickle.dump(db, f, protocol=-1) # depends on [control=['with'], data=['f']] |
def validate_properties(props, required):
    """
    Ensures the key set contains the base supported properties for a Parser
    :param props: a set of property names to validate against those supported
    :param required: iterable of required property names; falsy values fall
        back to the module-level default set
    :raises ValidationError: if any required property is missing from props
    """
    props = set(props)
    # A falsy 'required' (None, empty) means "use the default supported set".
    required = set(required or _supported_props)
    # If the intersection is smaller than 'required', at least one required
    # name is absent; report the whole missing subset at once.
    if len(required.intersection(props)) < len(required):
        missing = required - props
        raise ValidationError(
            'Missing property names: {props}', props=','.join(missing), missing=missing
        ) | def function[validate_properties, parameter[props, required]]:
constant[
Ensures the key set contains the base supported properties for a Parser
:param props: a set of property names to validate against those supported
]
variable[props] assign[=] call[name[set], parameter[name[props]]]
variable[required] assign[=] call[name[set], parameter[<ast.BoolOp object at 0x7da20c6a9c30>]]
if compare[call[name[len], parameter[call[name[required].intersection, parameter[name[props]]]]] less[<] call[name[len], parameter[name[required]]]] begin[:]
variable[missing] assign[=] binary_operation[name[required] - name[props]]
<ast.Raise object at 0x7da20c6a8280> | keyword[def] identifier[validate_properties] ( identifier[props] , identifier[required] ):
literal[string]
identifier[props] = identifier[set] ( identifier[props] )
identifier[required] = identifier[set] ( identifier[required] keyword[or] identifier[_supported_props] )
keyword[if] identifier[len] ( identifier[required] . identifier[intersection] ( identifier[props] ))< identifier[len] ( identifier[required] ):
identifier[missing] = identifier[required] - identifier[props]
keyword[raise] identifier[ValidationError] (
literal[string] , identifier[props] = literal[string] . identifier[join] ( identifier[missing] ), identifier[missing] = identifier[missing]
) | def validate_properties(props, required):
"""
Ensures the key set contains the base supported properties for a Parser
:param props: a set of property names to validate against those supported
"""
props = set(props)
required = set(required or _supported_props)
if len(required.intersection(props)) < len(required):
missing = required - props
raise ValidationError('Missing property names: {props}', props=','.join(missing), missing=missing) # depends on [control=['if'], data=[]] |
def filepath(self):
    """Return the resolved filepath on the side where it is called from.
    The appropriate filepath will be returned when called from within
    an app running remotely as well as regular python on the client side.
    Args:
        - self
    Returns:
        - filepath (string)
    """
    # If a local staged copy exists (attribute set elsewhere, presumably
    # after the file has been transferred), prefer it — TODO confirm.
    if hasattr(self, 'local_path'):
        return self.local_path
    # Remote-transfer schemes resolve to just the file name; 'file' resolves
    # to the full path.  Anything else is an error.
    if self.scheme in ['ftp', 'http', 'https', 'globus']:
        return self.filename
    elif self.scheme in ['file']:
        return self.path
    else:
        raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme)) | def function[filepath, parameter[self]]:
constant[Return the resolved filepath on the side where it is called from.
The appropriate filepath will be returned when called from within
an app running remotely as well as regular python on the client side.
Args:
- self
Returns:
- filepath (string)
]
if call[name[hasattr], parameter[name[self], constant[local_path]]] begin[:]
return[name[self].local_path]
if compare[name[self].scheme in list[[<ast.Constant object at 0x7da1b01d9960>, <ast.Constant object at 0x7da1b01d9b40>, <ast.Constant object at 0x7da1b01db460>, <ast.Constant object at 0x7da1b01dbd60>]]] begin[:]
return[name[self].filename] | keyword[def] identifier[filepath] ( identifier[self] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return] identifier[self] . identifier[local_path]
keyword[if] identifier[self] . identifier[scheme] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[return] identifier[self] . identifier[filename]
keyword[elif] identifier[self] . identifier[scheme] keyword[in] [ literal[string] ]:
keyword[return] identifier[self] . identifier[path]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[self] . identifier[scheme] )) | def filepath(self):
"""Return the resolved filepath on the side where it is called from.
The appropriate filepath will be returned when called from within
an app running remotely as well as regular python on the client side.
Args:
- self
Returns:
- filepath (string)
"""
if hasattr(self, 'local_path'):
return self.local_path # depends on [control=['if'], data=[]]
if self.scheme in ['ftp', 'http', 'https', 'globus']:
return self.filename # depends on [control=['if'], data=[]]
elif self.scheme in ['file']:
return self.path # depends on [control=['if'], data=[]]
else:
raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme)) |
def write(self, config_file):
    """
    Write a single config file, raises if config file is not registered.
    """
    if config_file not in self.templates:
        log('Config not registered: %s' % config_file, level=ERROR)
        raise OSConfigException
    _out = self.render(config_file)
    # The file is opened in binary mode below, so the rendered text must be
    # encoded to bytes under Python 3.
    if six.PY3:
        _out = _out.encode('UTF-8')
    with open(config_file, 'wb') as out:
        out.write(_out)
    log('Wrote template %s.' % config_file, level=INFO) | def function[write, parameter[self, config_file]]:
constant[
Write a single config file, raises if config file is not registered.
]
if compare[name[config_file] <ast.NotIn object at 0x7da2590d7190> name[self].templates] begin[:]
call[name[log], parameter[binary_operation[constant[Config not registered: %s] <ast.Mod object at 0x7da2590d6920> name[config_file]]]]
<ast.Raise object at 0x7da18bc70d90>
variable[_out] assign[=] call[name[self].render, parameter[name[config_file]]]
if name[six].PY3 begin[:]
variable[_out] assign[=] call[name[_out].encode, parameter[constant[UTF-8]]]
with call[name[open], parameter[name[config_file], constant[wb]]] begin[:]
call[name[out].write, parameter[name[_out]]]
call[name[log], parameter[binary_operation[constant[Wrote template %s.] <ast.Mod object at 0x7da2590d6920> name[config_file]]]] | keyword[def] identifier[write] ( identifier[self] , identifier[config_file] ):
literal[string]
keyword[if] identifier[config_file] keyword[not] keyword[in] identifier[self] . identifier[templates] :
identifier[log] ( literal[string] % identifier[config_file] , identifier[level] = identifier[ERROR] )
keyword[raise] identifier[OSConfigException]
identifier[_out] = identifier[self] . identifier[render] ( identifier[config_file] )
keyword[if] identifier[six] . identifier[PY3] :
identifier[_out] = identifier[_out] . identifier[encode] ( literal[string] )
keyword[with] identifier[open] ( identifier[config_file] , literal[string] ) keyword[as] identifier[out] :
identifier[out] . identifier[write] ( identifier[_out] )
identifier[log] ( literal[string] % identifier[config_file] , identifier[level] = identifier[INFO] ) | def write(self, config_file):
"""
Write a single config file, raises if config file is not registered.
"""
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
raise OSConfigException # depends on [control=['if'], data=['config_file']]
_out = self.render(config_file)
if six.PY3:
_out = _out.encode('UTF-8') # depends on [control=['if'], data=[]]
with open(config_file, 'wb') as out:
out.write(_out) # depends on [control=['with'], data=['out']]
log('Wrote template %s.' % config_file, level=INFO) |
def invert_hashing_and_fit(
        vec,  # type: Union[FeatureUnion, HashingVectorizer]
        docs
    ):
    # type: (...) -> Union[FeatureUnion, InvertableHashingVectorizer]
    """ Create an :class:`~.InvertableHashingVectorizer` from hashing
    vectorizer vec and fit it on docs. If vec is a FeatureUnion, do it for all
    hashing vectorizers in the union.
    Return an :class:`~.InvertableHashingVectorizer`, or a FeatureUnion,
    or an unchanged vectorizer.
    """
    if isinstance(vec, HashingVectorizer):
        vec = InvertableHashingVectorizer(vec)
        vec.fit(docs)
    # A FeatureUnion is only processed when at least one of its
    # sub-transformers is a HashingVectorizer; otherwise vec is returned
    # unchanged.
    elif (isinstance(vec, FeatureUnion) and
          any(isinstance(v, HashingVectorizer)
              for _, v in vec.transformer_list)):
        vec = _fit_invhashing_union(vec, docs)
    return vec | def function[invert_hashing_and_fit, parameter[vec, docs]]:
constant[ Create an :class:`~.InvertableHashingVectorizer` from hashing
vectorizer vec and fit it on docs. If vec is a FeatureUnion, do it for all
hashing vectorizers in the union.
Return an :class:`~.InvertableHashingVectorizer`, or a FeatureUnion,
or an unchanged vectorizer.
]
if call[name[isinstance], parameter[name[vec], name[HashingVectorizer]]] begin[:]
variable[vec] assign[=] call[name[InvertableHashingVectorizer], parameter[name[vec]]]
call[name[vec].fit, parameter[name[docs]]]
return[name[vec]] | keyword[def] identifier[invert_hashing_and_fit] (
identifier[vec] ,
identifier[docs]
):
literal[string]
keyword[if] identifier[isinstance] ( identifier[vec] , identifier[HashingVectorizer] ):
identifier[vec] = identifier[InvertableHashingVectorizer] ( identifier[vec] )
identifier[vec] . identifier[fit] ( identifier[docs] )
keyword[elif] ( identifier[isinstance] ( identifier[vec] , identifier[FeatureUnion] ) keyword[and]
identifier[any] ( identifier[isinstance] ( identifier[v] , identifier[HashingVectorizer] )
keyword[for] identifier[_] , identifier[v] keyword[in] identifier[vec] . identifier[transformer_list] )):
identifier[vec] = identifier[_fit_invhashing_union] ( identifier[vec] , identifier[docs] )
keyword[return] identifier[vec] | def invert_hashing_and_fit(vec, docs): # type: Union[FeatureUnion, HashingVectorizer]
# type: (...) -> Union[FeatureUnion, InvertableHashingVectorizer]
' Create an :class:`~.InvertableHashingVectorizer` from hashing\n vectorizer vec and fit it on docs. If vec is a FeatureUnion, do it for all\n hashing vectorizers in the union.\n Return an :class:`~.InvertableHashingVectorizer`, or a FeatureUnion,\n or an unchanged vectorizer.\n '
if isinstance(vec, HashingVectorizer):
vec = InvertableHashingVectorizer(vec)
vec.fit(docs) # depends on [control=['if'], data=[]]
elif isinstance(vec, FeatureUnion) and any((isinstance(v, HashingVectorizer) for (_, v) in vec.transformer_list)):
vec = _fit_invhashing_union(vec, docs) # depends on [control=['if'], data=[]]
return vec |
def output(self, stream, disabletransferencoding = None):
    """
    Set output stream and send response immediately
    """
    if self._sendHeaders:
        raise HttpProtocolException('Cannot modify response, headers already sent')
    self.outputstream = stream
    # Best effort: only streams that support len() get a Content-Length
    # header; anything else falls through silently.
    try:
        content_length = len(stream)
    except Exception:
        pass
    else:
        self.header(b'Content-Length', str(content_length).encode('ascii'))
    # None means "leave the current deflate setting untouched".
    if disabletransferencoding is not None:
        self.disabledeflate = disabletransferencoding
    self._startResponse() | def function[output, parameter[self, stream, disabletransferencoding]]:
constant[
Set output stream and send response immediately
]
if name[self]._sendHeaders begin[:]
<ast.Raise object at 0x7da18f812290>
name[self].outputstream assign[=] name[stream]
<ast.Try object at 0x7da18f812440>
if compare[name[disabletransferencoding] is_not constant[None]] begin[:]
name[self].disabledeflate assign[=] name[disabletransferencoding]
call[name[self]._startResponse, parameter[]] | keyword[def] identifier[output] ( identifier[self] , identifier[stream] , identifier[disabletransferencoding] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_sendHeaders] :
keyword[raise] identifier[HttpProtocolException] ( literal[string] )
identifier[self] . identifier[outputstream] = identifier[stream]
keyword[try] :
identifier[content_length] = identifier[len] ( identifier[stream] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[else] :
identifier[self] . identifier[header] ( literal[string] , identifier[str] ( identifier[content_length] ). identifier[encode] ( literal[string] ))
keyword[if] identifier[disabletransferencoding] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[disabledeflate] = identifier[disabletransferencoding]
identifier[self] . identifier[_startResponse] () | def output(self, stream, disabletransferencoding=None):
"""
Set output stream and send response immediately
"""
if self._sendHeaders:
raise HttpProtocolException('Cannot modify response, headers already sent') # depends on [control=['if'], data=[]]
self.outputstream = stream
try:
content_length = len(stream) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
else:
self.header(b'Content-Length', str(content_length).encode('ascii'))
if disabletransferencoding is not None:
self.disabledeflate = disabletransferencoding # depends on [control=['if'], data=['disabletransferencoding']]
self._startResponse() |
def reload(self, callback=None, errback=None):
    """
    Reload record data from the API.
    Thin wrapper: delegates to load() with reload=True, passing the
    callback/errback through unchanged.
    """
    return self.load(reload=True, callback=callback, errback=errback) | def function[reload, parameter[self, callback, errback]]:
constant[
Reload record data from the API.
]
return[call[name[self].load, parameter[]]] | keyword[def] identifier[reload] ( identifier[self] , identifier[callback] = keyword[None] , identifier[errback] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[load] ( identifier[reload] = keyword[True] , identifier[callback] = identifier[callback] , identifier[errback] = identifier[errback] ) | def reload(self, callback=None, errback=None):
"""
Reload record data from the API.
"""
return self.load(reload=True, callback=callback, errback=errback) |
def replace_api_service_status(self, name, body, **kwargs):
    """
    replace status of the specified APIService
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_api_service_status(name, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the APIService (required)
    :param V1beta1APIService body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1beta1APIService
    If the method is called asynchronously,
    returns the request thread.
    """
    # This convenience wrapper always returns only the deserialized object,
    # not the (data, status_code, headers) tuple of the *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: the delegate returns a request thread.
        return self.replace_api_service_status_with_http_info(name, body, **kwargs)
    else:
        (data) = self.replace_api_service_status_with_http_info(name, body, **kwargs)
        return data
constant[
replace status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param V1beta1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1APIService
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].replace_api_service_status_with_http_info, parameter[name[name], name[body]]]] | keyword[def] identifier[replace_api_service_status] ( identifier[self] , identifier[name] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[replace_api_service_status_with_http_info] ( identifier[name] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[replace_api_service_status_with_http_info] ( identifier[name] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def replace_api_service_status(self, name, body, **kwargs):
"""
replace status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param V1beta1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_api_service_status_with_http_info(name, body, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.replace_api_service_status_with_http_info(name, body, **kwargs)
return data |
def _AddHasExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def HasExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
raise KeyError('"%s" is repeated.' % extension_handle.full_name)
if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(extension_handle)
return value is not None and value._is_present_in_parent
else:
return extension_handle in self._fields
cls.HasExtension = HasExtension | def function[_AddHasExtensionMethod, parameter[cls]]:
constant[Helper for _AddMessageMethods().]
def function[HasExtension, parameter[self, extension_handle]]:
call[name[_VerifyExtensionHandle], parameter[name[self], name[extension_handle]]]
if compare[name[extension_handle].label equal[==] name[_FieldDescriptor].LABEL_REPEATED] begin[:]
<ast.Raise object at 0x7da1b1f086a0>
if compare[name[extension_handle].cpp_type equal[==] name[_FieldDescriptor].CPPTYPE_MESSAGE] begin[:]
variable[value] assign[=] call[name[self]._fields.get, parameter[name[extension_handle]]]
return[<ast.BoolOp object at 0x7da1b1f09540>]
name[cls].HasExtension assign[=] name[HasExtension] | keyword[def] identifier[_AddHasExtensionMethod] ( identifier[cls] ):
literal[string]
keyword[def] identifier[HasExtension] ( identifier[self] , identifier[extension_handle] ):
identifier[_VerifyExtensionHandle] ( identifier[self] , identifier[extension_handle] )
keyword[if] identifier[extension_handle] . identifier[label] == identifier[_FieldDescriptor] . identifier[LABEL_REPEATED] :
keyword[raise] identifier[KeyError] ( literal[string] % identifier[extension_handle] . identifier[full_name] )
keyword[if] identifier[extension_handle] . identifier[cpp_type] == identifier[_FieldDescriptor] . identifier[CPPTYPE_MESSAGE] :
identifier[value] = identifier[self] . identifier[_fields] . identifier[get] ( identifier[extension_handle] )
keyword[return] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] . identifier[_is_present_in_parent]
keyword[else] :
keyword[return] identifier[extension_handle] keyword[in] identifier[self] . identifier[_fields]
identifier[cls] . identifier[HasExtension] = identifier[HasExtension] | def _AddHasExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def HasExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
raise KeyError('"%s" is repeated.' % extension_handle.full_name) # depends on [control=['if'], data=[]]
if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(extension_handle)
return value is not None and value._is_present_in_parent # depends on [control=['if'], data=[]]
else:
return extension_handle in self._fields
cls.HasExtension = HasExtension |
def process(self, tensor):
        """
        Run the input through every preprocessor in order.

        Each preprocessor's output is fed to the next one, so the pipeline
        is applied left-to-right over ``self.preprocessors``.

        Args:
            tensor: tensor to process

        Returns: processed state
        """
        processed = tensor
        for stage in self.preprocessors:
            processed = stage.process(tensor=processed)
        return processed
constant[
Process state.
Args:
tensor: tensor to process
Returns: processed state
]
for taget[name[processor]] in starred[name[self].preprocessors] begin[:]
variable[tensor] assign[=] call[name[processor].process, parameter[]]
return[name[tensor]] | keyword[def] identifier[process] ( identifier[self] , identifier[tensor] ):
literal[string]
keyword[for] identifier[processor] keyword[in] identifier[self] . identifier[preprocessors] :
identifier[tensor] = identifier[processor] . identifier[process] ( identifier[tensor] = identifier[tensor] )
keyword[return] identifier[tensor] | def process(self, tensor):
"""
Process state.
Args:
tensor: tensor to process
Returns: processed state
"""
for processor in self.preprocessors:
tensor = processor.process(tensor=tensor) # depends on [control=['for'], data=['processor']]
return tensor |
def lazy_map(initial=None, pre_size=0):
    '''
    lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps.

    Arguments:
        initial: mapping (or an existing lazy map) of initial items; ``None``
            or any empty/falsy mapping yields the shared empty lazy map.
        pre_size: pre-allocation size hint forwarded to the underlying
            turbo-mapping constructor.

    Returns:
        A lazy map equivalent to ``initial``; ``initial`` itself when it is
        already a lazy map.
    '''
    # The default used to be the mutable literal {}, which is the classic
    # shared-mutable-default pitfall; None is backward-compatible because
    # both values are falsy and therefore return _EMPTY_LMAP below.
    if initial is not None and is_lazy_map(initial):
        return initial
    if not initial:
        return _EMPTY_LMAP
    return _lazy_turbo_mapping(initial, pre_size)
constant[
lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps.
]
if call[name[is_lazy_map], parameter[name[initial]]] begin[:]
return[name[initial]]
if <ast.UnaryOp object at 0x7da18bc71ff0> begin[:]
return[name[_EMPTY_LMAP]]
return[call[name[_lazy_turbo_mapping], parameter[name[initial], name[pre_size]]]] | keyword[def] identifier[lazy_map] ( identifier[initial] ={}, identifier[pre_size] = literal[int] ):
literal[string]
keyword[if] identifier[is_lazy_map] ( identifier[initial] ): keyword[return] identifier[initial]
keyword[if] keyword[not] identifier[initial] : keyword[return] identifier[_EMPTY_LMAP]
keyword[return] identifier[_lazy_turbo_mapping] ( identifier[initial] , identifier[pre_size] ) | def lazy_map(initial={}, pre_size=0):
"""
lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps.
"""
if is_lazy_map(initial):
return initial # depends on [control=['if'], data=[]]
if not initial:
return _EMPTY_LMAP # depends on [control=['if'], data=[]]
return _lazy_turbo_mapping(initial, pre_size) |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: DeviceContext for this DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceContext
"""
if self._context is None:
self._context = DeviceContext(
self._version,
fleet_sid=self._solution['fleet_sid'],
sid=self._solution['sid'],
)
return self._context | def function[_proxy, parameter[self]]:
constant[
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: DeviceContext for this DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceContext
]
if compare[name[self]._context is constant[None]] begin[:]
name[self]._context assign[=] call[name[DeviceContext], parameter[name[self]._version]]
return[name[self]._context] | keyword[def] identifier[_proxy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_context] keyword[is] keyword[None] :
identifier[self] . identifier[_context] = identifier[DeviceContext] (
identifier[self] . identifier[_version] ,
identifier[fleet_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[sid] = identifier[self] . identifier[_solution] [ literal[string] ],
)
keyword[return] identifier[self] . identifier[_context] | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: DeviceContext for this DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceContext
"""
if self._context is None:
self._context = DeviceContext(self._version, fleet_sid=self._solution['fleet_sid'], sid=self._solution['sid']) # depends on [control=['if'], data=[]]
return self._context |
def Flush(self):
    """Writes the changes in this object to the datastore.

    Persists, in order: (1) buffered flow requests/responses plus request
    deletions, (2) client-message deletions and newly scheduled client
    messages, (3) queue notifications; then resets every in-memory buffer
    so this manager can be reused for the next batch.
    """
    self.data_store.StoreRequestsAndResponses(
        new_requests=self.request_queue,
        new_responses=self.response_queue,
        requests_to_delete=self.requests_to_delete)
    # We need to make sure that notifications are written after the requests so
    # we flush after writing all requests and only notify afterwards.
    mutation_pool = self.data_store.GetMutationPool()
    with mutation_pool:
      # Group pending deletions by their client queue so each queue gets a
      # single Delete call instead of one per message.
      messages_by_queue = collection.Group(
          list(itervalues(
              self.client_messages_to_delete)), lambda request: request.queue)
      for queue, messages in iteritems(messages_by_queue):
        self.Delete(queue, messages, mutation_pool=mutation_pool)
      if self.new_client_messages:
        # Entries are (message, timestamp) pairs; batch per timestamp so each
        # Schedule call covers one delivery time.
        for timestamp, messages in iteritems(
            collection.Group(self.new_client_messages, lambda x: x[1])):
          self.Schedule([x[0] for x in messages],
                        timestamp=timestamp,
                        mutation_pool=mutation_pool)
      if self.notifications:
        for notification in itervalues(self.notifications):
          self.NotifyQueue(notification, mutation_pool=mutation_pool)
        # Flush the pool explicitly so notifications land after the message
        # writes queued above.
        mutation_pool.Flush()
    # Clear all buffered state; the manager is now empty and reusable.
    self.request_queue = []
    self.response_queue = []
    self.requests_to_delete = []
    self.client_messages_to_delete = {}
    self.notifications = {}
    self.new_client_messages = []
constant[Writes the changes in this object to the datastore.]
call[name[self].data_store.StoreRequestsAndResponses, parameter[]]
variable[mutation_pool] assign[=] call[name[self].data_store.GetMutationPool, parameter[]]
with name[mutation_pool] begin[:]
variable[messages_by_queue] assign[=] call[name[collection].Group, parameter[call[name[list], parameter[call[name[itervalues], parameter[name[self].client_messages_to_delete]]]], <ast.Lambda object at 0x7da1b1b46fe0>]]
for taget[tuple[[<ast.Name object at 0x7da1b1b45de0>, <ast.Name object at 0x7da1b1b444f0>]]] in starred[call[name[iteritems], parameter[name[messages_by_queue]]]] begin[:]
call[name[self].Delete, parameter[name[queue], name[messages]]]
if name[self].new_client_messages begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1b45b10>, <ast.Name object at 0x7da1b1b44f10>]]] in starred[call[name[iteritems], parameter[call[name[collection].Group, parameter[name[self].new_client_messages, <ast.Lambda object at 0x7da1b1b44ee0>]]]]] begin[:]
call[name[self].Schedule, parameter[<ast.ListComp object at 0x7da1b1b474f0>]]
if name[self].notifications begin[:]
for taget[name[notification]] in starred[call[name[itervalues], parameter[name[self].notifications]]] begin[:]
call[name[self].NotifyQueue, parameter[name[notification]]]
call[name[mutation_pool].Flush, parameter[]]
name[self].request_queue assign[=] list[[]]
name[self].response_queue assign[=] list[[]]
name[self].requests_to_delete assign[=] list[[]]
name[self].client_messages_to_delete assign[=] dictionary[[], []]
name[self].notifications assign[=] dictionary[[], []]
name[self].new_client_messages assign[=] list[[]] | keyword[def] identifier[Flush] ( identifier[self] ):
literal[string]
identifier[self] . identifier[data_store] . identifier[StoreRequestsAndResponses] (
identifier[new_requests] = identifier[self] . identifier[request_queue] ,
identifier[new_responses] = identifier[self] . identifier[response_queue] ,
identifier[requests_to_delete] = identifier[self] . identifier[requests_to_delete] )
identifier[mutation_pool] = identifier[self] . identifier[data_store] . identifier[GetMutationPool] ()
keyword[with] identifier[mutation_pool] :
identifier[messages_by_queue] = identifier[collection] . identifier[Group] (
identifier[list] ( identifier[itervalues] (
identifier[self] . identifier[client_messages_to_delete] )), keyword[lambda] identifier[request] : identifier[request] . identifier[queue] )
keyword[for] identifier[queue] , identifier[messages] keyword[in] identifier[iteritems] ( identifier[messages_by_queue] ):
identifier[self] . identifier[Delete] ( identifier[queue] , identifier[messages] , identifier[mutation_pool] = identifier[mutation_pool] )
keyword[if] identifier[self] . identifier[new_client_messages] :
keyword[for] identifier[timestamp] , identifier[messages] keyword[in] identifier[iteritems] (
identifier[collection] . identifier[Group] ( identifier[self] . identifier[new_client_messages] , keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])):
identifier[self] . identifier[Schedule] ([ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[messages] ],
identifier[timestamp] = identifier[timestamp] ,
identifier[mutation_pool] = identifier[mutation_pool] )
keyword[if] identifier[self] . identifier[notifications] :
keyword[for] identifier[notification] keyword[in] identifier[itervalues] ( identifier[self] . identifier[notifications] ):
identifier[self] . identifier[NotifyQueue] ( identifier[notification] , identifier[mutation_pool] = identifier[mutation_pool] )
identifier[mutation_pool] . identifier[Flush] ()
identifier[self] . identifier[request_queue] =[]
identifier[self] . identifier[response_queue] =[]
identifier[self] . identifier[requests_to_delete] =[]
identifier[self] . identifier[client_messages_to_delete] ={}
identifier[self] . identifier[notifications] ={}
identifier[self] . identifier[new_client_messages] =[] | def Flush(self):
"""Writes the changes in this object to the datastore."""
self.data_store.StoreRequestsAndResponses(new_requests=self.request_queue, new_responses=self.response_queue, requests_to_delete=self.requests_to_delete)
# We need to make sure that notifications are written after the requests so
# we flush after writing all requests and only notify afterwards.
mutation_pool = self.data_store.GetMutationPool()
with mutation_pool:
messages_by_queue = collection.Group(list(itervalues(self.client_messages_to_delete)), lambda request: request.queue)
for (queue, messages) in iteritems(messages_by_queue):
self.Delete(queue, messages, mutation_pool=mutation_pool) # depends on [control=['for'], data=[]]
if self.new_client_messages:
for (timestamp, messages) in iteritems(collection.Group(self.new_client_messages, lambda x: x[1])):
self.Schedule([x[0] for x in messages], timestamp=timestamp, mutation_pool=mutation_pool) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
if self.notifications:
for notification in itervalues(self.notifications):
self.NotifyQueue(notification, mutation_pool=mutation_pool) # depends on [control=['for'], data=['notification']]
mutation_pool.Flush() # depends on [control=['if'], data=[]]
self.request_queue = []
self.response_queue = []
self.requests_to_delete = []
self.client_messages_to_delete = {}
self.notifications = {}
self.new_client_messages = [] |
def main(params=None):
    """
    Main function to launch usufy.
    The function is created in this way so as to let other applications make
    use of the full configuration capabilities of the application. The
    parameters received are used as parsed by this modules `getParser()`.
    Args:
    -----
        params: A list with the parameters as grabbed by the terminal. It is
            None when this is called by an entry_point. If it is called by osrf
            the data is already parsed.
    Returns:
    --------
        dict: A Json representing the matching results.
    """
    if params == None:
        parser = getParser()
        # parse_args(None) falls back to sys.argv when run as an entry point.
        args = parser.parse_args(params)
    else:
        # The caller (e.g. osrf) already provides a parsed namespace.
        args = params
    print(general.title(banner.text))
    sayingHello = """
 Usufy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018
This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{}>.
""".format(general.LICENSE_URL)
    print(general.info(sayingHello))
    if args.fuzz:
        # Fuzzing mode: probe candidate URL patterns instead of known platforms.
        res = fuzzUsufy(args.fuzz, args.fuzz_config)
    else:
        # Recovering the list of platforms to be launched
        listPlatforms = platform_selection.getPlatformsByName(platformNames=args.platforms, tags=args.tags, mode="usufy", excludePlatformNames=args.exclude)
        if args.info:
            # Information actions...
            if args.info == 'list_platforms':
                infoPlatforms="Listing the platforms:\n"
                for p in listPlatforms:
                    infoPlatforms += "\t\t" + (str(p) + ": ").ljust(16, ' ') + str(p.tags)+"\n"
                return infoPlatforms
            elif args.info == 'list_tags':
                tags = {}
                # Going through all the selected platforms to get their tags
                for p in listPlatforms:
                    for t in p.tags:
                        if t not in tags.keys():
                            tags[t] = 1
                        else:
                            tags[t] += 1
                infoTags = "List of tags:\n"
                # Displaying the results in a sorted list
                for t in tags.keys():
                    infoTags += "\t\t" + (t + ": ").ljust(16, ' ') + str(tags[t]) + " time(s)\n"
                return infoTags
            else:
                pass
        # performing the test
        elif args.benchmark:
            platforms = platform_selection.getAllPlatformNames("usufy")
            res = benchmark.doBenchmark(platforms)
            strTimes = ""
            for e in sorted(res.keys()):
                strTimes += str(e) + "\t" + str(res[e]) + "\n"
            return strTimes
        # showing the tags of the usufy platforms
        elif args.show_tags:
            tags = platform_selection.getAllPlatformNamesByTag("usufy")
            print(general.info("This is the list of platforms grouped by tag.\n"))
            print(json.dumps(tags, indent=2, sort_keys=True))
            print(general.info("[Tip] Remember that you can always launch the platform using the -t option followed by any of the aforementioned.\n"))
            return tags
        # Executing the corresponding process...
        else:
            # Showing the execution time...
            startTime= dt.datetime.now()
            print(str(startTime) + "\tStarting search in " + general.emphasis(str(len(listPlatforms))) + " platform(s)... Relax!\n")
            print(general.emphasis("\tPress <Ctrl + C> to stop...\n"))
            # Defining the list of users to monitor
            nicks = []
            if args.nicks:
                for n in args.nicks:
                    # TO-DO
                    # A trick to avoid having the processing of the properties when being queried by Maltego
                    if "properties.i3visio" not in n:
                        nicks.append(n)
            else:
                # Reading the nick files
                # NOTE(review): the bare except below swallows every error
                # (including typos) and leaves nicks empty — consider
                # narrowing it to IOError/OSError.
                try:
                    nicks = args.list.read().splitlines()
                except:
                    print(general.error("ERROR: there has been an error when opening the file that stores the nicks.\tPlease, check the existence of this file."))
            # Defining the results
            res = []
            if args.output_folder != None:
                # Verifying whether an output folder was selected
                if not os.path.exists(args.output_folder):
                    os.makedirs(args.output_folder)
                # Launching the process...
                res = processNickList(nicks, listPlatforms, args.output_folder, avoidProcessing = args.avoid_processing, avoidDownload = args.avoid_download, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)
            else:
                try:
                    res = processNickList(nicks, listPlatforms, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)
                except Exception as e:
                    print(general.error("Exception grabbed when processing the nicks: " + str(e)))
                    print(general.error(traceback.print_stack()))
            # We are going to iterate over the results...
            strResults = "\t"
            # Structure returned
            """
            [
                {
                    "attributes": [
                        {
                            "attributes": [],
                            "type": "i3visio.uri",
                            "value": "http://twitter.com/i3visio"
                        },
                        {
                            "attributes": [],
                            "type": "i3visio.alias",
                            "value": "i3visio"
                        },
                        {
                            "attributes": [],
                            "type": "i3visio.platform",
                            "value": "Twitter"
                        }
                    ],
                    "type": "i3visio.profile",
                    "value": "Twitter - i3visio"
                }
                ,
                ...
            ]
            """
            for r in res:
                # The format of the results (attributes) for a given nick is a list as follows:
                for att in r["attributes"]:
                    # iterating through the attributes
                    platform = ""
                    uri = ""
                    for details in att["attributes"]:
                        if details["type"] == "i3visio.platform":
                            platform = details["value"]
                        if details["type"] == "i3visio.uri":
                            uri = details["value"]
                    # NOTE(review): this bare except silently drops entries
                    # whose platform/uri cannot be stringified.
                    try:
                        strResults+= (str(platform) + ":").ljust(16, ' ')+ " "+ str(uri)+"\n\t\t"
                    except:
                        pass
            # Generating summary files for each ...
            if args.extension:
                # Verifying if the outputPath exists
                if not os.path.exists (args.output_folder):
                    os.makedirs(args.output_folder)
                # Grabbing the results
                fileHeader = os.path.join(args.output_folder, args.file_header)
                # Iterating through the given extensions to print its values
                for ext in args.extension:
                    # Generating output files
                    general.exportUsufy(res, ext, fileHeader)
            now = dt.datetime.now()
            print("\n{}\tResults obtained:\n".format(str(now)))
            print(general.success(general.usufyToTextExport(res)))
            if args.web_browser:
                general.openResultsInBrowser(res)
            now = dt.datetime.now()
            print("\n" + str(now) + "\tYou can find all the information here:")
            for ext in args.extension:
                # Showing the output files
                print("\t" + general.emphasis(fileHeader + "." + ext))
            # Showing the execution time...
            endTime= dt.datetime.now()
            print("\n" + str(endTime) +"\tFinishing execution...\n")
            print("Total time consumed:\t" + general.emphasis(str(endTime-startTime)))
            # NOTE(review): divides by len(listPlatforms) — raises
            # ZeroDivisionError if the platform selection came back empty.
            print("Average seconds/query:\t" + general.emphasis(str((endTime-startTime).total_seconds()/len(listPlatforms))) +" seconds\n")
            # Urging users to place an issue on Github...
            print(banner.footer)
    if params:
        return res
constant[
Main function to launch usufy.
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `getParser()`.
Args:
-----
params: A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
--------
dict: A Json representing the matching results.
]
if compare[name[params] equal[==] constant[None]] begin[:]
variable[parser] assign[=] call[name[getParser], parameter[]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[params]]]
call[name[print], parameter[call[name[general].title, parameter[name[banner].text]]]]
variable[sayingHello] assign[=] call[constant[
Usufy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018
This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{}>.
].format, parameter[name[general].LICENSE_URL]]
call[name[print], parameter[call[name[general].info, parameter[name[sayingHello]]]]]
if name[args].fuzz begin[:]
variable[res] assign[=] call[name[fuzzUsufy], parameter[name[args].fuzz, name[args].fuzz_config]]
if name[params] begin[:]
return[name[res]] | keyword[def] identifier[main] ( identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[params] == keyword[None] :
identifier[parser] = identifier[getParser] ()
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[params] )
keyword[else] :
identifier[args] = identifier[params]
identifier[print] ( identifier[general] . identifier[title] ( identifier[banner] . identifier[text] ))
identifier[sayingHello] = literal[string] . identifier[format] ( identifier[general] . identifier[LICENSE_URL] )
identifier[print] ( identifier[general] . identifier[info] ( identifier[sayingHello] ))
keyword[if] identifier[args] . identifier[fuzz] :
identifier[res] = identifier[fuzzUsufy] ( identifier[args] . identifier[fuzz] , identifier[args] . identifier[fuzz_config] )
keyword[else] :
identifier[listPlatforms] = identifier[platform_selection] . identifier[getPlatformsByName] ( identifier[platformNames] = identifier[args] . identifier[platforms] , identifier[tags] = identifier[args] . identifier[tags] , identifier[mode] = literal[string] , identifier[excludePlatformNames] = identifier[args] . identifier[exclude] )
keyword[if] identifier[args] . identifier[info] :
keyword[if] identifier[args] . identifier[info] == literal[string] :
identifier[infoPlatforms] = literal[string]
keyword[for] identifier[p] keyword[in] identifier[listPlatforms] :
identifier[infoPlatforms] += literal[string] +( identifier[str] ( identifier[p] )+ literal[string] ). identifier[ljust] ( literal[int] , literal[string] )+ identifier[str] ( identifier[p] . identifier[tags] )+ literal[string]
keyword[return] identifier[infoPlatforms]
keyword[elif] identifier[args] . identifier[info] == literal[string] :
identifier[tags] ={}
keyword[for] identifier[p] keyword[in] identifier[listPlatforms] :
keyword[for] identifier[t] keyword[in] identifier[p] . identifier[tags] :
keyword[if] identifier[t] keyword[not] keyword[in] identifier[tags] . identifier[keys] ():
identifier[tags] [ identifier[t] ]= literal[int]
keyword[else] :
identifier[tags] [ identifier[t] ]+= literal[int]
identifier[infoTags] = literal[string]
keyword[for] identifier[t] keyword[in] identifier[tags] . identifier[keys] ():
identifier[infoTags] += literal[string] +( identifier[t] + literal[string] ). identifier[ljust] ( literal[int] , literal[string] )+ identifier[str] ( identifier[tags] [ identifier[t] ])+ literal[string]
keyword[return] identifier[infoTags]
keyword[else] :
keyword[pass]
keyword[elif] identifier[args] . identifier[benchmark] :
identifier[platforms] = identifier[platform_selection] . identifier[getAllPlatformNames] ( literal[string] )
identifier[res] = identifier[benchmark] . identifier[doBenchmark] ( identifier[platforms] )
identifier[strTimes] = literal[string]
keyword[for] identifier[e] keyword[in] identifier[sorted] ( identifier[res] . identifier[keys] ()):
identifier[strTimes] += identifier[str] ( identifier[e] )+ literal[string] + identifier[str] ( identifier[res] [ identifier[e] ])+ literal[string]
keyword[return] identifier[strTimes]
keyword[elif] identifier[args] . identifier[show_tags] :
identifier[tags] = identifier[platform_selection] . identifier[getAllPlatformNamesByTag] ( literal[string] )
identifier[print] ( identifier[general] . identifier[info] ( literal[string] ))
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[tags] , identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] ))
identifier[print] ( identifier[general] . identifier[info] ( literal[string] ))
keyword[return] identifier[tags]
keyword[else] :
identifier[startTime] = identifier[dt] . identifier[datetime] . identifier[now] ()
identifier[print] ( identifier[str] ( identifier[startTime] )+ literal[string] + identifier[general] . identifier[emphasis] ( identifier[str] ( identifier[len] ( identifier[listPlatforms] )))+ literal[string] )
identifier[print] ( identifier[general] . identifier[emphasis] ( literal[string] ))
identifier[nicks] =[]
keyword[if] identifier[args] . identifier[nicks] :
keyword[for] identifier[n] keyword[in] identifier[args] . identifier[nicks] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[n] :
identifier[nicks] . identifier[append] ( identifier[n] )
keyword[else] :
keyword[try] :
identifier[nicks] = identifier[args] . identifier[list] . identifier[read] (). identifier[splitlines] ()
keyword[except] :
identifier[print] ( identifier[general] . identifier[error] ( literal[string] ))
identifier[res] =[]
keyword[if] identifier[args] . identifier[output_folder] != keyword[None] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[args] . identifier[output_folder] ):
identifier[os] . identifier[makedirs] ( identifier[args] . identifier[output_folder] )
identifier[res] = identifier[processNickList] ( identifier[nicks] , identifier[listPlatforms] , identifier[args] . identifier[output_folder] , identifier[avoidProcessing] = identifier[args] . identifier[avoid_processing] , identifier[avoidDownload] = identifier[args] . identifier[avoid_download] , identifier[nThreads] = identifier[args] . identifier[threads] , identifier[verbosity] = identifier[args] . identifier[verbose] , identifier[logFolder] = identifier[args] . identifier[logfolder] )
keyword[else] :
keyword[try] :
identifier[res] = identifier[processNickList] ( identifier[nicks] , identifier[listPlatforms] , identifier[nThreads] = identifier[args] . identifier[threads] , identifier[verbosity] = identifier[args] . identifier[verbose] , identifier[logFolder] = identifier[args] . identifier[logfolder] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( identifier[general] . identifier[error] ( literal[string] + identifier[str] ( identifier[e] )))
identifier[print] ( identifier[general] . identifier[error] ( identifier[traceback] . identifier[print_stack] ()))
identifier[strResults] = literal[string]
literal[string]
keyword[for] identifier[r] keyword[in] identifier[res] :
keyword[for] identifier[att] keyword[in] identifier[r] [ literal[string] ]:
identifier[platform] = literal[string]
identifier[uri] = literal[string]
keyword[for] identifier[details] keyword[in] identifier[att] [ literal[string] ]:
keyword[if] identifier[details] [ literal[string] ]== literal[string] :
identifier[platform] = identifier[details] [ literal[string] ]
keyword[if] identifier[details] [ literal[string] ]== literal[string] :
identifier[uri] = identifier[details] [ literal[string] ]
keyword[try] :
identifier[strResults] +=( identifier[str] ( identifier[platform] )+ literal[string] ). identifier[ljust] ( literal[int] , literal[string] )+ literal[string] + identifier[str] ( identifier[uri] )+ literal[string]
keyword[except] :
keyword[pass]
keyword[if] identifier[args] . identifier[extension] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[args] . identifier[output_folder] ):
identifier[os] . identifier[makedirs] ( identifier[args] . identifier[output_folder] )
identifier[fileHeader] = identifier[os] . identifier[path] . identifier[join] ( identifier[args] . identifier[output_folder] , identifier[args] . identifier[file_header] )
keyword[for] identifier[ext] keyword[in] identifier[args] . identifier[extension] :
identifier[general] . identifier[exportUsufy] ( identifier[res] , identifier[ext] , identifier[fileHeader] )
identifier[now] = identifier[dt] . identifier[datetime] . identifier[now] ()
identifier[print] ( literal[string] . identifier[format] ( identifier[str] ( identifier[now] )))
identifier[print] ( identifier[general] . identifier[success] ( identifier[general] . identifier[usufyToTextExport] ( identifier[res] )))
keyword[if] identifier[args] . identifier[web_browser] :
identifier[general] . identifier[openResultsInBrowser] ( identifier[res] )
identifier[now] = identifier[dt] . identifier[datetime] . identifier[now] ()
identifier[print] ( literal[string] + identifier[str] ( identifier[now] )+ literal[string] )
keyword[for] identifier[ext] keyword[in] identifier[args] . identifier[extension] :
identifier[print] ( literal[string] + identifier[general] . identifier[emphasis] ( identifier[fileHeader] + literal[string] + identifier[ext] ))
identifier[endTime] = identifier[dt] . identifier[datetime] . identifier[now] ()
identifier[print] ( literal[string] + identifier[str] ( identifier[endTime] )+ literal[string] )
identifier[print] ( literal[string] + identifier[general] . identifier[emphasis] ( identifier[str] ( identifier[endTime] - identifier[startTime] )))
identifier[print] ( literal[string] + identifier[general] . identifier[emphasis] ( identifier[str] (( identifier[endTime] - identifier[startTime] ). identifier[total_seconds] ()/ identifier[len] ( identifier[listPlatforms] )))+ literal[string] )
identifier[print] ( identifier[banner] . identifier[footer] )
keyword[if] identifier[params] :
keyword[return] identifier[res] | def main(params=None):
"""
Main function to launch usufy.
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `getParser()`.
Args:
-----
params: A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
--------
dict: A Json representing the matching results.
"""
if params == None:
parser = getParser()
args = parser.parse_args(params) # depends on [control=['if'], data=['params']]
else:
args = params
print(general.title(banner.text))
sayingHello = '\n Usufy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018\n\nThis program comes with ABSOLUTELY NO WARRANTY. This is free software, and you\nare welcome to redistribute it under certain conditions. For additional info,\nvisit <{}>.\n\n'.format(general.LICENSE_URL)
print(general.info(sayingHello))
if args.fuzz:
res = fuzzUsufy(args.fuzz, args.fuzz_config) # depends on [control=['if'], data=[]]
else:
# Recovering the list of platforms to be launched
listPlatforms = platform_selection.getPlatformsByName(platformNames=args.platforms, tags=args.tags, mode='usufy', excludePlatformNames=args.exclude)
if args.info:
# Information actions...
if args.info == 'list_platforms':
infoPlatforms = 'Listing the platforms:\n'
for p in listPlatforms:
infoPlatforms += '\t\t' + (str(p) + ': ').ljust(16, ' ') + str(p.tags) + '\n' # depends on [control=['for'], data=['p']]
return infoPlatforms # depends on [control=['if'], data=[]]
elif args.info == 'list_tags':
tags = {}
# Going through all the selected platforms to get their tags
for p in listPlatforms:
for t in p.tags:
if t not in tags.keys():
tags[t] = 1 # depends on [control=['if'], data=['t']]
else:
tags[t] += 1 # depends on [control=['for'], data=['t']] # depends on [control=['for'], data=['p']]
infoTags = 'List of tags:\n'
# Displaying the results in a sorted list
for t in tags.keys():
infoTags += '\t\t' + (t + ': ').ljust(16, ' ') + str(tags[t]) + ' time(s)\n' # depends on [control=['for'], data=['t']]
return infoTags # depends on [control=['if'], data=[]]
else:
pass # depends on [control=['if'], data=[]]
# performing the test
elif args.benchmark:
platforms = platform_selection.getAllPlatformNames('usufy')
res = benchmark.doBenchmark(platforms)
strTimes = ''
for e in sorted(res.keys()):
strTimes += str(e) + '\t' + str(res[e]) + '\n' # depends on [control=['for'], data=['e']]
return strTimes # depends on [control=['if'], data=[]]
# showing the tags of the usufy platforms
elif args.show_tags:
tags = platform_selection.getAllPlatformNamesByTag('usufy')
print(general.info('This is the list of platforms grouped by tag.\n'))
print(json.dumps(tags, indent=2, sort_keys=True))
print(general.info('[Tip] Remember that you can always launch the platform using the -t option followed by any of the aforementioned.\n'))
return tags # depends on [control=['if'], data=[]]
else:
# Executing the corresponding process...
# Showing the execution time...
startTime = dt.datetime.now()
print(str(startTime) + '\tStarting search in ' + general.emphasis(str(len(listPlatforms))) + ' platform(s)... Relax!\n')
print(general.emphasis('\tPress <Ctrl + C> to stop...\n'))
# Defining the list of users to monitor
nicks = []
if args.nicks:
for n in args.nicks:
# TO-DO
# A trick to avoid having the processing of the properties when being queried by Maltego
if 'properties.i3visio' not in n:
nicks.append(n) # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=[]]
else:
# Reading the nick files
try:
nicks = args.list.read().splitlines() # depends on [control=['try'], data=[]]
except:
print(general.error('ERROR: there has been an error when opening the file that stores the nicks.\tPlease, check the existence of this file.')) # depends on [control=['except'], data=[]]
# Definning the results
res = []
if args.output_folder != None:
# if Verifying an output folder was selected
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder) # depends on [control=['if'], data=[]]
# Launching the process...
res = processNickList(nicks, listPlatforms, args.output_folder, avoidProcessing=args.avoid_processing, avoidDownload=args.avoid_download, nThreads=args.threads, verbosity=args.verbose, logFolder=args.logfolder) # depends on [control=['if'], data=[]]
else:
try:
res = processNickList(nicks, listPlatforms, nThreads=args.threads, verbosity=args.verbose, logFolder=args.logfolder) # depends on [control=['try'], data=[]]
except Exception as e:
print(general.error('Exception grabbed when processing the nicks: ' + str(e)))
print(general.error(traceback.print_stack())) # depends on [control=['except'], data=['e']]
# We are going to iterate over the results...
strResults = '\t'
# Structure returned
'\n [\n {\n "attributes": [\n {\n "attributes": [],\n "type": "i3visio.uri",\n "value": "http://twitter.com/i3visio"\n },\n {\n "attributes": [],\n "type": "i3visio.alias",\n "value": "i3visio"\n },\n {\n "attributes": [],\n "type": "i3visio.platform",\n "value": "Twitter"\n }\n ],\n "type": "i3visio.profile",\n "value": "Twitter - i3visio"\n }\n ,\n ...\n ]\n '
for r in res:
# The format of the results (attributes) for a given nick is a list as follows:
for att in r['attributes']:
# iterating through the attributes
platform = ''
uri = ''
for details in att['attributes']:
if details['type'] == 'i3visio.platform':
platform = details['value'] # depends on [control=['if'], data=[]]
if details['type'] == 'i3visio.uri':
uri = details['value'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['details']]
try:
strResults += (str(platform) + ':').ljust(16, ' ') + ' ' + str(uri) + '\n\t\t' # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['att']] # depends on [control=['for'], data=['r']]
# Generating summary files for each ...
if args.extension:
# Verifying if the outputPath exists
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder) # depends on [control=['if'], data=[]]
# Grabbing the results
fileHeader = os.path.join(args.output_folder, args.file_header)
# Iterating through the given extensions to print its values
for ext in args.extension:
# Generating output files
general.exportUsufy(res, ext, fileHeader) # depends on [control=['for'], data=['ext']] # depends on [control=['if'], data=[]]
now = dt.datetime.now()
print('\n{}\tResults obtained:\n'.format(str(now)))
print(general.success(general.usufyToTextExport(res)))
if args.web_browser:
general.openResultsInBrowser(res) # depends on [control=['if'], data=[]]
now = dt.datetime.now()
print('\n' + str(now) + '\tYou can find all the information here:')
for ext in args.extension:
# Showing the output files
print('\t' + general.emphasis(fileHeader + '.' + ext)) # depends on [control=['for'], data=['ext']]
# Showing the execution time...
endTime = dt.datetime.now()
print('\n' + str(endTime) + '\tFinishing execution...\n')
print('Total time consumed:\t' + general.emphasis(str(endTime - startTime)))
print('Average seconds/query:\t' + general.emphasis(str((endTime - startTime).total_seconds() / len(listPlatforms))) + ' seconds\n')
# Urging users to place an issue on Github...
print(banner.footer)
if params:
return res # depends on [control=['if'], data=[]] |
def _setup_aggregation(self, aggregator=None):
""" Wrap `self.index` method with ESAggregator.
This makes `self.index` to first try to run aggregation and only
on fail original method is run. Method is wrapped only if it is
defined and `elasticsearch.enable_aggregations` setting is true.
"""
from nefertari.elasticsearch import ES
if aggregator is None:
aggregator = ESAggregator
aggregations_enabled = (
ES.settings and ES.settings.asbool('enable_aggregations'))
if not aggregations_enabled:
log.debug('Elasticsearch aggregations are not enabled')
return
index = getattr(self, 'index', None)
index_defined = index and index != self.not_allowed_action
if index_defined:
self.index = aggregator(self).wrap(self.index) | def function[_setup_aggregation, parameter[self, aggregator]]:
constant[ Wrap `self.index` method with ESAggregator.
This makes `self.index` to first try to run aggregation and only
on fail original method is run. Method is wrapped only if it is
defined and `elasticsearch.enable_aggregations` setting is true.
]
from relative_module[nefertari.elasticsearch] import module[ES]
if compare[name[aggregator] is constant[None]] begin[:]
variable[aggregator] assign[=] name[ESAggregator]
variable[aggregations_enabled] assign[=] <ast.BoolOp object at 0x7da18f720c10>
if <ast.UnaryOp object at 0x7da18f722830> begin[:]
call[name[log].debug, parameter[constant[Elasticsearch aggregations are not enabled]]]
return[None]
variable[index] assign[=] call[name[getattr], parameter[name[self], constant[index], constant[None]]]
variable[index_defined] assign[=] <ast.BoolOp object at 0x7da18f00f100>
if name[index_defined] begin[:]
name[self].index assign[=] call[call[name[aggregator], parameter[name[self]]].wrap, parameter[name[self].index]] | keyword[def] identifier[_setup_aggregation] ( identifier[self] , identifier[aggregator] = keyword[None] ):
literal[string]
keyword[from] identifier[nefertari] . identifier[elasticsearch] keyword[import] identifier[ES]
keyword[if] identifier[aggregator] keyword[is] keyword[None] :
identifier[aggregator] = identifier[ESAggregator]
identifier[aggregations_enabled] =(
identifier[ES] . identifier[settings] keyword[and] identifier[ES] . identifier[settings] . identifier[asbool] ( literal[string] ))
keyword[if] keyword[not] identifier[aggregations_enabled] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[return]
identifier[index] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] )
identifier[index_defined] = identifier[index] keyword[and] identifier[index] != identifier[self] . identifier[not_allowed_action]
keyword[if] identifier[index_defined] :
identifier[self] . identifier[index] = identifier[aggregator] ( identifier[self] ). identifier[wrap] ( identifier[self] . identifier[index] ) | def _setup_aggregation(self, aggregator=None):
""" Wrap `self.index` method with ESAggregator.
This makes `self.index` to first try to run aggregation and only
on fail original method is run. Method is wrapped only if it is
defined and `elasticsearch.enable_aggregations` setting is true.
"""
from nefertari.elasticsearch import ES
if aggregator is None:
aggregator = ESAggregator # depends on [control=['if'], data=['aggregator']]
aggregations_enabled = ES.settings and ES.settings.asbool('enable_aggregations')
if not aggregations_enabled:
log.debug('Elasticsearch aggregations are not enabled')
return # depends on [control=['if'], data=[]]
index = getattr(self, 'index', None)
index_defined = index and index != self.not_allowed_action
if index_defined:
self.index = aggregator(self).wrap(self.index) # depends on [control=['if'], data=[]] |
def present(name,
source,
aliases=None,
public=None,
auto_update=None,
remote_addr=None,
cert=None,
key=None,
verify_cert=True):
'''
Ensure an image exists, copy it else from source
name :
An alias of the image, this is used to check if the image exists and
it will be added as alias to the image on copy/create.
source :
Source dict.
For an LXD to LXD copy:
.. code-block: yaml
source:
type: lxd
name: ubuntu/xenial/amd64 # This can also be a fingerprint.
remote_addr: https://images.linuxcontainers.org:8443
cert: ~/.config/lxd/client.crt
key: ~/.config/lxd/client.key
verify_cert: False
.. attention:
For this kind of remote you also need to provide:
- a https:// remote_addr
- a cert and key
- verify_cert
From file:
.. code-block: yaml
source:
type: file
filename: salt://lxd/files/busybox.tar.xz
saltenv: base
From simplestreams:
.. code-block: yaml
source:
type: simplestreams
server: https://cloud-images.ubuntu.com/releases
name: xenial/amd64
From an URL:
.. code-block: yaml
source:
type: url
url: https://dl.stgraber.org/lxd
aliases :
List of aliases to append, can be empty.
public :
Make this image public available on this instance?
None on source_type LXD means copy source
None on source_type file means False
auto_update :
Try to auto-update from the original source?
None on source_type LXD means copy source
source_type file does not have auto-update.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Zertifikate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
'''
if aliases is None:
aliases = []
# Create a copy of aliases, since we're modifying it here
aliases = aliases[:]
ret = {
'name': name,
'source': source,
'aliases': aliases,
'public': public,
'auto_update': auto_update,
'remote_addr': remote_addr,
'cert': cert,
'key': key,
'verify_cert': verify_cert,
'changes': {}
}
image = None
try:
image = __salt__['lxd.image_get_by_alias'](
name, remote_addr, cert, key, verify_cert, _raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
except SaltInvocationError as e:
# Image not found
pass
if image is None:
if __opts__['test']:
# Test is on, just return that we would create the image
msg = 'Would create the image "{0}"'.format(name)
ret['changes'] = {'created': msg}
return _unchanged(ret, msg)
try:
if source['type'] == 'lxd':
image = __salt__['lxd.image_copy_lxd'](
source['name'],
src_remote_addr=source['remote_addr'],
src_cert=source['cert'],
src_key=source['key'],
src_verify_cert=source.get('verify_cert', True),
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=public,
auto_update=auto_update,
_raw=True
)
if source['type'] == 'file':
if 'saltenv' not in source:
source['saltenv'] = __env__
image = __salt__['lxd.image_from_file'](
source['filename'],
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=False if public is None else public,
saltenv=source['saltenv'],
_raw=True
)
if source['type'] == 'simplestreams':
image = __salt__['lxd.image_from_simplestreams'](
source['server'],
source['name'],
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=False if public is None else public,
auto_update=False if auto_update is None else auto_update,
_raw=True
)
if source['type'] == 'url':
image = __salt__['lxd.image_from_url'](
source['url'],
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=False if public is None else public,
auto_update=False if auto_update is None else auto_update,
_raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
# Sync aliases
if name not in aliases:
aliases.append(name)
old_aliases = set([six.text_type(a['name']) for a in image.aliases])
new_aliases = set(map(six.text_type, aliases))
alias_changes = []
# Removed aliases
for k in old_aliases.difference(new_aliases):
if not __opts__['test']:
__salt__['lxd.image_alias_delete'](image, k)
alias_changes.append('Removed alias "{0}"'.format(k))
else:
alias_changes.append('Would remove alias "{0}"'.format(k))
# New aliases
for k in new_aliases.difference(old_aliases):
if not __opts__['test']:
__salt__['lxd.image_alias_add'](image, k, '')
alias_changes.append('Added alias "{0}"'.format(k))
else:
alias_changes.append('Would add alias "{0}"'.format(k))
if alias_changes:
ret['changes']['aliases'] = alias_changes
# Set public
if public is not None and image.public != public:
if not __opts__['test']:
ret['changes']['public'] = \
'Setting the image public to {0!s}'.format(public)
image.public = public
__salt__['lxd.pylxd_save_object'](image)
else:
ret['changes']['public'] = \
'Would set public to {0!s}'.format(public)
if __opts__['test'] and ret['changes']:
return _unchanged(
ret,
'Would do {0} changes'.format(len(ret['changes'].keys()))
)
return _success(ret, '{0} changes'.format(len(ret['changes'].keys()))) | def function[present, parameter[name, source, aliases, public, auto_update, remote_addr, cert, key, verify_cert]]:
constant[
Ensure an image exists, copy it else from source
name :
An alias of the image, this is used to check if the image exists and
it will be added as alias to the image on copy/create.
source :
Source dict.
For an LXD to LXD copy:
.. code-block: yaml
source:
type: lxd
name: ubuntu/xenial/amd64 # This can also be a fingerprint.
remote_addr: https://images.linuxcontainers.org:8443
cert: ~/.config/lxd/client.crt
key: ~/.config/lxd/client.key
verify_cert: False
.. attention:
For this kind of remote you also need to provide:
- a https:// remote_addr
- a cert and key
- verify_cert
From file:
.. code-block: yaml
source:
type: file
filename: salt://lxd/files/busybox.tar.xz
saltenv: base
From simplestreams:
.. code-block: yaml
source:
type: simplestreams
server: https://cloud-images.ubuntu.com/releases
name: xenial/amd64
From an URL:
.. code-block: yaml
source:
type: url
url: https://dl.stgraber.org/lxd
aliases :
List of aliases to append, can be empty.
public :
Make this image public available on this instance?
None on source_type LXD means copy source
None on source_type file means False
auto_update :
Try to auto-update from the original source?
None on source_type LXD means copy source
source_type file does not have auto-update.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Zertifikate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
]
if compare[name[aliases] is constant[None]] begin[:]
variable[aliases] assign[=] list[[]]
variable[aliases] assign[=] call[name[aliases]][<ast.Slice object at 0x7da2044c36a0>]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2044c38b0>, <ast.Constant object at 0x7da2044c1090>, <ast.Constant object at 0x7da2044c2c80>, <ast.Constant object at 0x7da2044c2320>, <ast.Constant object at 0x7da2044c0d90>, <ast.Constant object at 0x7da2044c1690>, <ast.Constant object at 0x7da2044c29b0>, <ast.Constant object at 0x7da2044c13c0>, <ast.Constant object at 0x7da2044c2830>, <ast.Constant object at 0x7da2044c2110>], [<ast.Name object at 0x7da2044c09d0>, <ast.Name object at 0x7da2044c3520>, <ast.Name object at 0x7da2044c2260>, <ast.Name object at 0x7da2044c2b60>, <ast.Name object at 0x7da2044c1990>, <ast.Name object at 0x7da2044c0df0>, <ast.Name object at 0x7da2044c0280>, <ast.Name object at 0x7da2044c3ac0>, <ast.Name object at 0x7da2044c2800>, <ast.Dict object at 0x7da2044c0c70>]]
variable[image] assign[=] constant[None]
<ast.Try object at 0x7da2044c3070>
if compare[name[image] is constant[None]] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
variable[msg] assign[=] call[constant[Would create the image "{0}"].format, parameter[name[name]]]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da2044c2590>], [<ast.Name object at 0x7da2044c2cb0>]]
return[call[name[_unchanged], parameter[name[ret], name[msg]]]]
<ast.Try object at 0x7da2044c00a0>
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[aliases]] begin[:]
call[name[aliases].append, parameter[name[name]]]
variable[old_aliases] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da2044c1f30>]]
variable[new_aliases] assign[=] call[name[set], parameter[call[name[map], parameter[name[six].text_type, name[aliases]]]]]
variable[alias_changes] assign[=] list[[]]
for taget[name[k]] in starred[call[name[old_aliases].difference, parameter[name[new_aliases]]]] begin[:]
if <ast.UnaryOp object at 0x7da204344a60> begin[:]
call[call[name[__salt__]][constant[lxd.image_alias_delete]], parameter[name[image], name[k]]]
call[name[alias_changes].append, parameter[call[constant[Removed alias "{0}"].format, parameter[name[k]]]]]
for taget[name[k]] in starred[call[name[new_aliases].difference, parameter[name[old_aliases]]]] begin[:]
if <ast.UnaryOp object at 0x7da204345f30> begin[:]
call[call[name[__salt__]][constant[lxd.image_alias_add]], parameter[name[image], name[k], constant[]]]
call[name[alias_changes].append, parameter[call[constant[Added alias "{0}"].format, parameter[name[k]]]]]
if name[alias_changes] begin[:]
call[call[name[ret]][constant[changes]]][constant[aliases]] assign[=] name[alias_changes]
if <ast.BoolOp object at 0x7da204346c80> begin[:]
if <ast.UnaryOp object at 0x7da204347ee0> begin[:]
call[call[name[ret]][constant[changes]]][constant[public]] assign[=] call[constant[Setting the image public to {0!s}].format, parameter[name[public]]]
name[image].public assign[=] name[public]
call[call[name[__salt__]][constant[lxd.pylxd_save_object]], parameter[name[image]]]
if <ast.BoolOp object at 0x7da204345f00> begin[:]
return[call[name[_unchanged], parameter[name[ret], call[constant[Would do {0} changes].format, parameter[call[name[len], parameter[call[call[name[ret]][constant[changes]].keys, parameter[]]]]]]]]]
return[call[name[_success], parameter[name[ret], call[constant[{0} changes].format, parameter[call[name[len], parameter[call[call[name[ret]][constant[changes]].keys, parameter[]]]]]]]]] | keyword[def] identifier[present] ( identifier[name] ,
identifier[source] ,
identifier[aliases] = keyword[None] ,
identifier[public] = keyword[None] ,
identifier[auto_update] = keyword[None] ,
identifier[remote_addr] = keyword[None] ,
identifier[cert] = keyword[None] ,
identifier[key] = keyword[None] ,
identifier[verify_cert] = keyword[True] ):
literal[string]
keyword[if] identifier[aliases] keyword[is] keyword[None] :
identifier[aliases] =[]
identifier[aliases] = identifier[aliases] [:]
identifier[ret] ={
literal[string] : identifier[name] ,
literal[string] : identifier[source] ,
literal[string] : identifier[aliases] ,
literal[string] : identifier[public] ,
literal[string] : identifier[auto_update] ,
literal[string] : identifier[remote_addr] ,
literal[string] : identifier[cert] ,
literal[string] : identifier[key] ,
literal[string] : identifier[verify_cert] ,
literal[string] :{}
}
identifier[image] = keyword[None]
keyword[try] :
identifier[image] = identifier[__salt__] [ literal[string] ](
identifier[name] , identifier[remote_addr] , identifier[cert] , identifier[key] , identifier[verify_cert] , identifier[_raw] = keyword[True]
)
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[e] :
keyword[return] identifier[_error] ( identifier[ret] , identifier[six] . identifier[text_type] ( identifier[e] ))
keyword[except] identifier[SaltInvocationError] keyword[as] identifier[e] :
keyword[pass]
keyword[if] identifier[image] keyword[is] keyword[None] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[msg] = literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]={ literal[string] : identifier[msg] }
keyword[return] identifier[_unchanged] ( identifier[ret] , identifier[msg] )
keyword[try] :
keyword[if] identifier[source] [ literal[string] ]== literal[string] :
identifier[image] = identifier[__salt__] [ literal[string] ](
identifier[source] [ literal[string] ],
identifier[src_remote_addr] = identifier[source] [ literal[string] ],
identifier[src_cert] = identifier[source] [ literal[string] ],
identifier[src_key] = identifier[source] [ literal[string] ],
identifier[src_verify_cert] = identifier[source] . identifier[get] ( literal[string] , keyword[True] ),
identifier[remote_addr] = identifier[remote_addr] ,
identifier[cert] = identifier[cert] ,
identifier[key] = identifier[key] ,
identifier[verify_cert] = identifier[verify_cert] ,
identifier[aliases] = identifier[aliases] ,
identifier[public] = identifier[public] ,
identifier[auto_update] = identifier[auto_update] ,
identifier[_raw] = keyword[True]
)
keyword[if] identifier[source] [ literal[string] ]== literal[string] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[source] :
identifier[source] [ literal[string] ]= identifier[__env__]
identifier[image] = identifier[__salt__] [ literal[string] ](
identifier[source] [ literal[string] ],
identifier[remote_addr] = identifier[remote_addr] ,
identifier[cert] = identifier[cert] ,
identifier[key] = identifier[key] ,
identifier[verify_cert] = identifier[verify_cert] ,
identifier[aliases] = identifier[aliases] ,
identifier[public] = keyword[False] keyword[if] identifier[public] keyword[is] keyword[None] keyword[else] identifier[public] ,
identifier[saltenv] = identifier[source] [ literal[string] ],
identifier[_raw] = keyword[True]
)
keyword[if] identifier[source] [ literal[string] ]== literal[string] :
identifier[image] = identifier[__salt__] [ literal[string] ](
identifier[source] [ literal[string] ],
identifier[source] [ literal[string] ],
identifier[remote_addr] = identifier[remote_addr] ,
identifier[cert] = identifier[cert] ,
identifier[key] = identifier[key] ,
identifier[verify_cert] = identifier[verify_cert] ,
identifier[aliases] = identifier[aliases] ,
identifier[public] = keyword[False] keyword[if] identifier[public] keyword[is] keyword[None] keyword[else] identifier[public] ,
identifier[auto_update] = keyword[False] keyword[if] identifier[auto_update] keyword[is] keyword[None] keyword[else] identifier[auto_update] ,
identifier[_raw] = keyword[True]
)
keyword[if] identifier[source] [ literal[string] ]== literal[string] :
identifier[image] = identifier[__salt__] [ literal[string] ](
identifier[source] [ literal[string] ],
identifier[remote_addr] = identifier[remote_addr] ,
identifier[cert] = identifier[cert] ,
identifier[key] = identifier[key] ,
identifier[verify_cert] = identifier[verify_cert] ,
identifier[aliases] = identifier[aliases] ,
identifier[public] = keyword[False] keyword[if] identifier[public] keyword[is] keyword[None] keyword[else] identifier[public] ,
identifier[auto_update] = keyword[False] keyword[if] identifier[auto_update] keyword[is] keyword[None] keyword[else] identifier[auto_update] ,
identifier[_raw] = keyword[True]
)
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[e] :
keyword[return] identifier[_error] ( identifier[ret] , identifier[six] . identifier[text_type] ( identifier[e] ))
keyword[if] identifier[name] keyword[not] keyword[in] identifier[aliases] :
identifier[aliases] . identifier[append] ( identifier[name] )
identifier[old_aliases] = identifier[set] ([ identifier[six] . identifier[text_type] ( identifier[a] [ literal[string] ]) keyword[for] identifier[a] keyword[in] identifier[image] . identifier[aliases] ])
identifier[new_aliases] = identifier[set] ( identifier[map] ( identifier[six] . identifier[text_type] , identifier[aliases] ))
identifier[alias_changes] =[]
keyword[for] identifier[k] keyword[in] identifier[old_aliases] . identifier[difference] ( identifier[new_aliases] ):
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ]:
identifier[__salt__] [ literal[string] ]( identifier[image] , identifier[k] )
identifier[alias_changes] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] ))
keyword[else] :
identifier[alias_changes] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] ))
keyword[for] identifier[k] keyword[in] identifier[new_aliases] . identifier[difference] ( identifier[old_aliases] ):
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ]:
identifier[__salt__] [ literal[string] ]( identifier[image] , identifier[k] , literal[string] )
identifier[alias_changes] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] ))
keyword[else] :
identifier[alias_changes] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] ))
keyword[if] identifier[alias_changes] :
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[alias_changes]
keyword[if] identifier[public] keyword[is] keyword[not] keyword[None] keyword[and] identifier[image] . identifier[public] != identifier[public] :
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[public] )
identifier[image] . identifier[public] = identifier[public]
identifier[__salt__] [ literal[string] ]( identifier[image] )
keyword[else] :
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[public] )
keyword[if] identifier[__opts__] [ literal[string] ] keyword[and] identifier[ret] [ literal[string] ]:
keyword[return] identifier[_unchanged] (
identifier[ret] ,
literal[string] . identifier[format] ( identifier[len] ( identifier[ret] [ literal[string] ]. identifier[keys] ()))
)
keyword[return] identifier[_success] ( identifier[ret] , literal[string] . identifier[format] ( identifier[len] ( identifier[ret] [ literal[string] ]. identifier[keys] ()))) | def present(name, source, aliases=None, public=None, auto_update=None, remote_addr=None, cert=None, key=None, verify_cert=True):
"""
Ensure an image exists, copy it else from source
name :
An alias of the image, this is used to check if the image exists and
it will be added as alias to the image on copy/create.
source :
Source dict.
For an LXD to LXD copy:
.. code-block: yaml
source:
type: lxd
name: ubuntu/xenial/amd64 # This can also be a fingerprint.
remote_addr: https://images.linuxcontainers.org:8443
cert: ~/.config/lxd/client.crt
key: ~/.config/lxd/client.key
verify_cert: False
.. attention:
For this kind of remote you also need to provide:
- a https:// remote_addr
- a cert and key
- verify_cert
From file:
.. code-block: yaml
source:
type: file
filename: salt://lxd/files/busybox.tar.xz
saltenv: base
From simplestreams:
.. code-block: yaml
source:
type: simplestreams
server: https://cloud-images.ubuntu.com/releases
name: xenial/amd64
From an URL:
.. code-block: yaml
source:
type: url
url: https://dl.stgraber.org/lxd
aliases :
List of aliases to append, can be empty.
public :
Make this image public available on this instance?
None on source_type LXD means copy source
None on source_type file means False
auto_update :
Try to auto-update from the original source?
None on source_type LXD means copy source
source_type file does not have auto-update.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Zertifikate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
"""
if aliases is None:
aliases = [] # depends on [control=['if'], data=['aliases']]
# Create a copy of aliases, since we're modifying it here
aliases = aliases[:]
ret = {'name': name, 'source': source, 'aliases': aliases, 'public': public, 'auto_update': auto_update, 'remote_addr': remote_addr, 'cert': cert, 'key': key, 'verify_cert': verify_cert, 'changes': {}}
image = None
try:
image = __salt__['lxd.image_get_by_alias'](name, remote_addr, cert, key, verify_cert, _raw=True) # depends on [control=['try'], data=[]]
except CommandExecutionError as e:
return _error(ret, six.text_type(e)) # depends on [control=['except'], data=['e']]
except SaltInvocationError as e:
# Image not found
pass # depends on [control=['except'], data=[]]
if image is None:
if __opts__['test']:
# Test is on, just return that we would create the image
msg = 'Would create the image "{0}"'.format(name)
ret['changes'] = {'created': msg}
return _unchanged(ret, msg) # depends on [control=['if'], data=[]]
try:
if source['type'] == 'lxd':
image = __salt__['lxd.image_copy_lxd'](source['name'], src_remote_addr=source['remote_addr'], src_cert=source['cert'], src_key=source['key'], src_verify_cert=source.get('verify_cert', True), remote_addr=remote_addr, cert=cert, key=key, verify_cert=verify_cert, aliases=aliases, public=public, auto_update=auto_update, _raw=True) # depends on [control=['if'], data=[]]
if source['type'] == 'file':
if 'saltenv' not in source:
source['saltenv'] = __env__ # depends on [control=['if'], data=['source']]
image = __salt__['lxd.image_from_file'](source['filename'], remote_addr=remote_addr, cert=cert, key=key, verify_cert=verify_cert, aliases=aliases, public=False if public is None else public, saltenv=source['saltenv'], _raw=True) # depends on [control=['if'], data=[]]
if source['type'] == 'simplestreams':
image = __salt__['lxd.image_from_simplestreams'](source['server'], source['name'], remote_addr=remote_addr, cert=cert, key=key, verify_cert=verify_cert, aliases=aliases, public=False if public is None else public, auto_update=False if auto_update is None else auto_update, _raw=True) # depends on [control=['if'], data=[]]
if source['type'] == 'url':
image = __salt__['lxd.image_from_url'](source['url'], remote_addr=remote_addr, cert=cert, key=key, verify_cert=verify_cert, aliases=aliases, public=False if public is None else public, auto_update=False if auto_update is None else auto_update, _raw=True) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except CommandExecutionError as e:
return _error(ret, six.text_type(e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['image']]
# Sync aliases
if name not in aliases:
aliases.append(name) # depends on [control=['if'], data=['name', 'aliases']]
old_aliases = set([six.text_type(a['name']) for a in image.aliases])
new_aliases = set(map(six.text_type, aliases))
alias_changes = []
# Removed aliases
for k in old_aliases.difference(new_aliases):
if not __opts__['test']:
__salt__['lxd.image_alias_delete'](image, k)
alias_changes.append('Removed alias "{0}"'.format(k)) # depends on [control=['if'], data=[]]
else:
alias_changes.append('Would remove alias "{0}"'.format(k)) # depends on [control=['for'], data=['k']]
# New aliases
for k in new_aliases.difference(old_aliases):
if not __opts__['test']:
__salt__['lxd.image_alias_add'](image, k, '')
alias_changes.append('Added alias "{0}"'.format(k)) # depends on [control=['if'], data=[]]
else:
alias_changes.append('Would add alias "{0}"'.format(k)) # depends on [control=['for'], data=['k']]
if alias_changes:
ret['changes']['aliases'] = alias_changes # depends on [control=['if'], data=[]]
# Set public
if public is not None and image.public != public:
if not __opts__['test']:
ret['changes']['public'] = 'Setting the image public to {0!s}'.format(public)
image.public = public
__salt__['lxd.pylxd_save_object'](image) # depends on [control=['if'], data=[]]
else:
ret['changes']['public'] = 'Would set public to {0!s}'.format(public) # depends on [control=['if'], data=[]]
if __opts__['test'] and ret['changes']:
return _unchanged(ret, 'Would do {0} changes'.format(len(ret['changes'].keys()))) # depends on [control=['if'], data=[]]
return _success(ret, '{0} changes'.format(len(ret['changes'].keys()))) |
def GetMountpoints():
  """Return a dict of mounted filesystems on this system.

  Maps each mount point to a (device, filesystem type) tuple, as
  reported by GetFileSystems().
  """
  return {
      fs.f_mntonname: (fs.f_mntfromname, fs.f_fstypename)
      for fs in GetFileSystems()
  }
constant[List all the filesystems mounted on the system.]
variable[devices] assign[=] dictionary[[], []]
for taget[name[filesys]] in starred[call[name[GetFileSystems], parameter[]]] begin[:]
call[name[devices]][name[filesys].f_mntonname] assign[=] tuple[[<ast.Attribute object at 0x7da1b1b6c850>, <ast.Attribute object at 0x7da1b1b6e290>]]
return[name[devices]] | keyword[def] identifier[GetMountpoints] ():
literal[string]
identifier[devices] ={}
keyword[for] identifier[filesys] keyword[in] identifier[GetFileSystems] ():
identifier[devices] [ identifier[filesys] . identifier[f_mntonname] ]=( identifier[filesys] . identifier[f_mntfromname] , identifier[filesys] . identifier[f_fstypename] )
keyword[return] identifier[devices] | def GetMountpoints():
"""List all the filesystems mounted on the system."""
devices = {}
for filesys in GetFileSystems():
devices[filesys.f_mntonname] = (filesys.f_mntfromname, filesys.f_fstypename) # depends on [control=['for'], data=['filesys']]
return devices |
def build_tfexample_transfored_training_input_fn(schema,
                                                 features,
                                                 analysis_output_dir,
                                                 raw_data_file_pattern,
                                                 training_batch_size,
                                                 num_epochs=None,
                                                 randomize_input=False,
                                                 min_after_dequeue=1,
                                                 reader_num_threads=1,
                                                 allow_smaller_final_batch=True):
  """Creates training input_fn that reads transformed tf.example files.
  Args:
    schema: schema list
    features: features dict
    analysis_output_dir: output folder from analysis
    raw_data_file_pattern: file path, or list of files
    training_batch_size: An int specifying the batch size to use.
    num_epochs: number of epochs to read from the files. Use None to read forever.
    randomize_input: If true, the input rows are read out of order. This
        randomness is limited by the min_after_dequeue value.
    min_after_dequeue: Minimum number elements in the reading queue after a
        dequeue, used to ensure a level of mixing of elements. Only used if
        randomize_input is True.
    reader_num_threads: The number of threads enqueuing data.
    allow_smaller_final_batch: If false, fractional batches at the end of
        training or evaluation are not used.
  Returns:
    An input_fn suitable for training that reads transformed data in tf record
    files of tf.example.
  """
  def transformed_training_input_fn():
    """Training input function that reads transformed data."""
    # Accept either a single file pattern (string) or a list of patterns.
    if isinstance(raw_data_file_pattern, six.string_types):
      filepath_list = [raw_data_file_pattern]
    else:
      filepath_list = raw_data_file_pattern
    # Expand each pattern into the concrete list of matching record files.
    files = []
    for path in filepath_list:
      files.extend(file_io.get_matching_files(path))
    # Queue of input file names; the file order is shuffled only when
    # randomize_input is requested.
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=randomize_input)
    # The transformed tf.example records are stored as gzipped TFRecords.
    options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    # Read up to a batch of serialized examples per dequeue.
    ex_id, ex_str = tf.TFRecordReader(options=options).read_up_to(
        filename_queue, training_batch_size)
    # Capacity rule of thumb from the tf.train.shuffle_batch docs:
    # a few batches of headroom beyond min_after_dequeue.
    queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue
    if randomize_input:
      _, batch_ex_str = tf.train.shuffle_batch(
          tensors=[ex_id, ex_str],
          batch_size=training_batch_size,
          capacity=queue_capacity,
          min_after_dequeue=min_after_dequeue,
          enqueue_many=True,
          num_threads=reader_num_threads,
          allow_smaller_final_batch=allow_smaller_final_batch)
    else:
      _, batch_ex_str = tf.train.batch(
          tensors=[ex_id, ex_str],
          batch_size=training_batch_size,
          capacity=queue_capacity,
          enqueue_many=True,
          num_threads=reader_num_threads,
          allow_smaller_final_batch=allow_smaller_final_batch)
    # Build the tf.parse_example spec from the transformed feature metadata:
    # features with unknown size become sparse, fixed-size become dense.
    feature_spec = {}
    feature_info = get_transformed_feature_info(features, schema)
    for name, info in six.iteritems(feature_info):
      if info['size'] is None:
        feature_spec[name] = tf.VarLenFeature(dtype=info['dtype'])
      else:
        feature_spec[name] = tf.FixedLenFeature(shape=[info['size']], dtype=info['dtype'])
    parsed_tensors = tf.parse_example(batch_ex_str, feature_spec)
    # Expand the dims of non-sparse tensors. This is needed by tf.learn.
    transformed_features = {}
    for k, v in six.iteritems(parsed_tensors):
      if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
        transformed_features[k] = tf.expand_dims(v, -1)
      else:
        # Sparse tensor
        transformed_features[k] = v
    # Apply any image-specific feature engineering on the parsed tensors.
    transformed_features = image_feature_engineering(
        features=features,
        feature_tensors_dict=transformed_features)
    # Remove the target tensor, and return it directly
    target_name = get_target_name(features)
    if not target_name or target_name not in transformed_features:
      raise ValueError('Cannot find target transform in features')
    transformed_target = transformed_features.pop(target_name)
    return transformed_features, transformed_target
  return transformed_training_input_fn
constant[Creates training input_fn that reads transformed tf.example files.
Args:
schema: schema list
features: features dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: numer of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads transformed data in tf record
files of tf.example.
]
def function[transformed_training_input_fn, parameter[]]:
constant[Training input function that reads transformed data.]
if call[name[isinstance], parameter[name[raw_data_file_pattern], name[six].string_types]] begin[:]
variable[filepath_list] assign[=] list[[<ast.Name object at 0x7da1b1121ea0>]]
variable[files] assign[=] list[[]]
for taget[name[path]] in starred[name[filepath_list]] begin[:]
call[name[files].extend, parameter[call[name[file_io].get_matching_files, parameter[name[path]]]]]
variable[filename_queue] assign[=] call[name[tf].train.string_input_producer, parameter[name[files]]]
variable[options] assign[=] call[name[tf].python_io.TFRecordOptions, parameter[]]
<ast.Tuple object at 0x7da1b1122680> assign[=] call[call[name[tf].TFRecordReader, parameter[]].read_up_to, parameter[name[filename_queue], name[training_batch_size]]]
variable[queue_capacity] assign[=] binary_operation[binary_operation[binary_operation[name[reader_num_threads] + constant[3]] * name[training_batch_size]] + name[min_after_dequeue]]
if name[randomize_input] begin[:]
<ast.Tuple object at 0x7da1b1120d00> assign[=] call[name[tf].train.shuffle_batch, parameter[]]
variable[feature_spec] assign[=] dictionary[[], []]
variable[feature_info] assign[=] call[name[get_transformed_feature_info], parameter[name[features], name[schema]]]
for taget[tuple[[<ast.Name object at 0x7da1b1122bc0>, <ast.Name object at 0x7da1b1122830>]]] in starred[call[name[six].iteritems, parameter[name[feature_info]]]] begin[:]
if compare[call[name[info]][constant[size]] is constant[None]] begin[:]
call[name[feature_spec]][name[name]] assign[=] call[name[tf].VarLenFeature, parameter[]]
variable[parsed_tensors] assign[=] call[name[tf].parse_example, parameter[name[batch_ex_str], name[feature_spec]]]
variable[transformed_features] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b2346ce0>, <ast.Name object at 0x7da1b2345240>]]] in starred[call[name[six].iteritems, parameter[name[parsed_tensors]]]] begin[:]
if <ast.BoolOp object at 0x7da1b23457b0> begin[:]
call[name[transformed_features]][name[k]] assign[=] call[name[tf].expand_dims, parameter[name[v], <ast.UnaryOp object at 0x7da1b2344b20>]]
variable[transformed_features] assign[=] call[name[image_feature_engineering], parameter[]]
variable[target_name] assign[=] call[name[get_target_name], parameter[name[features]]]
if <ast.BoolOp object at 0x7da1b2344790> begin[:]
<ast.Raise object at 0x7da1b23475b0>
variable[transformed_target] assign[=] call[name[transformed_features].pop, parameter[name[target_name]]]
return[tuple[[<ast.Name object at 0x7da1b2345930>, <ast.Name object at 0x7da1b2345090>]]]
return[name[transformed_training_input_fn]] | keyword[def] identifier[build_tfexample_transfored_training_input_fn] ( identifier[schema] ,
identifier[features] ,
identifier[analysis_output_dir] ,
identifier[raw_data_file_pattern] ,
identifier[training_batch_size] ,
identifier[num_epochs] = keyword[None] ,
identifier[randomize_input] = keyword[False] ,
identifier[min_after_dequeue] = literal[int] ,
identifier[reader_num_threads] = literal[int] ,
identifier[allow_smaller_final_batch] = keyword[True] ):
literal[string]
keyword[def] identifier[transformed_training_input_fn] ():
literal[string]
keyword[if] identifier[isinstance] ( identifier[raw_data_file_pattern] , identifier[six] . identifier[string_types] ):
identifier[filepath_list] =[ identifier[raw_data_file_pattern] ]
keyword[else] :
identifier[filepath_list] = identifier[raw_data_file_pattern]
identifier[files] =[]
keyword[for] identifier[path] keyword[in] identifier[filepath_list] :
identifier[files] . identifier[extend] ( identifier[file_io] . identifier[get_matching_files] ( identifier[path] ))
identifier[filename_queue] = identifier[tf] . identifier[train] . identifier[string_input_producer] (
identifier[files] , identifier[num_epochs] = identifier[num_epochs] , identifier[shuffle] = identifier[randomize_input] )
identifier[options] = identifier[tf] . identifier[python_io] . identifier[TFRecordOptions] (
identifier[compression_type] = identifier[tf] . identifier[python_io] . identifier[TFRecordCompressionType] . identifier[GZIP] )
identifier[ex_id] , identifier[ex_str] = identifier[tf] . identifier[TFRecordReader] ( identifier[options] = identifier[options] ). identifier[read_up_to] (
identifier[filename_queue] , identifier[training_batch_size] )
identifier[queue_capacity] =( identifier[reader_num_threads] + literal[int] )* identifier[training_batch_size] + identifier[min_after_dequeue]
keyword[if] identifier[randomize_input] :
identifier[_] , identifier[batch_ex_str] = identifier[tf] . identifier[train] . identifier[shuffle_batch] (
identifier[tensors] =[ identifier[ex_id] , identifier[ex_str] ],
identifier[batch_size] = identifier[training_batch_size] ,
identifier[capacity] = identifier[queue_capacity] ,
identifier[min_after_dequeue] = identifier[min_after_dequeue] ,
identifier[enqueue_many] = keyword[True] ,
identifier[num_threads] = identifier[reader_num_threads] ,
identifier[allow_smaller_final_batch] = identifier[allow_smaller_final_batch] )
keyword[else] :
identifier[_] , identifier[batch_ex_str] = identifier[tf] . identifier[train] . identifier[batch] (
identifier[tensors] =[ identifier[ex_id] , identifier[ex_str] ],
identifier[batch_size] = identifier[training_batch_size] ,
identifier[capacity] = identifier[queue_capacity] ,
identifier[enqueue_many] = keyword[True] ,
identifier[num_threads] = identifier[reader_num_threads] ,
identifier[allow_smaller_final_batch] = identifier[allow_smaller_final_batch] )
identifier[feature_spec] ={}
identifier[feature_info] = identifier[get_transformed_feature_info] ( identifier[features] , identifier[schema] )
keyword[for] identifier[name] , identifier[info] keyword[in] identifier[six] . identifier[iteritems] ( identifier[feature_info] ):
keyword[if] identifier[info] [ literal[string] ] keyword[is] keyword[None] :
identifier[feature_spec] [ identifier[name] ]= identifier[tf] . identifier[VarLenFeature] ( identifier[dtype] = identifier[info] [ literal[string] ])
keyword[else] :
identifier[feature_spec] [ identifier[name] ]= identifier[tf] . identifier[FixedLenFeature] ( identifier[shape] =[ identifier[info] [ literal[string] ]], identifier[dtype] = identifier[info] [ literal[string] ])
identifier[parsed_tensors] = identifier[tf] . identifier[parse_example] ( identifier[batch_ex_str] , identifier[feature_spec] )
identifier[transformed_features] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[parsed_tensors] ):
keyword[if] identifier[isinstance] ( identifier[v] , identifier[tf] . identifier[Tensor] ) keyword[and] identifier[v] . identifier[get_shape] (). identifier[ndims] == literal[int] :
identifier[transformed_features] [ identifier[k] ]= identifier[tf] . identifier[expand_dims] ( identifier[v] ,- literal[int] )
keyword[else] :
identifier[transformed_features] [ identifier[k] ]= identifier[v]
identifier[transformed_features] = identifier[image_feature_engineering] (
identifier[features] = identifier[features] ,
identifier[feature_tensors_dict] = identifier[transformed_features] )
identifier[target_name] = identifier[get_target_name] ( identifier[features] )
keyword[if] keyword[not] identifier[target_name] keyword[or] identifier[target_name] keyword[not] keyword[in] identifier[transformed_features] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[transformed_target] = identifier[transformed_features] . identifier[pop] ( identifier[target_name] )
keyword[return] identifier[transformed_features] , identifier[transformed_target]
keyword[return] identifier[transformed_training_input_fn] | def build_tfexample_transfored_training_input_fn(schema, features, analysis_output_dir, raw_data_file_pattern, training_batch_size, num_epochs=None, randomize_input=False, min_after_dequeue=1, reader_num_threads=1, allow_smaller_final_batch=True):
"""Creates training input_fn that reads transformed tf.example files.
Args:
schema: schema list
features: features dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: numer of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads transformed data in tf record
files of tf.example.
"""
def transformed_training_input_fn():
"""Training input function that reads transformed data."""
if isinstance(raw_data_file_pattern, six.string_types):
filepath_list = [raw_data_file_pattern] # depends on [control=['if'], data=[]]
else:
filepath_list = raw_data_file_pattern
files = []
for path in filepath_list:
files.extend(file_io.get_matching_files(path)) # depends on [control=['for'], data=['path']]
filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs, shuffle=randomize_input)
options = tf.python_io.TFRecordOptions(compression_type=tf.python_io.TFRecordCompressionType.GZIP)
(ex_id, ex_str) = tf.TFRecordReader(options=options).read_up_to(filename_queue, training_batch_size)
queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue
if randomize_input:
(_, batch_ex_str) = tf.train.shuffle_batch(tensors=[ex_id, ex_str], batch_size=training_batch_size, capacity=queue_capacity, min_after_dequeue=min_after_dequeue, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch) # depends on [control=['if'], data=[]]
else:
(_, batch_ex_str) = tf.train.batch(tensors=[ex_id, ex_str], batch_size=training_batch_size, capacity=queue_capacity, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch)
feature_spec = {}
feature_info = get_transformed_feature_info(features, schema)
for (name, info) in six.iteritems(feature_info):
if info['size'] is None:
feature_spec[name] = tf.VarLenFeature(dtype=info['dtype']) # depends on [control=['if'], data=[]]
else:
feature_spec[name] = tf.FixedLenFeature(shape=[info['size']], dtype=info['dtype']) # depends on [control=['for'], data=[]]
parsed_tensors = tf.parse_example(batch_ex_str, feature_spec)
# Expand the dims of non-sparse tensors. This is needed by tf.learn.
transformed_features = {}
for (k, v) in six.iteritems(parsed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1) # depends on [control=['if'], data=[]]
else:
# Sparse tensor
transformed_features[k] = v # depends on [control=['for'], data=[]]
transformed_features = image_feature_engineering(features=features, feature_tensors_dict=transformed_features)
# Remove the target tensor, and return it directly
target_name = get_target_name(features)
if not target_name or target_name not in transformed_features:
raise ValueError('Cannot find target transform in features') # depends on [control=['if'], data=[]]
transformed_target = transformed_features.pop(target_name)
return (transformed_features, transformed_target)
return transformed_training_input_fn |
def translate_name(name):
    """
    Convert names with underscores into camelcase.

    For example:
        "num_rows" => "numRows"
        "very_long_json_name" => "veryLongJsonName"
        "build_GBM_model" => "buildGbmModel"
        "KEY" => "key"
        "middle___underscores" => "middleUnderscores"
        "_exclude_fields" => "_excludeFields" (retain initial/trailing underscores)
        "__http_status__" => "__httpStatus__"

    Unlike the previous index-walking implementation, an empty name or a
    name consisting only of underscores is returned unchanged instead of
    raising IndexError.

    :param name: name to be converted
    :returns: camelcase version of ``name``
    """
    core = name.strip("_")
    if not core:
        # "" or "___": there is nothing to camelcase; keep the input as-is.
        return name
    # Underscores at either end of the name are preserved verbatim.
    n_leading = len(name) - len(name.lstrip("_"))
    n_trailing = len(name) - len(name.rstrip("_"))
    # Runs of internal underscores ("middle___underscores") produce empty
    # split parts; dropping them collapses the run, matching the old behavior.
    words = [w for w in core.split("_") if w]
    # First word is fully lowercased; the rest get str.capitalize, which
    # also lowercases their tails ("GBM" -> "Gbm").
    camel = words[0].lower() + "".join(w.capitalize() for w in words[1:])
    return "_" * n_leading + camel + "_" * n_trailing
constant[
Convert names with underscores into camelcase.
For example:
"num_rows" => "numRows"
"very_long_json_name" => "veryLongJsonName"
"build_GBM_model" => "buildGbmModel"
"KEY" => "key"
"middle___underscores" => "middleUnderscores"
"_exclude_fields" => "_excludeFields" (retain initial/trailing underscores)
"__http_status__" => "__httpStatus__"
:param name: name to be converted
]
variable[parts] assign[=] call[name[name].split, parameter[constant[_]]]
variable[i] assign[=] constant[0]
while compare[call[name[parts]][name[i]] equal[==] constant[]] begin[:]
call[name[parts]][name[i]] assign[=] constant[_]
<ast.AugAssign object at 0x7da18c4cdd20>
call[name[parts]][name[i]] assign[=] call[call[name[parts]][name[i]].lower, parameter[]]
for taget[name[j]] in starred[call[name[range], parameter[binary_operation[name[i] + constant[1]], call[name[len], parameter[name[parts]]]]]] begin[:]
call[name[parts]][name[j]] assign[=] call[call[name[parts]][name[j]].capitalize, parameter[]]
variable[i] assign[=] binary_operation[call[name[len], parameter[name[parts]]] - constant[1]]
while compare[call[name[parts]][name[i]] equal[==] constant[]] begin[:]
call[name[parts]][name[i]] assign[=] constant[_]
<ast.AugAssign object at 0x7da18fe90820>
return[call[constant[].join, parameter[name[parts]]]] | keyword[def] identifier[translate_name] ( identifier[name] ):
literal[string]
identifier[parts] = identifier[name] . identifier[split] ( literal[string] )
identifier[i] = literal[int]
keyword[while] identifier[parts] [ identifier[i] ]== literal[string] :
identifier[parts] [ identifier[i] ]= literal[string]
identifier[i] += literal[int]
identifier[parts] [ identifier[i] ]= identifier[parts] [ identifier[i] ]. identifier[lower] ()
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] + literal[int] , identifier[len] ( identifier[parts] )):
identifier[parts] [ identifier[j] ]= identifier[parts] [ identifier[j] ]. identifier[capitalize] ()
identifier[i] = identifier[len] ( identifier[parts] )- literal[int]
keyword[while] identifier[parts] [ identifier[i] ]== literal[string] :
identifier[parts] [ identifier[i] ]= literal[string]
identifier[i] -= literal[int]
keyword[return] literal[string] . identifier[join] ( identifier[parts] ) | def translate_name(name):
"""
Convert names with underscores into camelcase.
For example:
"num_rows" => "numRows"
"very_long_json_name" => "veryLongJsonName"
"build_GBM_model" => "buildGbmModel"
"KEY" => "key"
"middle___underscores" => "middleUnderscores"
"_exclude_fields" => "_excludeFields" (retain initial/trailing underscores)
"__http_status__" => "__httpStatus__"
:param name: name to be converted
"""
parts = name.split('_')
i = 0
while parts[i] == '':
parts[i] = '_'
i += 1 # depends on [control=['while'], data=[]]
parts[i] = parts[i].lower()
for j in range(i + 1, len(parts)):
parts[j] = parts[j].capitalize() # depends on [control=['for'], data=['j']]
i = len(parts) - 1
while parts[i] == '':
parts[i] = '_'
i -= 1 # depends on [control=['while'], data=[]]
return ''.join(parts) |
def get_credentials(self, **kwargs):
    """Resolve and store the Polarion credentials on this instance.

    Each value is looked up in order: explicit keyword argument,
    environment variable, then the loaded config. Raises
    Dump2PolarionException when either credential cannot be resolved.
    """
    login = kwargs.get("user")
    if not login:
        login = os.environ.get("POLARION_USERNAME")
    if not login:
        login = self.config.get("username")

    pwd = kwargs.get("password")
    if not pwd:
        pwd = os.environ.get("POLARION_PASSWORD")
    if not pwd:
        pwd = self.config.get("password")

    if not (login and pwd):
        raise Dump2PolarionException("Failed to submit to Polarion - missing credentials")
    self.credentials = (login, pwd)
constant[Sets credentails.]
variable[login] assign[=] <ast.BoolOp object at 0x7da1b207e4a0>
variable[pwd] assign[=] <ast.BoolOp object at 0x7da1b207e2c0>
if <ast.UnaryOp object at 0x7da1b207c700> begin[:]
<ast.Raise object at 0x7da1b207c910>
name[self].credentials assign[=] tuple[[<ast.Name object at 0x7da1b207e470>, <ast.Name object at 0x7da1b207f280>]] | keyword[def] identifier[get_credentials] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[login] =(
identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ) keyword[or] identifier[self] . identifier[config] . identifier[get] ( literal[string] )
)
identifier[pwd] =(
identifier[kwargs] . identifier[get] ( literal[string] )
keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[or] identifier[self] . identifier[config] . identifier[get] ( literal[string] )
)
keyword[if] keyword[not] identifier[all] ([ identifier[login] , identifier[pwd] ]):
keyword[raise] identifier[Dump2PolarionException] ( literal[string] )
identifier[self] . identifier[credentials] =( identifier[login] , identifier[pwd] ) | def get_credentials(self, **kwargs):
"""Sets credentails."""
login = kwargs.get('user') or os.environ.get('POLARION_USERNAME') or self.config.get('username')
pwd = kwargs.get('password') or os.environ.get('POLARION_PASSWORD') or self.config.get('password')
if not all([login, pwd]):
raise Dump2PolarionException('Failed to submit to Polarion - missing credentials') # depends on [control=['if'], data=[]]
self.credentials = (login, pwd) |
def render_countryfield(field, attrs):
    """
    Render a custom ChoiceField specific for CountryFields.
    """
    # Skip the first (empty/placeholder) choice; each remaining entry
    # becomes a (code, lowercased code, label) triple for the template.
    country_rows = (
        (code, code.lower(), label)
        for code, label in field.field._choices[1:]
    )
    rendered_countries = format_html_join(
        "", wrappers.COUNTRY_TEMPLATE, country_rows
    )
    # Delegate to the generic `ChoiceField` renderer with all countries.
    return render_choicefield(field, attrs, rendered_countries)
constant[
Render a custom ChoiceField specific for CountryFields.
]
variable[choices] assign[=] <ast.GeneratorExp object at 0x7da1b2829c60>
return[call[name[render_choicefield], parameter[name[field], name[attrs], call[name[format_html_join], parameter[constant[], name[wrappers].COUNTRY_TEMPLATE, name[choices]]]]]] | keyword[def] identifier[render_countryfield] ( identifier[field] , identifier[attrs] ):
literal[string]
identifier[choices] =(( identifier[k] , identifier[k] . identifier[lower] (), identifier[v] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[field] . identifier[field] . identifier[_choices] [ literal[int] :])
keyword[return] identifier[render_choicefield] (
identifier[field] , identifier[attrs] , identifier[format_html_join] ( literal[string] , identifier[wrappers] . identifier[COUNTRY_TEMPLATE] , identifier[choices] )
) | def render_countryfield(field, attrs):
"""
Render a custom ChoiceField specific for CountryFields.
"""
choices = ((k, k.lower(), v) for (k, v) in field.field._choices[1:]) # Render a `ChoiceField` with all countries
return render_choicefield(field, attrs, format_html_join('', wrappers.COUNTRY_TEMPLATE, choices)) |
def _maybe_assert_valid_sample(self, counts):
  """Validate `counts` (when `validate_args` is set) and return it.

  Checks that entries are non-negative integers and that the last
  dimension sums to `self.total_count`, attaching the check as a
  control dependency on the returned tensor.
  """
  if not self.validate_args:
    # Validation disabled: pass the input through untouched.
    return counts
  checked_counts = distribution_util.embed_check_nonnegative_integer_form(counts)
  totals_match = assert_util.assert_equal(
      self.total_count,
      tf.reduce_sum(input_tensor=checked_counts, axis=-1),
      message="counts last-dimension must sum to `self.total_count`")
  return distribution_util.with_dependencies([totals_match], checked_counts)
constant[Check counts for proper shape, values, then return tensor version.]
if <ast.UnaryOp object at 0x7da1b02354e0> begin[:]
return[name[counts]]
variable[counts] assign[=] call[name[distribution_util].embed_check_nonnegative_integer_form, parameter[name[counts]]]
return[call[name[distribution_util].with_dependencies, parameter[list[[<ast.Call object at 0x7da1b0236590>]], name[counts]]]] | keyword[def] identifier[_maybe_assert_valid_sample] ( identifier[self] , identifier[counts] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[validate_args] :
keyword[return] identifier[counts]
identifier[counts] = identifier[distribution_util] . identifier[embed_check_nonnegative_integer_form] ( identifier[counts] )
keyword[return] identifier[distribution_util] . identifier[with_dependencies] ([
identifier[assert_util] . identifier[assert_equal] (
identifier[self] . identifier[total_count] ,
identifier[tf] . identifier[reduce_sum] ( identifier[input_tensor] = identifier[counts] , identifier[axis] =- literal[int] ),
identifier[message] = literal[string] ),
], identifier[counts] ) | def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts # depends on [control=['if'], data=[]]
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return distribution_util.with_dependencies([assert_util.assert_equal(self.total_count, tf.reduce_sum(input_tensor=counts, axis=-1), message='counts last-dimension must sum to `self.total_count`')], counts) |
def set_display_mode(self, zoom, layout='continuous'):
    """Set the display mode used when the document is opened in a viewer.

    Args:
        zoom: one of 'fullpage', 'fullwidth', 'real', 'default', or a
            number interpreted as a zoom percentage.
        layout: one of 'single', 'continuous', 'two', 'default'.

    Invalid string values are reported through ``self.error``.
    """
    # Any non-string zoom is accepted as a numeric percentage, so only
    # string values are validated against the keyword list.
    # NOTE: `basestring` keeps Python 2 str/unicode compatibility (this
    # file targets Python 2).
    if zoom in ('fullpage', 'fullwidth', 'real', 'default') \
            or not isinstance(zoom, basestring):
        self.zoom_mode = zoom
    else:
        self.error('Incorrect zoom display mode: ' + zoom)
    if layout in ('single', 'continuous', 'two', 'default'):
        self.layout_mode = layout
    else:
        self.error('Incorrect layout display mode: ' + layout)
constant[Set display mode in viewer
The "zoom" argument may be 'fullpage', 'fullwidth', 'real',
'default', or a number, interpreted as a percentage.]
if <ast.BoolOp object at 0x7da20e961690> begin[:]
name[self].zoom_mode assign[=] name[zoom]
if <ast.BoolOp object at 0x7da20e9619f0> begin[:]
name[self].layout_mode assign[=] name[layout] | keyword[def] identifier[set_display_mode] ( identifier[self] , identifier[zoom] , identifier[layout] = literal[string] ):
literal[string]
keyword[if] ( identifier[zoom] == literal[string] keyword[or] identifier[zoom] == literal[string] keyword[or] identifier[zoom] == literal[string] keyword[or] identifier[zoom] == literal[string] keyword[or] keyword[not] identifier[isinstance] ( identifier[zoom] , identifier[basestring] )):
identifier[self] . identifier[zoom_mode] = identifier[zoom]
keyword[else] :
identifier[self] . identifier[error] ( literal[string] + identifier[zoom] )
keyword[if] ( identifier[layout] == literal[string] keyword[or] identifier[layout] == literal[string] keyword[or] identifier[layout] == literal[string] keyword[or] identifier[layout] == literal[string] ):
identifier[self] . identifier[layout_mode] = identifier[layout]
keyword[else] :
identifier[self] . identifier[error] ( literal[string] + identifier[layout] ) | def set_display_mode(self, zoom, layout='continuous'):
"""Set display mode in viewer
The "zoom" argument may be 'fullpage', 'fullwidth', 'real',
'default', or a number, interpreted as a percentage."""
if zoom == 'fullpage' or zoom == 'fullwidth' or zoom == 'real' or (zoom == 'default') or (not isinstance(zoom, basestring)):
self.zoom_mode = zoom # depends on [control=['if'], data=[]]
else:
self.error('Incorrect zoom display mode: ' + zoom)
if layout == 'single' or layout == 'continuous' or layout == 'two' or (layout == 'default'):
self.layout_mode = layout # depends on [control=['if'], data=[]]
else:
self.error('Incorrect layout display mode: ' + layout) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.